From 783c253fa5c79c2012d745531a34b18e89efe24f Mon Sep 17 00:00:00 2001 From: Igor Pecovnik Date: Wed, 22 Aug 2018 10:56:48 +0200 Subject: [PATCH] Add upstream patches for RK3328 --- config/kernel/linux-rk3328-default.config | 2 +- .../rk3328-default/04-patch-4.4.143-144.patch | 4228 +++++++++++++++++ .../rk3328-default/04-patch-4.4.144-145.patch | 1006 ++++ .../rk3328-default/04-patch-4.4.145-146.patch | 2741 +++++++++++ .../rk3328-default/04-patch-4.4.146-147.patch | 254 + .../rk3328-default/04-patch-4.4.147-148.patch | 1873 ++++++++ .../rk3328-default/04-patch-4.4.148-149.patch | 1201 +++++ .../rk3328-default/04-patch-4.4.149-150.patch | 36 + 8 files changed, 11340 insertions(+), 1 deletion(-) create mode 100644 patch/kernel/rk3328-default/04-patch-4.4.143-144.patch create mode 100644 patch/kernel/rk3328-default/04-patch-4.4.144-145.patch create mode 100644 patch/kernel/rk3328-default/04-patch-4.4.145-146.patch create mode 100644 patch/kernel/rk3328-default/04-patch-4.4.146-147.patch create mode 100644 patch/kernel/rk3328-default/04-patch-4.4.147-148.patch create mode 100644 patch/kernel/rk3328-default/04-patch-4.4.148-149.patch create mode 100644 patch/kernel/rk3328-default/04-patch-4.4.149-150.patch diff --git a/config/kernel/linux-rk3328-default.config b/config/kernel/linux-rk3328-default.config index 3bb357688..fd0dd6cfa 100644 --- a/config/kernel/linux-rk3328-default.config +++ b/config/kernel/linux-rk3328-default.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 4.4.143 Kernel Configuration +# Linux/arm64 4.4.150 Kernel Configuration # CONFIG_ARM64=y CONFIG_64BIT=y diff --git a/patch/kernel/rk3328-default/04-patch-4.4.143-144.patch b/patch/kernel/rk3328-default/04-patch-4.4.143-144.patch new file mode 100644 index 000000000..d0155cc70 --- /dev/null +++ b/patch/kernel/rk3328-default/04-patch-4.4.143-144.patch @@ -0,0 +1,4228 @@ +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index ea6a043f5beb..50f95689ab38 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -276,6 +276,7 @@ What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/meltdown + /sys/devices/system/cpu/vulnerabilities/spectre_v1 + /sys/devices/system/cpu/vulnerabilities/spectre_v2 ++ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass + Date: January 2018 + Contact: Linux kernel mailing list + Description: Information about CPU vulnerabilities +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index e60d0b5809c1..3fd53e193b7f 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -2460,6 +2460,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + allow data leaks with this option, which is equivalent + to spectre_v2=off. + ++ nospec_store_bypass_disable ++ [HW] Disable all mitigations for the Speculative Store Bypass vulnerability ++ + noxsave [BUGS=X86] Disables x86 extended register state save + and restore using xsave. The kernel will fallback to + enabling legacy floating-point and sse state. +@@ -3623,6 +3626,48 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + Not specifying this option is equivalent to + spectre_v2=auto. 
+ ++ spec_store_bypass_disable= ++ [HW] Control Speculative Store Bypass (SSB) Disable mitigation ++ (Speculative Store Bypass vulnerability) ++ ++ Certain CPUs are vulnerable to an exploit against a ++ common industry wide performance optimization known ++ as "Speculative Store Bypass" in which recent stores ++ to the same memory location may not be observed by ++ later loads during speculative execution. The idea ++ is that such stores are unlikely and that they can ++ be detected prior to instruction retirement at the ++ end of a particular speculation execution window. ++ ++ In vulnerable processors, the speculatively forwarded ++ store can be used in a cache side channel attack, for ++ example to read memory to which the attacker does not ++ directly have access (e.g. inside sandboxed code). ++ ++ This parameter controls whether the Speculative Store ++ Bypass optimization is used. ++ ++ on - Unconditionally disable Speculative Store Bypass ++ off - Unconditionally enable Speculative Store Bypass ++ auto - Kernel detects whether the CPU model contains an ++ implementation of Speculative Store Bypass and ++ picks the most appropriate mitigation. If the ++ CPU is not vulnerable, "off" is selected. If the ++ CPU is vulnerable the default mitigation is ++ architecture and Kconfig dependent. See below. ++ prctl - Control Speculative Store Bypass per thread ++ via prctl. Speculative Store Bypass is enabled ++ for a process by default. The state of the control ++ is inherited on fork. ++ seccomp - Same as "prctl" above, but all seccomp threads ++ will disable SSB unless they explicitly opt out. ++ ++ Not specifying this option is equivalent to ++ spec_store_bypass_disable=auto. ++ ++ Default mitigations: ++ X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl" ++ + spia_io_base= [HW,MTD] + spia_fio_base= + spia_pedr= +diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt +new file mode 100644 +index 000000000000..32f3d55c54b7 +--- /dev/null ++++ b/Documentation/spec_ctrl.txt +@@ -0,0 +1,94 @@ ++=================== ++Speculation Control ++=================== ++ ++Quite some CPUs have speculation-related misfeatures which are in ++fact vulnerabilities causing data leaks in various forms even across ++privilege domains. ++ ++The kernel provides mitigation for such vulnerabilities in various ++forms. Some of these mitigations are compile-time configurable and some ++can be supplied on the kernel command line. ++ ++There is also a class of mitigations which are very expensive, but they can ++be restricted to a certain set of processes or tasks in controlled ++environments. The mechanism to control these mitigations is via ++:manpage:`prctl(2)`. ++ ++There are two prctl options which are related to this: ++ ++ * PR_GET_SPECULATION_CTRL ++ ++ * PR_SET_SPECULATION_CTRL ++ ++PR_GET_SPECULATION_CTRL ++----------------------- ++ ++PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature ++which is selected with arg2 of prctl(2). The return value uses bits 0-3 with ++the following meaning: ++ ++==== ===================== =================================================== ++Bit Define Description ++==== ===================== =================================================== ++0 PR_SPEC_PRCTL Mitigation can be controlled per task by ++ PR_SET_SPECULATION_CTRL. ++1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is ++ disabled. ++2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is ++ enabled.
++3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A ++ subsequent prctl(..., PR_SPEC_ENABLE) will fail. ++==== ===================== =================================================== ++ ++If all bits are 0 the CPU is not affected by the speculation misfeature. ++ ++If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is ++available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation ++misfeature will fail. ++ ++PR_SET_SPECULATION_CTRL ++----------------------- ++ ++PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which ++is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand ++in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or ++PR_SPEC_FORCE_DISABLE. ++ ++Common error codes ++------------------ ++======= ================================================================= ++Value Meaning ++======= ================================================================= ++EINVAL The prctl is not implemented by the architecture or unused ++ prctl(2) arguments are not 0. ++ ++ENODEV arg2 is selecting an unsupported speculation misfeature. ++======= ================================================================= ++ ++PR_SET_SPECULATION_CTRL error codes ++----------------------------------- ++======= ================================================================= ++Value Meaning ++======= ================================================================= ++0 Success ++ ++ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor ++ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE. ++ ++ENXIO Control of the selected speculation misfeature is not possible. ++ See PR_GET_SPECULATION_CTRL. ++ ++EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller ++ tried to enable it again.
++======= ================================================================= ++ ++Speculation misfeature controls ++------------------------------- ++- PR_SPEC_STORE_BYPASS: Speculative Store Bypass ++ ++ Invocations: ++ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0); ++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0); ++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0); ++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0); +diff --git a/Makefile b/Makefile +index 54690fee0485..63f3e2438a26 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 143 ++SUBLEVEL = 144 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h +index 429957f1c236..8f1145ed0046 100644 +--- a/arch/arc/include/asm/page.h ++++ b/arch/arc/include/asm/page.h +@@ -102,7 +102,7 @@ typedef pte_t * pgtable_t; + #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) + + /* Default Permissions for stack/heaps pages (Non Executable) */ +-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) ++#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + #define WANT_PAGE_VIRTUAL 1 + +diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h +index e5fec320f158..c07d7b0a4058 100644 +--- a/arch/arc/include/asm/pgtable.h ++++ b/arch/arc/include/asm/pgtable.h +@@ -372,7 +372,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, + + /* Decode a PTE containing swap "identifier "into constituents */ + #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f) +-#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13) ++#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13) + + /* NOPs, to keep generic kernel happy */ + #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S +index d03bf0e28b8b..48c27c3fdfdb 100644 +--- a/arch/x86/entry/entry_64_compat.S ++++ b/arch/x86/entry/entry_64_compat.S +@@ -79,24 +79,33 @@ ENTRY(entry_SYSENTER_compat) + ASM_CLAC /* Clear AC after saving FLAGS */ + + pushq $__USER32_CS /* pt_regs->cs */ +- xorq %r8,%r8 +- pushq %r8 /* pt_regs->ip = 0 (placeholder) */ ++ pushq $0 /* pt_regs->ip = 0 (placeholder) */ + pushq %rax /* pt_regs->orig_ax */ + pushq %rdi /* pt_regs->di */ + pushq %rsi /* pt_regs->si */ + pushq %rdx /* pt_regs->dx */ + pushq %rcx /* pt_regs->cx */ + pushq $-ENOSYS /* pt_regs->ax */ +- pushq %r8 /* pt_regs->r8 = 0 */ +- pushq %r8 /* pt_regs->r9 = 0 */ +- pushq %r8 /* pt_regs->r10 = 0 */ +- pushq %r8 /* pt_regs->r11 = 0 */ ++ pushq $0 /* pt_regs->r8 = 0 */ ++ xorq %r8, %r8 /* nospec r8 */ ++ pushq $0 /* pt_regs->r9 = 0 */ ++ xorq %r9, %r9 /* nospec r9 */ ++ pushq $0 /* pt_regs->r10 = 0 */ ++ xorq %r10, %r10 /* nospec r10 */ ++ pushq $0 /* pt_regs->r11 = 0 */ ++ xorq %r11, %r11 /* nospec r11 */ + pushq %rbx /* pt_regs->rbx */ ++ xorl %ebx, %ebx /* nospec rbx */ + pushq %rbp /* pt_regs->rbp (will be overwritten) */ +- pushq %r8 /* pt_regs->r12 = 0 */ +- pushq %r8 /* pt_regs->r13 = 0 */ +- pushq %r8 /* pt_regs->r14 = 0 */ +- pushq %r8 /* pt_regs->r15 = 0 */ ++ xorl %ebp, %ebp /* nospec rbp */ ++ pushq $0 /* pt_regs->r12 = 0 */ ++ xorq %r12, %r12 /* nospec r12 */ ++ pushq $0 /* pt_regs->r13 = 0 */ ++ xorq %r13, %r13 /* nospec r13 */ ++ pushq $0 /* pt_regs->r14 
= 0 */ ++ xorq %r14, %r14 /* nospec r14 */ ++ pushq $0 /* pt_regs->r15 = 0 */ ++ xorq %r15, %r15 /* nospec r15 */ + cld + + /* +@@ -185,17 +194,26 @@ ENTRY(entry_SYSCALL_compat) + pushq %rdx /* pt_regs->dx */ + pushq %rbp /* pt_regs->cx (stashed in bp) */ + pushq $-ENOSYS /* pt_regs->ax */ +- xorq %r8,%r8 +- pushq %r8 /* pt_regs->r8 = 0 */ +- pushq %r8 /* pt_regs->r9 = 0 */ +- pushq %r8 /* pt_regs->r10 = 0 */ +- pushq %r8 /* pt_regs->r11 = 0 */ ++ pushq $0 /* pt_regs->r8 = 0 */ ++ xorq %r8, %r8 /* nospec r8 */ ++ pushq $0 /* pt_regs->r9 = 0 */ ++ xorq %r9, %r9 /* nospec r9 */ ++ pushq $0 /* pt_regs->r10 = 0 */ ++ xorq %r10, %r10 /* nospec r10 */ ++ pushq $0 /* pt_regs->r11 = 0 */ ++ xorq %r11, %r11 /* nospec r11 */ + pushq %rbx /* pt_regs->rbx */ ++ xorl %ebx, %ebx /* nospec rbx */ + pushq %rbp /* pt_regs->rbp (will be overwritten) */ +- pushq %r8 /* pt_regs->r12 = 0 */ +- pushq %r8 /* pt_regs->r13 = 0 */ +- pushq %r8 /* pt_regs->r14 = 0 */ +- pushq %r8 /* pt_regs->r15 = 0 */ ++ xorl %ebp, %ebp /* nospec rbp */ ++ pushq $0 /* pt_regs->r12 = 0 */ ++ xorq %r12, %r12 /* nospec r12 */ ++ pushq $0 /* pt_regs->r13 = 0 */ ++ xorq %r13, %r13 /* nospec r13 */ ++ pushq $0 /* pt_regs->r14 = 0 */ ++ xorq %r14, %r14 /* nospec r14 */ ++ pushq $0 /* pt_regs->r15 = 0 */ ++ xorq %r15, %r15 /* nospec r15 */ + + /* + * User mode is traced as though IRQs are on, and SYSENTER +@@ -292,17 +310,26 @@ ENTRY(entry_INT80_compat) + pushq %rdx /* pt_regs->dx */ + pushq %rcx /* pt_regs->cx */ + pushq $-ENOSYS /* pt_regs->ax */ +- xorq %r8,%r8 +- pushq %r8 /* pt_regs->r8 = 0 */ +- pushq %r8 /* pt_regs->r9 = 0 */ +- pushq %r8 /* pt_regs->r10 = 0 */ +- pushq %r8 /* pt_regs->r11 = 0 */ ++ pushq $0 /* pt_regs->r8 = 0 */ ++ xorq %r8, %r8 /* nospec r8 */ ++ pushq $0 /* pt_regs->r9 = 0 */ ++ xorq %r9, %r9 /* nospec r9 */ ++ pushq $0 /* pt_regs->r10 = 0 */ ++ xorq %r10, %r10 /* nospec r10 */ ++ pushq $0 /* pt_regs->r11 = 0 */ ++ xorq %r11, %r11 /* nospec r11 */ + pushq %rbx /* pt_regs->rbx */ ++ xorl %ebx, %ebx /* nospec rbx */ + pushq %rbp /* pt_regs->rbp */ ++ xorl %ebp, %ebp /* nospec rbp */ + pushq %r12 /* pt_regs->r12 */ ++ xorq %r12, %r12 /* nospec r12 */ + pushq %r13 /* pt_regs->r13 */ ++ xorq %r13, %r13 /* nospec r13 */ + pushq %r14 /* pt_regs->r14 */ ++ xorq %r14, %r14 /* nospec r14 */ + pushq %r15 /* pt_regs->r15 */ ++ xorq %r15, %r15 /* nospec r15 */ + cld + + /* +diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h +index 20370c6db74b..3d1ec41ae09a 100644 +--- a/arch/x86/include/asm/apm.h ++++ b/arch/x86/include/asm/apm.h +@@ -6,6 +6,8 @@ + #ifndef _ASM_X86_MACH_DEFAULT_APM_H + #define _ASM_X86_MACH_DEFAULT_APM_H + ++#include ++ + #ifdef APM_ZERO_SEGS + # define APM_DO_ZERO_SEGS \ + "pushl %%ds\n\t" \ +@@ -31,6 +33,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, + * N.B. We do NOT need a cld after the BIOS call + * because we always save and restore the flags. + */ ++ firmware_restrict_branch_speculation_start(); + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +@@ -43,6 +46,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, + "=S" (*esi) + : "a" (func), "b" (ebx_in), "c" (ecx_in) + : "memory", "cc"); ++ firmware_restrict_branch_speculation_end(); + } + + static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, +@@ -55,6 +59,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, + * N.B. We do NOT need a cld after the BIOS call + * because we always save and restore the flags. 
+ */ ++ firmware_restrict_branch_speculation_start(); + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +@@ -67,6 +72,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, + "=S" (si) + : "a" (func), "b" (ebx_in), "c" (ecx_in) + : "memory", "cc"); ++ firmware_restrict_branch_speculation_end(); + return error; + } + +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h +index e3a6f66d288c..7f5dcb64cedb 100644 +--- a/arch/x86/include/asm/barrier.h ++++ b/arch/x86/include/asm/barrier.h +@@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, + + asm volatile ("cmp %1,%2; sbb %0,%0;" + :"=r" (mask) +- :"r"(size),"r" (index) ++ :"g"(size),"r" (index) + :"cc"); + return mask; + } +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h +index dd0089841a0f..d72c1db64679 100644 +--- a/arch/x86/include/asm/cpufeature.h ++++ b/arch/x86/include/asm/cpufeature.h +@@ -28,6 +28,7 @@ enum cpuid_leafs + CPUID_8000_000A_EDX, + CPUID_7_ECX, + CPUID_8000_0007_EBX, ++ CPUID_7_EDX, + }; + + #ifdef CONFIG_X86_FEATURE_NAMES +@@ -78,8 +79,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \ ++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \ + REQUIRED_MASK_CHECK || \ +- BUILD_BUG_ON_ZERO(NCAPINTS != 18)) ++ BUILD_BUG_ON_ZERO(NCAPINTS != 19)) + + #define DISABLED_MASK_BIT_SET(feature_bit) \ + ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ +@@ -100,8 +102,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \ ++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \ + DISABLED_MASK_CHECK || \ +- BUILD_BUG_ON_ZERO(NCAPINTS != 18)) ++ BUILD_BUG_ON_ZERO(NCAPINTS != 19)) + + #define cpu_has(c, bit) \ + (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 205ce70c1d6c..f4b175db70f4 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -12,7 +12,7 @@ + /* + * Defines x86 CPU feature bits + */ +-#define NCAPINTS 18 /* N 32-bit words worth of info */ ++#define NCAPINTS 19 /* N 32-bit words worth of info */ + #define NBUGINTS 1 /* N 32-bit bug flags */ + + /* +@@ -194,13 +194,28 @@ + #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ + + #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ +-#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ ++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ ++ ++#define X86_FEATURE_RETPOLINE ( 7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */ ++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */ ++ ++#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ ++#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ + +-#define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */ +-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */ + /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */ + #define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */ + ++#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled*/ ++#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ ++#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. 
*/ ++#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation */ ++ ++#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ ++#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ ++#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ ++ ++ + /* Virtualization flags: Linux defined, word 8 */ + #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ + #define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ +@@ -251,6 +266,10 @@ + + /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ + #define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ ++#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ ++#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ ++#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ + + /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ + #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ +@@ -285,6 +304,15 @@ + #define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ + #define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ + ++ ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ ++#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ ++#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ ++#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ ++#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ ++#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ ++ + /* + * BUG word(s) + */ +@@ -302,5 +330,6 @@ + #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ + #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ + #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ ++#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h +index 21c5ac15657b..1f8cca459c6c 100644 +--- a/arch/x86/include/asm/disabled-features.h ++++ b/arch/x86/include/asm/disabled-features.h +@@ -59,6 +59,7 @@ + #define DISABLED_MASK15 0 + #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) + #define DISABLED_MASK17 0 +-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) ++#define DISABLED_MASK18 0 ++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) + + #endif /* _ASM_X86_DISABLED_FEATURES_H */ +diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h +index 0010c78c4998..7e5a2ffb6938 100644 +--- a/arch/x86/include/asm/efi.h ++++ b/arch/x86/include/asm/efi.h +@@ -3,6 +3,7 @@ + + #include + #include ++#include + + /* + * We map the EFI regions needed for runtime services non-contiguously, +@@ -39,8 +40,10 @@ extern unsigned long asmlinkage 
efi_call_phys(void *, ...); + ({ \ + efi_status_t __s; \ + kernel_fpu_begin(); \ ++ firmware_restrict_branch_speculation_start(); \ + __s = ((efi_##f##_t __attribute__((regparm(0)))*) \ + efi.systab->runtime->f)(args); \ ++ firmware_restrict_branch_speculation_end(); \ + kernel_fpu_end(); \ + __s; \ + }) +@@ -49,8 +52,10 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...); + #define __efi_call_virt(f, args...) \ + ({ \ + kernel_fpu_begin(); \ ++ firmware_restrict_branch_speculation_start(); \ + ((efi_##f##_t __attribute__((regparm(0)))*) \ + efi.systab->runtime->f)(args); \ ++ firmware_restrict_branch_speculation_end(); \ + kernel_fpu_end(); \ + }) + +@@ -71,7 +76,9 @@ extern u64 asmlinkage efi_call(void *fp, ...); + efi_sync_low_kernel_mappings(); \ + preempt_disable(); \ + __kernel_fpu_begin(); \ ++ firmware_restrict_branch_speculation_start(); \ + __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \ ++ firmware_restrict_branch_speculation_end(); \ + __kernel_fpu_end(); \ + preempt_enable(); \ + __s; \ +diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h +index 6999f7d01a0d..e13ff5a14633 100644 +--- a/arch/x86/include/asm/intel-family.h ++++ b/arch/x86/include/asm/intel-family.h +@@ -12,6 +12,7 @@ + */ + + #define INTEL_FAM6_CORE_YONAH 0x0E ++ + #define INTEL_FAM6_CORE2_MEROM 0x0F + #define INTEL_FAM6_CORE2_MEROM_L 0x16 + #define INTEL_FAM6_CORE2_PENRYN 0x17 +@@ -20,6 +21,7 @@ + #define INTEL_FAM6_NEHALEM 0x1E + #define INTEL_FAM6_NEHALEM_EP 0x1A + #define INTEL_FAM6_NEHALEM_EX 0x2E ++ + #define INTEL_FAM6_WESTMERE 0x25 + #define INTEL_FAM6_WESTMERE2 0x1F + #define INTEL_FAM6_WESTMERE_EP 0x2C +@@ -36,9 +38,9 @@ + #define INTEL_FAM6_HASWELL_GT3E 0x46 + + #define INTEL_FAM6_BROADWELL_CORE 0x3D +-#define INTEL_FAM6_BROADWELL_XEON_D 0x56 + #define INTEL_FAM6_BROADWELL_GT3E 0x47 + #define INTEL_FAM6_BROADWELL_X 0x4F ++#define INTEL_FAM6_BROADWELL_XEON_D 0x56 + + #define INTEL_FAM6_SKYLAKE_MOBILE 0x4E + #define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E +@@ -56,13 +58,15 @@ + #define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */ + #define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */ + #define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */ +-#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */ +-#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */ ++#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */ ++#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Annidale */ + #define INTEL_FAM6_ATOM_GOLDMONT 0x5C + #define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ ++#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A + + /* Xeon Phi */ + + #define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ ++#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */ + + #endif /* _ASM_X86_INTEL_FAMILY_H */ +diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h +index b77f5edb03b0..0056bc945cd1 100644 +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -8,7 +8,7 @@ + * Interrupt control: + */ + +-static inline unsigned long native_save_fl(void) ++extern inline unsigned long native_save_fl(void) + { + unsigned long flags; + +diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h +index 7680b76adafc..3359dfedc7ee 100644 +--- a/arch/x86/include/asm/mmu.h ++++ b/arch/x86/include/asm/mmu.h +@@ -3,12 +3,18 @@ + + #include + #include ++#include + + /* +- * The x86 doesn't have a mmu context, but +- * we put the segment information here. 
++ * x86 has arch-specific MMU state beyond what lives in mm_struct. + */ + typedef struct { ++ /* ++ * ctx_id uniquely identifies this mm_struct. A ctx_id will never ++ * be reused, and zero is not a valid ctx_id. ++ */ ++ u64 ctx_id; ++ + #ifdef CONFIG_MODIFY_LDT_SYSCALL + struct ldt_struct *ldt; + #endif +@@ -24,6 +30,11 @@ typedef struct { + atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */ + } mm_context_t; + ++#define INIT_MM_CONTEXT(mm) \ ++ .context = { \ ++ .ctx_id = 1, \ ++ } ++ + void leave_mm(int cpu); + + #endif /* _ASM_X86_MMU_H */ +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h +index 9bfc5fd77015..effc12767cbf 100644 +--- a/arch/x86/include/asm/mmu_context.h ++++ b/arch/x86/include/asm/mmu_context.h +@@ -11,6 +11,9 @@ + #include + #include + #include ++ ++extern atomic64_t last_mm_ctx_id; ++ + #ifndef CONFIG_PARAVIRT + static inline void paravirt_activate_mm(struct mm_struct *prev, + struct mm_struct *next) +@@ -52,15 +55,15 @@ struct ldt_struct { + /* + * Used for LDT copy/destruction. + */ +-int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +-void destroy_context(struct mm_struct *mm); ++int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm); ++void destroy_context_ldt(struct mm_struct *mm); + #else /* CONFIG_MODIFY_LDT_SYSCALL */ +-static inline int init_new_context(struct task_struct *tsk, +- struct mm_struct *mm) ++static inline int init_new_context_ldt(struct task_struct *tsk, ++ struct mm_struct *mm) + { + return 0; + } +-static inline void destroy_context(struct mm_struct *mm) {} ++static inline void destroy_context_ldt(struct mm_struct *mm) {} + #endif + + static inline void load_mm_ldt(struct mm_struct *mm) +@@ -102,6 +105,18 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) + this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); + } + ++static inline int init_new_context(struct task_struct *tsk, ++ struct mm_struct *mm) ++{ ++ mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id); ++ init_new_context_ldt(tsk, mm); ++ return 0; ++} ++static inline void destroy_context(struct mm_struct *mm) ++{ ++ destroy_context_ldt(mm); ++} ++ + extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk); + +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index b8911aecf035..caa00191e565 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -32,6 +32,15 @@ + #define EFER_FFXSR (1<<_EFER_FFXSR) + + /* Intel MSRs. 
Some also available on other CPUs */ ++#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ ++#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ ++#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ ++#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ ++#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ ++ ++#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ ++#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ ++ + #define MSR_IA32_PERFCTR0 0x000000c1 + #define MSR_IA32_PERFCTR1 0x000000c2 + #define MSR_FSB_FREQ 0x000000cd +@@ -45,6 +54,16 @@ + #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) + + #define MSR_MTRRcap 0x000000fe ++ ++#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a ++#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ ++#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ ++#define ARCH_CAP_SSB_NO (1 << 4) /* ++ * Not susceptible to Speculative Store Bypass ++ * attack, so no Speculative Store Bypass ++ * control required. ++ */ ++ + #define MSR_IA32_BBL_CR_CTL 0x00000119 + #define MSR_IA32_BBL_CR_CTL3 0x0000011e + +@@ -132,6 +151,7 @@ + + /* DEBUGCTLMSR bits (others vary by model): */ + #define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ ++#define DEBUGCTLMSR_BTF_SHIFT 1 + #define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ + #define DEBUGCTLMSR_TR (1UL << 6) + #define DEBUGCTLMSR_BTS (1UL << 7) +@@ -308,6 +328,8 @@ + #define MSR_AMD64_IBSOPDATA4 0xc001103d + #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ + ++#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f ++ + /* Fam 16h MSRs */ + #define MSR_F16H_L2I_PERF_CTL 0xc0010230 + #define MSR_F16H_L2I_PERF_CTR 0xc0010231 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 8b910416243c..b4c74c24c890 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + + /* + * Fill the CPU return stack buffer. +@@ -171,6 +172,14 @@ enum spectre_v2_mitigation { + SPECTRE_V2_IBRS, + }; + ++/* The Speculative Store Bypass disable variants */ ++enum ssb_mitigation { ++ SPEC_STORE_BYPASS_NONE, ++ SPEC_STORE_BYPASS_DISABLE, ++ SPEC_STORE_BYPASS_PRCTL, ++ SPEC_STORE_BYPASS_SECCOMP, ++}; ++ + extern char __indirect_thunk_start[]; + extern char __indirect_thunk_end[]; + +@@ -194,6 +203,51 @@ static inline void vmexit_fill_RSB(void) + #endif + } + ++static __always_inline ++void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) ++{ ++ asm volatile(ALTERNATIVE("", "wrmsr", %c[feature]) ++ : : "c" (msr), ++ "a" ((u32)val), ++ "d" ((u32)(val >> 32)), ++ [feature] "i" (feature) ++ : "memory"); ++} ++ ++static inline void indirect_branch_prediction_barrier(void) ++{ ++ u64 val = PRED_CMD_IBPB; ++ ++ alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB); ++} ++ ++/* The Intel SPEC CTRL MSR base value cache */ ++extern u64 x86_spec_ctrl_base; ++ ++/* ++ * With retpoline, we must use IBRS to restrict branch prediction ++ * before calling into firmware. ++ * ++ * (Implemented as CPP macros due to header hell.) 
++ */ ++#define firmware_restrict_branch_speculation_start() \ ++do { \ ++ u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \ ++ \ ++ preempt_disable(); \ ++ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ ++ X86_FEATURE_USE_IBRS_FW); \ ++} while (0) ++ ++#define firmware_restrict_branch_speculation_end() \ ++do { \ ++ u64 val = x86_spec_ctrl_base; \ ++ \ ++ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ ++ X86_FEATURE_USE_IBRS_FW); \ ++ preempt_enable(); \ ++} while (0) ++ + #endif /* __ASSEMBLY__ */ + + /* +diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h +index fac9a5c0abe9..6847d85400a8 100644 +--- a/arch/x86/include/asm/required-features.h ++++ b/arch/x86/include/asm/required-features.h +@@ -100,6 +100,7 @@ + #define REQUIRED_MASK15 0 + #define REQUIRED_MASK16 0 + #define REQUIRED_MASK17 0 +-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) ++#define REQUIRED_MASK18 0 ++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) + + #endif /* _ASM_X86_REQUIRED_FEATURES_H */ +diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h +new file mode 100644 +index 000000000000..ae7c2c5cd7f0 +--- /dev/null ++++ b/arch/x86/include/asm/spec-ctrl.h +@@ -0,0 +1,80 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _ASM_X86_SPECCTRL_H_ ++#define _ASM_X86_SPECCTRL_H_ ++ ++#include ++#include ++ ++/* ++ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR ++ * the guest has, while on VMEXIT we restore the host view. This ++ * would be easier if SPEC_CTRL were architecturally maskable or ++ * shadowable for guests but this is not (currently) the case. ++ * Takes the guest view of SPEC_CTRL MSR as a parameter and also ++ * the guest's version of VIRT_SPEC_CTRL, if emulated. ++ */ ++extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest); ++ ++/** ++ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest ++ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL ++ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL ++ * (may get translated to MSR_AMD64_LS_CFG bits) ++ * ++ * Avoids writing to the MSR if the content/bits are the same ++ */ ++static inline ++void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) ++{ ++ x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true); ++} ++ ++/** ++ * x86_spec_ctrl_restore_host - Restore host speculation control registers ++ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL ++ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL ++ * (may get translated to MSR_AMD64_LS_CFG bits) ++ * ++ * Avoids writing to the MSR if the content/bits are the same ++ */ ++static inline ++void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) ++{ ++ x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false); ++} ++ ++/* AMD specific Speculative Store Bypass MSR data */ ++extern u64 x86_amd_ls_cfg_base; ++extern u64 x86_amd_ls_cfg_ssbd_mask; ++ ++static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn) ++{ ++ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); ++ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); ++} ++ ++static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) ++{ ++ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); ++ return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); ++} ++ ++static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) ++{ ++ return (tifn & _TIF_SSBD) ? 
x86_amd_ls_cfg_ssbd_mask : 0ULL; ++} ++ ++#ifdef CONFIG_SMP ++extern void speculative_store_bypass_ht_init(void); ++#else ++static inline void speculative_store_bypass_ht_init(void) { } ++#endif ++ ++extern void speculative_store_bypass_update(unsigned long tif); ++ ++static inline void speculative_store_bypass_update_current(void) ++{ ++ speculative_store_bypass_update(current_thread_info()->flags); ++} ++ ++#endif +diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h +index 18c9aaa8c043..a96e88b243ef 100644 +--- a/arch/x86/include/asm/thread_info.h ++++ b/arch/x86/include/asm/thread_info.h +@@ -92,6 +92,7 @@ struct thread_info { + #define TIF_SIGPENDING 2 /* signal pending */ + #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ + #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ ++#define TIF_SSBD 5 /* Reduced data speculation */ + #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ + #define TIF_SECCOMP 8 /* secure computing */ +@@ -114,8 +115,9 @@ struct thread_info { + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) + #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +-#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) + #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) ++#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) ++#define _TIF_SSBD (1 << TIF_SSBD) + #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) + #define _TIF_SECCOMP (1 << TIF_SECCOMP) +@@ -147,7 +149,7 @@ struct thread_info { + + /* flags to check in __switch_to() */ + #define _TIF_WORK_CTXSW \ +- (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) ++ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD) + + #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) + #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h +index e2a89d2577fb..72cfe3e53af1 100644 +--- a/arch/x86/include/asm/tlbflush.h ++++ b/arch/x86/include/asm/tlbflush.h +@@ -68,6 +68,8 @@ static inline void invpcid_flush_all_nonglobals(void) + struct tlb_state { + struct mm_struct *active_mm; + int state; ++ /* last user mm's ctx id */ ++ u64 last_ctx_id; + + /* + * Access to this CR4 shadow and to H/W CR4 is protected by +@@ -109,6 +111,16 @@ static inline void cr4_clear_bits(unsigned long mask) + } + } + ++static inline void cr4_toggle_bits(unsigned long mask) ++{ ++ unsigned long cr4; ++ ++ cr4 = this_cpu_read(cpu_tlbstate.cr4); ++ cr4 ^= mask; ++ this_cpu_write(cpu_tlbstate.cr4, cr4); ++ __write_cr4(cr4); ++} ++ + /* Read the CR4 shadow. 
*/ + static inline unsigned long cr4_read_shadow(void) + { +diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile +index b1b78ffe01d0..7947cee61f61 100644 +--- a/arch/x86/kernel/Makefile ++++ b/arch/x86/kernel/Makefile +@@ -41,6 +41,7 @@ obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o + obj-y += tsc.o tsc_msr.o io_delay.o rtc.o + obj-y += pci-iommu_table.o + obj-y += resource.o ++obj-y += irqflags.o + + obj-y += process.o + obj-y += fpu/ +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index f4fb8f5b0be4..9f6151884249 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -519,6 +520,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) + + if (cpu_has(c, X86_FEATURE_MWAITX)) + use_mwaitx_delay(); ++ ++ if (c->x86 >= 0x15 && c->x86 <= 0x17) { ++ unsigned int bit; ++ ++ switch (c->x86) { ++ case 0x15: bit = 54; break; ++ case 0x16: bit = 33; break; ++ case 0x17: bit = 10; break; ++ default: return; ++ } ++ /* ++ * Try to cache the base value so further operations can ++ * avoid RMW. If that faults, do not enable SSBD. ++ */ ++ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { ++ setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD); ++ setup_force_cpu_cap(X86_FEATURE_SSBD); ++ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit; ++ } ++ } + } + + static void early_init_amd(struct cpuinfo_x86 *c) +@@ -692,6 +713,17 @@ static void init_amd_bd(struct cpuinfo_x86 *c) + } + } + ++static void init_amd_zn(struct cpuinfo_x86 *c) ++{ ++ set_cpu_cap(c, X86_FEATURE_ZEN); ++ /* ++ * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects ++ * all up to and including B1. ++ */ ++ if (c->x86_model <= 1 && c->x86_mask <= 1) ++ set_cpu_cap(c, X86_FEATURE_CPB); ++} ++ + static void init_amd(struct cpuinfo_x86 *c) + { + u32 dummy; +@@ -722,6 +754,7 @@ static void init_amd(struct cpuinfo_x86 *c) + case 0x10: init_amd_gh(c); break; + case 0x12: init_amd_ln(c); break; + case 0x15: init_amd_bd(c); break; ++ case 0x17: init_amd_zn(c); break; + } + + /* Enable workaround for FXSAVE leak */ +@@ -791,8 +824,9 @@ static void init_amd(struct cpuinfo_x86 *c) + if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM)) + set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH); + +- /* AMD CPUs don't reset SS attributes on SYSRET */ +- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); ++ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */ ++ if (!cpu_has(c, X86_FEATURE_XENPV)) ++ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); + } + + #ifdef CONFIG_X86_32 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 2bbc74f8a4a8..12a8867071f3 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -11,8 +11,10 @@ + #include + #include + #include ++#include ++#include + +-#include ++#include + #include + #include + #include +@@ -26,6 +28,27 @@ + #include + + static void __init spectre_v2_select_mitigation(void); ++static void __init ssb_select_mitigation(void); ++ ++/* ++ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any ++ * writes to SPEC_CTRL contain whatever reserved bits have been set. ++ */ ++u64 x86_spec_ctrl_base; ++EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); ++ ++/* ++ * The vendor and possibly platform specific bits which can be modified in ++ * x86_spec_ctrl_base. ++ */ ++static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS; ++ ++/* ++ * AMD specific MSR info for Speculative Store Bypass control. 
++ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). ++ */ ++u64 x86_amd_ls_cfg_base; ++u64 x86_amd_ls_cfg_ssbd_mask; + + void __init check_bugs(void) + { +@@ -36,9 +59,27 @@ void __init check_bugs(void) + print_cpu_info(&boot_cpu_data); + } + ++ /* ++ * Read the SPEC_CTRL MSR to account for reserved bits which may ++ * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD ++ * init code as it is not enumerated and depends on the family. ++ */ ++ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) ++ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); ++ ++ /* Allow STIBP in MSR_SPEC_CTRL if supported */ ++ if (boot_cpu_has(X86_FEATURE_STIBP)) ++ x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; ++ + /* Select the proper spectre mitigation before patching alternatives */ + spectre_v2_select_mitigation(); + ++ /* ++ * Select proper mitigation for any exposure to the Speculative Store ++ * Bypass vulnerability. ++ */ ++ ssb_select_mitigation(); ++ + #ifdef CONFIG_X86_32 + /* + * Check whether we are able to run this kernel safely on SMP. +@@ -94,6 +135,73 @@ static const char *spectre_v2_strings[] = { + + static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; + ++void ++x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) ++{ ++ u64 msrval, guestval, hostval = x86_spec_ctrl_base; ++ struct thread_info *ti = current_thread_info(); ++ ++ /* Is MSR_SPEC_CTRL implemented ? */ ++ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { ++ /* ++ * Restrict guest_spec_ctrl to supported values. Clear the ++ * modifiable bits in the host base value and or the ++ * modifiable bits from the guest value. ++ */ ++ guestval = hostval & ~x86_spec_ctrl_mask; ++ guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; ++ ++ /* SSBD controlled in MSR_SPEC_CTRL */ ++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) ++ hostval |= ssbd_tif_to_spec_ctrl(ti->flags); ++ ++ if (hostval != guestval) { ++ msrval = setguest ? guestval : hostval; ++ wrmsrl(MSR_IA32_SPEC_CTRL, msrval); ++ } ++ } ++ ++ /* ++ * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update ++ * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported. ++ */ ++ if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && ++ !static_cpu_has(X86_FEATURE_VIRT_SSBD)) ++ return; ++ ++ /* ++ * If the host has SSBD mitigation enabled, force it in the host's ++ * virtual MSR value. If its not permanently enabled, evaluate ++ * current's TIF_SSBD thread flag. ++ */ ++ if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) ++ hostval = SPEC_CTRL_SSBD; ++ else ++ hostval = ssbd_tif_to_spec_ctrl(ti->flags); ++ ++ /* Sanitize the guest value */ ++ guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; ++ ++ if (hostval != guestval) { ++ unsigned long tif; ++ ++ tif = setguest ? 
ssbd_spec_ctrl_to_tif(guestval) : ++ ssbd_spec_ctrl_to_tif(hostval); ++ ++ speculative_store_bypass_update(tif); ++ } ++} ++EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); ++ ++static void x86_amd_ssb_disable(void) ++{ ++ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; ++ ++ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) ++ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); ++ else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) ++ wrmsrl(MSR_AMD64_LS_CFG, msrval); ++} + + #ifdef RETPOLINE + static bool spectre_v2_bad_module; +@@ -162,8 +270,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) + return SPECTRE_V2_CMD_NONE; + else { +- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, +- sizeof(arg)); ++ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_CMD_AUTO; + +@@ -184,8 +291,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) + cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || + cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && + !IS_ENABLED(CONFIG_RETPOLINE)) { +- pr_err("%s selected but not compiled in. Switching to AUTO select\n", +- mitigation_options[i].option); ++ pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + +@@ -255,14 +361,14 @@ static void __init spectre_v2_select_mitigation(void) + goto retpoline_auto; + break; + } +- pr_err("kernel not compiled with retpoline; no mitigation available!"); ++ pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); + return; + + retpoline_auto: + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { + retpoline_amd: + if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { +- pr_err("LFENCE not serializing. Switching to generic retpoline\n"); ++ pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); + goto retpoline_generic; + } + mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : +@@ -280,7 +386,7 @@ retpoline_auto: + pr_info("%s\n", spectre_v2_strings[mode]); + + /* +- * If neither SMEP or KPTI are available, there is a risk of ++ * If neither SMEP nor PTI are available, there is a risk of + * hitting userspace addresses in the RSB after a context switch + * from a shallow call stack to a deeper one. To prevent this fill + * the entire RSB, even when using IBRS. +@@ -294,38 +400,309 @@ retpoline_auto: + if ((!boot_cpu_has(X86_FEATURE_KAISER) && + !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); +- pr_info("Filling RSB on context switch\n"); ++ pr_info("Spectre v2 mitigation: Filling RSB on context switch\n"); ++ } ++ ++ /* Initialize Indirect Branch Prediction Barrier if supported */ ++ if (boot_cpu_has(X86_FEATURE_IBPB)) { ++ setup_force_cpu_cap(X86_FEATURE_USE_IBPB); ++ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); ++ } ++ ++ /* ++ * Retpoline means the kernel is safe because it has no indirect ++ * branches. But firmware isn't, so use IBRS to protect that. 
++ */ ++ if (boot_cpu_has(X86_FEATURE_IBRS)) { ++ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); ++ pr_info("Enabling Restricted Speculation for firmware calls\n"); ++ } ++} ++ ++#undef pr_fmt ++#define pr_fmt(fmt) "Speculative Store Bypass: " fmt ++ ++static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE; ++ ++/* The kernel command line selection */ ++enum ssb_mitigation_cmd { ++ SPEC_STORE_BYPASS_CMD_NONE, ++ SPEC_STORE_BYPASS_CMD_AUTO, ++ SPEC_STORE_BYPASS_CMD_ON, ++ SPEC_STORE_BYPASS_CMD_PRCTL, ++ SPEC_STORE_BYPASS_CMD_SECCOMP, ++}; ++ ++static const char *ssb_strings[] = { ++ [SPEC_STORE_BYPASS_NONE] = "Vulnerable", ++ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", ++ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", ++ [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", ++}; ++ ++static const struct { ++ const char *option; ++ enum ssb_mitigation_cmd cmd; ++} ssb_mitigation_options[] = { ++ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ ++ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ ++ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ ++ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ ++ { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ ++}; ++ ++static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) ++{ ++ enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; ++ char arg[20]; ++ int ret, i; ++ ++ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) { ++ return SPEC_STORE_BYPASS_CMD_NONE; ++ } else { ++ ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", ++ arg, sizeof(arg)); ++ if (ret < 0) ++ return SPEC_STORE_BYPASS_CMD_AUTO; ++ ++ for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { ++ if (!match_option(arg, ret, ssb_mitigation_options[i].option)) ++ continue; ++ ++ cmd = ssb_mitigation_options[i].cmd; ++ break; ++ } ++ ++ if (i >= ARRAY_SIZE(ssb_mitigation_options)) { ++ pr_err("unknown option (%s). Switching to AUTO select\n", arg); ++ return SPEC_STORE_BYPASS_CMD_AUTO; ++ } ++ } ++ ++ return cmd; ++} ++ ++static enum ssb_mitigation __init __ssb_select_mitigation(void) ++{ ++ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; ++ enum ssb_mitigation_cmd cmd; ++ ++ if (!boot_cpu_has(X86_FEATURE_SSBD)) ++ return mode; ++ ++ cmd = ssb_parse_cmdline(); ++ if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && ++ (cmd == SPEC_STORE_BYPASS_CMD_NONE || ++ cmd == SPEC_STORE_BYPASS_CMD_AUTO)) ++ return mode; ++ ++ switch (cmd) { ++ case SPEC_STORE_BYPASS_CMD_AUTO: ++ case SPEC_STORE_BYPASS_CMD_SECCOMP: ++ /* ++ * Choose prctl+seccomp as the default mode if seccomp is ++ * enabled. ++ */ ++ if (IS_ENABLED(CONFIG_SECCOMP)) ++ mode = SPEC_STORE_BYPASS_SECCOMP; ++ else ++ mode = SPEC_STORE_BYPASS_PRCTL; ++ break; ++ case SPEC_STORE_BYPASS_CMD_ON: ++ mode = SPEC_STORE_BYPASS_DISABLE; ++ break; ++ case SPEC_STORE_BYPASS_CMD_PRCTL: ++ mode = SPEC_STORE_BYPASS_PRCTL; ++ break; ++ case SPEC_STORE_BYPASS_CMD_NONE: ++ break; ++ } ++ ++ /* ++ * We have three CPU feature flags that are in play here: ++ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 
++ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass ++ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation ++ */ ++ if (mode == SPEC_STORE_BYPASS_DISABLE) { ++ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); ++ /* ++ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses ++ * a completely different MSR and bit dependent on family. ++ */ ++ switch (boot_cpu_data.x86_vendor) { ++ case X86_VENDOR_INTEL: ++ x86_spec_ctrl_base |= SPEC_CTRL_SSBD; ++ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; ++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); ++ break; ++ case X86_VENDOR_AMD: ++ x86_amd_ssb_disable(); ++ break; ++ } + } ++ ++ return mode; ++} ++ ++static void ssb_select_mitigation(void) ++{ ++ ssb_mode = __ssb_select_mitigation(); ++ ++ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) ++ pr_info("%s\n", ssb_strings[ssb_mode]); + } + + #undef pr_fmt ++#define pr_fmt(fmt) "Speculation prctl: " fmt ++ ++static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) ++{ ++ bool update; ++ ++ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && ++ ssb_mode != SPEC_STORE_BYPASS_SECCOMP) ++ return -ENXIO; ++ ++ switch (ctrl) { ++ case PR_SPEC_ENABLE: ++ /* If speculation is force disabled, enable is not allowed */ ++ if (task_spec_ssb_force_disable(task)) ++ return -EPERM; ++ task_clear_spec_ssb_disable(task); ++ update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); ++ break; ++ case PR_SPEC_DISABLE: ++ task_set_spec_ssb_disable(task); ++ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); ++ break; ++ case PR_SPEC_FORCE_DISABLE: ++ task_set_spec_ssb_disable(task); ++ task_set_spec_ssb_force_disable(task); ++ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); ++ break; ++ default: ++ return -ERANGE; ++ } ++ ++ /* ++ * If being set on non-current task, delay setting the CPU ++ * mitigation until it is next scheduled. 
++ */ ++ if (task == current && update) ++ speculative_store_bypass_update_current(); ++ ++ return 0; ++} ++ ++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, ++ unsigned long ctrl) ++{ ++ switch (which) { ++ case PR_SPEC_STORE_BYPASS: ++ return ssb_prctl_set(task, ctrl); ++ default: ++ return -ENODEV; ++ } ++} ++ ++#ifdef CONFIG_SECCOMP ++void arch_seccomp_spec_mitigate(struct task_struct *task) ++{ ++ if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) ++ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); ++} ++#endif ++ ++static int ssb_prctl_get(struct task_struct *task) ++{ ++ switch (ssb_mode) { ++ case SPEC_STORE_BYPASS_DISABLE: ++ return PR_SPEC_DISABLE; ++ case SPEC_STORE_BYPASS_SECCOMP: ++ case SPEC_STORE_BYPASS_PRCTL: ++ if (task_spec_ssb_force_disable(task)) ++ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; ++ if (task_spec_ssb_disable(task)) ++ return PR_SPEC_PRCTL | PR_SPEC_DISABLE; ++ return PR_SPEC_PRCTL | PR_SPEC_ENABLE; ++ default: ++ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) ++ return PR_SPEC_ENABLE; ++ return PR_SPEC_NOT_AFFECTED; ++ } ++} ++ ++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) ++{ ++ switch (which) { ++ case PR_SPEC_STORE_BYPASS: ++ return ssb_prctl_get(task); ++ default: ++ return -ENODEV; ++ } ++} ++ ++void x86_spec_ctrl_setup_ap(void) ++{ ++ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) ++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); ++ ++ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) ++ x86_amd_ssb_disable(); ++} + + #ifdef CONFIG_SYSFS +-ssize_t cpu_show_meltdown(struct device *dev, +- struct device_attribute *attr, char *buf) ++ ++static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, ++ char *buf, unsigned int bug) + { +- if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) ++ if (!boot_cpu_has_bug(bug)) + return sprintf(buf, "Not affected\n"); +- if (boot_cpu_has(X86_FEATURE_KAISER)) +- return sprintf(buf, "Mitigation: PTI\n"); ++ ++ switch (bug) { ++ case X86_BUG_CPU_MELTDOWN: ++ if (boot_cpu_has(X86_FEATURE_KAISER)) ++ return sprintf(buf, "Mitigation: PTI\n"); ++ ++ break; ++ ++ case X86_BUG_SPECTRE_V1: ++ return sprintf(buf, "Mitigation: __user pointer sanitization\n"); ++ ++ case X86_BUG_SPECTRE_V2: ++ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], ++ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", ++ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? 
", IBRS_FW" : "", ++ spectre_v2_module_string()); ++ ++ case X86_BUG_SPEC_STORE_BYPASS: ++ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); ++ ++ default: ++ break; ++ } ++ + return sprintf(buf, "Vulnerable\n"); + } + +-ssize_t cpu_show_spectre_v1(struct device *dev, +- struct device_attribute *attr, char *buf) ++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) + { +- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) +- return sprintf(buf, "Not affected\n"); +- return sprintf(buf, "Mitigation: __user pointer sanitization\n"); ++ return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); + } + +-ssize_t cpu_show_spectre_v2(struct device *dev, +- struct device_attribute *attr, char *buf) ++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) + { +- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) +- return sprintf(buf, "Not affected\n"); ++ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); ++} + +- return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled], +- spectre_v2_module_string()); ++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); ++} ++ ++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); + } + #endif +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 736e2843139b..3d21b28f9826 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -43,6 +43,8 @@ + #include + #include + #include ++#include ++#include + + #ifdef CONFIG_X86_LOCAL_APIC + #include +@@ -674,6 +676,40 @@ static void apply_forced_caps(struct cpuinfo_x86 *c) + } + } + ++static void init_speculation_control(struct cpuinfo_x86 *c) ++{ ++ /* ++ * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support, ++ * and they also have a different bit for STIBP support. Also, ++ * a hypervisor might have set the individual AMD bits even on ++ * Intel CPUs, for finer-grained selection of what's available. ++ */ ++ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { ++ set_cpu_cap(c, X86_FEATURE_IBRS); ++ set_cpu_cap(c, X86_FEATURE_IBPB); ++ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); ++ } ++ ++ if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) ++ set_cpu_cap(c, X86_FEATURE_STIBP); ++ ++ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD)) ++ set_cpu_cap(c, X86_FEATURE_SSBD); ++ ++ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) { ++ set_cpu_cap(c, X86_FEATURE_IBRS); ++ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); ++ } ++ ++ if (cpu_has(c, X86_FEATURE_AMD_IBPB)) ++ set_cpu_cap(c, X86_FEATURE_IBPB); ++ ++ if (cpu_has(c, X86_FEATURE_AMD_STIBP)) { ++ set_cpu_cap(c, X86_FEATURE_STIBP); ++ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); ++ } ++} ++ + void get_cpu_cap(struct cpuinfo_x86 *c) + { + u32 eax, ebx, ecx, edx; +@@ -695,6 +731,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) + cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); + c->x86_capability[CPUID_7_0_EBX] = ebx; + c->x86_capability[CPUID_7_ECX] = ecx; ++ c->x86_capability[CPUID_7_EDX] = edx; + } + + /* Extended state features: level 0x0000000d */ +@@ -765,6 +802,14 @@ void get_cpu_cap(struct cpuinfo_x86 *c) + c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); + + init_scattered_cpuid_features(c); ++ init_speculation_control(c); ++ ++ /* ++ * Clear/Set all flags overridden by options, after probe. 
++ * This needs to happen each time we re-probe, which may happen ++ * several times during CPU initialization. ++ */ ++ apply_forced_caps(c); + } + + static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) +@@ -793,6 +838,75 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) + #endif + } + ++static const __initconst struct x86_cpu_id cpu_no_speculation[] = { ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, ++ { X86_VENDOR_CENTAUR, 5 }, ++ { X86_VENDOR_INTEL, 5 }, ++ { X86_VENDOR_NSC, 5 }, ++ { X86_VENDOR_ANY, 4 }, ++ {} ++}; ++ ++static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { ++ { X86_VENDOR_AMD }, ++ {} ++}; ++ ++static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, ++ { X86_VENDOR_CENTAUR, 5, }, ++ { X86_VENDOR_INTEL, 5, }, ++ { X86_VENDOR_NSC, 5, }, ++ { X86_VENDOR_AMD, 0x12, }, ++ { X86_VENDOR_AMD, 0x11, }, ++ { X86_VENDOR_AMD, 0x10, }, ++ { X86_VENDOR_AMD, 0xf, }, ++ { X86_VENDOR_ANY, 4, }, ++ {} ++}; ++ ++static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) ++{ ++ u64 ia32_cap = 0; ++ ++ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) ++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); ++ ++ if (!x86_match_cpu(cpu_no_spec_store_bypass) && ++ !(ia32_cap & ARCH_CAP_SSB_NO)) ++ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); ++ ++ if (x86_match_cpu(cpu_no_speculation)) ++ return; ++ ++ setup_force_cpu_bug(X86_BUG_SPECTRE_V1); ++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2); ++ ++ if (x86_match_cpu(cpu_no_meltdown)) ++ return; ++ ++ /* Rogue Data Cache Load? No! */ ++ if (ia32_cap & ARCH_CAP_RDCL_NO) ++ return; ++ ++ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); ++} ++ + /* + * Do minimum CPU detection early. 
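
cpu_set_bug_bits() above keys the SSB and Meltdown bug bits off IA32_ARCH_CAPABILITIES when the CPU advertises that MSR. The same two checks can be reproduced from userspace through the msr driver; the sketch below needs root and 'modprobe msr', and the MSR number and bit layout are assumed from the msr-index.h of this kernel generation:

    /*
     * Read IA32_ARCH_CAPABILITIES (MSR 0x10a) and test the two bits
     * cpu_set_bug_bits() consults. Bit positions assumed: RDCL_NO = 0,
     * SSB_NO = 4.
     */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_IA32_ARCH_CAPABILITIES 0x10a
    #define ARCH_CAP_RDCL_NO (1ULL << 0)    /* not affected by Meltdown */
    #define ARCH_CAP_SSB_NO  (1ULL << 4)    /* not affected by SSB */

    int main(void)
    {
            uint64_t cap = 0;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0 || pread(fd, &cap, sizeof(cap),
                                MSR_IA32_ARCH_CAPABILITIES) != sizeof(cap)) {
                    perror("rdmsr");        /* also fails if the MSR is absent */
                    return 1;
            }
            close(fd);
            printf("RDCL_NO=%d SSB_NO=%d\n",
                   !!(cap & ARCH_CAP_RDCL_NO), !!(cap & ARCH_CAP_SSB_NO));
            return 0;
    }
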
+ * Fields really needed: vendor, cpuid_level, family, model, mask, +@@ -839,11 +953,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) + + setup_force_cpu_cap(X86_FEATURE_ALWAYS); + +- if (c->x86_vendor != X86_VENDOR_AMD) +- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); +- +- setup_force_cpu_bug(X86_BUG_SPECTRE_V1); +- setup_force_cpu_bug(X86_BUG_SPECTRE_V2); ++ cpu_set_bug_bits(c); + + fpu__init_system(c); + +@@ -1132,6 +1242,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) + enable_sep_cpu(); + #endif + mtrr_ap_init(); ++ x86_spec_ctrl_setup_ap(); + } + + struct msr_range { +diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h +index 2584265d4745..3b19d82f7932 100644 +--- a/arch/x86/kernel/cpu/cpu.h ++++ b/arch/x86/kernel/cpu/cpu.h +@@ -46,4 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], + + extern void get_cpu_cap(struct cpuinfo_x86 *c); + extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); ++ ++extern void x86_spec_ctrl_setup_ap(void); ++ + #endif /* ARCH_X86_CPU_H */ +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c +index 9299e3bdfad6..4dce22d3cb06 100644 +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_X86_64 + #include +@@ -25,6 +26,62 @@ + #include + #endif + ++/* ++ * Early microcode releases for the Spectre v2 mitigation were broken. ++ * Information taken from; ++ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf ++ * - https://kb.vmware.com/s/article/52345 ++ * - Microcode revisions observed in the wild ++ * - Release note from 20180108 microcode release ++ */ ++struct sku_microcode { ++ u8 model; ++ u8 stepping; ++ u32 microcode; ++}; ++static const struct sku_microcode spectre_bad_microcodes[] = { ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 }, ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 }, ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 }, ++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, ++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, ++ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, ++ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, ++ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, ++ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, ++ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, ++ { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, ++ { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, ++ { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 }, ++ { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 }, ++ { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 }, ++ { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, ++ { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, ++ { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, ++ /* Observed in the wild */ ++ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, ++ { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, ++}; ++ ++static bool bad_spectre_microcode(struct cpuinfo_x86 *c) ++{ ++ int i; ++ ++ /* ++ * We know that the hypervisor lie to us on the microcode version so ++ * we may as well hope that it is running the correct version. 
++ */ ++ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) ++ return false; ++ ++ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { ++ if (c->x86_model == spectre_bad_microcodes[i].model && ++ c->x86_mask == spectre_bad_microcodes[i].stepping) ++ return (c->microcode <= spectre_bad_microcodes[i].microcode); ++ } ++ return false; ++} ++ + static void early_init_intel(struct cpuinfo_x86 *c) + { + u64 misc_enable; +@@ -51,6 +108,22 @@ static void early_init_intel(struct cpuinfo_x86 *c) + rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode); + } + ++ /* Now if any of them are set, check the blacklist and clear the lot */ ++ if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) || ++ cpu_has(c, X86_FEATURE_INTEL_STIBP) || ++ cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) || ++ cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) { ++ pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n"); ++ setup_clear_cpu_cap(X86_FEATURE_IBRS); ++ setup_clear_cpu_cap(X86_FEATURE_IBPB); ++ setup_clear_cpu_cap(X86_FEATURE_STIBP); ++ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); ++ setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL); ++ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); ++ setup_clear_cpu_cap(X86_FEATURE_SSBD); ++ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD); ++ } ++ + /* + * Atom erratum AAE44/AAF40/AAG38/AAH41: + * +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c +index ddc9b8125918..7b8c8c838191 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce.c ++++ b/arch/x86/kernel/cpu/mcheck/mce.c +@@ -2294,9 +2294,6 @@ static ssize_t store_int_with_restart(struct device *s, + if (check_interval == old_check_interval) + return ret; + +- if (check_interval < 1) +- check_interval = 1; +- + mutex_lock(&mce_sysfs_mutex); + mce_restart(); + mutex_unlock(&mce_sysfs_mutex); +diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S +new file mode 100644 +index 000000000000..3817eb748eb4 +--- /dev/null ++++ b/arch/x86/kernel/irqflags.S +@@ -0,0 +1,26 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++ ++#include ++#include ++#include ++ ++/* ++ * unsigned long native_save_fl(void) ++ */ ++ENTRY(native_save_fl) ++ pushf ++ pop %_ASM_AX ++ ret ++ENDPROC(native_save_fl) ++EXPORT_SYMBOL(native_save_fl) ++ ++/* ++ * void native_restore_fl(unsigned long flags) ++ * %eax/%rdi: flags ++ */ ++ENTRY(native_restore_fl) ++ push %_ASM_ARG1 ++ popf ++ ret ++ENDPROC(native_restore_fl) ++EXPORT_SYMBOL(native_restore_fl) +diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c +index bc429365b72a..8bc68cfc0d33 100644 +--- a/arch/x86/kernel/ldt.c ++++ b/arch/x86/kernel/ldt.c +@@ -119,7 +119,7 @@ static void free_ldt_struct(struct ldt_struct *ldt) + * we do not have to muck with descriptors here, that is + * done in switch_mm() as needed. + */ +-int init_new_context(struct task_struct *tsk, struct mm_struct *mm) ++int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) + { + struct ldt_struct *new_ldt; + struct mm_struct *old_mm; +@@ -160,7 +160,7 @@ out_unlock: + * + * 64bit: Don't touch the LDT register - we're already in the next thread. 
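
The spectre_bad_microcodes[] check above compares model, stepping and running microcode revision against a fixed table. The same test is easy to reproduce outside the kernel; the sketch below is a hypothetical standalone checker carrying only two of the table's rows (the full list is in the intel.c hunk above), with the numeric family-6 model values filled in from intel-family.h as an assumption:

    #include <stdio.h>
    #include <stdlib.h>

    struct sku { unsigned long model, stepping, microcode; };

    /* Two rows reproduced from spectre_bad_microcodes[]. */
    static const struct sku bad[] = {
            { 0x3c /* INTEL_FAM6_HASWELL_CORE */,  0x03, 0x23  },
            { 0x2d /* INTEL_FAM6_SANDYBRIDGE_X */, 0x06, 0x61b },
    };

    /*
     * Usage: ./check <model> <stepping> <microcode>; decimal or 0x-hex,
     * as printed by /proc/cpuinfo.
     */
    int main(int argc, char **argv)
    {
            unsigned long model, stepping, ucode;
            unsigned int i;

            if (argc != 4)
                    return 2;
            model    = strtoul(argv[1], NULL, 0);
            stepping = strtoul(argv[2], NULL, 0);
            ucode    = strtoul(argv[3], NULL, 0);

            for (i = 0; i < sizeof(bad) / sizeof(bad[0]); i++) {
                    if (bad[i].model == model &&
                        bad[i].stepping == stepping &&
                        ucode <= bad[i].microcode) {
                            puts("on the Spectre v2 microcode blacklist");
                            return 1;
                    }
            }
            puts("not matched by this (abbreviated) table");
            return 0;
    }
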
+ */ +-void destroy_context(struct mm_struct *mm) ++void destroy_context_ldt(struct mm_struct *mm) + { + free_ldt_struct(mm->context.ldt); + mm->context.ldt = NULL; +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c +index 7c5c5dc90ffa..e18c8798c3a2 100644 +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + /* + * per-CPU TSS segments. Threads are completely 'soft' on Linux, +@@ -130,11 +131,6 @@ void flush_thread(void) + fpu__clear(&tsk->thread.fpu); + } + +-static void hard_disable_TSC(void) +-{ +- cr4_set_bits(X86_CR4_TSD); +-} +- + void disable_TSC(void) + { + preempt_disable(); +@@ -143,15 +139,10 @@ void disable_TSC(void) + * Must flip the CPU state synchronously with + * TIF_NOTSC in the current running context. + */ +- hard_disable_TSC(); ++ cr4_set_bits(X86_CR4_TSD); + preempt_enable(); + } + +-static void hard_enable_TSC(void) +-{ +- cr4_clear_bits(X86_CR4_TSD); +-} +- + static void enable_TSC(void) + { + preempt_disable(); +@@ -160,7 +151,7 @@ static void enable_TSC(void) + * Must flip the CPU state synchronously with + * TIF_NOTSC in the current running context. + */ +- hard_enable_TSC(); ++ cr4_clear_bits(X86_CR4_TSD); + preempt_enable(); + } + +@@ -188,48 +179,199 @@ int set_tsc_mode(unsigned int val) + return 0; + } + +-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, +- struct tss_struct *tss) ++static inline void switch_to_bitmap(struct tss_struct *tss, ++ struct thread_struct *prev, ++ struct thread_struct *next, ++ unsigned long tifp, unsigned long tifn) + { +- struct thread_struct *prev, *next; +- +- prev = &prev_p->thread; +- next = &next_p->thread; +- +- if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^ +- test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) { +- unsigned long debugctl = get_debugctlmsr(); +- +- debugctl &= ~DEBUGCTLMSR_BTF; +- if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) +- debugctl |= DEBUGCTLMSR_BTF; +- +- update_debugctlmsr(debugctl); +- } +- +- if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ +- test_tsk_thread_flag(next_p, TIF_NOTSC)) { +- /* prev and next are different */ +- if (test_tsk_thread_flag(next_p, TIF_NOTSC)) +- hard_disable_TSC(); +- else +- hard_enable_TSC(); +- } +- +- if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { ++ if (tifn & _TIF_IO_BITMAP) { + /* + * Copy the relevant range of the IO bitmap. + * Normally this is 128 bytes or less: + */ + memcpy(tss->io_bitmap, next->io_bitmap_ptr, + max(prev->io_bitmap_max, next->io_bitmap_max)); +- } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { ++ } else if (tifp & _TIF_IO_BITMAP) { + /* + * Clear any possible leftover bits: + */ + memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); + } ++} ++ ++#ifdef CONFIG_SMP ++ ++struct ssb_state { ++ struct ssb_state *shared_state; ++ raw_spinlock_t lock; ++ unsigned int disable_state; ++ unsigned long local_state; ++}; ++ ++#define LSTATE_SSB 0 ++ ++static DEFINE_PER_CPU(struct ssb_state, ssb_state); ++ ++void speculative_store_bypass_ht_init(void) ++{ ++ struct ssb_state *st = this_cpu_ptr(&ssb_state); ++ unsigned int this_cpu = smp_processor_id(); ++ unsigned int cpu; ++ ++ st->local_state = 0; ++ ++ /* ++ * Shared state setup happens once on the first bringup ++ * of the CPU. It's not destroyed on CPU hotunplug. ++ */ ++ if (st->shared_state) ++ return; ++ ++ raw_spin_lock_init(&st->lock); ++ ++ /* ++ * Go over HT siblings and check whether one of them has set up the ++ * shared state pointer already. 
++ */ ++ for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) { ++ if (cpu == this_cpu) ++ continue; ++ ++ if (!per_cpu(ssb_state, cpu).shared_state) ++ continue; ++ ++ /* Link it to the state of the sibling: */ ++ st->shared_state = per_cpu(ssb_state, cpu).shared_state; ++ return; ++ } ++ ++ /* ++ * First HT sibling to come up on the core. Link shared state of ++ * the first HT sibling to itself. The siblings on the same core ++ * which come up later will see the shared state pointer and link ++ * themself to the state of this CPU. ++ */ ++ st->shared_state = st; ++} ++ ++/* ++ * Logic is: First HT sibling enables SSBD for both siblings in the core ++ * and last sibling to disable it, disables it for the whole core. This how ++ * MSR_SPEC_CTRL works in "hardware": ++ * ++ * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL ++ */ ++static __always_inline void amd_set_core_ssb_state(unsigned long tifn) ++{ ++ struct ssb_state *st = this_cpu_ptr(&ssb_state); ++ u64 msr = x86_amd_ls_cfg_base; ++ ++ if (!static_cpu_has(X86_FEATURE_ZEN)) { ++ msr |= ssbd_tif_to_amd_ls_cfg(tifn); ++ wrmsrl(MSR_AMD64_LS_CFG, msr); ++ return; ++ } ++ ++ if (tifn & _TIF_SSBD) { ++ /* ++ * Since this can race with prctl(), block reentry on the ++ * same CPU. ++ */ ++ if (__test_and_set_bit(LSTATE_SSB, &st->local_state)) ++ return; ++ ++ msr |= x86_amd_ls_cfg_ssbd_mask; ++ ++ raw_spin_lock(&st->shared_state->lock); ++ /* First sibling enables SSBD: */ ++ if (!st->shared_state->disable_state) ++ wrmsrl(MSR_AMD64_LS_CFG, msr); ++ st->shared_state->disable_state++; ++ raw_spin_unlock(&st->shared_state->lock); ++ } else { ++ if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state)) ++ return; ++ ++ raw_spin_lock(&st->shared_state->lock); ++ st->shared_state->disable_state--; ++ if (!st->shared_state->disable_state) ++ wrmsrl(MSR_AMD64_LS_CFG, msr); ++ raw_spin_unlock(&st->shared_state->lock); ++ } ++} ++#else ++static __always_inline void amd_set_core_ssb_state(unsigned long tifn) ++{ ++ u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn); ++ ++ wrmsrl(MSR_AMD64_LS_CFG, msr); ++} ++#endif ++ ++static __always_inline void amd_set_ssb_virt_state(unsigned long tifn) ++{ ++ /* ++ * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL, ++ * so ssbd_tif_to_spec_ctrl() just works. 
++ */ ++ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); ++} ++ ++static __always_inline void intel_set_ssb_state(unsigned long tifn) ++{ ++ u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); ++ ++ wrmsrl(MSR_IA32_SPEC_CTRL, msr); ++} ++ ++static __always_inline void __speculative_store_bypass_update(unsigned long tifn) ++{ ++ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) ++ amd_set_ssb_virt_state(tifn); ++ else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) ++ amd_set_core_ssb_state(tifn); ++ else ++ intel_set_ssb_state(tifn); ++} ++ ++void speculative_store_bypass_update(unsigned long tif) ++{ ++ preempt_disable(); ++ __speculative_store_bypass_update(tif); ++ preempt_enable(); ++} ++ ++void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, ++ struct tss_struct *tss) ++{ ++ struct thread_struct *prev, *next; ++ unsigned long tifp, tifn; ++ ++ prev = &prev_p->thread; ++ next = &next_p->thread; ++ ++ tifn = READ_ONCE(task_thread_info(next_p)->flags); ++ tifp = READ_ONCE(task_thread_info(prev_p)->flags); ++ switch_to_bitmap(tss, prev, next, tifp, tifn); ++ + propagate_user_return_notify(prev_p, next_p); ++ ++ if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) && ++ arch_has_block_step()) { ++ unsigned long debugctl, msk; ++ ++ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); ++ debugctl &= ~DEBUGCTLMSR_BTF; ++ msk = tifn & _TIF_BLOCKSTEP; ++ debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT; ++ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); ++ } ++ ++ if ((tifp ^ tifn) & _TIF_NOTSC) ++ cr4_toggle_bits(X86_CR4_TSD); ++ ++ if ((tifp ^ tifn) & _TIF_SSBD) ++ __speculative_store_bypass_update(tifn); + } + + /* +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c +index 1f7aefc7b0b4..c017f1c71560 100644 +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -75,6 +75,7 @@ + #include + #include + #include ++#include + + /* Number of siblings per CPU package */ + int smp_num_siblings = 1; +@@ -217,6 +218,8 @@ static void notrace start_secondary(void *unused) + */ + check_tsc_sync_target(); + ++ speculative_store_bypass_ht_init(); ++ + /* + * Lock vector_lock and initialize the vectors on this cpu + * before setting the cpu online. 
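
The amd_set_core_ssb_state() logic above reduces to a first-on/last-off counter shared by the HT siblings of a core, because MSR_AMD64_LS_CFG acts core-wide: CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL. A standalone simulation of just that accounting, with printf standing in for the MSR write:

    /*
     * The emulated "MSR write" happens only on the 0->1 and 1->0
     * transitions of the shared counter, exactly like the
     * disable_state handling in amd_set_core_ssb_state().
     */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int disable_state;      /* siblings requesting SSBD */

    static void ssbd_set(int thread, int enable)
    {
            pthread_mutex_lock(&lock);
            if (enable) {
                    if (disable_state++ == 0)
                            printf("T%d: first on  -> LS_CFG |= SSBD\n", thread);
            } else {
                    if (--disable_state == 0)
                            printf("T%d: last off  -> LS_CFG &= ~SSBD\n", thread);
            }
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            ssbd_set(0, 1);         /* T0 enables: MSR written */
            ssbd_set(1, 1);         /* T1 enables: counter only */
            ssbd_set(0, 0);         /* T0 disables: counter only */
            ssbd_set(1, 0);         /* T1 disables: MSR cleared */
            return 0;
    }
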
We must set it online with +@@ -1209,6 +1212,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) + set_mtrr_aps_delayed_init(); + + smp_quirk_init_udelay(); ++ ++ speculative_store_bypass_ht_init(); + } + + void arch_enable_nonboot_cpus_begin(void) +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 42654375b73f..df7827a981dd 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -37,7 +37,7 @@ + #include + #include + #include +-#include ++#include + + #include + #include "trace.h" +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 63c44a9bf6bb..18143886b186 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -48,7 +48,7 @@ + #include + #include + #include +-#include ++#include + + #include "trace.h" + #include "pmu.h" +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c +index 7cad01af6dcd..6d683bbb3502 100644 +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -10,6 +10,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -29,6 +30,8 @@ + * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi + */ + ++atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); ++ + struct flush_tlb_info { + struct mm_struct *flush_mm; + unsigned long flush_start; +@@ -104,6 +107,36 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + unsigned cpu = smp_processor_id(); + + if (likely(prev != next)) { ++ u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id); ++ ++ /* ++ * Avoid user/user BTB poisoning by flushing the branch ++ * predictor when switching between processes. This stops ++ * one process from doing Spectre-v2 attacks on another. ++ * ++ * As an optimization, flush indirect branches only when ++ * switching into processes that disable dumping. This ++ * protects high value processes like gpg, without having ++ * too high performance overhead. IBPB is *expensive*! ++ * ++ * This will not flush branches when switching into kernel ++ * threads. It will also not flush if we switch to idle ++ * thread and back to the same process. It will flush if we ++ * switch to a different non-dumpable process. ++ */ ++ if (tsk && tsk->mm && ++ tsk->mm->context.ctx_id != last_ctx_id && ++ get_dumpable(tsk->mm) != SUID_DUMP_USER) ++ indirect_branch_prediction_barrier(); ++ ++ /* ++ * Record last user mm's context id, so we can avoid ++ * flushing branch buffer with IBPB if we switch back ++ * to the same user. ++ */ ++ if (next != &init_mm) ++ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); ++ + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); + this_cpu_write(cpu_tlbstate.active_mm, next); + cpumask_set_cpu(cpu, mm_cpumask(next)); +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c +index a0ac0f9c307f..f5a8cd96bae4 100644 +--- a/arch/x86/platform/efi/efi_64.c ++++ b/arch/x86/platform/efi/efi_64.c +@@ -40,6 +40,7 @@ + #include + #include + #include ++#include + + /* + * We allocate runtime services regions bottom-up, starting from -4G, i.e. 
+@@ -347,6 +348,7 @@ extern efi_status_t efi64_thunk(u32, ...); + \ + efi_sync_low_kernel_mappings(); \ + local_irq_save(flags); \ ++ firmware_restrict_branch_speculation_start(); \ + \ + efi_scratch.prev_cr3 = read_cr3(); \ + write_cr3((unsigned long)efi_scratch.efi_pgt); \ +@@ -357,6 +359,7 @@ extern efi_status_t efi64_thunk(u32, ...); + \ + write_cr3(efi_scratch.prev_cr3); \ + __flush_tlb_all(); \ ++ firmware_restrict_branch_speculation_end(); \ + local_irq_restore(flags); \ + \ + __s; \ +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index cbef64b508e1..82fd84d5e1aa 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -460,6 +460,12 @@ static void __init xen_init_cpuid_mask(void) + cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32)); + } + ++static void __init xen_init_capabilities(void) ++{ ++ if (xen_pv_domain()) ++ setup_force_cpu_cap(X86_FEATURE_XENPV); ++} ++ + static void xen_set_debugreg(int reg, unsigned long val) + { + HYPERVISOR_set_debugreg(reg, val); +@@ -1587,6 +1593,7 @@ asmlinkage __visible void __init xen_start_kernel(void) + + xen_init_irq_ops(); + xen_init_cpuid_mask(); ++ xen_init_capabilities(); + + #ifdef CONFIG_X86_LOCAL_APIC + /* +@@ -1883,14 +1890,6 @@ bool xen_hvm_need_lapic(void) + } + EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); + +-static void xen_set_cpu_features(struct cpuinfo_x86 *c) +-{ +- if (xen_pv_domain()) { +- clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); +- set_cpu_cap(c, X86_FEATURE_XENPV); +- } +-} +- + const struct hypervisor_x86 x86_hyper_xen = { + .name = "Xen", + .detect = xen_platform, +@@ -1898,7 +1897,6 @@ const struct hypervisor_x86 x86_hyper_xen = { + .init_platform = xen_hvm_guest_init, + #endif + .x2apic_available = xen_x2apic_para_available, +- .set_cpu_features = xen_set_cpu_features, + }; + EXPORT_SYMBOL(x86_hyper_xen); + +diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c +index 3f4ebf0261f2..29e50d1229bc 100644 +--- a/arch/x86/xen/smp.c ++++ b/arch/x86/xen/smp.c +@@ -28,6 +28,7 @@ + #include + #include + ++#include + #include + #include + +@@ -87,6 +88,8 @@ static void cpu_bringup(void) + cpu_data(cpu).x86_max_cores = 1; + set_cpu_sibling_map(cpu); + ++ speculative_store_bypass_ht_init(); ++ + xen_setup_cpu_clockevents(); + + notify_cpu_starting(cpu); +@@ -357,6 +360,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) + } + set_cpu_sibling_map(0); + ++ speculative_store_bypass_ht_init(); ++ + xen_pmu_init(0); + + if (xen_smp_intr_init(0)) +diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c +index 7f664c416faf..4ecd0de08557 100644 +--- a/arch/x86/xen/suspend.c ++++ b/arch/x86/xen/suspend.c +@@ -1,11 +1,14 @@ + #include + #include ++#include + + #include + #include + #include + #include + ++#include ++#include + #include + #include + #include +@@ -68,6 +71,8 @@ static void xen_pv_post_suspend(int suspend_cancelled) + xen_mm_unpin_all(); + } + ++static DEFINE_PER_CPU(u64, spec_ctrl); ++ + void xen_arch_pre_suspend(void) + { + if (xen_pv_domain()) +@@ -84,6 +89,9 @@ void xen_arch_post_suspend(int cancelled) + + static void xen_vcpu_notify_restore(void *data) + { ++ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) ++ wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); ++ + /* Boot processor notified via generic timekeeping_resume() */ + if (smp_processor_id() == 0) + return; +@@ -93,7 +101,15 @@ static void xen_vcpu_notify_restore(void *data) + + static void xen_vcpu_notify_suspend(void *data) + { ++ u64 tmp; ++ + tick_suspend_local(); ++ ++ if 
(xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) { ++ rdmsrl(MSR_IA32_SPEC_CTRL, tmp); ++ this_cpu_write(spec_ctrl, tmp); ++ wrmsrl(MSR_IA32_SPEC_CTRL, 0); ++ } + } + + void xen_arch_resume(void) +diff --git a/block/blk-core.c b/block/blk-core.c +index f5f1a55703ae..50d77c90070d 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -651,21 +651,17 @@ EXPORT_SYMBOL(blk_alloc_queue); + int blk_queue_enter(struct request_queue *q, gfp_t gfp) + { + while (true) { +- int ret; +- + if (percpu_ref_tryget_live(&q->q_usage_counter)) + return 0; + + if (!gfpflags_allow_blocking(gfp)) + return -EBUSY; + +- ret = wait_event_interruptible(q->mq_freeze_wq, +- !atomic_read(&q->mq_freeze_depth) || +- blk_queue_dying(q)); ++ wait_event(q->mq_freeze_wq, ++ !atomic_read(&q->mq_freeze_depth) || ++ blk_queue_dying(q)); + if (blk_queue_dying(q)) + return -ENODEV; +- if (ret) +- return ret; + } + } + +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index 3db71afbba93..143edea1076f 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -518,14 +518,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev, + return sprintf(buf, "Not affected\n"); + } + ++ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Not affected\n"); ++} ++ + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); + static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); ++static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, + &dev_attr_spectre_v1.attr, + &dev_attr_spectre_v2.attr, ++ &dev_attr_spec_store_bypass.attr, + NULL + }; + +diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c +index 8c41c6fcb9ee..acf83569f86f 100644 +--- a/drivers/clk/tegra/clk-tegra30.c ++++ b/drivers/clk/tegra/clk-tegra30.c +@@ -333,11 +333,11 @@ static struct pdiv_map pllu_p[] = { + }; + + static struct tegra_clk_pll_freq_table pll_u_freq_table[] = { +- { 12000000, 480000000, 960, 12, 0, 12}, +- { 13000000, 480000000, 960, 13, 0, 12}, +- { 16800000, 480000000, 400, 7, 0, 5}, +- { 19200000, 480000000, 200, 4, 0, 3}, +- { 26000000, 480000000, 960, 26, 0, 12}, ++ { 12000000, 480000000, 960, 12, 2, 12 }, ++ { 13000000, 480000000, 960, 13, 2, 12 }, ++ { 16800000, 480000000, 400, 7, 2, 5 }, ++ { 19200000, 480000000, 200, 4, 2, 3 }, ++ { 26000000, 480000000, 960, 26, 2, 12 }, + { 0, 0, 0, 0, 0, 0 }, + }; + +@@ -1372,6 +1372,7 @@ static struct tegra_clk_init_table init_table[] __initdata = { + {TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0}, + {TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0}, + {TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0}, ++ { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 }, + {TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0}, /* This MUST be the last entry. */ + }; + +diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c +index c1aaf0336cf2..5cde3ad1665e 100644 +--- a/drivers/mtd/ubi/attach.c ++++ b/drivers/mtd/ubi/attach.c +@@ -174,6 +174,40 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec) + return 0; + } + ++/** ++ * add_fastmap - add a Fastmap related physical eraseblock. 
++ * @ai: attaching information ++ * @pnum: physical eraseblock number the VID header came from ++ * @vid_hdr: the volume identifier header ++ * @ec: erase counter of the physical eraseblock ++ * ++ * This function allocates a 'struct ubi_ainf_peb' object for a Fastamp ++ * physical eraseblock @pnum and adds it to the 'fastmap' list. ++ * Such blocks can be Fastmap super and data blocks from both the most ++ * recent Fastmap we're attaching from or from old Fastmaps which will ++ * be erased. ++ */ ++static int add_fastmap(struct ubi_attach_info *ai, int pnum, ++ struct ubi_vid_hdr *vid_hdr, int ec) ++{ ++ struct ubi_ainf_peb *aeb; ++ ++ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL); ++ if (!aeb) ++ return -ENOMEM; ++ ++ aeb->pnum = pnum; ++ aeb->vol_id = be32_to_cpu(vidh->vol_id); ++ aeb->sqnum = be64_to_cpu(vidh->sqnum); ++ aeb->ec = ec; ++ list_add(&aeb->u.list, &ai->fastmap); ++ ++ dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum, ++ aeb->vol_id, aeb->sqnum); ++ ++ return 0; ++} ++ + /** + * validate_vid_hdr - check volume identifier header. + * @ubi: UBI device description object +@@ -803,13 +837,26 @@ out_unlock: + return err; + } + ++static bool vol_ignored(int vol_id) ++{ ++ switch (vol_id) { ++ case UBI_LAYOUT_VOLUME_ID: ++ return true; ++ } ++ ++#ifdef CONFIG_MTD_UBI_FASTMAP ++ return ubi_is_fm_vol(vol_id); ++#else ++ return false; ++#endif ++} ++ + /** + * scan_peb - scan and process UBI headers of a PEB. + * @ubi: UBI device description object + * @ai: attaching information + * @pnum: the physical eraseblock number +- * @vid: The volume ID of the found volume will be stored in this pointer +- * @sqnum: The sqnum of the found volume will be stored in this pointer ++ * @fast: true if we're scanning for a Fastmap + * + * This function reads UBI headers of PEB @pnum, checks them, and adds + * information about this PEB to the corresponding list or RB-tree in the +@@ -817,9 +864,9 @@ out_unlock: + * successfully handled and a negative error code in case of failure. + */ + static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, +- int pnum, int *vid, unsigned long long *sqnum) ++ int pnum, bool fast) + { +- long long uninitialized_var(ec); ++ long long ec; + int err, bitflips = 0, vol_id = -1, ec_err = 0; + + dbg_bld("scan PEB %d", pnum); +@@ -935,6 +982,20 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, + */ + ai->maybe_bad_peb_count += 1; + case UBI_IO_BAD_HDR: ++ /* ++ * If we're facing a bad VID header we have to drop *all* ++ * Fastmap data structures we find. The most recent Fastmap ++ * could be bad and therefore there is a chance that we attach ++ * from an old one. On a fine MTD stack a PEB must not render ++ * bad all of a sudden, but the reality is different. ++ * So, let's be paranoid and help finding the root cause by ++ * falling back to scanning mode instead of attaching with a ++ * bad EBA table and cause data corruption which is hard to ++ * analyze. ++ */ ++ if (fast) ++ ai->force_full_scan = 1; ++ + if (ec_err) + /* + * Both headers are corrupted. 
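
The fast flag and force_full_scan plumbing above boil down to one policy: while scanning the first UBI_FM_MAX_START PEBs for a Fastmap, any bad VID header poisons the whole Fastmap attach and forces a full scan. A compact sketch of that decision; choose_attach_mode() and its state struct are illustrative stubs, not driver code:

    #include <stdio.h>

    enum { ATTACH_FASTMAP, ATTACH_FULL_SCAN };

    struct attach_state {
            int force_full_scan;    /* bad VID header seen during fast scan */
            int have_fastmap;       /* usable Fastmap superblock found */
    };

    static int choose_attach_mode(const struct attach_state *st)
    {
            if (st->force_full_scan || !st->have_fastmap)
                    return ATTACH_FULL_SCAN;        /* UBI_NO_FASTMAP path */
            return ATTACH_FASTMAP;
    }

    int main(void)
    {
            struct attach_state clean = { 0, 1 }, poisoned = { 1, 1 };

            printf("clean:    %s\n",
                   choose_attach_mode(&clean) == ATTACH_FASTMAP ?
                   "fastmap attach" : "full scan");
            printf("poisoned: %s\n",
                   choose_attach_mode(&poisoned) == ATTACH_FASTMAP ?
                   "fastmap attach" : "full scan");
            return 0;
    }
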
There is a possibility +@@ -991,21 +1052,15 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, + } + + vol_id = be32_to_cpu(vidh->vol_id); +- if (vid) +- *vid = vol_id; +- if (sqnum) +- *sqnum = be64_to_cpu(vidh->sqnum); +- if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) { ++ if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) { + int lnum = be32_to_cpu(vidh->lnum); + + /* Unsupported internal volume */ + switch (vidh->compat) { + case UBI_COMPAT_DELETE: +- if (vol_id != UBI_FM_SB_VOLUME_ID +- && vol_id != UBI_FM_DATA_VOLUME_ID) { +- ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it", +- vol_id, lnum); +- } ++ ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it", ++ vol_id, lnum); ++ + err = add_to_list(ai, pnum, vol_id, lnum, + ec, 1, &ai->erase); + if (err) +@@ -1037,7 +1092,12 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, + if (ec_err) + ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d", + pnum); +- err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips); ++ ++ if (ubi_is_fm_vol(vol_id)) ++ err = add_fastmap(ai, pnum, vidh, ec); ++ else ++ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips); ++ + if (err) + return err; + +@@ -1186,6 +1246,10 @@ static void destroy_ai(struct ubi_attach_info *ai) + list_del(&aeb->u.list); + kmem_cache_free(ai->aeb_slab_cache, aeb); + } ++ list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) { ++ list_del(&aeb->u.list); ++ kmem_cache_free(ai->aeb_slab_cache, aeb); ++ } + + /* Destroy the volume RB-tree */ + rb = ai->volumes.rb_node; +@@ -1245,7 +1309,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai, + cond_resched(); + + dbg_gen("process PEB %d", pnum); +- err = scan_peb(ubi, ai, pnum, NULL, NULL); ++ err = scan_peb(ubi, ai, pnum, false); + if (err < 0) + goto out_vidh; + } +@@ -1311,6 +1375,7 @@ static struct ubi_attach_info *alloc_ai(void) + INIT_LIST_HEAD(&ai->free); + INIT_LIST_HEAD(&ai->erase); + INIT_LIST_HEAD(&ai->alien); ++ INIT_LIST_HEAD(&ai->fastmap); + ai->volumes = RB_ROOT; + ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache", + sizeof(struct ubi_ainf_peb), +@@ -1337,52 +1402,58 @@ static struct ubi_attach_info *alloc_ai(void) + */ + static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai) + { +- int err, pnum, fm_anchor = -1; +- unsigned long long max_sqnum = 0; ++ int err, pnum; ++ struct ubi_attach_info *scan_ai; + + err = -ENOMEM; + ++ scan_ai = alloc_ai(); ++ if (!scan_ai) ++ goto out; ++ + ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); + if (!ech) +- goto out; ++ goto out_ai; + + vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); + if (!vidh) + goto out_ech; + + for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) { +- int vol_id = -1; +- unsigned long long sqnum = -1; + cond_resched(); + + dbg_gen("process PEB %d", pnum); +- err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum); ++ err = scan_peb(ubi, scan_ai, pnum, true); + if (err < 0) + goto out_vidh; +- +- if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) { +- max_sqnum = sqnum; +- fm_anchor = pnum; +- } + } + + ubi_free_vid_hdr(ubi, vidh); + kfree(ech); + +- if (fm_anchor < 0) +- return UBI_NO_FASTMAP; ++ if (scan_ai->force_full_scan) ++ err = UBI_NO_FASTMAP; ++ else ++ err = ubi_scan_fastmap(ubi, *ai, scan_ai); + +- destroy_ai(*ai); +- *ai = alloc_ai(); +- if (!*ai) +- return -ENOMEM; ++ if (err) { ++ /* ++ * Didn't attach via fastmap, do a full scan but reuse what ++ * we've aready 
scanned. ++ */ ++ destroy_ai(*ai); ++ *ai = scan_ai; ++ } else ++ destroy_ai(scan_ai); + +- return ubi_scan_fastmap(ubi, *ai, fm_anchor); ++ return err; + + out_vidh: + ubi_free_vid_hdr(ubi, vidh); + out_ech: + kfree(ech); ++out_ai: ++ destroy_ai(scan_ai); + out: + return err; + } +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c +index c4a25c858c07..03cf0553ec1b 100644 +--- a/drivers/mtd/ubi/eba.c ++++ b/drivers/mtd/ubi/eba.c +@@ -1178,6 +1178,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, + struct ubi_volume *vol; + uint32_t crc; + ++ ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem)); ++ + vol_id = be32_to_cpu(vid_hdr->vol_id); + lnum = be32_to_cpu(vid_hdr->lnum); + +@@ -1346,9 +1348,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, + } + + ubi_assert(vol->eba_tbl[lnum] == from); +- down_read(&ubi->fm_eba_sem); + vol->eba_tbl[lnum] = to; +- up_read(&ubi->fm_eba_sem); + + out_unlock_buf: + mutex_unlock(&ubi->buf_mutex); +diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c +index ed62f1efe6eb..69dd21679a30 100644 +--- a/drivers/mtd/ubi/fastmap-wl.c ++++ b/drivers/mtd/ubi/fastmap-wl.c +@@ -262,6 +262,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) + struct ubi_fm_pool *pool = &ubi->fm_wl_pool; + int pnum; + ++ ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem)); ++ + if (pool->used == pool->size) { + /* We cannot update the fastmap here because this + * function is called in atomic context. +@@ -303,7 +305,7 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi) + + wrk->anchor = 1; + wrk->func = &wear_leveling_worker; +- schedule_ubi_work(ubi, wrk); ++ __schedule_ubi_work(ubi, wrk); + return 0; + } + +@@ -344,7 +346,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e, + spin_unlock(&ubi->wl_lock); + + vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID; +- return schedule_erase(ubi, e, vol_id, lnum, torture); ++ return schedule_erase(ubi, e, vol_id, lnum, torture, true); + } + + /** +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c +index bba7dd1b5ebf..72e89b352034 100644 +--- a/drivers/mtd/ubi/fastmap.c ++++ b/drivers/mtd/ubi/fastmap.c +@@ -326,6 +326,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai, + aeb->pnum = new_aeb->pnum; + aeb->copy_flag = new_vh->copy_flag; + aeb->scrub = new_aeb->scrub; ++ aeb->sqnum = new_aeb->sqnum; + kmem_cache_free(ai->aeb_slab_cache, new_aeb); + + /* new_aeb is older */ +@@ -850,28 +851,58 @@ fail: + return ret; + } + ++/** ++ * find_fm_anchor - find the most recent Fastmap superblock (anchor) ++ * @ai: UBI attach info to be filled ++ */ ++static int find_fm_anchor(struct ubi_attach_info *ai) ++{ ++ int ret = -1; ++ struct ubi_ainf_peb *aeb; ++ unsigned long long max_sqnum = 0; ++ ++ list_for_each_entry(aeb, &ai->fastmap, u.list) { ++ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) { ++ max_sqnum = aeb->sqnum; ++ ret = aeb->pnum; ++ } ++ } ++ ++ return ret; ++} ++ + /** + * ubi_scan_fastmap - scan the fastmap. + * @ubi: UBI device object + * @ai: UBI attach info to be filled +- * @fm_anchor: The fastmap starts at this PEB ++ * @scan_ai: UBI attach info from the first 64 PEBs, ++ * used to find the most recent Fastmap data structure + * + * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found, + * UBI_BAD_FASTMAP if one was found but is not usable. + * < 0 indicates an internal error. 
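
find_fm_anchor() above encodes the rule that the newest Fastmap superblock wins: among fastmap-list PEBs belonging to the Fastmap superblock volume, pick the one with the highest sequence number. The same rule over a plain array, as a runnable sketch with placeholder volume IDs:

    #include <stdio.h>

    struct fm_peb { int pnum; int vol_id; unsigned long long sqnum; };

    static int find_anchor(const struct fm_peb *p, int n, int sb_vol_id)
    {
            unsigned long long max_sqnum = 0;
            int i, ret = -1;

            for (i = 0; i < n; i++) {
                    if (p[i].vol_id == sb_vol_id && p[i].sqnum > max_sqnum) {
                            max_sqnum = p[i].sqnum;
                            ret = p[i].pnum;
                    }
            }
            return ret;             /* -1 means UBI_NO_FASTMAP */
    }

    int main(void)
    {
            const struct fm_peb pebs[] = {
                    { 3, 1, 10 },
                    { 7, 1, 42 },
                    { 9, 2, 99 },   /* data volume entry, ignored */
            };
            printf("anchor PEB: %d\n", find_anchor(pebs, 3, 1)); /* -> 7 */
            return 0;
    }
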
+ */ + int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, +- int fm_anchor) ++ struct ubi_attach_info *scan_ai) + { + struct ubi_fm_sb *fmsb, *fmsb2; + struct ubi_vid_hdr *vh; + struct ubi_ec_hdr *ech; + struct ubi_fastmap_layout *fm; +- int i, used_blocks, pnum, ret = 0; ++ struct ubi_ainf_peb *tmp_aeb, *aeb; ++ int i, used_blocks, pnum, fm_anchor, ret = 0; + size_t fm_size; + __be32 crc, tmp_crc; + unsigned long long sqnum = 0; + ++ fm_anchor = find_fm_anchor(scan_ai); ++ if (fm_anchor < 0) ++ return UBI_NO_FASTMAP; ++ ++ /* Move all (possible) fastmap blocks into our new attach structure. */ ++ list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list) ++ list_move_tail(&aeb->u.list, &ai->fastmap); ++ + down_write(&ubi->fm_protect); + memset(ubi->fm_buf, 0, ubi->fm_size); + +@@ -1484,22 +1515,30 @@ int ubi_update_fastmap(struct ubi_device *ubi) + struct ubi_wl_entry *tmp_e; + + down_write(&ubi->fm_protect); ++ down_write(&ubi->work_sem); ++ down_write(&ubi->fm_eba_sem); + + ubi_refill_pools(ubi); + + if (ubi->ro_mode || ubi->fm_disabled) { ++ up_write(&ubi->fm_eba_sem); ++ up_write(&ubi->work_sem); + up_write(&ubi->fm_protect); + return 0; + } + + ret = ubi_ensure_anchor_pebs(ubi); + if (ret) { ++ up_write(&ubi->fm_eba_sem); ++ up_write(&ubi->work_sem); + up_write(&ubi->fm_protect); + return ret; + } + + new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); + if (!new_fm) { ++ up_write(&ubi->fm_eba_sem); ++ up_write(&ubi->work_sem); + up_write(&ubi->fm_protect); + return -ENOMEM; + } +@@ -1608,16 +1647,14 @@ int ubi_update_fastmap(struct ubi_device *ubi) + new_fm->e[0] = tmp_e; + } + +- down_write(&ubi->work_sem); +- down_write(&ubi->fm_eba_sem); + ret = ubi_write_fastmap(ubi, new_fm); +- up_write(&ubi->fm_eba_sem); +- up_write(&ubi->work_sem); + + if (ret) + goto err; + + out_unlock: ++ up_write(&ubi->fm_eba_sem); ++ up_write(&ubi->work_sem); + up_write(&ubi->fm_protect); + kfree(old_fm); + return ret; +diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h +index de1ea2e4c37d..05d9ec66437c 100644 +--- a/drivers/mtd/ubi/ubi.h ++++ b/drivers/mtd/ubi/ubi.h +@@ -699,6 +699,8 @@ struct ubi_ainf_volume { + * @erase: list of physical eraseblocks which have to be erased + * @alien: list of physical eraseblocks which should not be used by UBI (e.g., + * those belonging to "preserve"-compatible internal volumes) ++ * @fastmap: list of physical eraseblocks which relate to fastmap (e.g., ++ * eraseblocks of the current and not yet erased old fastmap blocks) + * @corr_peb_count: count of PEBs in the @corr list + * @empty_peb_count: count of PEBs which are presumably empty (contain only + * 0xFF bytes) +@@ -709,6 +711,8 @@ struct ubi_ainf_volume { + * @vols_found: number of volumes found + * @highest_vol_id: highest volume ID + * @is_empty: flag indicating whether the MTD device is empty or not ++ * @force_full_scan: flag indicating whether we need to do a full scan and drop ++ all existing Fastmap data structures + * @min_ec: lowest erase counter value + * @max_ec: highest erase counter value + * @max_sqnum: highest sequence number value +@@ -727,6 +731,7 @@ struct ubi_attach_info { + struct list_head free; + struct list_head erase; + struct list_head alien; ++ struct list_head fastmap; + int corr_peb_count; + int empty_peb_count; + int alien_peb_count; +@@ -735,6 +740,7 @@ struct ubi_attach_info { + int vols_found; + int highest_vol_id; + int is_empty; ++ int force_full_scan; + int min_ec; + int max_ec; + unsigned long long max_sqnum; +@@ -907,7 +913,7 @@ int 
ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, + size_t ubi_calc_fm_size(struct ubi_device *ubi); + int ubi_update_fastmap(struct ubi_device *ubi); + int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, +- int fm_anchor); ++ struct ubi_attach_info *scan_ai); + #else + static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; } + #endif +@@ -1101,4 +1107,42 @@ static inline int idx2vol_id(const struct ubi_device *ubi, int idx) + return idx; + } + ++/** ++ * ubi_is_fm_vol - check whether a volume ID is a Fastmap volume. ++ * @vol_id: volume ID ++ */ ++static inline bool ubi_is_fm_vol(int vol_id) ++{ ++ switch (vol_id) { ++ case UBI_FM_SB_VOLUME_ID: ++ case UBI_FM_DATA_VOLUME_ID: ++ return true; ++ } ++ ++ return false; ++} ++ ++/** ++ * ubi_find_fm_block - check whether a PEB is part of the current Fastmap. ++ * @ubi: UBI device description object ++ * @pnum: physical eraseblock to look for ++ * ++ * This function returns a wear leveling object if @pnum relates to the current ++ * fastmap, @NULL otherwise. ++ */ ++static inline struct ubi_wl_entry *ubi_find_fm_block(const struct ubi_device *ubi, ++ int pnum) ++{ ++ int i; ++ ++ if (ubi->fm) { ++ for (i = 0; i < ubi->fm->used_blocks; i++) { ++ if (ubi->fm->e[i]->pnum == pnum) ++ return ubi->fm->e[i]; ++ } ++ } ++ ++ return NULL; ++} ++ + #endif /* !__UBI_UBI_H__ */ +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c +index ca9746f41ff1..b3c1b8106a68 100644 +--- a/drivers/mtd/ubi/wl.c ++++ b/drivers/mtd/ubi/wl.c +@@ -580,7 +580,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, + * failure. + */ + static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, +- int vol_id, int lnum, int torture) ++ int vol_id, int lnum, int torture, bool nested) + { + struct ubi_work *wl_wrk; + +@@ -599,7 +599,10 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, + wl_wrk->lnum = lnum; + wl_wrk->torture = torture; + +- schedule_ubi_work(ubi, wl_wrk); ++ if (nested) ++ __schedule_ubi_work(ubi, wl_wrk); ++ else ++ schedule_ubi_work(ubi, wl_wrk); + return 0; + } + +@@ -658,6 +661,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, + if (!vid_hdr) + return -ENOMEM; + ++ down_read(&ubi->fm_eba_sem); + mutex_lock(&ubi->move_mutex); + spin_lock(&ubi->wl_lock); + ubi_assert(!ubi->move_from && !ubi->move_to); +@@ -884,6 +888,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, + + dbg_wl("done"); + mutex_unlock(&ubi->move_mutex); ++ up_read(&ubi->fm_eba_sem); + return 0; + + /* +@@ -925,6 +930,7 @@ out_not_moved: + } + + mutex_unlock(&ubi->move_mutex); ++ up_read(&ubi->fm_eba_sem); + return 0; + + out_error: +@@ -946,6 +952,7 @@ out_error: + out_ro: + ubi_ro_mode(ubi); + mutex_unlock(&ubi->move_mutex); ++ up_read(&ubi->fm_eba_sem); + ubi_assert(err != 0); + return err < 0 ? 
err : -EIO; + +@@ -953,6 +960,7 @@ out_cancel: + ubi->wl_scheduled = 0; + spin_unlock(&ubi->wl_lock); + mutex_unlock(&ubi->move_mutex); ++ up_read(&ubi->fm_eba_sem); + ubi_free_vid_hdr(ubi, vid_hdr); + return 0; + } +@@ -1075,7 +1083,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) + int err1; + + /* Re-schedule the LEB for erasure */ +- err1 = schedule_erase(ubi, e, vol_id, lnum, 0); ++ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false); + if (err1) { + wl_entry_destroy(ubi, e); + err = err1; +@@ -1256,7 +1264,7 @@ retry: + } + spin_unlock(&ubi->wl_lock); + +- err = schedule_erase(ubi, e, vol_id, lnum, torture); ++ err = schedule_erase(ubi, e, vol_id, lnum, torture, false); + if (err) { + spin_lock(&ubi->wl_lock); + wl_tree_add(e, &ubi->used); +@@ -1500,6 +1508,46 @@ static void shutdown_work(struct ubi_device *ubi) + } + } + ++/** ++ * erase_aeb - erase a PEB given in UBI attach info PEB ++ * @ubi: UBI device description object ++ * @aeb: UBI attach info PEB ++ * @sync: If true, erase synchronously. Otherwise schedule for erasure ++ */ ++static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync) ++{ ++ struct ubi_wl_entry *e; ++ int err; ++ ++ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); ++ if (!e) ++ return -ENOMEM; ++ ++ e->pnum = aeb->pnum; ++ e->ec = aeb->ec; ++ ubi->lookuptbl[e->pnum] = e; ++ ++ if (sync) { ++ err = sync_erase(ubi, e, false); ++ if (err) ++ goto out_free; ++ ++ wl_tree_add(e, &ubi->free); ++ ubi->free_count++; ++ } else { ++ err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false); ++ if (err) ++ goto out_free; ++ } ++ ++ return 0; ++ ++out_free: ++ wl_entry_destroy(ubi, e); ++ ++ return err; ++} ++ + /** + * ubi_wl_init - initialize the WL sub-system using attaching information. + * @ubi: UBI device description object +@@ -1537,17 +1585,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) + list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { + cond_resched(); + +- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); +- if (!e) +- goto out_free; +- +- e->pnum = aeb->pnum; +- e->ec = aeb->ec; +- ubi->lookuptbl[e->pnum] = e; +- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { +- wl_entry_destroy(ubi, e); ++ err = erase_aeb(ubi, aeb, false); ++ if (err) + goto out_free; +- } + + found_pebs++; + } +@@ -1598,19 +1638,49 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) + } + } + +- dbg_wl("found %i PEBs", found_pebs); ++ list_for_each_entry(aeb, &ai->fastmap, u.list) { ++ cond_resched(); + +- if (ubi->fm) { +- ubi_assert(ubi->good_peb_count == +- found_pebs + ubi->fm->used_blocks); ++ e = ubi_find_fm_block(ubi, aeb->pnum); + +- for (i = 0; i < ubi->fm->used_blocks; i++) { +- e = ubi->fm->e[i]; ++ if (e) { ++ ubi_assert(!ubi->lookuptbl[e->pnum]); + ubi->lookuptbl[e->pnum] = e; ++ } else { ++ bool sync = false; ++ ++ /* ++ * Usually old Fastmap PEBs are scheduled for erasure ++ * and we don't have to care about them but if we face ++ * an power cut before scheduling them we need to ++ * take care of them here. ++ */ ++ if (ubi->lookuptbl[aeb->pnum]) ++ continue; ++ ++ /* ++ * The fastmap update code might not find a free PEB for ++ * writing the fastmap anchor to and then reuses the ++ * current fastmap anchor PEB. 
When this PEB gets erased ++ * and a power cut happens before it is written again we ++ * must make sure that the fastmap attach code doesn't ++ * find any outdated fastmap anchors, hence we erase the ++ * outdated fastmap anchor PEBs synchronously here. ++ */ ++ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID) ++ sync = true; ++ ++ err = erase_aeb(ubi, aeb, sync); ++ if (err) ++ goto out_free; + } ++ ++ found_pebs++; + } +- else +- ubi_assert(ubi->good_peb_count == found_pebs); ++ ++ dbg_wl("found %i PEBs", found_pebs); ++ ++ ubi_assert(ubi->good_peb_count == found_pebs); + + reserved_pebs = WL_RESERVED_PEBS; + ubi_fastmap_init(ubi, &reserved_pebs); +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index 1325825d5225..ce3a56bea6e6 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -9278,6 +9278,15 @@ static int tg3_chip_reset(struct tg3 *tp) + + tg3_restore_clk(tp); + ++ /* Increase the core clock speed to fix tx timeout issue for 5762 ++ * with 100Mbps link speed. ++ */ ++ if (tg3_asic_rev(tp) == ASIC_REV_5762) { ++ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); ++ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | ++ TG3_CPMU_MAC_ORIDE_ENABLE); ++ } ++ + /* Reprobe ASF enable state. */ + tg3_flag_clear(tp, ENABLE_ASF); + tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index 8179727d3423..1f2f25a71d18 100644 +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -1265,11 +1265,8 @@ static int gen10g_resume(struct phy_device *phydev) + + static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) + { +- /* The default values for phydev->supported are provided by the PHY +- * driver "features" member, we want to reset to sane defaults first +- * before supporting higher speeds. +- */ +- phydev->supported &= PHY_DEFAULT_FEATURES; ++ phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES | ++ PHY_10BT_FEATURES); + + switch (max_speed) { + default: +diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c +index da7bae991552..d877ff124365 100644 +--- a/drivers/ptp/ptp_chardev.c ++++ b/drivers/ptp/ptp_chardev.c +@@ -88,6 +88,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin, + case PTP_PF_PHYSYNC: + if (chan != 0) + return -EINVAL; ++ break; + default: + return -EINVAL; + } +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index f2e9f59c90d6..2d837b6bd495 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -887,6 +887,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) + spin_unlock_irqrestore(&xhci->lock, flags); + } + ++static bool xhci_pending_portevent(struct xhci_hcd *xhci) ++{ ++ __le32 __iomem **port_array; ++ int port_index; ++ u32 status; ++ u32 portsc; ++ ++ status = readl(&xhci->op_regs->status); ++ if (status & STS_EINT) ++ return true; ++ /* ++ * Checking STS_EINT is not enough as there is a lag between a change ++ * bit being set and the Port Status Change Event that it generated ++ * being written to the Event Ring. See note in xhci 1.1 section 4.19.2. 
++ */ ++ ++ port_index = xhci->num_usb2_ports; ++ port_array = xhci->usb2_ports; ++ while (port_index--) { ++ portsc = readl(port_array[port_index]); ++ if (portsc & PORT_CHANGE_MASK || ++ (portsc & PORT_PLS_MASK) == XDEV_RESUME) ++ return true; ++ } ++ port_index = xhci->num_usb3_ports; ++ port_array = xhci->usb3_ports; ++ while (port_index--) { ++ portsc = readl(port_array[port_index]); ++ if (portsc & PORT_CHANGE_MASK || ++ (portsc & PORT_PLS_MASK) == XDEV_RESUME) ++ return true; ++ } ++ return false; ++} ++ + /* + * Stop HC (not bus-specific) + * +@@ -983,7 +1018,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend); + */ + int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + { +- u32 command, temp = 0, status; ++ u32 command, temp = 0; + struct usb_hcd *hcd = xhci_to_hcd(xhci); + struct usb_hcd *secondary_hcd; + int retval = 0; +@@ -1105,8 +1140,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + done: + if (retval == 0) { + /* Resume root hubs only when have pending events. */ +- status = readl(&xhci->op_regs->status); +- if (status & STS_EINT) { ++ if (xhci_pending_portevent(xhci)) { + usb_hcd_resume_root_hub(xhci->shared_hcd); + usb_hcd_resume_root_hub(hcd); + } +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 1715705acc59..84d8871755b7 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -382,6 +382,10 @@ struct xhci_op_regs { + #define PORT_PLC (1 << 22) + /* port configure error change - port failed to configure its link partner */ + #define PORT_CEC (1 << 23) ++#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \ ++ PORT_RC | PORT_PLC | PORT_CEC) ++ ++ + /* Cold Attach Status - xHC can set this bit to report device attached during + * Sx state. Warm port reset should be perfomed to clear this bit and move port + * to connected state. 
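
As the comment in xhci_pending_portevent() notes, STS_EINT alone can miss a change bit that is already set but whose Port Status Change Event has not yet reached the event ring, so resume must also inspect every PORTSC register. The per-port predicate, lifted out as a pure function; PORT_CHANGE_MASK follows the xhci.h hunk above, while the individual bit positions and XDEV_RESUME are assumed from the same header:

    #include <stdint.h>
    #include <stdio.h>

    /* CSC..CEC are PORTSC bits 17..23 in this header generation. */
    #define PORT_CHANGE_MASK  ((1 << 17) | (1 << 18) | (1 << 19) | \
                               (1 << 20) | (1 << 21) | (1 << 22) | (1 << 23))
    #define PORT_PLS_MASK     (0xf << 5)
    #define XDEV_RESUME       (0xf << 5)

    static int port_needs_resume_attention(uint32_t portsc)
    {
            return (portsc & PORT_CHANGE_MASK) ||
                   (portsc & PORT_PLS_MASK) == XDEV_RESUME;
    }

    int main(void)
    {
            printf("%d\n", port_needs_resume_attention(1 << 17)); /* CSC -> 1 */
            printf("%d\n", port_needs_resume_attention(0));       /* idle -> 0 */
            return 0;
    }
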
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c +index cf644d52c0cf..c81cfb79a339 100644 +--- a/fs/fat/inode.c ++++ b/fs/fat/inode.c +@@ -613,13 +613,21 @@ static void fat_set_state(struct super_block *sb, + brelse(bh); + } + ++static void fat_reset_iocharset(struct fat_mount_options *opts) ++{ ++ if (opts->iocharset != fat_default_iocharset) { ++ /* Note: opts->iocharset can be NULL here */ ++ kfree(opts->iocharset); ++ opts->iocharset = fat_default_iocharset; ++ } ++} ++ + static void delayed_free(struct rcu_head *p) + { + struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu); + unload_nls(sbi->nls_disk); + unload_nls(sbi->nls_io); +- if (sbi->options.iocharset != fat_default_iocharset) +- kfree(sbi->options.iocharset); ++ fat_reset_iocharset(&sbi->options); + kfree(sbi); + } + +@@ -1034,7 +1042,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, + opts->fs_fmask = opts->fs_dmask = current_umask(); + opts->allow_utime = -1; + opts->codepage = fat_default_codepage; +- opts->iocharset = fat_default_iocharset; ++ fat_reset_iocharset(opts); + if (is_vfat) { + opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95; + opts->rodir = 0; +@@ -1184,8 +1192,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, + + /* vfat specific */ + case Opt_charset: +- if (opts->iocharset != fat_default_iocharset) +- kfree(opts->iocharset); ++ fat_reset_iocharset(opts); + iocharset = match_strdup(&args[0]); + if (!iocharset) + return -ENOMEM; +@@ -1776,8 +1783,7 @@ out_fail: + iput(fat_inode); + unload_nls(sbi->nls_io); + unload_nls(sbi->nls_disk); +- if (sbi->options.iocharset != fat_default_iocharset) +- kfree(sbi->options.iocharset); ++ fat_reset_iocharset(&sbi->options); + sb->s_fs_info = NULL; + kfree(sbi); + return error; +diff --git a/fs/proc/array.c b/fs/proc/array.c +index b6c00ce0e29e..cb71cbae606d 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -79,6 +79,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -332,6 +333,31 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p) + #ifdef CONFIG_SECCOMP + seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode); + #endif ++ seq_printf(m, "\nSpeculation_Store_Bypass:\t"); ++ switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) { ++ case -EINVAL: ++ seq_printf(m, "unknown"); ++ break; ++ case PR_SPEC_NOT_AFFECTED: ++ seq_printf(m, "not vulnerable"); ++ break; ++ case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE: ++ seq_printf(m, "thread force mitigated"); ++ break; ++ case PR_SPEC_PRCTL | PR_SPEC_DISABLE: ++ seq_printf(m, "thread mitigated"); ++ break; ++ case PR_SPEC_PRCTL | PR_SPEC_ENABLE: ++ seq_printf(m, "thread vulnerable"); ++ break; ++ case PR_SPEC_DISABLE: ++ seq_printf(m, "globally mitigated"); ++ break; ++ default: ++ seq_printf(m, "vulnerable"); ++ break; ++ } ++ seq_putc(m, '\n'); + } + + static inline void task_context_switch_counts(struct seq_file *m, +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index 7e04bcd9af8e..2f9d12022100 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -46,6 +46,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf); + extern ssize_t cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_spec_store_bypass(struct device *dev, ++ struct device_attribute *attr, char *buf); + + extern __printf(4, 5) + struct device *cpu_device_create(struct device *parent, 
void *drvdata, +diff --git a/include/linux/nospec.h b/include/linux/nospec.h +index e791ebc65c9c..0c5ef54fd416 100644 +--- a/include/linux/nospec.h ++++ b/include/linux/nospec.h +@@ -7,6 +7,8 @@ + #define _LINUX_NOSPEC_H + #include + ++struct task_struct; ++ + /** + * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise + * @index: array element index +@@ -55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, + \ + (typeof(_i)) (_i & _mask); \ + }) ++ ++/* Speculation control prctl */ ++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which); ++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, ++ unsigned long ctrl); ++/* Speculation control for seccomp enforced mitigation */ ++void arch_seccomp_spec_mitigate(struct task_struct *task); ++ + #endif /* _LINUX_NOSPEC_H */ +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 90bea398e5e0..725498cc5d30 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2167,6 +2167,8 @@ static inline void memalloc_noio_restore(unsigned int flags) + #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ + #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ + #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ ++#define PFA_SPEC_SSB_DISABLE 4 /* Speculative Store Bypass disabled */ ++#define PFA_SPEC_SSB_FORCE_DISABLE 5 /* Speculative Store Bypass force disabled*/ + + + #define TASK_PFA_TEST(name, func) \ +@@ -2190,6 +2192,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab) + TASK_PFA_SET(SPREAD_SLAB, spread_slab) + TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) + ++TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) ++TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) ++TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) ++ ++TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) ++TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) ++ + /* + * task->jobctl flags + */ +diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h +index 2296e6b2f690..5a53d34bba26 100644 +--- a/include/linux/seccomp.h ++++ b/include/linux/seccomp.h +@@ -3,7 +3,8 @@ + + #include + +-#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC) ++#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ ++ SECCOMP_FILTER_FLAG_SPEC_ALLOW) + + #ifdef CONFIG_SECCOMP + +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index a6da214d0584..c28bd8be290a 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -514,6 +514,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1, + * @hash: the packet hash + * @queue_mapping: Queue mapping for multiqueue devices + * @xmit_more: More SKBs are pending for this queue ++ * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves + * @ndisc_nodetype: router type (from link layer) + * @ooo_okay: allow the mapping of a socket to a queue to be changed + * @l4_hash: indicate hash is a canonical 4-tuple hash over transport +@@ -594,8 +595,8 @@ struct sk_buff { + fclone:2, + peeked:1, + head_frag:1, +- xmit_more:1; +- /* one bit hole */ ++ xmit_more:1, ++ pfmemalloc:1; + kmemcheck_bitfield_end(flags1); + + /* fields enclosed in headers_start/headers_end are copied +@@ -615,19 +616,18 @@ struct sk_buff { + + __u8 __pkt_type_offset[0]; + __u8 pkt_type:3; +- __u8 pfmemalloc:1; + __u8 ignore_df:1; + __u8 nfctinfo:3; +- + __u8 nf_trace:1; ++ + __u8 ip_summed:2; + __u8 ooo_okay:1; + __u8 l4_hash:1; + __u8 sw_hash:1; + __u8 wifi_acked_valid:1; + 
__u8 wifi_acked:1; +- + __u8 no_fcs:1; ++ + /* Indicates the inner headers are valid in the skbuff. */ + __u8 encapsulation:1; + __u8 encap_hdr_csum:1; +@@ -635,11 +635,11 @@ struct sk_buff { + __u8 csum_complete_sw:1; + __u8 csum_level:2; + __u8 csum_bad:1; +- + #ifdef CONFIG_IPV6_NDISC_NODETYPE + __u8 ndisc_nodetype:2; + #endif + __u8 ipvs_property:1; ++ + __u8 inner_protocol_type:1; + __u8 remcsum_offload:1; + /* 3 or 5 bit hole */ +diff --git a/include/net/ipv6.h b/include/net/ipv6.h +index 84f0d0602433..0e01d570fa22 100644 +--- a/include/net/ipv6.h ++++ b/include/net/ipv6.h +@@ -762,7 +762,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, + * to minimize possbility that any useful information to an + * attacker is leaked. Only lower 20 bits are relevant. + */ +- rol32(hash, 16); ++ hash = rol32(hash, 16); + + flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; + +diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h +index a8d0759a9e40..64776b72e1eb 100644 +--- a/include/uapi/linux/prctl.h ++++ b/include/uapi/linux/prctl.h +@@ -197,4 +197,16 @@ struct prctl_mm_map { + # define PR_CAP_AMBIENT_LOWER 3 + # define PR_CAP_AMBIENT_CLEAR_ALL 4 + ++/* Per task speculation control */ ++#define PR_GET_SPECULATION_CTRL 52 ++#define PR_SET_SPECULATION_CTRL 53 ++/* Speculation control variants */ ++# define PR_SPEC_STORE_BYPASS 0 ++/* Return and control values for PR_SET/GET_SPECULATION_CTRL */ ++# define PR_SPEC_NOT_AFFECTED 0 ++# define PR_SPEC_PRCTL (1UL << 0) ++# define PR_SPEC_ENABLE (1UL << 1) ++# define PR_SPEC_DISABLE (1UL << 2) ++# define PR_SPEC_FORCE_DISABLE (1UL << 3) ++ + #endif /* _LINUX_PRCTL_H */ +diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h +index 0f238a43ff1e..e4acb615792b 100644 +--- a/include/uapi/linux/seccomp.h ++++ b/include/uapi/linux/seccomp.h +@@ -15,7 +15,9 @@ + #define SECCOMP_SET_MODE_FILTER 1 + + /* Valid flags for SECCOMP_SET_MODE_FILTER */ +-#define SECCOMP_FILTER_FLAG_TSYNC 1 ++#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) ++/* In v4.14+ SECCOMP_FILTER_FLAG_LOG is (1UL << 1) */ ++#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) + + /* + * All BPF programs must return a 32-bit value. +diff --git a/kernel/seccomp.c b/kernel/seccomp.c +index efd384f3f852..9a9203b15cde 100644 +--- a/kernel/seccomp.c ++++ b/kernel/seccomp.c +@@ -16,6 +16,8 @@ + #include + #include + #include ++#include ++#include + #include + #include + #include +@@ -214,8 +216,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode) + return true; + } + ++void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { } ++ + static inline void seccomp_assign_mode(struct task_struct *task, +- unsigned long seccomp_mode) ++ unsigned long seccomp_mode, ++ unsigned long flags) + { + assert_spin_locked(&task->sighand->siglock); + +@@ -225,6 +230,9 @@ static inline void seccomp_assign_mode(struct task_struct *task, + * filter) is set. + */ + smp_mb__before_atomic(); ++ /* Assume default seccomp processes want spec flaw mitigation. */ ++ if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0) ++ arch_seccomp_spec_mitigate(task); + set_tsk_thread_flag(task, TIF_SECCOMP); + } + +@@ -292,7 +300,7 @@ static inline pid_t seccomp_can_sync_threads(void) + * without dropping the locks. 
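Taken together with the uapi hunks above, the new controls are driven from
userspace roughly as below -- a minimal sketch, assuming a kernel with this
patch applied; the fallback defines mirror the uapi additions and the
allow-all BPF filter is purely illustrative.

#include <stddef.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#define PR_SPEC_STORE_BYPASS	0
#define PR_SPEC_PRCTL		(1UL << 0)
#define PR_SPEC_DISABLE		(1UL << 2)
#endif
#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW	(1UL << 2)
#endif

int main(void)
{
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			   0, 0, 0);

	/* Opt this task into the SSB mitigation, if per-task control
	 * is available. */
	if (state >= 0 && (state & PR_SPEC_PRCTL))
		prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		      PR_SPEC_DISABLE, 0, 0);

	/* Installing a seccomp filter normally force-enables the
	 * mitigation via arch_seccomp_spec_mitigate(); passing
	 * SECCOMP_FILTER_FLAG_SPEC_ALLOW opts out of that default. */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = { .len = 1, .filter = insns };

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;
	return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog);
}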
+ * + */ +-static inline void seccomp_sync_threads(void) ++static inline void seccomp_sync_threads(unsigned long flags) + { + struct task_struct *thread, *caller; + +@@ -333,7 +341,8 @@ static inline void seccomp_sync_threads(void) + * allow one thread to transition the other. + */ + if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) +- seccomp_assign_mode(thread, SECCOMP_MODE_FILTER); ++ seccomp_assign_mode(thread, SECCOMP_MODE_FILTER, ++ flags); + } + } + +@@ -452,7 +461,7 @@ static long seccomp_attach_filter(unsigned int flags, + + /* Now that the new filter is in place, synchronize to all threads. */ + if (flags & SECCOMP_FILTER_FLAG_TSYNC) +- seccomp_sync_threads(); ++ seccomp_sync_threads(flags); + + return 0; + } +@@ -747,7 +756,7 @@ static long seccomp_set_mode_strict(void) + #ifdef TIF_NOTSC + disable_TSC(); + #endif +- seccomp_assign_mode(current, seccomp_mode); ++ seccomp_assign_mode(current, seccomp_mode, 0); + ret = 0; + + out: +@@ -805,7 +814,7 @@ static long seccomp_set_mode_filter(unsigned int flags, + /* Do not free the successfully attached filter. */ + prepared = NULL; + +- seccomp_assign_mode(current, seccomp_mode); ++ seccomp_assign_mode(current, seccomp_mode, flags); + out: + spin_unlock_irq(¤t->sighand->siglock); + if (flags & SECCOMP_FILTER_FLAG_TSYNC) +diff --git a/kernel/sys.c b/kernel/sys.c +index 6624919ef0e7..f718742e55e6 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -2075,6 +2075,17 @@ static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) + } + #endif + ++int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which) ++{ ++ return -EINVAL; ++} ++ ++int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which, ++ unsigned long ctrl) ++{ ++ return -EINVAL; ++} ++ + SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, + unsigned long, arg4, unsigned long, arg5) + { +@@ -2269,6 +2280,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, + case PR_GET_FP_MODE: + error = GET_FP_MODE(me); + break; ++ case PR_GET_SPECULATION_CTRL: ++ if (arg3 || arg4 || arg5) ++ return -EINVAL; ++ error = arch_prctl_spec_ctrl_get(me, arg2); ++ break; ++ case PR_SET_SPECULATION_CTRL: ++ if (arg4 || arg5) ++ return -EINVAL; ++ error = arch_prctl_spec_ctrl_set(me, arg2, arg3); ++ break; + default: + error = -EINVAL; + break; +diff --git a/lib/rhashtable.c b/lib/rhashtable.c +index 51282f579760..37ea94b636a3 100644 +--- a/lib/rhashtable.c ++++ b/lib/rhashtable.c +@@ -670,8 +670,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop); + + static size_t rounded_hashtable_size(const struct rhashtable_params *params) + { +- return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), +- (unsigned long)params->min_size); ++ size_t retsize; ++ ++ if (params->nelem_hint) ++ retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3), ++ (unsigned long)params->min_size); ++ else ++ retsize = max(HASH_DEFAULT_SIZE, ++ (unsigned long)params->min_size); ++ ++ return retsize; + } + + static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) +@@ -728,8 +736,6 @@ int rhashtable_init(struct rhashtable *ht, + struct bucket_table *tbl; + size_t size; + +- size = HASH_DEFAULT_SIZE; +- + if ((!params->key_len && !params->obj_hashfn) || + (params->obj_hashfn && !params->obj_cmpfn)) + return -EINVAL; +@@ -756,8 +762,7 @@ int rhashtable_init(struct rhashtable *ht, + + ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); + +- if (params->nelem_hint) +- size = rounded_hashtable_size(&ht->p); ++ 
size = rounded_hashtable_size(&ht->p); + + /* The maximum (not average) chain length grows with the + * size of the hash table, at a rate of (log N)/(log log N). +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 55a9facb8e8d..9a8e688724b1 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -996,7 +996,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) + int nid, zid; + int i; + +- while ((memcg = parent_mem_cgroup(memcg))) { ++ for (; memcg; memcg = parent_mem_cgroup(memcg)) { + for_each_node(nid) { + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index fa02c680eebc..55be076706e5 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -828,6 +828,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) + n->cloned = 1; + n->nohdr = 0; + n->peeked = 0; ++ C(pfmemalloc); + n->destructor = NULL; + C(tail); + C(end); +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index c9e68ff48a72..8f05816a8be2 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -297,6 +297,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) + if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { + struct flowi4 fl4 = { + .flowi4_iif = LOOPBACK_IFINDEX, ++ .flowi4_oif = l3mdev_master_ifindex_rcu(dev), + .daddr = ip_hdr(skb)->saddr, + .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), + .flowi4_scope = scope, +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c +index 75abf978ef30..da90c74d12ef 100644 +--- a/net/ipv4/sysctl_net_ipv4.c ++++ b/net/ipv4/sysctl_net_ipv4.c +@@ -141,8 +141,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, + if (write && ret == 0) { + low = make_kgid(user_ns, urange[0]); + high = make_kgid(user_ns, urange[1]); +- if (!gid_valid(low) || !gid_valid(high) || +- (urange[1] < urange[0]) || gid_lt(high, low)) { ++ if (!gid_valid(low) || !gid_valid(high)) ++ return -EINVAL; ++ if (urange[1] < urange[0] || gid_lt(high, low)) { + low = make_kgid(&init_user_ns, 1); + high = make_kgid(&init_user_ns, 0); + } +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c +index 16f8124b1150..59111cadaec2 100644 +--- a/sound/core/rawmidi.c ++++ b/sound/core/rawmidi.c +@@ -635,7 +635,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card, + int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, + struct snd_rawmidi_params * params) + { +- char *newbuf; ++ char *newbuf, *oldbuf; + struct snd_rawmidi_runtime *runtime = substream->runtime; + + if (substream->append && substream->use_count > 1) +@@ -648,13 +648,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, + return -EINVAL; + } + if (params->buffer_size != runtime->buffer_size) { +- newbuf = krealloc(runtime->buffer, params->buffer_size, +- GFP_KERNEL); ++ newbuf = kmalloc(params->buffer_size, GFP_KERNEL); + if (!newbuf) + return -ENOMEM; ++ spin_lock_irq(&runtime->lock); ++ oldbuf = runtime->buffer; + runtime->buffer = newbuf; + runtime->buffer_size = params->buffer_size; + runtime->avail = runtime->buffer_size; ++ runtime->appl_ptr = runtime->hw_ptr = 0; ++ spin_unlock_irq(&runtime->lock); ++ kfree(oldbuf); + } + runtime->avail_min = params->avail_min; + substream->active_sensing = !params->no_active_sensing; +@@ -665,7 +669,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params); + int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, + struct snd_rawmidi_params * params) + { +- char *newbuf; 
++ char *newbuf, *oldbuf; + struct snd_rawmidi_runtime *runtime = substream->runtime; + + snd_rawmidi_drain_input(substream); +@@ -676,12 +680,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, + return -EINVAL; + } + if (params->buffer_size != runtime->buffer_size) { +- newbuf = krealloc(runtime->buffer, params->buffer_size, +- GFP_KERNEL); ++ newbuf = kmalloc(params->buffer_size, GFP_KERNEL); + if (!newbuf) + return -ENOMEM; ++ spin_lock_irq(&runtime->lock); ++ oldbuf = runtime->buffer; + runtime->buffer = newbuf; + runtime->buffer_size = params->buffer_size; ++ runtime->appl_ptr = runtime->hw_ptr = 0; ++ spin_unlock_irq(&runtime->lock); ++ kfree(oldbuf); + } + runtime->avail_min = params->avail_min; + return 0; +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c +index 882fe83a3554..b3f345433ec7 100644 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c +@@ -1476,15 +1476,19 @@ TEST_F(TRACE_syscall, syscall_dropped) + #define SECCOMP_SET_MODE_FILTER 1 + #endif + +-#ifndef SECCOMP_FLAG_FILTER_TSYNC +-#define SECCOMP_FLAG_FILTER_TSYNC 1 ++#ifndef SECCOMP_FILTER_FLAG_TSYNC ++#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) ++#endif ++ ++#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW ++#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) + #endif + + #ifndef seccomp +-int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter) ++int seccomp(unsigned int op, unsigned int flags, void *args) + { + errno = 0; +- return syscall(__NR_seccomp, op, flags, filter); ++ return syscall(__NR_seccomp, op, flags, args); + } + #endif + +@@ -1576,6 +1580,78 @@ TEST(seccomp_syscall_mode_lock) + } + } + ++/* ++ * Test detection of known and unknown filter flags. Userspace needs to be able ++ * to check if a filter flag is supported by the current kernel and a good way ++ * of doing that is by attempting to enter filter mode, with the flag bit in ++ * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates ++ * that the flag is valid and EINVAL indicates that the flag is invalid. ++ */ ++TEST(detect_seccomp_filter_flags) ++{ ++ unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, ++ SECCOMP_FILTER_FLAG_SPEC_ALLOW }; ++ unsigned int flag, all_flags; ++ int i; ++ long ret; ++ ++ /* Test detection of known-good filter flags */ ++ for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { ++ int bits = 0; ++ ++ flag = flags[i]; ++ /* Make sure the flag is a single bit! 
*/ ++ while (flag) { ++ if (flag & 0x1) ++ bits ++; ++ flag >>= 1; ++ } ++ ASSERT_EQ(1, bits); ++ flag = flags[i]; ++ ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); ++ ASSERT_NE(ENOSYS, errno) { ++ TH_LOG("Kernel does not support seccomp syscall!"); ++ } ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EFAULT, errno) { ++ TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!", ++ flag); ++ } ++ ++ all_flags |= flag; ++ } ++ ++ /* Test detection of all known-good filter flags */ ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EFAULT, errno) { ++ TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!", ++ all_flags); ++ } ++ ++ /* Test detection of an unknown filter flag */ ++ flag = -1; ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno) { ++ TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!", ++ flag); ++ } ++ ++ /* ++ * Test detection of an unknown filter flag that may simply need to be ++ * added to this test ++ */ ++ flag = flags[ARRAY_SIZE(flags) - 1] << 1; ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno) { ++ TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?", ++ flag); ++ } ++} ++ + TEST(TSYNC_first) + { + struct sock_filter filter[] = { +@@ -1592,7 +1668,7 @@ TEST(TSYNC_first) + TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); + } + +- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, + &prog); + ASSERT_NE(ENOSYS, errno) { + TH_LOG("Kernel does not support seccomp syscall!"); +@@ -1810,7 +1886,7 @@ TEST_F(TSYNC, two_siblings_with_ancestor) + self->sibling_count++; + } + +- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, + &self->apply_prog); + ASSERT_EQ(0, ret) { + TH_LOG("Could install filter on all threads!"); +@@ -1871,7 +1947,7 @@ TEST_F(TSYNC, two_siblings_with_no_filter) + TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); + } + +- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, + &self->apply_prog); + ASSERT_NE(ENOSYS, errno) { + TH_LOG("Kernel does not support seccomp syscall!"); +@@ -1919,7 +1995,7 @@ TEST_F(TSYNC, two_siblings_with_one_divergence) + self->sibling_count++; + } + +- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, + &self->apply_prog); + ASSERT_EQ(self->sibling[0].system_tid, ret) { + TH_LOG("Did not fail on diverged sibling."); +@@ -1971,7 +2047,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter) + TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); + } + +- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, + &self->apply_prog); + ASSERT_EQ(ret, self->sibling[0].system_tid) { + TH_LOG("Did not fail on diverged sibling."); +@@ -2000,7 +2076,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter) + /* Switch to the remaining sibling */ + sib = !sib; + +- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, + &self->apply_prog); + ASSERT_EQ(0, ret) { + 
TH_LOG("Expected the remaining sibling to sync"); +@@ -2023,7 +2099,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter) + while (!kill(self->sibling[sib].system_tid, 0)) + sleep(0.1); + +- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, + &self->apply_prog); + ASSERT_EQ(0, ret); /* just us chickens */ + } +diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c +index 49001fa84ead..1203829316b2 100644 +--- a/virt/kvm/eventfd.c ++++ b/virt/kvm/eventfd.c +@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work) + { + struct kvm_kernel_irqfd *irqfd = + container_of(work, struct kvm_kernel_irqfd, shutdown); ++ struct kvm *kvm = irqfd->kvm; + u64 cnt; + ++ /* Make sure irqfd has been initalized in assign path. */ ++ synchronize_srcu(&kvm->irq_srcu); ++ + /* + * Synchronize with the wait-queue and unhook ourselves to prevent + * further events. +@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) + + idx = srcu_read_lock(&kvm->irq_srcu); + irqfd_update(kvm, irqfd); +- srcu_read_unlock(&kvm->irq_srcu, idx); + + list_add_tail(&irqfd->list, &kvm->irqfds.items); + +@@ -419,6 +422,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) + irqfd->consumer.token, ret); + #endif + ++ srcu_read_unlock(&kvm->irq_srcu, idx); + return 0; + + fail: diff --git a/patch/kernel/rk3328-default/04-patch-4.4.144-145.patch b/patch/kernel/rk3328-default/04-patch-4.4.144-145.patch new file mode 100644 index 000000000..f7b3f94d8 --- /dev/null +++ b/patch/kernel/rk3328-default/04-patch-4.4.144-145.patch @@ -0,0 +1,1006 @@ +diff --git a/Makefile b/Makefile +index 63f3e2438a26..be31491a2d67 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 144 ++SUBLEVEL = 145 + EXTRAVERSION = + NAME = Blurry Fish Butt + +@@ -624,6 +624,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) + KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) + KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) + KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) ++KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) + + ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE + KBUILD_CFLAGS += -Os +diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h +index 35c9db857ebe..cd8b589111ba 100644 +--- a/arch/arm/include/asm/uaccess.h ++++ b/arch/arm/include/asm/uaccess.h +@@ -251,7 +251,7 @@ extern int __put_user_8(void *, unsigned long long); + ({ \ + unsigned long __limit = current_thread_info()->addr_limit - 1; \ + const typeof(*(p)) __user *__tmp_p = (p); \ +- register const typeof(*(p)) __r2 asm("r2") = (x); \ ++ register typeof(*(p)) __r2 asm("r2") = (x); \ + register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ + register unsigned long __l asm("r1") = __limit; \ + register int __e asm("r0"); \ +diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c +index 8ae4067a5eda..40ecb6e700cd 100644 +--- a/arch/mips/ath79/common.c ++++ b/arch/mips/ath79/common.c +@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init); + + void ath79_ddr_wb_flush(u32 reg) + { +- void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg; ++ void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4); + + /* Flush the DDR write buffer. 
*/ + __raw_writel(0x1, flush_reg); +diff --git a/drivers/base/dd.c b/drivers/base/dd.c +index a641cf3ccad6..1dffb018a7fe 100644 +--- a/drivers/base/dd.c ++++ b/drivers/base/dd.c +@@ -304,14 +304,6 @@ static int really_probe(struct device *dev, struct device_driver *drv) + goto probe_failed; + } + +- /* +- * Ensure devices are listed in devices_kset in correct order +- * It's important to move Dev to the end of devices_kset before +- * calling .probe, because it could be recursive and parent Dev +- * should always go first +- */ +- devices_kset_move_last(dev); +- + if (dev->bus->probe) { + ret = dev->bus->probe(dev); + if (ret) +diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c +index 51670b322409..700b98d9c250 100644 +--- a/drivers/net/can/xilinx_can.c ++++ b/drivers/net/can/xilinx_can.c +@@ -2,6 +2,7 @@ + * + * Copyright (C) 2012 - 2014 Xilinx, Inc. + * Copyright (C) 2009 PetaLogix. All rights reserved. ++ * Copyright (C) 2017 Sandvik Mining and Construction Oy + * + * Description: + * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. +@@ -25,8 +26,10 @@ + #include + #include + #include ++#include + #include + #include ++#include + #include + #include + #include +@@ -100,7 +103,7 @@ enum xcan_reg { + #define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ + XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ + XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ +- XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK) ++ XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK) + + /* CAN register bit shift - XCAN___SHIFT */ + #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ +@@ -117,6 +120,7 @@ enum xcan_reg { + /** + * struct xcan_priv - This definition define CAN driver instance + * @can: CAN private data structure. ++ * @tx_lock: Lock for synchronizing TX interrupt handling + * @tx_head: Tx CAN packets ready to send on the queue + * @tx_tail: Tx CAN packets successfully sended on the queue + * @tx_max: Maximum number packets the driver can send +@@ -131,6 +135,7 @@ enum xcan_reg { + */ + struct xcan_priv { + struct can_priv can; ++ spinlock_t tx_lock; + unsigned int tx_head; + unsigned int tx_tail; + unsigned int tx_max; +@@ -158,6 +163,11 @@ static const struct can_bittiming_const xcan_bittiming_const = { + .brp_inc = 1, + }; + ++#define XCAN_CAP_WATERMARK 0x0001 ++struct xcan_devtype_data { ++ unsigned int caps; ++}; ++ + /** + * xcan_write_reg_le - Write a value to the device register little endian + * @priv: Driver private data structure +@@ -237,6 +247,10 @@ static int set_reset_mode(struct net_device *ndev) + usleep_range(500, 10000); + } + ++ /* reset clears FIFOs */ ++ priv->tx_head = 0; ++ priv->tx_tail = 0; ++ + return 0; + } + +@@ -391,6 +405,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf = (struct can_frame *)skb->data; + u32 id, dlc, data[2] = {0, 0}; ++ unsigned long flags; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; +@@ -438,6 +453,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) + data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); + + can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); ++ ++ spin_lock_irqsave(&priv->tx_lock, flags); ++ + priv->tx_head++; + + /* Write the Frame to Xilinx CAN TX FIFO */ +@@ -453,10 +471,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) + stats->tx_bytes += cf->can_dlc; + } + ++ /* Clear TX-FIFO-empty interrupt for 
xcan_tx_interrupt() */ ++ if (priv->tx_max > 1) ++ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK); ++ + /* Check if the TX buffer is full */ + if ((priv->tx_head - priv->tx_tail) == priv->tx_max) + netif_stop_queue(ndev); + ++ spin_unlock_irqrestore(&priv->tx_lock, flags); ++ + return NETDEV_TX_OK; + } + +@@ -528,6 +552,123 @@ static int xcan_rx(struct net_device *ndev) + return 1; + } + ++/** ++ * xcan_current_error_state - Get current error state from HW ++ * @ndev: Pointer to net_device structure ++ * ++ * Checks the current CAN error state from the HW. Note that this ++ * only checks for ERROR_PASSIVE and ERROR_WARNING. ++ * ++ * Return: ++ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE ++ * otherwise. ++ */ ++static enum can_state xcan_current_error_state(struct net_device *ndev) ++{ ++ struct xcan_priv *priv = netdev_priv(ndev); ++ u32 status = priv->read_reg(priv, XCAN_SR_OFFSET); ++ ++ if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) ++ return CAN_STATE_ERROR_PASSIVE; ++ else if (status & XCAN_SR_ERRWRN_MASK) ++ return CAN_STATE_ERROR_WARNING; ++ else ++ return CAN_STATE_ERROR_ACTIVE; ++} ++ ++/** ++ * xcan_set_error_state - Set new CAN error state ++ * @ndev: Pointer to net_device structure ++ * @new_state: The new CAN state to be set ++ * @cf: Error frame to be populated or NULL ++ * ++ * Set new CAN error state for the device, updating statistics and ++ * populating the error frame if given. ++ */ ++static void xcan_set_error_state(struct net_device *ndev, ++ enum can_state new_state, ++ struct can_frame *cf) ++{ ++ struct xcan_priv *priv = netdev_priv(ndev); ++ u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET); ++ u32 txerr = ecr & XCAN_ECR_TEC_MASK; ++ u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT; ++ ++ priv->can.state = new_state; ++ ++ if (cf) { ++ cf->can_id |= CAN_ERR_CRTL; ++ cf->data[6] = txerr; ++ cf->data[7] = rxerr; ++ } ++ ++ switch (new_state) { ++ case CAN_STATE_ERROR_PASSIVE: ++ priv->can.can_stats.error_passive++; ++ if (cf) ++ cf->data[1] = (rxerr > 127) ? ++ CAN_ERR_CRTL_RX_PASSIVE : ++ CAN_ERR_CRTL_TX_PASSIVE; ++ break; ++ case CAN_STATE_ERROR_WARNING: ++ priv->can.can_stats.error_warning++; ++ if (cf) ++ cf->data[1] |= (txerr > rxerr) ? ++ CAN_ERR_CRTL_TX_WARNING : ++ CAN_ERR_CRTL_RX_WARNING; ++ break; ++ case CAN_STATE_ERROR_ACTIVE: ++ if (cf) ++ cf->data[1] |= CAN_ERR_CRTL_ACTIVE; ++ break; ++ default: ++ /* non-ERROR states are handled elsewhere */ ++ WARN_ON(1); ++ break; ++ } ++} ++ ++/** ++ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX ++ * @ndev: Pointer to net_device structure ++ * ++ * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if ++ * the performed RX/TX has caused it to drop to a lesser state and set ++ * the interface state accordingly. ++ */ ++static void xcan_update_error_state_after_rxtx(struct net_device *ndev) ++{ ++ struct xcan_priv *priv = netdev_priv(ndev); ++ enum can_state old_state = priv->can.state; ++ enum can_state new_state; ++ ++ /* changing error state due to successful frame RX/TX can only ++ * occur from these states ++ */ ++ if (old_state != CAN_STATE_ERROR_WARNING && ++ old_state != CAN_STATE_ERROR_PASSIVE) ++ return; ++ ++ new_state = xcan_current_error_state(ndev); ++ ++ if (new_state != old_state) { ++ struct sk_buff *skb; ++ struct can_frame *cf; ++ ++ skb = alloc_can_err_skb(ndev, &cf); ++ ++ xcan_set_error_state(ndev, new_state, skb ? 
cf : NULL); ++ ++ if (skb) { ++ struct net_device_stats *stats = &ndev->stats; ++ ++ stats->rx_packets++; ++ stats->rx_bytes += cf->can_dlc; ++ netif_rx(skb); ++ } ++ } ++} ++ + /** + * xcan_err_interrupt - error frame Isr + * @ndev: net_device pointer +@@ -543,16 +684,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; +- u32 err_status, status, txerr = 0, rxerr = 0; ++ u32 err_status; + + skb = alloc_can_err_skb(ndev, &cf); + + err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); + priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); +- txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; +- rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & +- XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); +- status = priv->read_reg(priv, XCAN_SR_OFFSET); + + if (isr & XCAN_IXR_BSOFF_MASK) { + priv->can.state = CAN_STATE_BUS_OFF; +@@ -562,28 +699,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) + can_bus_off(ndev); + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; +- } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) { +- priv->can.state = CAN_STATE_ERROR_PASSIVE; +- priv->can.can_stats.error_passive++; +- if (skb) { +- cf->can_id |= CAN_ERR_CRTL; +- cf->data[1] = (rxerr > 127) ? +- CAN_ERR_CRTL_RX_PASSIVE : +- CAN_ERR_CRTL_TX_PASSIVE; +- cf->data[6] = txerr; +- cf->data[7] = rxerr; +- } +- } else if (status & XCAN_SR_ERRWRN_MASK) { +- priv->can.state = CAN_STATE_ERROR_WARNING; +- priv->can.can_stats.error_warning++; +- if (skb) { +- cf->can_id |= CAN_ERR_CRTL; +- cf->data[1] |= (txerr > rxerr) ? +- CAN_ERR_CRTL_TX_WARNING : +- CAN_ERR_CRTL_RX_WARNING; +- cf->data[6] = txerr; +- cf->data[7] = rxerr; +- } ++ } else { ++ enum can_state new_state = xcan_current_error_state(ndev); ++ ++ xcan_set_error_state(ndev, new_state, skb ? 
cf : NULL); + } + + /* Check for Arbitration lost interrupt */ +@@ -599,7 +718,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) + if (isr & XCAN_IXR_RXOFLW_MASK) { + stats->rx_over_errors++; + stats->rx_errors++; +- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; +@@ -708,26 +826,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) + + isr = priv->read_reg(priv, XCAN_ISR_OFFSET); + while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { +- if (isr & XCAN_IXR_RXOK_MASK) { +- priv->write_reg(priv, XCAN_ICR_OFFSET, +- XCAN_IXR_RXOK_MASK); +- work_done += xcan_rx(ndev); +- } else { +- priv->write_reg(priv, XCAN_ICR_OFFSET, +- XCAN_IXR_RXNEMP_MASK); +- break; +- } ++ work_done += xcan_rx(ndev); + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); + isr = priv->read_reg(priv, XCAN_ISR_OFFSET); + } + +- if (work_done) ++ if (work_done) { + can_led_event(ndev, CAN_LED_EVENT_RX); ++ xcan_update_error_state_after_rxtx(ndev); ++ } + + if (work_done < quota) { + napi_complete(napi); + ier = priv->read_reg(priv, XCAN_IER_OFFSET); +- ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); ++ ier |= XCAN_IXR_RXNEMP_MASK; + priv->write_reg(priv, XCAN_IER_OFFSET, ier); + } + return work_done; +@@ -742,18 +854,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) + { + struct xcan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; ++ unsigned int frames_in_fifo; ++ int frames_sent = 1; /* TXOK => at least 1 frame was sent */ ++ unsigned long flags; ++ int retries = 0; ++ ++ /* Synchronize with xmit as we need to know the exact number ++ * of frames in the FIFO to stay in sync due to the TXFEMP ++ * handling. ++ * This also prevents a race between netif_wake_queue() and ++ * netif_stop_queue(). ++ */ ++ spin_lock_irqsave(&priv->tx_lock, flags); + +- while ((priv->tx_head - priv->tx_tail > 0) && +- (isr & XCAN_IXR_TXOK_MASK)) { ++ frames_in_fifo = priv->tx_head - priv->tx_tail; ++ ++ if (WARN_ON_ONCE(frames_in_fifo == 0)) { ++ /* clear TXOK anyway to avoid getting back here */ + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); ++ spin_unlock_irqrestore(&priv->tx_lock, flags); ++ return; ++ } ++ ++ /* Check if 2 frames were sent (TXOK only means that at least 1 ++ * frame was sent). ++ */ ++ if (frames_in_fifo > 1) { ++ WARN_ON(frames_in_fifo > priv->tx_max); ++ ++ /* Synchronize TXOK and isr so that after the loop: ++ * (1) isr variable is up-to-date at least up to TXOK clear ++ * time. This avoids us clearing a TXOK of a second frame ++ * but not noticing that the FIFO is now empty and thus ++ * marking only a single frame as sent. ++ * (2) No TXOK is left. Having one could mean leaving a ++ * stray TXOK as we might process the associated frame ++ * via TXFEMP handling as we read TXFEMP *after* TXOK ++ * clear to satisfy (1). 
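Reduced to a self-contained model, the completion accounting that the
comments above describe looks like this (hypothetical helper; head and
tail mirror the fields of struct xcan_priv, and the caller holds tx_lock):

#include <stdbool.h>

struct tx_ring {
	unsigned int head;	/* frames queued to the hardware FIFO */
	unsigned int tail;	/* frames known to have left the FIFO */
};

/* TXOK only proves ">= 1 frame sent"; TXFEMP ("TX FIFO empty") is what
 * disambiguates "1 of 2" from "2 of 2" when two frames were in flight. */
static unsigned int frames_completed(const struct tx_ring *r, bool fifo_empty)
{
	unsigned int in_fifo = r->head - r->tail; /* unsigned wrap is fine */

	if (in_fifo > 1 && fifo_empty)
		return in_fifo;
	return in_fifo ? 1 : 0;
}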
++ */ ++ while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) { ++ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); ++ isr = priv->read_reg(priv, XCAN_ISR_OFFSET); ++ } ++ ++ if (isr & XCAN_IXR_TXFEMP_MASK) { ++ /* nothing in FIFO anymore */ ++ frames_sent = frames_in_fifo; ++ } ++ } else { ++ /* single frame in fifo, just clear TXOK */ ++ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); ++ } ++ ++ while (frames_sent--) { + can_get_echo_skb(ndev, priv->tx_tail % + priv->tx_max); + priv->tx_tail++; + stats->tx_packets++; +- isr = priv->read_reg(priv, XCAN_ISR_OFFSET); + } +- can_led_event(ndev, CAN_LED_EVENT_TX); ++ + netif_wake_queue(ndev); ++ ++ spin_unlock_irqrestore(&priv->tx_lock, flags); ++ ++ can_led_event(ndev, CAN_LED_EVENT_TX); ++ xcan_update_error_state_after_rxtx(ndev); + } + + /** +@@ -772,6 +937,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) + struct net_device *ndev = (struct net_device *)dev_id; + struct xcan_priv *priv = netdev_priv(ndev); + u32 isr, ier; ++ u32 isr_errors; + + /* Get the interrupt status from Xilinx CAN */ + isr = priv->read_reg(priv, XCAN_ISR_OFFSET); +@@ -790,18 +956,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) + xcan_tx_interrupt(ndev, isr); + + /* Check for the type of error interrupt and Processing it */ +- if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | +- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) { +- priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK | +- XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | +- XCAN_IXR_ARBLST_MASK)); ++ isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | ++ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK); ++ if (isr_errors) { ++ priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); + xcan_err_interrupt(ndev, isr); + } + + /* Check for the type of receive interrupt and Processing it */ +- if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) { ++ if (isr & XCAN_IXR_RXNEMP_MASK) { + ier = priv->read_reg(priv, XCAN_IER_OFFSET); +- ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK); ++ ier &= ~XCAN_IXR_RXNEMP_MASK; + priv->write_reg(priv, XCAN_IER_OFFSET, ier); + napi_schedule(&priv->napi); + } +@@ -1030,6 +1195,18 @@ static int __maybe_unused xcan_resume(struct device *dev) + + static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume); + ++static const struct xcan_devtype_data xcan_zynq_data = { ++ .caps = XCAN_CAP_WATERMARK, ++}; ++ ++/* Match table for OF platform binding */ ++static const struct of_device_id xcan_of_match[] = { ++ { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, ++ { .compatible = "xlnx,axi-can-1.00.a", }, ++ { /* end of list */ }, ++}; ++MODULE_DEVICE_TABLE(of, xcan_of_match); ++ + /** + * xcan_probe - Platform registration call + * @pdev: Handle to the platform device structure +@@ -1044,8 +1221,10 @@ static int xcan_probe(struct platform_device *pdev) + struct resource *res; /* IO mem resources */ + struct net_device *ndev; + struct xcan_priv *priv; ++ const struct of_device_id *of_id; ++ int caps = 0; + void __iomem *addr; +- int ret, rx_max, tx_max; ++ int ret, rx_max, tx_max, tx_fifo_depth; + + /* Get the virtual base address for the device */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +@@ -1055,7 +1234,8 @@ static int xcan_probe(struct platform_device *pdev) + goto err; + } + +- ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max); ++ ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", ++ &tx_fifo_depth); + if (ret < 0) + goto err; + +@@ 
-1063,6 +1243,30 @@ static int xcan_probe(struct platform_device *pdev) + if (ret < 0) + goto err; + ++ of_id = of_match_device(xcan_of_match, &pdev->dev); ++ if (of_id) { ++ const struct xcan_devtype_data *devtype_data = of_id->data; ++ ++ if (devtype_data) ++ caps = devtype_data->caps; ++ } ++ ++ /* There is no way to directly figure out how many frames have been ++ * sent when the TXOK interrupt is processed. If watermark programming ++ * is supported, we can have 2 frames in the FIFO and use TXFEMP ++ * to determine if 1 or 2 frames have been sent. ++ * Theoretically we should be able to use TXFWMEMP to determine up ++ * to 3 frames, but it seems that after putting a second frame in the ++ * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less ++ * than 2 frames in FIFO) is set anyway with no TXOK (a frame was ++ * sent), which is not a sensible state - possibly TXFWMEMP is not ++ * completely synchronized with the rest of the bits? ++ */ ++ if (caps & XCAN_CAP_WATERMARK) ++ tx_max = min(tx_fifo_depth, 2); ++ else ++ tx_max = 1; ++ + /* Create a CAN device instance */ + ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); + if (!ndev) +@@ -1077,6 +1281,7 @@ static int xcan_probe(struct platform_device *pdev) + CAN_CTRLMODE_BERR_REPORTING; + priv->reg_base = addr; + priv->tx_max = tx_max; ++ spin_lock_init(&priv->tx_lock); + + /* Get IRQ for the device */ + ndev->irq = platform_get_irq(pdev, 0); +@@ -1144,9 +1349,9 @@ static int xcan_probe(struct platform_device *pdev) + devm_can_led_init(ndev); + clk_disable_unprepare(priv->bus_clk); + clk_disable_unprepare(priv->can_clk); +- netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", ++ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n", + priv->reg_base, ndev->irq, priv->can.clock.freq, +- priv->tx_max); ++ tx_fifo_depth, priv->tx_max); + + return 0; + +@@ -1182,14 +1387,6 @@ static int xcan_remove(struct platform_device *pdev) + return 0; + } + +-/* Match table for OF platform binding */ +-static const struct of_device_id xcan_of_match[] = { +- { .compatible = "xlnx,zynq-can-1.0", }, +- { .compatible = "xlnx,axi-can-1.00.a", }, +- { /* end of list */ }, +-}; +-MODULE_DEVICE_TABLE(of, xcan_of_match); +- + static struct platform_driver xcan_driver = { + .probe = xcan_probe, + .remove = xcan_remove, +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +index e3080fbd9d00..7911dc3da98e 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +@@ -2891,7 +2891,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + u32 srqn = qp_get_srqn(qpc) & 0xffffff; + int use_srq = (qp_get_srqn(qpc) >> 24) & 1; + struct res_srq *srq; +- int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; ++ int local_qpn = vhcr->in_modifier & 0xffffff; + + err = adjust_qp_sched_queue(dev, slave, qpc, inbox); + if (err) +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 7ed30d0b5273..a501f3ba6a3f 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1771,6 +1771,9 @@ static const struct usb_device_id acm_ids[] = { + { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ + .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ + }, ++ { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */ ++ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ ++ }, + + { 
USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ + .driver_info = CLEAR_HALT_CONDITIONS, +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 4d86da0df131..93756664592a 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -1123,10 +1123,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + + if (!udev || udev->state == USB_STATE_NOTATTACHED) { + /* Tell hub_wq to disconnect the device or +- * check for a new connection ++ * check for a new connection or over current condition. ++ * Based on USB2.0 Spec Section 11.12.5, ++ * C_PORT_OVER_CURRENT could be set while ++ * PORT_OVER_CURRENT is not. So check for any of them. + */ + if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || +- (portstatus & USB_PORT_STAT_OVERCURRENT)) ++ (portstatus & USB_PORT_STAT_OVERCURRENT) || ++ (portchange & USB_PORT_STAT_C_OVERCURRENT)) + set_bit(port1, hub->change_bits); + + } else if (portstatus & USB_PORT_STAT_ENABLE) { +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c +index 4191feb765b1..4800bb22cdd6 100644 +--- a/drivers/usb/gadget/function/f_fs.c ++++ b/drivers/usb/gadget/function/f_fs.c +@@ -3037,7 +3037,7 @@ static int ffs_func_setup(struct usb_function *f, + __ffs_event_add(ffs, FUNCTIONFS_SETUP); + spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); + +- return USB_GADGET_DELAYED_STATUS; ++ return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0; + } + + static void ffs_func_suspend(struct usb_function *f) +diff --git a/include/net/tcp.h b/include/net/tcp.h +index a3696b778757..65babd8a682d 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -376,6 +376,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags); + ++void tcp_enter_quickack_mode(struct sock *sk); + static inline void tcp_dec_quickack_mode(struct sock *sk, + const unsigned int pkts) + { +@@ -559,6 +560,7 @@ void tcp_send_fin(struct sock *sk); + void tcp_send_active_reset(struct sock *sk, gfp_t priority); + int tcp_send_synack(struct sock *); + void tcp_push_one(struct sock *, unsigned int mss_now); ++void __tcp_send_ack(struct sock *sk, u32 rcv_nxt); + void tcp_send_ack(struct sock *sk); + void tcp_send_delayed_ack(struct sock *sk); + void tcp_send_loss_probe(struct sock *sk); +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 2017ffa5197a..96c9c0f0905a 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -2087,9 +2087,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) + return err; + } + +- dev->rtnl_link_state = RTNL_LINK_INITIALIZED; +- +- __dev_notify_flags(dev, old_flags, ~0U); ++ if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { ++ __dev_notify_flags(dev, old_flags, 0U); ++ } else { ++ dev->rtnl_link_state = RTNL_LINK_INITIALIZED; ++ __dev_notify_flags(dev, old_flags, ~0U); ++ } + return 0; + } + EXPORT_SYMBOL(rtnl_configure_link); +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 10286432f684..c11bb6d2d00a 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -480,6 +480,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) + to->dev = from->dev; + to->mark = from->mark; + ++ skb_copy_hash(to, from); ++ + /* Copy the flags to each fragment. 
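The one-line skb_copy_hash() addition above makes every IPv4 fragment
inherit the parent skb's flow hash, so RPS/RFS and multiqueue steering keep
all fragments of a datagram on the same queue (the IPv6 hunk at the end of
this patch does the same). For reference, the helper is essentially this in
include/linux/skbuff.h of this kernel generation:

	static inline void skb_copy_hash(struct sk_buff *to,
					 const struct sk_buff *from)
	{
		to->hash = from->hash;
		to->sw_hash = from->sw_hash;
		to->l4_hash = from->l4_hash;
	}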
*/ + IPCB(to)->flags = IPCB(from)->flags; + +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c +index ce9a7fbb7c5f..88426a6a7a85 100644 +--- a/net/ipv4/ip_sockglue.c ++++ b/net/ipv4/ip_sockglue.c +@@ -135,15 +135,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) + { + struct sockaddr_in sin; + const struct iphdr *iph = ip_hdr(skb); +- __be16 *ports = (__be16 *)skb_transport_header(skb); ++ __be16 *ports; ++ int end; + +- if (skb_transport_offset(skb) + 4 > skb->len) ++ end = skb_transport_offset(skb) + 4; ++ if (end > 0 && !pskb_may_pull(skb, end)) + return; + + /* All current transport protocols have the port numbers in the + * first four bytes of the transport header and this function is + * written with this assumption in mind. + */ ++ ports = (__be16 *)skb_transport_header(skb); + + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = iph->daddr; +diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c +index 55d7da1d2ce9..e63b764e55ea 100644 +--- a/net/ipv4/tcp_dctcp.c ++++ b/net/ipv4/tcp_dctcp.c +@@ -131,23 +131,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) + struct dctcp *ca = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + +- /* State has changed from CE=0 to CE=1 and delayed +- * ACK has not sent yet. +- */ +- if (!ca->ce_state && ca->delayed_ack_reserved) { +- u32 tmp_rcv_nxt; +- +- /* Save current rcv_nxt. */ +- tmp_rcv_nxt = tp->rcv_nxt; +- +- /* Generate previous ack with CE=0. */ +- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; +- tp->rcv_nxt = ca->prior_rcv_nxt; +- +- tcp_send_ack(sk); +- +- /* Recover current rcv_nxt. */ +- tp->rcv_nxt = tmp_rcv_nxt; ++ if (!ca->ce_state) { ++ /* State has changed from CE=0 to CE=1, force an immediate ++ * ACK to reflect the new CE state. If an ACK was delayed, ++ * send that first to reflect the prior CE state. ++ */ ++ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) ++ __tcp_send_ack(sk, ca->prior_rcv_nxt); ++ tcp_enter_quickack_mode(sk); + } + + ca->prior_rcv_nxt = tp->rcv_nxt; +@@ -161,23 +152,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) + struct dctcp *ca = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + +- /* State has changed from CE=1 to CE=0 and delayed +- * ACK has not sent yet. +- */ +- if (ca->ce_state && ca->delayed_ack_reserved) { +- u32 tmp_rcv_nxt; +- +- /* Save current rcv_nxt. */ +- tmp_rcv_nxt = tp->rcv_nxt; +- +- /* Generate previous ack with CE=1. */ +- tp->ecn_flags |= TCP_ECN_DEMAND_CWR; +- tp->rcv_nxt = ca->prior_rcv_nxt; +- +- tcp_send_ack(sk); +- +- /* Recover current rcv_nxt. */ +- tp->rcv_nxt = tmp_rcv_nxt; ++ if (ca->ce_state) { ++ /* State has changed from CE=1 to CE=0, force an immediate ++ * ACK to reflect the new CE state. If an ACK was delayed, ++ * send that first to reflect the prior CE state. 
++ */ ++ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) ++ __tcp_send_ack(sk, ca->prior_rcv_nxt); ++ tcp_enter_quickack_mode(sk); + } + + ca->prior_rcv_nxt = tp->rcv_nxt; +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 4350ee058441..5c645069a09a 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -187,13 +187,14 @@ static void tcp_incr_quickack(struct sock *sk) + icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); + } + +-static void tcp_enter_quickack_mode(struct sock *sk) ++void tcp_enter_quickack_mode(struct sock *sk) + { + struct inet_connection_sock *icsk = inet_csk(sk); + tcp_incr_quickack(sk); + icsk->icsk_ack.pingpong = 0; + icsk->icsk_ack.ato = TCP_ATO_MIN; + } ++EXPORT_SYMBOL(tcp_enter_quickack_mode); + + /* Send ACKs quickly, if "quick" count is not exhausted + * and the session is not interactive. +@@ -4788,6 +4789,7 @@ restart: + static void tcp_collapse_ofo_queue(struct sock *sk) + { + struct tcp_sock *tp = tcp_sk(sk); ++ u32 range_truesize, sum_tiny = 0; + struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); + struct sk_buff *head; + u32 start, end; +@@ -4797,6 +4799,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk) + + start = TCP_SKB_CB(skb)->seq; + end = TCP_SKB_CB(skb)->end_seq; ++ range_truesize = skb->truesize; + head = skb; + + for (;;) { +@@ -4811,14 +4814,24 @@ static void tcp_collapse_ofo_queue(struct sock *sk) + if (!skb || + after(TCP_SKB_CB(skb)->seq, end) || + before(TCP_SKB_CB(skb)->end_seq, start)) { +- tcp_collapse(sk, &tp->out_of_order_queue, +- head, skb, start, end); ++ /* Do not attempt collapsing tiny skbs */ ++ if (range_truesize != head->truesize || ++ end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) { ++ tcp_collapse(sk, &tp->out_of_order_queue, ++ head, skb, start, end); ++ } else { ++ sum_tiny += range_truesize; ++ if (sum_tiny > sk->sk_rcvbuf >> 3) ++ return; ++ } ++ + head = skb; + if (!skb) + break; + /* Start new segment */ + start = TCP_SKB_CB(skb)->seq; + end = TCP_SKB_CB(skb)->end_seq; ++ range_truesize = skb->truesize; + } else { + if (before(TCP_SKB_CB(skb)->seq, start)) + start = TCP_SKB_CB(skb)->seq; +@@ -4874,6 +4887,9 @@ static int tcp_prune_queue(struct sock *sk) + else if (tcp_under_memory_pressure(sk)) + tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); + ++ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) ++ return 0; ++ + tcp_collapse_ofo_queue(sk); + if (!skb_queue_empty(&sk->sk_receive_queue)) + tcp_collapse(sk, &sk->sk_receive_queue, +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 2854db094864..6fa749ce231f 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -177,8 +177,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp, + } + + /* Account for an ACK we sent. */ +-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) ++static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, ++ u32 rcv_nxt) + { ++ struct tcp_sock *tp = tcp_sk(sk); ++ ++ if (unlikely(rcv_nxt != tp->rcv_nxt)) ++ return; /* Special ACK sent by DCTCP to reflect ECN */ + tcp_dec_quickack_mode(sk, pkts); + inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); + } +@@ -901,8 +906,8 @@ out: + * We are working here with either a clone of the original + * SKB, or a fresh unique copy made by the retransmit engine. 
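A worked example of the rcv_nxt plumbing below (byte counts hypothetical):
suppose bytes up to 1000 arrived with CE=0 and their ACK is still delayed
when a CE=1 segment shows up. The DCTCP hunks above now do, in effect:

	if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
		__tcp_send_ack(sk, ca->prior_rcv_nxt);	/* ACK at 1001, old CE state */
	tcp_enter_quickack_mode(sk);	/* the CE=1 data is ACKed promptly after */

The guard added to tcp_event_ack_sent() above then keeps this out-of-band
ACK from clearing the delayed-ACK timer, since it does not acknowledge
tp->rcv_nxt.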
+ */ +-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, +- gfp_t gfp_mask) ++static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, ++ int clone_it, gfp_t gfp_mask, u32 rcv_nxt) + { + const struct inet_connection_sock *icsk = inet_csk(sk); + struct inet_sock *inet; +@@ -962,7 +967,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, + th->source = inet->inet_sport; + th->dest = inet->inet_dport; + th->seq = htonl(tcb->seq); +- th->ack_seq = htonl(tp->rcv_nxt); ++ th->ack_seq = htonl(rcv_nxt); + *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | + tcb->tcp_flags); + +@@ -1005,7 +1010,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, + icsk->icsk_af_ops->send_check(sk, skb); + + if (likely(tcb->tcp_flags & TCPHDR_ACK)) +- tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); ++ tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); + + if (skb->len != tcp_header_size) + tcp_event_data_sent(tp, sk); +@@ -1036,6 +1041,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, + return net_xmit_eval(err); + } + ++static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, ++ gfp_t gfp_mask) ++{ ++ return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, ++ tcp_sk(sk)->rcv_nxt); ++} ++ + /* This routine just queues the buffer for sending. + * + * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, +@@ -3354,7 +3366,7 @@ void tcp_send_delayed_ack(struct sock *sk) + } + + /* This routine sends an ack and also updates the window. */ +-void tcp_send_ack(struct sock *sk) ++void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) + { + struct sk_buff *buff; + +@@ -3391,9 +3403,14 @@ void tcp_send_ack(struct sock *sk) + + /* Send it off, this clears delayed acks for us. */ + skb_mstamp_get(&buff->skb_mstamp); +- tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); ++ __tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC), rcv_nxt); ++} ++EXPORT_SYMBOL_GPL(__tcp_send_ack); ++ ++void tcp_send_ack(struct sock *sk) ++{ ++ __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); + } +-EXPORT_SYMBOL_GPL(tcp_send_ack); + + /* This routine sends a packet with an out of date sequence + * number. It assumes the other end will try to ack it. +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c +index cae37bfd12ab..9f6e57ded338 100644 +--- a/net/ipv6/datagram.c ++++ b/net/ipv6/datagram.c +@@ -657,13 +657,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, + } + if (np->rxopt.bits.rxorigdstaddr) { + struct sockaddr_in6 sin6; +- __be16 *ports = (__be16 *) skb_transport_header(skb); ++ __be16 *ports; ++ int end; + +- if (skb_transport_offset(skb) + 4 <= skb->len) { ++ end = skb_transport_offset(skb) + 4; ++ if (end <= 0 || pskb_may_pull(skb, end)) { + /* All current transport protocols have the port numbers in the + * first four bytes of the transport header and this function is + * written with this assumption in mind. 
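The IPv4 hunk earlier and the IPv6 hunk here follow the same rule: validate
with pskb_may_pull() first and only then (re)compute the pointer, because a
successful pull may reallocate the skb header and move skb->data. As a
standalone sketch (hypothetical helper; this variant also bails out on a
non-positive offset):

#include <linux/skbuff.h>

static bool peek_ports(struct sk_buff *skb, __be16 *sport, __be16 *dport)
{
	int end = skb_transport_offset(skb) + 4;
	const __be16 *ports;

	if (end <= 0 || !pskb_may_pull(skb, end))
		return false;	/* ports not within pullable data */

	/* Safe only now: pskb_may_pull() may have moved skb->data. */
	ports = (const __be16 *)skb_transport_header(skb);
	*sport = ports[0];
	*dport = ports[1];
	return true;
}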
+ */ ++ ports = (__be16 *)skb_transport_header(skb); + + sin6.sin6_family = AF_INET6; + sin6.sin6_addr = ipv6_hdr(skb)->daddr; +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 74786783834b..0feede45bd28 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -559,6 +559,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) + to->dev = from->dev; + to->mark = from->mark; + ++ skb_copy_hash(to, from); ++ + #ifdef CONFIG_NET_SCHED + to->tc_index = from->tc_index; + #endif diff --git a/patch/kernel/rk3328-default/04-patch-4.4.145-146.patch b/patch/kernel/rk3328-default/04-patch-4.4.145-146.patch new file mode 100644 index 000000000..5b8de0dd2 --- /dev/null +++ b/patch/kernel/rk3328-default/04-patch-4.4.145-146.patch @@ -0,0 +1,2741 @@ +diff --git a/Makefile b/Makefile +index be31491a2d67..030f5af05f4e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 145 ++SUBLEVEL = 146 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile +index 91d2068da1b9..0f3fe6a151dc 100644 +--- a/arch/microblaze/boot/Makefile ++++ b/arch/microblaze/boot/Makefile +@@ -21,17 +21,19 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE + quiet_cmd_cp = CP $< $@$2 + cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) + +-quiet_cmd_strip = STRIP $@ ++quiet_cmd_strip = STRIP $< $@$2 + cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ +- -K _fdt_start vmlinux -o $@ ++ -K _fdt_start $< -o $@$2 + + UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR) ++UIMAGE_IN = $@ ++UIMAGE_OUT = $@.ub + + $(obj)/simpleImage.%: vmlinux FORCE + $(call if_changed,cp,.unstrip) + $(call if_changed,objcopy) + $(call if_changed,uimage) +- $(call if_changed,strip) +- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' ++ $(call if_changed,strip,.strip) ++ @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')' + + clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb +diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h +index 98c31e5d9579..a7bc901819c8 100644 +--- a/arch/mips/include/asm/pci.h ++++ b/arch/mips/include/asm/pci.h +@@ -89,7 +89,7 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, + phys_addr_t size = resource_size(rsrc); + + *start = fixup_bigphys_addr(rsrc->start, size); +- *end = rsrc->start + size; ++ *end = rsrc->start + size - 1; + } + + /* +diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S +index 78c1eba4c04a..01e274e6907b 100644 +--- a/arch/powerpc/kernel/head_8xx.S ++++ b/arch/powerpc/kernel/head_8xx.S +@@ -720,7 +720,7 @@ start_here: + tovirt(r6,r6) + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l +- stw r5, 0xf0(r0) /* Must match your Abatron config file */ ++ stw r5, 0xf0(0) /* Must match your Abatron config file */ + tophys(r5,r5) + stw r6, 0(r5) + +diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c +index 1f7930037cb7..d9e41b77dd13 100644 +--- a/arch/powerpc/kernel/pci_32.c ++++ b/arch/powerpc/kernel/pci_32.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c +index 515730e499fe..309027208f7c 100644 +--- a/arch/powerpc/mm/slb.c ++++ b/arch/powerpc/mm/slb.c +@@ -69,14 +69,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize, + * updating it. 
No write barriers are needed here, provided
+	 * we only update the current CPU's SLB shadow buffer.
+	 */
+-	p->save_area[index].esid = 0;
+-	p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));
+-	p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index));
++	WRITE_ONCE(p->save_area[index].esid, 0);
++	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
++	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
+ }
+ 
+ static inline void slb_shadow_clear(enum slb_index index)
+ {
+-	get_slb_shadow()->save_area[index].esid = 0;
++	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0);
+ }
+ 
+ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
+diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
+index f803f4b8ab6f..8608e358217f 100644
+--- a/arch/powerpc/platforms/chrp/time.c
++++ b/arch/powerpc/platforms/chrp/time.c
+@@ -27,6 +27,8 @@
+ #include <asm/sections.h>
+ #include <asm/time.h>
+ 
++#include <platforms/chrp/chrp.h>
++
+ extern spinlock_t rtc_lock;
+ 
+ #define NVRAM_AS0 0x74
+@@ -62,7 +64,7 @@ long __init chrp_time_init(void)
+ 	return 0;
+ }
+ 
+-int chrp_cmos_clock_read(int addr)
++static int chrp_cmos_clock_read(int addr)
+ {
+ 	if (nvram_as1 != 0)
+ 		outb(addr>>8, nvram_as1);
+@@ -70,7 +72,7 @@ int chrp_cmos_clock_read(int addr)
+ 	return (inb(nvram_data));
+ }
+ 
+-void chrp_cmos_clock_write(unsigned long val, int addr)
++static void chrp_cmos_clock_write(unsigned long val, int addr)
+ {
+ 	if (nvram_as1 != 0)
+ 		outb(addr>>8, nvram_as1);
+diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+index 9b7975706bfc..9485f1024d46 100644
+--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
++++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+@@ -35,6 +35,8 @@
+  */
+ #define HW_BROADWAY_ICR		0x00
+ #define HW_BROADWAY_IMR		0x04
++#define HW_STARLET_ICR		0x08
++#define HW_STARLET_IMR		0x0c
+ 
+ 
+ /*
+@@ -74,6 +76,9 @@ static void hlwd_pic_unmask(struct irq_data *d)
+ 	void __iomem *io_base = irq_data_get_irq_chip_data(d);
+ 
+ 	setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
++
++	/* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */
++	clrbits32(io_base + HW_STARLET_IMR, 1 << irq);
+ }
+ 
+ 
+diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
+index 76f5013c35e5..89237b84b096 100644
+--- a/arch/powerpc/platforms/powermac/bootx_init.c
++++ b/arch/powerpc/platforms/powermac/bootx_init.c
+@@ -467,7 +467,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
+ 	boot_infos_t *bi = (boot_infos_t *) r4;
+ 	unsigned long hdr;
+ 	unsigned long space;
+-	unsigned long ptr, x;
++	unsigned long ptr;
+ 	char *model;
+ 	unsigned long offset = reloc_offset();
+ 
+@@ -561,6 +561,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
+ 	 * MMU switched OFF, so this should not be useful anymore.
+ */ + if (bi->version < 4) { ++ unsigned long x __maybe_unused; ++ + bootx_printf("Touching pages...\n"); + + /* +diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c +index 8dd78f4e1af4..32fc56cf6261 100644 +--- a/arch/powerpc/platforms/powermac/setup.c ++++ b/arch/powerpc/platforms/powermac/setup.c +@@ -359,6 +359,7 @@ static int pmac_late_init(void) + } + machine_late_initcall(powermac, pmac_late_init); + ++void note_bootable_part(dev_t dev, int part, int goodness); + /* + * This is __init_refok because we check for "initializing" before + * touching any of the __init sensitive things and "initializing" +diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h +index 9dd04b9e9782..b2f8c52b3840 100644 +--- a/arch/s390/include/asm/cpu_mf.h ++++ b/arch/s390/include/asm/cpu_mf.h +@@ -113,7 +113,7 @@ struct hws_basic_entry { + + struct hws_diag_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ +- unsigned int R:14; /* 16-19 and 20-30 reserved */ ++ unsigned int R:15; /* 16-19 and 20-30 reserved */ + unsigned int I:1; /* 31 entry valid or invalid */ + u8 data[]; /* Machine-dependent sample data */ + } __packed; +@@ -129,7 +129,9 @@ struct hws_trailer_entry { + unsigned int f:1; /* 0 - Block Full Indicator */ + unsigned int a:1; /* 1 - Alert request control */ + unsigned int t:1; /* 2 - Timestamp format */ +- unsigned long long:61; /* 3 - 63: Reserved */ ++ unsigned int :29; /* 3 - 31: Reserved */ ++ unsigned int bsdes:16; /* 32-47: size of basic SDE */ ++ unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ + }; + unsigned long long flags; /* 0 - 63: All indicators */ + }; +diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c +index 61215a69b03d..b22e9c4dd111 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c +@@ -229,7 +229,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e + u64 prev_count, new_count, delta; + int shift; + +- if (event->hw.idx >= UNCORE_PMC_IDX_FIXED) ++ if (event->hw.idx == UNCORE_PMC_IDX_FIXED) + shift = 64 - uncore_fixed_ctr_bits(box); + else + shift = 64 - uncore_perf_ctr_bits(box); +diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c +index 2749965afed0..83cadc2605a7 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c +@@ -240,7 +240,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p + { + struct hw_perf_event *hwc = &event->hw; + +- if (hwc->idx >= UNCORE_PMC_IDX_FIXED) ++ if (hwc->idx == UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); + else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) + wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 18143886b186..c5a4b1978cbf 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -6843,6 +6843,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu) + HRTIMER_MODE_REL); + vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; + ++ vmx->nested.vpid02 = allocate_vpid(); ++ + vmx->nested.vmxon = true; + + skip_emulated_instruction(vcpu); +@@ -8887,10 +8889,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) + goto free_vmcs; + } + +- if (nested) { ++ if (nested) + 
nested_vmx_setup_ctls_msrs(vmx); +- vmx->nested.vpid02 = allocate_vpid(); +- } + + vmx->nested.posted_intr_nv = -1; + vmx->nested.current_vmptr = -1ull; +@@ -8899,7 +8899,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) + return &vmx->vcpu; + + free_vmcs: +- free_vpid(vmx->nested.vpid02); + free_loaded_vmcs(vmx->loaded_vmcs); + free_msrs: + kfree(vmx->guest_msrs); +diff --git a/crypto/authenc.c b/crypto/authenc.c +index 55a354d57251..b7290c5b1eaa 100644 +--- a/crypto/authenc.c ++++ b/crypto/authenc.c +@@ -108,6 +108,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, + CRYPTO_TFM_RES_MASK); + + out: ++ memzero_explicit(&keys, sizeof(keys)); + return err; + + badkey: +diff --git a/crypto/authencesn.c b/crypto/authencesn.c +index 52154ef21b5e..fa0c4567f697 100644 +--- a/crypto/authencesn.c ++++ b/crypto/authencesn.c +@@ -90,6 +90,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * + CRYPTO_TFM_RES_MASK); + + out: ++ memzero_explicit(&keys, sizeof(keys)); + return err; + + badkey: +diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c +index ae3fe4e64203..3b0b4bd67b71 100644 +--- a/drivers/acpi/pci_root.c ++++ b/drivers/acpi/pci_root.c +@@ -472,9 +472,11 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) + } + + control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL +- | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL + | OSC_PCI_EXPRESS_PME_CONTROL; + ++ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) ++ control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL; ++ + if (pci_aer_available()) { + if (aer_acpi_firmware_first()) + dev_info(&device->dev, +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c +index 75cced210b2a..7db76b5c7ada 100644 +--- a/drivers/ata/libata-eh.c ++++ b/drivers/ata/libata-eh.c +@@ -2198,12 +2198,16 @@ static void ata_eh_link_autopsy(struct ata_link *link) + if (qc->err_mask & ~AC_ERR_OTHER) + qc->err_mask &= ~AC_ERR_OTHER; + +- /* SENSE_VALID trumps dev/unknown error and revalidation */ ++ /* ++ * SENSE_VALID trumps dev/unknown error and revalidation. Upper ++ * layers will determine whether the command is worth retrying ++ * based on the sense data and device class/type. Otherwise, ++ * determine directly if the command is worth retrying using its ++ * error mask and flags. 
++ */ + if (qc->flags & ATA_QCFLAG_SENSE_VALID) + qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); +- +- /* determine whether the command is worth retrying */ +- if (ata_eh_worth_retry(qc)) ++ else if (ata_eh_worth_retry(qc)) + qc->flags |= ATA_QCFLAG_RETRY; + + /* accumulate error info */ +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 91676535a1a3..4a899b41145e 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -339,6 +339,9 @@ static const struct usb_device_id blacklist_table[] = { + /* Additional Realtek 8723BU Bluetooth devices */ + { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, + ++ /* Additional Realtek 8723DE Bluetooth devices */ ++ { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, ++ + /* Additional Realtek 8821AE Bluetooth devices */ + { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c +index 476d39c7ba20..ecfb9ed2cff6 100644 +--- a/drivers/bluetooth/hci_qca.c ++++ b/drivers/bluetooth/hci_qca.c +@@ -884,7 +884,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) + */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); +- set_current_state(TASK_INTERRUPTIBLE); ++ set_current_state(TASK_RUNNING); + + return 0; + } +diff --git a/drivers/char/random.c b/drivers/char/random.c +index dffd06a3bb76..2916d08ee30e 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -1503,14 +1503,22 @@ static int + write_pool(struct entropy_store *r, const char __user *buffer, size_t count) + { + size_t bytes; +- __u32 buf[16]; ++ __u32 t, buf[16]; + const char __user *p = buffer; + + while (count > 0) { ++ int b, i = 0; ++ + bytes = min(count, sizeof(buf)); + if (copy_from_user(&buf, p, bytes)) + return -EFAULT; + ++ for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) { ++ if (!arch_get_random_int(&t)) ++ break; ++ buf[i] ^= t; ++ } ++ + count -= bytes; + p += bytes; + +diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c +index 97a364694bfc..047ef69b7e65 100644 +--- a/drivers/crypto/padlock-aes.c ++++ b/drivers/crypto/padlock-aes.c +@@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, + return; + } + ++ count -= initial; ++ + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) +@@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, + + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) +- : "d"(control_word), "b"(key), "c"(count - initial)); ++ : "d"(control_word), "b"(key), "c"(count)); + } + + static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, +@@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, + if (count < cbc_fetch_blocks) + return cbc_crypt(input, output, key, iv, control_word, count); + ++ count -= initial; ++ + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) +@@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, + + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) +- : "d" (control_word), "b" (key), "c" (count-initial)); ++ : "d" (control_word), 
"b" (key), "c" (count)); + return iv; + } + +diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c +index 55f5d33f6dc7..4251e9ac0373 100644 +--- a/drivers/dma/pxa_dma.c ++++ b/drivers/dma/pxa_dma.c +@@ -1321,7 +1321,7 @@ static int pxad_init_phys(struct platform_device *op, + return 0; + } + +-static const struct of_device_id const pxad_dt_ids[] = { ++static const struct of_device_id pxad_dt_ids[] = { + { .compatible = "marvell,pdma-1.0", }, + {} + }; +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c +index 50d74e5ce41b..355ad1b97df6 100644 +--- a/drivers/gpu/drm/drm_atomic.c ++++ b/drivers/gpu/drm/drm_atomic.c +@@ -960,7 +960,9 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, + { + struct drm_plane *plane = plane_state->plane; + struct drm_crtc_state *crtc_state; +- ++ /* Nothing to do for same crtc*/ ++ if (plane_state->crtc == crtc) ++ return 0; + if (plane_state->crtc) { + crtc_state = drm_atomic_get_crtc_state(plane_state->state, + plane_state->crtc); +diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h +index 860dd2177ca1..283570080d47 100644 +--- a/drivers/gpu/drm/gma500/psb_intel_drv.h ++++ b/drivers/gpu/drm/gma500/psb_intel_drv.h +@@ -252,7 +252,7 @@ extern int intelfb_remove(struct drm_device *dev, + extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +-extern int psb_intel_lvds_mode_valid(struct drm_connector *connector, ++extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode); + extern int psb_intel_lvds_set_property(struct drm_connector *connector, + struct drm_property *property, +diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c +index 61e3a097a478..ccd1b8bf0fd5 100644 +--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c ++++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c +@@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector) + } + } + +-int psb_intel_lvds_mode_valid(struct drm_connector *connector, ++enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_psb_private *dev_priv = connector->dev->dev_private; +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index 1a2a7365d0b5..c6bf378534f8 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -844,7 +844,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) + return ret; + } + +-static int radeon_lvds_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_encoder *encoder = radeon_best_single_encoder(connector); +@@ -993,7 +993,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector) + return ret; + } + +-static int radeon_vga_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_device *dev = connector->dev; +@@ -1136,7 +1136,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector) + return 1; + } + +-static int radeon_tv_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector 
*connector, + struct drm_display_mode *mode) + { + if ((mode->hdisplay > 1024) || (mode->vdisplay > 768)) +@@ -1477,7 +1477,7 @@ static void radeon_dvi_force(struct drm_connector *connector) + radeon_connector->use_digital = true; + } + +-static int radeon_dvi_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_device *dev = connector->dev; +@@ -1778,7 +1778,7 @@ out: + return ret; + } + +-static int radeon_dp_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_device *dev = connector->dev; +diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c +index febb21ee190e..584b10d3fc3d 100644 +--- a/drivers/hid/hid-plantronics.c ++++ b/drivers/hid/hid-plantronics.c +@@ -2,7 +2,7 @@ + * Plantronics USB HID Driver + * + * Copyright (c) 2014 JD Cole +- * Copyright (c) 2015 Terry Junge ++ * Copyright (c) 2015-2018 Terry Junge + */ + + /* +@@ -48,6 +48,10 @@ static int plantronics_input_mapping(struct hid_device *hdev, + unsigned short mapped_key; + unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev); + ++ /* special case for PTT products */ ++ if (field->application == HID_GD_JOYSTICK) ++ goto defaulted; ++ + /* handle volume up/down mapping */ + /* non-standard types or multi-HID interfaces - plt_type is PID */ + if (!(plt_type & HID_USAGE_PAGE)) { +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c +index a5fed668fde1..4248d253c32a 100644 +--- a/drivers/hid/i2c-hid/i2c-hid.c ++++ b/drivers/hid/i2c-hid/i2c-hid.c +@@ -1017,6 +1017,14 @@ static int i2c_hid_probe(struct i2c_client *client, + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + ++ /* Make sure there is something at this address */ ++ ret = i2c_smbus_read_byte(client); ++ if (ret < 0) { ++ dev_dbg(&client->dev, "nothing at this address: %d\n", ret); ++ ret = -ENXIO; ++ goto err_pm; ++ } ++ + ret = i2c_hid_fetch_hid_descriptor(ihid); + if (ret < 0) + goto err_pm; +diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c +index 8d84c563ba75..616173b7a5e8 100644 +--- a/drivers/infiniband/core/mad.c ++++ b/drivers/infiniband/core/mad.c +@@ -1548,7 +1548,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, + mad_reg_req->oui, 3)) { + method = &(*vendor_table)->vendor_class[ + vclass]->method_table[i]; +- BUG_ON(!*method); ++ if (!*method) ++ goto error3; + goto check_in_use; + } + } +@@ -1558,10 +1559,12 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, + vclass]->oui[i])) { + method = &(*vendor_table)->vendor_class[ + vclass]->method_table[i]; +- BUG_ON(*method); + /* Allocate method table for this OUI */ +- if ((ret = allocate_method_table(method))) +- goto error3; ++ if (!*method) { ++ ret = allocate_method_table(method); ++ if (ret) ++ goto error3; ++ } + memcpy((*vendor_table)->vendor_class[vclass]->oui[i], + mad_reg_req->oui, 3); + goto check_in_use; +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c +index 795938edce3f..55aa8d3d752f 100644 +--- a/drivers/infiniband/core/ucma.c ++++ b/drivers/infiniband/core/ucma.c +@@ -217,7 +217,7 @@ static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) + return NULL; + + mutex_lock(&mut); +- mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL); ++ mc->id = 
idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL); + mutex_unlock(&mut); + if (mc->id < 0) + goto error; +@@ -1375,6 +1375,10 @@ static ssize_t ucma_process_join(struct ucma_file *file, + goto err3; + } + ++ mutex_lock(&mut); ++ idr_replace(&multicast_idr, mc, mc->id); ++ mutex_unlock(&mut); ++ + mutex_unlock(&file->mut); + ucma_put_ctx(ctx); + return 0; +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index 97f6e05cffce..a716482774db 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -1251,6 +1251,8 @@ static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0611", 0 }, + { "ELAN0612", 0 }, + { "ELAN0618", 0 }, ++ { "ELAN061D", 0 }, ++ { "ELAN0622", 0 }, + { "ELAN1000", 0 }, + { } + }; +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index e484ea2dc787..34be09651ee8 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), + }, + }, ++ { ++ /* Lenovo LaVie Z */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 0663463df2f7..07f307402351 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -6145,6 +6145,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) + struct md_rdev *rdev; + int ret = -1; + ++ if (!mddev->pers) ++ return -ENODEV; ++ + rdev = find_rdev(mddev, dev); + if (!rdev) + return -ENXIO; +diff --git a/drivers/media/common/siano/smsendian.c b/drivers/media/common/siano/smsendian.c +index bfe831c10b1c..b95a631f23f9 100644 +--- a/drivers/media/common/siano/smsendian.c ++++ b/drivers/media/common/siano/smsendian.c +@@ -35,7 +35,7 @@ void smsendian_handle_tx_message(void *buffer) + switch (msg->x_msg_header.msg_type) { + case MSG_SMS_DATA_DOWNLOAD_REQ: + { +- msg->msg_data[0] = le32_to_cpu(msg->msg_data[0]); ++ msg->msg_data[0] = le32_to_cpu((__force __le32)(msg->msg_data[0])); + break; + } + +@@ -44,7 +44,7 @@ void smsendian_handle_tx_message(void *buffer) + sizeof(struct sms_msg_hdr))/4; + + for (i = 0; i < msg_words; i++) +- msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); ++ msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]); + + break; + } +@@ -64,7 +64,7 @@ void smsendian_handle_rx_message(void *buffer) + { + struct sms_version_res *ver = + (struct sms_version_res *) msg; +- ver->chip_model = le16_to_cpu(ver->chip_model); ++ ver->chip_model = le16_to_cpu((__force __le16)ver->chip_model); + break; + } + +@@ -81,7 +81,7 @@ void smsendian_handle_rx_message(void *buffer) + sizeof(struct sms_msg_hdr))/4; + + for (i = 0; i < msg_words; i++) +- msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); ++ msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]); + + break; + } +@@ -95,9 +95,9 @@ void smsendian_handle_message_header(void *msg) + #ifdef __BIG_ENDIAN + struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg; + +- phdr->msg_type = le16_to_cpu(phdr->msg_type); +- phdr->msg_length = le16_to_cpu(phdr->msg_length); +- phdr->msg_flags = le16_to_cpu(phdr->msg_flags); ++ phdr->msg_type = le16_to_cpu((__force __le16)phdr->msg_type); ++ phdr->msg_length = le16_to_cpu((__force __le16)phdr->msg_length); ++ phdr->msg_flags = le16_to_cpu((__force __le16)phdr->msg_flags); + #endif /* __BIG_ENDIAN */ + } + 
EXPORT_SYMBOL_GPL(smsendian_handle_message_header); +diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c +index fb39dfd55e75..46a052c5be2e 100644 +--- a/drivers/media/i2c/smiapp/smiapp-core.c ++++ b/drivers/media/i2c/smiapp/smiapp-core.c +@@ -981,7 +981,7 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor, + if (rval) + goto out; + +- for (i = 0; i < 1000; i++) { ++ for (i = 1000; i > 0; i--) { + rval = smiapp_read( + sensor, + SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s); +@@ -992,11 +992,10 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor, + if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY) + break; + +- if (--i == 0) { +- rval = -ETIMEDOUT; +- goto out; +- } +- ++ } ++ if (!i) { ++ rval = -ETIMEDOUT; ++ goto out; + } + + for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) { +diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c +index 269e0782c7b6..93d53195e8ca 100644 +--- a/drivers/media/pci/saa7164/saa7164-fw.c ++++ b/drivers/media/pci/saa7164/saa7164-fw.c +@@ -430,7 +430,8 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev) + __func__, fw->size); + + if (fw->size != fwlength) { +- printk(KERN_ERR "xc5000: firmware incorrect size\n"); ++ printk(KERN_ERR "saa7164: firmware incorrect size %zu != %u\n", ++ fw->size, fwlength); + ret = -ENOMEM; + goto out; + } +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c +index 56e683b19a73..91e02c1ff392 100644 +--- a/drivers/media/platform/omap3isp/isp.c ++++ b/drivers/media/platform/omap3isp/isp.c +@@ -2077,6 +2077,7 @@ error_csiphy: + + static void isp_detach_iommu(struct isp_device *isp) + { ++ arm_iommu_detach_device(isp->dev); + arm_iommu_release_mapping(isp->mapping); + isp->mapping = NULL; + iommu_group_remove_device(isp->dev); +@@ -2110,8 +2111,7 @@ static int isp_attach_iommu(struct isp_device *isp) + mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G); + if (IS_ERR(mapping)) { + dev_err(isp->dev, "failed to create ARM IOMMU mapping\n"); +- ret = PTR_ERR(mapping); +- goto error; ++ return PTR_ERR(mapping); + } + + isp->mapping = mapping; +@@ -2126,7 +2126,8 @@ static int isp_attach_iommu(struct isp_device *isp) + return 0; + + error: +- isp_detach_iommu(isp); ++ arm_iommu_release_mapping(isp->mapping); ++ isp->mapping = NULL; + return ret; + } + +diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c +index f8e3e83c52a2..20de5e9fc217 100644 +--- a/drivers/media/platform/rcar_jpu.c ++++ b/drivers/media/platform/rcar_jpu.c +@@ -1278,7 +1278,7 @@ static int jpu_open(struct file *file) + /* ...issue software reset */ + ret = jpu_reset(jpu); + if (ret) +- goto device_prepare_rollback; ++ goto jpu_reset_rollback; + } + + jpu->ref_count++; +@@ -1286,6 +1286,8 @@ static int jpu_open(struct file *file) + mutex_unlock(&jpu->mutex); + return 0; + ++jpu_reset_rollback: ++ clk_disable_unprepare(jpu->clk); + device_prepare_rollback: + mutex_unlock(&jpu->mutex); + v4l_prepare_rollback: +diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c +index 471d6a8ae8a4..9326439bc49c 100644 +--- a/drivers/media/radio/si470x/radio-si470x-i2c.c ++++ b/drivers/media/radio/si470x/radio-si470x-i2c.c +@@ -96,7 +96,7 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*"); + */ + int si470x_get_register(struct si470x_device *radio, int regnr) + { +- u16 buf[READ_REG_NUM]; ++ __be16 buf[READ_REG_NUM]; + struct i2c_msg msgs[1] = { 
+ { + .addr = radio->client->addr, +@@ -121,7 +121,7 @@ int si470x_get_register(struct si470x_device *radio, int regnr) + int si470x_set_register(struct si470x_device *radio, int regnr) + { + int i; +- u16 buf[WRITE_REG_NUM]; ++ __be16 buf[WRITE_REG_NUM]; + struct i2c_msg msgs[1] = { + { + .addr = radio->client->addr, +@@ -151,7 +151,7 @@ int si470x_set_register(struct si470x_device *radio, int regnr) + static int si470x_get_all_registers(struct si470x_device *radio) + { + int i; +- u16 buf[READ_REG_NUM]; ++ __be16 buf[READ_REG_NUM]; + struct i2c_msg msgs[1] = { + { + .addr = radio->client->addr, +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c +index bb1e19f7ed5a..0c1a42bf27fd 100644 +--- a/drivers/media/v4l2-core/videobuf2-core.c ++++ b/drivers/media/v4l2-core/videobuf2-core.c +@@ -870,9 +870,12 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) + dprintk(4, "done processing on buffer %d, state: %d\n", + vb->index, state); + +- /* sync buffers */ +- for (plane = 0; plane < vb->num_planes; ++plane) +- call_void_memop(vb, finish, vb->planes[plane].mem_priv); ++ if (state != VB2_BUF_STATE_QUEUED && ++ state != VB2_BUF_STATE_REQUEUEING) { ++ /* sync buffers */ ++ for (plane = 0; plane < vb->num_planes; ++plane) ++ call_void_memop(vb, finish, vb->planes[plane].mem_priv); ++ } + + spin_lock_irqsave(&q->done_lock, flags); + if (state == VB2_BUF_STATE_QUEUED || +diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c +index a1ae0cc2b86d..6ab481ee8ece 100644 +--- a/drivers/memory/tegra/mc.c ++++ b/drivers/memory/tegra/mc.c +@@ -20,14 +20,6 @@ + #include "mc.h" + + #define MC_INTSTATUS 0x000 +-#define MC_INT_DECERR_MTS (1 << 16) +-#define MC_INT_SECERR_SEC (1 << 13) +-#define MC_INT_DECERR_VPR (1 << 12) +-#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) +-#define MC_INT_INVALID_SMMU_PAGE (1 << 10) +-#define MC_INT_ARBITRATION_EMEM (1 << 9) +-#define MC_INT_SECURITY_VIOLATION (1 << 8) +-#define MC_INT_DECERR_EMEM (1 << 6) + + #define MC_INTMASK 0x004 + +@@ -248,12 +240,13 @@ static const char *const error_names[8] = { + static irqreturn_t tegra_mc_irq(int irq, void *data) + { + struct tegra_mc *mc = data; +- unsigned long status, mask; ++ unsigned long status; + unsigned int bit; + + /* mask all interrupts to avoid flooding */ +- status = mc_readl(mc, MC_INTSTATUS); +- mask = mc_readl(mc, MC_INTMASK); ++ status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask; ++ if (!status) ++ return IRQ_NONE; + + for_each_set_bit(bit, &status, 32) { + const char *error = status_names[bit] ?: "unknown"; +@@ -346,7 +339,6 @@ static int tegra_mc_probe(struct platform_device *pdev) + const struct of_device_id *match; + struct resource *res; + struct tegra_mc *mc; +- u32 value; + int err; + + match = of_match_node(tegra_mc_of_match, pdev->dev.of_node); +@@ -414,11 +406,7 @@ static int tegra_mc_probe(struct platform_device *pdev) + + WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n"); + +- value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | +- MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | +- MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM; +- +- mc_writel(mc, value, MC_INTMASK); ++ mc_writel(mc, mc->soc->intmask, MC_INTMASK); + + return 0; + } +diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h +index ddb16676c3af..24e020b4609b 100644 +--- a/drivers/memory/tegra/mc.h ++++ b/drivers/memory/tegra/mc.h +@@ -14,6 +14,15 @@ + + #include + ++#define MC_INT_DECERR_MTS (1 << 16) 
++#define MC_INT_SECERR_SEC (1 << 13) ++#define MC_INT_DECERR_VPR (1 << 12) ++#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) ++#define MC_INT_INVALID_SMMU_PAGE (1 << 10) ++#define MC_INT_ARBITRATION_EMEM (1 << 9) ++#define MC_INT_SECURITY_VIOLATION (1 << 8) ++#define MC_INT_DECERR_EMEM (1 << 6) ++ + static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset) + { + return readl(mc->regs + offset); +diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c +index ba8fff3d66a6..6d2a5a849d92 100644 +--- a/drivers/memory/tegra/tegra114.c ++++ b/drivers/memory/tegra/tegra114.c +@@ -930,4 +930,6 @@ const struct tegra_mc_soc tegra114_mc_soc = { + .atom_size = 32, + .client_id_mask = 0x7f, + .smmu = &tegra114_smmu_soc, ++ .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | ++ MC_INT_DECERR_EMEM, + }; +diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c +index 21e7255e3d96..234e74f97a4b 100644 +--- a/drivers/memory/tegra/tegra124.c ++++ b/drivers/memory/tegra/tegra124.c +@@ -1019,6 +1019,9 @@ const struct tegra_mc_soc tegra124_mc_soc = { + .smmu = &tegra124_smmu_soc, + .emem_regs = tegra124_mc_emem_regs, + .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs), ++ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | ++ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | ++ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, + }; + #endif /* CONFIG_ARCH_TEGRA_124_SOC */ + +@@ -1041,5 +1044,8 @@ const struct tegra_mc_soc tegra132_mc_soc = { + .atom_size = 32, + .client_id_mask = 0x7f, + .smmu = &tegra132_smmu_soc, ++ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | ++ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | ++ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, + }; + #endif /* CONFIG_ARCH_TEGRA_132_SOC */ +diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c +index 5e144abe4c18..47c78a6d8f00 100644 +--- a/drivers/memory/tegra/tegra210.c ++++ b/drivers/memory/tegra/tegra210.c +@@ -1077,4 +1077,7 @@ const struct tegra_mc_soc tegra210_mc_soc = { + .atom_size = 64, + .client_id_mask = 0xff, + .smmu = &tegra210_smmu_soc, ++ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | ++ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | ++ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, + }; +diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c +index b44737840e70..d0689428ea1a 100644 +--- a/drivers/memory/tegra/tegra30.c ++++ b/drivers/memory/tegra/tegra30.c +@@ -952,4 +952,6 @@ const struct tegra_mc_soc tegra30_mc_soc = { + .atom_size = 16, + .client_id_mask = 0x7f, + .smmu = &tegra30_smmu_soc, ++ .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | ++ MC_INT_DECERR_EMEM, + }; +diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c +index 0eee63542038..115a6f67ab51 100644 +--- a/drivers/mfd/cros_ec.c ++++ b/drivers/mfd/cros_ec.c +@@ -68,7 +68,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev) + + mutex_init(&ec_dev->lock); + +- cros_ec_query_all(ec_dev); ++ err = cros_ec_query_all(ec_dev); ++ if (err) { ++ dev_err(dev, "Cannot identify the EC: error %d\n", err); ++ return err; ++ } + + err = mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, &ec_cell, 1, + NULL, ec_dev->irq, NULL); +diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c +index 5e3fa5861039..2c0bbaed3609 100644 +--- a/drivers/mtd/nand/fsl_ifc_nand.c ++++ b/drivers/mtd/nand/fsl_ifc_nand.c +@@ 
-449,9 +449,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + + case NAND_CMD_READID: + case NAND_CMD_PARAM: { ++ /* ++ * For READID, read 8 bytes that are currently used. ++ * For PARAM, read all 3 copies of 256-bytes pages. ++ */ ++ int len = 8; + int timing = IFC_FIR_OP_RB; +- if (command == NAND_CMD_PARAM) ++ if (command == NAND_CMD_PARAM) { + timing = IFC_FIR_OP_RBCD; ++ len = 256 * 3; ++ } + + ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | + (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | +@@ -461,12 +468,8 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + &ifc->ifc_nand.nand_fcr0); + ifc_out32(column, &ifc->ifc_nand.row3); + +- /* +- * although currently it's 8 bytes for READID, we always read +- * the maximum 256 bytes(for PARAM) +- */ +- ifc_out32(256, &ifc->ifc_nand.nand_fbcr); +- ifc_nand_ctrl->read_bytes = 256; ++ ifc_out32(len, &ifc->ifc_nand.nand_fbcr); ++ ifc_nand_ctrl->read_bytes = len; + + set_addr(mtd, 0, 0, 0); + fsl_ifc_run_command(mtd); +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c +index 357c9e89fdf9..047348033e27 100644 +--- a/drivers/net/can/usb/ems_usb.c ++++ b/drivers/net/can/usb/ems_usb.c +@@ -1078,6 +1078,7 @@ static void ems_usb_disconnect(struct usb_interface *intf) + usb_free_urb(dev->intr_urb); + + kfree(dev->intr_in_buffer); ++ kfree(dev->tx_msg_buffer); + } + } + +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +index 446058081866..7a0ab4c44ee4 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +@@ -872,14 +872,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) + + if (pdata->tx_pause != pdata->phy.tx_pause) { + new_state = 1; +- pdata->hw_if.config_tx_flow_control(pdata); + pdata->tx_pause = pdata->phy.tx_pause; ++ pdata->hw_if.config_tx_flow_control(pdata); + } + + if (pdata->rx_pause != pdata->phy.rx_pause) { + new_state = 1; +- pdata->hw_if.config_rx_flow_control(pdata); + pdata->rx_pause = pdata->phy.rx_pause; ++ pdata->hw_if.config_rx_flow_control(pdata); + } + + /* Speed support */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 5adaf537513b..7bba30f24135 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -54,7 +54,7 @@ + #include + #include + +-#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) ++#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) + + /* Module parameters */ + #define TX_TIMEO 5000 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index a6d429950cb0..acec4b565511 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -1361,6 +1361,8 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) + netif_dbg(dev, ifup, dev->net, + "MAC address set to random addr"); + } ++ ++ tasklet_schedule(&dev->bh); + } + + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); +diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h +index 37f53bd8fcb1..184b6810cde9 100644 +--- a/drivers/net/wireless/ath/regd.h ++++ b/drivers/net/wireless/ath/regd.h +@@ -68,12 +68,14 @@ enum CountryCode { + CTRY_AUSTRALIA = 36, + CTRY_AUSTRIA = 40, + CTRY_AZERBAIJAN = 31, ++ CTRY_BAHAMAS = 44, + CTRY_BAHRAIN = 48, + CTRY_BANGLADESH = 50, + CTRY_BARBADOS = 52, + CTRY_BELARUS = 112, + CTRY_BELGIUM = 56, + CTRY_BELIZE = 84, ++ CTRY_BERMUDA = 60, + 
CTRY_BOLIVIA = 68, + CTRY_BOSNIA_HERZ = 70, + CTRY_BRAZIL = 76, +@@ -159,6 +161,7 @@ enum CountryCode { + CTRY_ROMANIA = 642, + CTRY_RUSSIA = 643, + CTRY_SAUDI_ARABIA = 682, ++ CTRY_SERBIA = 688, + CTRY_SERBIA_MONTENEGRO = 891, + CTRY_SINGAPORE = 702, + CTRY_SLOVAKIA = 703, +@@ -170,11 +173,13 @@ enum CountryCode { + CTRY_SWITZERLAND = 756, + CTRY_SYRIA = 760, + CTRY_TAIWAN = 158, ++ CTRY_TANZANIA = 834, + CTRY_THAILAND = 764, + CTRY_TRINIDAD_Y_TOBAGO = 780, + CTRY_TUNISIA = 788, + CTRY_TURKEY = 792, + CTRY_UAE = 784, ++ CTRY_UGANDA = 800, + CTRY_UKRAINE = 804, + CTRY_UNITED_KINGDOM = 826, + CTRY_UNITED_STATES = 840, +diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h +index bdd2b4d61f2f..15bbd1e0d912 100644 +--- a/drivers/net/wireless/ath/regd_common.h ++++ b/drivers/net/wireless/ath/regd_common.h +@@ -35,6 +35,7 @@ enum EnumRd { + FRANCE_RES = 0x31, + FCC3_FCCA = 0x3A, + FCC3_WORLD = 0x3B, ++ FCC3_ETSIC = 0x3F, + + ETSI1_WORLD = 0x37, + ETSI3_ETSIA = 0x32, +@@ -44,6 +45,7 @@ enum EnumRd { + ETSI4_ETSIC = 0x38, + ETSI5_WORLD = 0x39, + ETSI6_WORLD = 0x34, ++ ETSI8_WORLD = 0x3D, + ETSI_RESERVED = 0x33, + + MKK1_MKKA = 0x40, +@@ -59,6 +61,7 @@ enum EnumRd { + MKK1_MKKA1 = 0x4A, + MKK1_MKKA2 = 0x4B, + MKK1_MKKC = 0x4C, ++ APL2_FCCA = 0x4D, + + APL3_FCCA = 0x50, + APL1_WORLD = 0x52, +@@ -67,6 +70,7 @@ enum EnumRd { + APL1_ETSIC = 0x55, + APL2_ETSIC = 0x56, + APL5_WORLD = 0x58, ++ APL13_WORLD = 0x5A, + APL6_WORLD = 0x5B, + APL7_FCCA = 0x5C, + APL8_WORLD = 0x5D, +@@ -168,6 +172,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { + {FCC2_ETSIC, CTL_FCC, CTL_ETSI}, + {FCC3_FCCA, CTL_FCC, CTL_FCC}, + {FCC3_WORLD, CTL_FCC, CTL_ETSI}, ++ {FCC3_ETSIC, CTL_FCC, CTL_ETSI}, + {FCC4_FCCA, CTL_FCC, CTL_FCC}, + {FCC5_FCCA, CTL_FCC, CTL_FCC}, + {FCC6_FCCA, CTL_FCC, CTL_FCC}, +@@ -179,6 +184,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { + {ETSI4_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI5_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI6_WORLD, CTL_ETSI, CTL_ETSI}, ++ {ETSI8_WORLD, CTL_ETSI, CTL_ETSI}, + + /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? 
*/ + {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI}, +@@ -188,9 +194,11 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { + {FCC1_FCCA, CTL_FCC, CTL_FCC}, + {APL1_WORLD, CTL_FCC, CTL_ETSI}, + {APL2_WORLD, CTL_FCC, CTL_ETSI}, ++ {APL2_FCCA, CTL_FCC, CTL_FCC}, + {APL3_WORLD, CTL_FCC, CTL_ETSI}, + {APL4_WORLD, CTL_FCC, CTL_ETSI}, + {APL5_WORLD, CTL_FCC, CTL_ETSI}, ++ {APL13_WORLD, CTL_ETSI, CTL_ETSI}, + {APL6_WORLD, CTL_ETSI, CTL_ETSI}, + {APL8_WORLD, CTL_ETSI, CTL_ETSI}, + {APL9_WORLD, CTL_ETSI, CTL_ETSI}, +@@ -298,6 +306,7 @@ static struct country_code_to_enum_rd allCountries[] = { + {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, + {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, + {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, ++ {CTRY_BAHAMAS, FCC3_WORLD, "BS"}, + {CTRY_BAHRAIN, APL6_WORLD, "BH"}, + {CTRY_BANGLADESH, NULL1_WORLD, "BD"}, + {CTRY_BARBADOS, FCC2_WORLD, "BB"}, +@@ -305,6 +314,7 @@ static struct country_code_to_enum_rd allCountries[] = { + {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, + {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, + {CTRY_BELIZE, APL1_ETSIC, "BZ"}, ++ {CTRY_BERMUDA, FCC3_FCCA, "BM"}, + {CTRY_BOLIVIA, APL1_ETSIC, "BO"}, + {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"}, + {CTRY_BRAZIL, FCC3_WORLD, "BR"}, +@@ -444,6 +454,7 @@ static struct country_code_to_enum_rd allCountries[] = { + {CTRY_ROMANIA, NULL1_WORLD, "RO"}, + {CTRY_RUSSIA, NULL1_WORLD, "RU"}, + {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, ++ {CTRY_SERBIA, ETSI1_WORLD, "RS"}, + {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"}, + {CTRY_SINGAPORE, APL6_WORLD, "SG"}, + {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"}, +@@ -455,10 +466,12 @@ static struct country_code_to_enum_rd allCountries[] = { + {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"}, + {CTRY_SYRIA, NULL1_WORLD, "SY"}, + {CTRY_TAIWAN, APL3_FCCA, "TW"}, ++ {CTRY_TANZANIA, APL1_WORLD, "TZ"}, + {CTRY_THAILAND, FCC3_WORLD, "TH"}, + {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"}, + {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, + {CTRY_TURKEY, ETSI3_WORLD, "TR"}, ++ {CTRY_UGANDA, FCC3_WORLD, "UG"}, + {CTRY_UKRAINE, NULL1_WORLD, "UA"}, + {CTRY_UAE, NULL1_WORLD, "AE"}, + {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"}, +diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +index 59cef6c69fe8..91da67657f81 100644 +--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c ++++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +@@ -1109,6 +1109,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362), ++ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345), +diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c +index e06591f625c4..d6f9858ff2de 100644 +--- a/drivers/net/wireless/iwlwifi/pcie/rx.c ++++ b/drivers/net/wireless/iwlwifi/pcie/rx.c +@@ -713,6 +713,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) + WQ_HIGHPRI | WQ_UNBOUND, 1); + INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); + ++ cancel_work_sync(&rba->rx_alloc); ++ + spin_lock(&rba->lock); + atomic_set(&rba->req_pending, 0); + atomic_set(&rba->req_ready, 0); +diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c +index e43aff932360..1a1b1de87583 100644 +--- a/drivers/net/wireless/mwifiex/usb.c ++++ b/drivers/net/wireless/mwifiex/usb.c +@@ -624,6 +624,9 @@ static void 
mwifiex_usb_disconnect(struct usb_interface *intf) + MWIFIEX_FUNC_SHUTDOWN); + } + ++ if (adapter->workqueue) ++ flush_workqueue(adapter->workqueue); ++ + mwifiex_usb_free(card); + + mwifiex_dbg(adapter, FATAL, +diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c +index 0cec8a64473e..eb5ffa5b1c6c 100644 +--- a/drivers/net/wireless/mwifiex/util.c ++++ b/drivers/net/wireless/mwifiex/util.c +@@ -702,12 +702,14 @@ void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr, + s8 nflr) + { + struct mwifiex_histogram_data *phist_data = priv->hist_data; ++ s8 nf = -nflr; ++ s8 rssi = snr - nflr; + + atomic_inc(&phist_data->num_samples); + atomic_inc(&phist_data->rx_rate[rx_rate]); +- atomic_inc(&phist_data->snr[snr]); +- atomic_inc(&phist_data->noise_flr[128 + nflr]); +- atomic_inc(&phist_data->sig_str[nflr - snr]); ++ atomic_inc(&phist_data->snr[snr + 128]); ++ atomic_inc(&phist_data->noise_flr[nf + 128]); ++ atomic_inc(&phist_data->sig_str[rssi + 128]); + } + + /* function to reset histogram data during init/reset */ +diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c +index 8428858204a6..fc895b466ebb 100644 +--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c ++++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c +@@ -155,7 +155,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) + int err; + struct mmc_card *card = pfunction->card; + struct mmc_host *host = card->host; +- s32 bit = (fls(host->ocr_avail) - 1); + u8 cmd52_resp; + u32 clock, resp, i; + u16 rca; +@@ -175,7 +174,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) + msleep(20); + + /* Initialize the SDIO card */ +- host->ios.vdd = bit; + host->ios.chip_select = MMC_CS_DONTCARE; + host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; + host->ios.power_mode = MMC_POWER_UP; +diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c +index c172da56b550..e4a8280cea83 100644 +--- a/drivers/net/wireless/ti/wlcore/sdio.c ++++ b/drivers/net/wireless/ti/wlcore/sdio.c +@@ -388,6 +388,11 @@ static int wl1271_suspend(struct device *dev) + mmc_pm_flag_t sdio_flags; + int ret = 0; + ++ if (!wl) { ++ dev_err(dev, "no wilink module was probed\n"); ++ goto out; ++ } ++ + dev_dbg(dev, "wl1271 suspend. 
wow_enabled: %d\n", + wl->wow_enabled); + +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index a0de2453fa09..bec9f099573b 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -86,6 +86,7 @@ struct netfront_cb { + /* IRQ name is queue name with "-tx" or "-rx" appended */ + #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) + ++static DECLARE_WAIT_QUEUE_HEAD(module_load_q); + static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); + + struct netfront_stats { +@@ -238,7 +239,7 @@ static void rx_refill_timeout(unsigned long data) + static int netfront_tx_slot_available(struct netfront_queue *queue) + { + return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < +- (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); ++ (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1); + } + + static void xennet_maybe_wake_tx(struct netfront_queue *queue) +@@ -775,7 +776,7 @@ static int xennet_get_responses(struct netfront_queue *queue, + RING_IDX cons = queue->rx.rsp_cons; + struct sk_buff *skb = xennet_get_rx_skb(queue, cons); + grant_ref_t ref = xennet_get_rx_ref(queue, cons); +- int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); ++ int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); + int slots = 1; + int err = 0; + unsigned long ret; +@@ -1335,6 +1336,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) + netif_carrier_off(netdev); + + xenbus_switch_state(dev, XenbusStateInitialising); ++ wait_event(module_load_q, ++ xenbus_read_driver_state(dev->otherend) != ++ XenbusStateClosed && ++ xenbus_read_driver_state(dev->otherend) != ++ XenbusStateUnknown); + return netdev; + + exit: +diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c +index ec91cd17bf34..5fb4ed6ea322 100644 +--- a/drivers/pci/pci-sysfs.c ++++ b/drivers/pci/pci-sysfs.c +@@ -180,13 +180,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr, + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + +- if (!val) { +- if (pci_is_enabled(pdev)) +- pci_disable_device(pdev); +- else +- result = -EIO; +- } else ++ device_lock(dev); ++ if (dev->driver) ++ result = -EBUSY; ++ else if (val) + result = pci_enable_device(pdev); ++ else if (pci_is_enabled(pdev)) ++ pci_disable_device(pdev); ++ else ++ result = -EIO; ++ device_unlock(dev); + + return result < 0 ? 
result : count; + } +diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c +index 271cca63e9bd..9aa82a4e9e25 100644 +--- a/drivers/pinctrl/pinctrl-at91-pio4.c ++++ b/drivers/pinctrl/pinctrl-at91-pio4.c +@@ -568,8 +568,10 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev, + for_each_child_of_node(np_config, np) { + ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map, + &reserved_maps, num_maps); +- if (ret < 0) ++ if (ret < 0) { ++ of_node_put(np); + break; ++ } + } + } + +diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c +index 2a44e5dd9c2a..c68556bf6f39 100644 +--- a/drivers/regulator/pfuze100-regulator.c ++++ b/drivers/regulator/pfuze100-regulator.c +@@ -152,6 +152,7 @@ static struct regulator_ops pfuze100_sw_regulator_ops = { + static struct regulator_ops pfuze100_swb_regulator_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, ++ .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c +index c2cf9485fe32..8c10f3db6336 100644 +--- a/drivers/rtc/interface.c ++++ b/drivers/rtc/interface.c +@@ -349,6 +349,11 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) + { + int err; + ++ if (!rtc->ops) ++ return -ENODEV; ++ else if (!rtc->ops->set_alarm) ++ return -EINVAL; ++ + err = rtc_valid_tm(&alarm->time); + if (err != 0) + return err; +diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c +index a56a7b243e91..5466246c69b4 100644 +--- a/drivers/scsi/3w-9xxx.c ++++ b/drivers/scsi/3w-9xxx.c +@@ -889,6 +889,11 @@ static int twa_chrdev_open(struct inode *inode, struct file *file) + unsigned int minor_number; + int retval = TW_IOCTL_ERROR_OS_ENODEV; + ++ if (!capable(CAP_SYS_ADMIN)) { ++ retval = -EACCES; ++ goto out; ++ } ++ + minor_number = iminor(inode); + if (minor_number >= twa_device_extension_count) + goto out; +diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c +index 2940bd769936..14af38036287 100644 +--- a/drivers/scsi/3w-xxxx.c ++++ b/drivers/scsi/3w-xxxx.c +@@ -1034,6 +1034,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file) + + dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n"); + ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EACCES; ++ + minor_number = iminor(inode); + if (minor_number >= tw_device_extension_count) + return -ENODEV; +diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c +index 9d05302a3bcd..19bffe0b2cc0 100644 +--- a/drivers/scsi/megaraid.c ++++ b/drivers/scsi/megaraid.c +@@ -4197,6 +4197,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) + int irq, i, j; + int error = -ENODEV; + ++ if (hba_count >= MAX_CONTROLLERS) ++ goto out; ++ + if (pci_enable_device(pdev)) + goto out; + pci_set_master(pdev); +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c +index 96007633ad39..213944ed64d9 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c +@@ -1886,6 +1886,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, + pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value); + pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); + } else { ++ if (os_timeout_value) ++ os_timeout_value++; ++ + /* system pd Fast Path */ + io_request->Function = 
MPI2_FUNCTION_SCSI_IO_REQUEST; + timeout_limit = (scmd->device->type == TYPE_DISK) ? +diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c +index 5711d58f9e81..a8ebaeace154 100644 +--- a/drivers/scsi/scsi_dh.c ++++ b/drivers/scsi/scsi_dh.c +@@ -58,7 +58,10 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { + {"IBM", "3526", "rdac", }, + {"IBM", "3542", "rdac", }, + {"IBM", "3552", "rdac", }, +- {"SGI", "TP9", "rdac", }, ++ {"SGI", "TP9300", "rdac", }, ++ {"SGI", "TP9400", "rdac", }, ++ {"SGI", "TP9500", "rdac", }, ++ {"SGI", "TP9700", "rdac", }, + {"SGI", "IS", "rdac", }, + {"STK", "OPENstorage", "rdac", }, + {"STK", "FLEXLINE 380", "rdac", }, +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 4302880a20b3..e1639e80db53 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -2195,6 +2195,7 @@ sg_add_sfp(Sg_device * sdp) + write_lock_irqsave(&sdp->sfd_lock, iflags); + if (atomic_read(&sdp->detaching)) { + write_unlock_irqrestore(&sdp->sfd_lock, iflags); ++ kfree(sfp); + return ERR_PTR(-ENODEV); + } + list_add_tail(&sfp->sfd_siblings, &sdp->sfds); +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 18f26cf1e24d..8c58adadb728 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -3447,6 +3447,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) + hba = container_of(work, struct ufs_hba, eeh_work); + + pm_runtime_get_sync(hba->dev); ++ scsi_block_requests(hba->host); + err = ufshcd_get_ee_status(hba, &status); + if (err) { + dev_err(hba->dev, "%s: failed to get exception status %d\n", +@@ -3462,6 +3463,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) + __func__, err); + } + out: ++ scsi_unblock_requests(hba->host); + pm_runtime_put_sync(hba->dev); + return; + } +diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c +index fa61eff88496..16d45a25284f 100644 +--- a/drivers/thermal/samsung/exynos_tmu.c ++++ b/drivers/thermal/samsung/exynos_tmu.c +@@ -585,6 +585,7 @@ static int exynos5433_tmu_initialize(struct platform_device *pdev) + threshold_code = temp_to_code(data, temp); + + rising_threshold = readl(data->base + rising_reg_offset); ++ rising_threshold &= ~(0xff << j * 8); + rising_threshold |= (threshold_code << j * 8); + writel(rising_threshold, data->base + rising_reg_offset); + +diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c +index 47b54c6aefd2..9f660e55d1ba 100644 +--- a/drivers/tty/hvc/hvc_opal.c ++++ b/drivers/tty/hvc/hvc_opal.c +@@ -323,7 +323,6 @@ static void udbg_init_opal_common(void) + udbg_putc = udbg_opal_putc; + udbg_getc = udbg_opal_getc; + udbg_getc_poll = udbg_opal_getc_poll; +- tb_ticks_per_usec = 0x200; /* Make udelay not suck */ + } + + void __init hvc_opal_init_early(void) +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 96aa0ad32497..c8a2e5b0eff7 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -106,16 +106,19 @@ static void pty_unthrottle(struct tty_struct *tty) + static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) + { + struct tty_struct *to = tty->link; ++ unsigned long flags; + + if (tty->stopped) + return 0; + + if (c > 0) { ++ spin_lock_irqsave(&to->port->lock, flags); + /* Stuff the data into the input queue of the other end */ + c = tty_insert_flip_string(to->port, buf, c); + /* And shovel */ + if (c) + tty_flip_buffer_push(to->port); ++ spin_unlock_irqrestore(&to->port->lock, flags); + } + return c; + } +diff --git a/drivers/usb/core/hub.c 
b/drivers/usb/core/hub.c +index 93756664592a..2facffea2ee0 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -3308,6 +3308,10 @@ static int wait_for_ss_port_enable(struct usb_device *udev, + while (delay_ms < 2000) { + if (status || *portstatus & USB_PORT_STAT_CONNECTION) + break; ++ if (!port_is_power_on(hub, *portstatus)) { ++ status = -ENODEV; ++ break; ++ } + msleep(20); + delay_ms += 20; + status = hub_port_status(hub, *port1, portstatus, portchange); +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c +index 7cf26768ea0b..cbe9e2295752 100644 +--- a/drivers/virtio/virtio_balloon.c ++++ b/drivers/virtio/virtio_balloon.c +@@ -479,7 +479,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, + tell_host(vb, vb->inflate_vq); + + /* balloon's page migration 2nd step -- deflate "page" */ ++ spin_lock_irqsave(&vb_dev_info->pages_lock, flags); + balloon_page_delete(page); ++ spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); + vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; + set_page_pfns(vb, vb->pfns, page); + tell_host(vb, vb->deflate_vq); +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 88d9b66e2207..a751937dded5 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -2185,6 +2185,21 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans) + BUG(); + } + ++/* ++ * Check if the leaf is the last leaf. Which means all node pointers ++ * are at their last position. ++ */ ++static bool is_last_leaf(struct btrfs_path *path) ++{ ++ int i; ++ ++ for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { ++ if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1) ++ return false; ++ } ++ return true; ++} ++ + /* + * returns < 0 on error, 0 when more leafs are to be scanned. + * returns 1 when done. +@@ -2198,6 +2213,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, + struct ulist *roots = NULL; + struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem); + u64 num_bytes; ++ bool done; + int slot; + int ret; + +@@ -2225,6 +2241,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, + mutex_unlock(&fs_info->qgroup_rescan_lock); + return ret; + } ++ done = is_last_leaf(path); + + btrfs_item_key_to_cpu(path->nodes[0], &found, + btrfs_header_nritems(path->nodes[0]) - 1); +@@ -2271,6 +2288,8 @@ out: + } + btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); + ++ if (done && !ret) ++ ret = 1; + return ret; + } + +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 738f5d6beb95..2c7f9a5f8717 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -2961,8 +2961,11 @@ out_wake_log_root: + mutex_unlock(&log_root_tree->log_mutex); + + /* +- * The barrier before waitqueue_active is implied by mutex_unlock ++ * The barrier before waitqueue_active is needed so all the updates ++ * above are seen by the woken threads. It might not be necessary, but ++ * proving that seems to be hard. + */ ++ smp_mb(); + if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) + wake_up(&log_root_tree->log_commit_wait[index2]); + out: +@@ -2973,8 +2976,11 @@ out: + mutex_unlock(&root->log_mutex); + + /* +- * The barrier before waitqueue_active is implied by mutex_unlock ++ * The barrier before waitqueue_active is needed so all the updates ++ * above are seen by the woken threads. It might not be necessary, but ++ * proving that seems to be hard. 
+ */ ++ smp_mb(); + if (waitqueue_active(&root->log_commit_wait[index1])) + wake_up(&root->log_commit_wait[index1]); + return ret; +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c +index f77b3258454a..2bba0c4ef4b7 100644 +--- a/fs/f2fs/segment.c ++++ b/fs/f2fs/segment.c +@@ -295,6 +295,9 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi) + + void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) + { ++ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) ++ return; ++ + /* try to shrink extent cache when there is no enough memory */ + if (!available_free_memory(sbi, EXTENT_CACHE)) + f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 4f666368aa85..6cc67e1bbb41 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -1566,6 +1566,12 @@ static int __init init_f2fs_fs(void) + { + int err; + ++ if (PAGE_SIZE != F2FS_BLKSIZE) { ++ printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n", ++ PAGE_SIZE, F2FS_BLKSIZE); ++ return -EINVAL; ++ } ++ + f2fs_build_trace_ios(); + + err = init_inodecache(); +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 57e3262ec57a..ee0da259a3d3 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -1538,6 +1538,8 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp, + gdev->gd_maxcount = be32_to_cpup(p++); + num = be32_to_cpup(p++); + if (num) { ++ if (num > 1000) ++ goto xdr_error; + READ_BUF(4 * num); + gdev->gd_notify_types = be32_to_cpup(p++); + for (i = 1; i < num; i++) { +diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c +index 1cb70a0b2168..91ce49c05b7c 100644 +--- a/fs/squashfs/cache.c ++++ b/fs/squashfs/cache.c +@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer, + + TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); + ++ if (unlikely(length < 0)) ++ return -EIO; ++ + while (length) { + entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); + if (entry->error) { +diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c +index e5c9689062ba..1ec7bae2751d 100644 +--- a/fs/squashfs/file.c ++++ b/fs/squashfs/file.c +@@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n, + } + + for (i = 0; i < blocks; i++) { +- int size = le32_to_cpu(blist[i]); ++ int size = squashfs_block_size(blist[i]); ++ if (size < 0) { ++ err = size; ++ goto failure; ++ } + block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size); + } + n -= blocks; +@@ -367,7 +371,7 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) + sizeof(size)); + if (res < 0) + return res; +- return le32_to_cpu(size); ++ return squashfs_block_size(size); + } + + /* Copy data into page cache */ +diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c +index 0ed6edbc5c71..0681feab4a84 100644 +--- a/fs/squashfs/fragment.c ++++ b/fs/squashfs/fragment.c +@@ -49,11 +49,16 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment, + u64 *fragment_block) + { + struct squashfs_sb_info *msblk = sb->s_fs_info; +- int block = SQUASHFS_FRAGMENT_INDEX(fragment); +- int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); +- u64 start_block = le64_to_cpu(msblk->fragment_index[block]); ++ int block, offset, size; + struct squashfs_fragment_entry fragment_entry; +- int size; ++ u64 start_block; ++ ++ if (fragment >= msblk->fragments) ++ return -EIO; ++ block = SQUASHFS_FRAGMENT_INDEX(fragment); ++ offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); ++ ++ start_block = le64_to_cpu(msblk->fragment_index[block]); + + size = 
squashfs_read_metadata(sb, &fragment_entry, &start_block, + &offset, sizeof(fragment_entry)); +@@ -61,9 +66,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment, + return size; + + *fragment_block = le64_to_cpu(fragment_entry.start_block); +- size = le32_to_cpu(fragment_entry.size); +- +- return size; ++ return squashfs_block_size(fragment_entry.size); + } + + +diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h +index 506f4ba5b983..e66486366f02 100644 +--- a/fs/squashfs/squashfs_fs.h ++++ b/fs/squashfs/squashfs_fs.h +@@ -129,6 +129,12 @@ + + #define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK)) + ++static inline int squashfs_block_size(__le32 raw) ++{ ++ u32 size = le32_to_cpu(raw); ++ return (size >> 25) ? -EIO : size; ++} ++ + /* + * Inode number ops. Inodes consist of a compressed block number, and an + * uncompressed offset within that block +diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h +index 1da565cb50c3..ef69c31947bf 100644 +--- a/fs/squashfs/squashfs_fs_sb.h ++++ b/fs/squashfs/squashfs_fs_sb.h +@@ -75,6 +75,7 @@ struct squashfs_sb_info { + unsigned short block_log; + long long bytes_used; + unsigned int inodes; ++ unsigned int fragments; + int xattr_ids; + }; + #endif +diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c +index 5056babe00df..93aa3e23c845 100644 +--- a/fs/squashfs/super.c ++++ b/fs/squashfs/super.c +@@ -176,6 +176,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) + msblk->inode_table = le64_to_cpu(sblk->inode_table_start); + msblk->directory_table = le64_to_cpu(sblk->directory_table_start); + msblk->inodes = le32_to_cpu(sblk->inodes); ++ msblk->fragments = le32_to_cpu(sblk->fragments); + flags = le16_to_cpu(sblk->flags); + + TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b)); +@@ -186,7 +187,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) + TRACE("Filesystem size %lld bytes\n", msblk->bytes_used); + TRACE("Block size %d\n", msblk->block_size); + TRACE("Number of inodes %d\n", msblk->inodes); +- TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments)); ++ TRACE("Number of fragments %d\n", msblk->fragments); + TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); + TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); + TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); +@@ -273,7 +274,7 @@ allocate_id_index_table: + sb->s_export_op = &squashfs_export_ops; + + handle_fragments: +- fragments = le32_to_cpu(sblk->fragments); ++ fragments = msblk->fragments; + if (fragments == 0) + goto check_directory_table; + +diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h +index bb9d0deca07c..0fb4975fae91 100644 +--- a/include/drm/drm_dp_helper.h ++++ b/include/drm/drm_dp_helper.h +@@ -342,6 +342,7 @@ + # define DP_PSR_FRAME_CAPTURE (1 << 3) + # define DP_PSR_SELECTIVE_UPDATE (1 << 4) + # define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) ++# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */ + + #define DP_ADAPTER_CTRL 0x1a0 + # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) +diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h +index fc481037478a..19baa7f4f403 100644 +--- a/include/linux/dma-iommu.h ++++ b/include/linux/dma-iommu.h +@@ -17,6 +17,7 @@ + #define __DMA_IOMMU_H + + #ifdef __KERNEL__ ++#include + #include + + #ifdef CONFIG_IOMMU_DMA +diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h +index 
83430f2ea757..e0325706b76d 100644 +--- a/include/linux/mmc/sdio_ids.h ++++ b/include/linux/mmc/sdio_ids.h +@@ -33,6 +33,7 @@ + #define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d + #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 + #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 ++#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 + #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 + #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 + #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 +diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h +index 1d6a935c1ac5..8793f5a7b820 100644 +--- a/include/linux/netfilter/ipset/ip_set_timeout.h ++++ b/include/linux/netfilter/ipset/ip_set_timeout.h +@@ -65,8 +65,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value) + static inline u32 + ip_set_timeout_get(unsigned long *timeout) + { +- return *timeout == IPSET_ELEM_PERMANENT ? 0 : +- jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; ++ u32 t; ++ ++ if (*timeout == IPSET_ELEM_PERMANENT) ++ return 0; ++ ++ t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; ++ /* Zero value in userspace means no timeout */ ++ return t == 0 ? 1 : t; + } + + #endif /* __KERNEL__ */ +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 65babd8a682d..cac4a6ad5db3 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -376,7 +376,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags); + +-void tcp_enter_quickack_mode(struct sock *sk); ++void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); + static inline void tcp_dec_quickack_mode(struct sock *sk, + const unsigned int pkts) + { +diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h +index 44202ff897fd..f759e0918037 100644 +--- a/include/soc/tegra/mc.h ++++ b/include/soc/tegra/mc.h +@@ -99,6 +99,8 @@ struct tegra_mc_soc { + u8 client_id_mask; + + const struct tegra_smmu_soc *smmu; ++ ++ u32 intmask; + }; + + struct tegra_mc { +diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c +index b8ff9e193753..b57f929f1b46 100644 +--- a/kernel/auditfilter.c ++++ b/kernel/auditfilter.c +@@ -406,7 +406,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f) + return -EINVAL; + break; + case AUDIT_EXE: +- if (f->op != Audit_equal) ++ if (f->op != Audit_not_equal && f->op != Audit_equal) + return -EINVAL; + if (entry->rule.listnr != AUDIT_FILTER_EXIT) + return -EINVAL; +diff --git a/kernel/auditsc.c b/kernel/auditsc.c +index 7444f95f3ee9..0fe8b337291a 100644 +--- a/kernel/auditsc.c ++++ b/kernel/auditsc.c +@@ -470,6 +470,8 @@ static int audit_filter_rules(struct task_struct *tsk, + break; + case AUDIT_EXE: + result = audit_exe_compare(tsk, rule->exe); ++ if (f->op == Audit_not_equal) ++ result = !result; + break; + case AUDIT_UID: + result = audit_uid_comparator(cred->uid, f->op, f->uid); +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 79e3c21a35d0..35dfa9e9d69e 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2101,7 +2101,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env) + /* hold the map. 
If the program is rejected by verifier, + * the map will be released by release_maps() or it + * will be used by the valid program until it's unloaded +- * and all maps are released in free_bpf_prog_info() ++ * and all maps are released in free_used_maps() + */ + map = bpf_map_inc(map, false); + if (IS_ERR(map)) { +@@ -2487,7 +2487,7 @@ free_log_buf: + vfree(log_buf); + if (!env->prog->aux->used_maps) + /* if we didn't copy map pointers into bpf_prog_info, release +- * them now. Otherwise free_bpf_prog_info() will release them. ++ * them now. Otherwise free_used_maps() will release them. + */ + release_maps(env); + *prog = env->prog; +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c +index 2172dd61577e..b8a894adab2c 100644 +--- a/kernel/trace/trace_events_trigger.c ++++ b/kernel/trace/trace_events_trigger.c +@@ -663,6 +663,8 @@ event_trigger_callback(struct event_command *cmd_ops, + goto out_free; + + out_reg: ++ /* Up the trigger_data count to make sure reg doesn't free it on failure */ ++ event_trigger_init(trigger_ops, trigger_data); + ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); + /* + * The above returns on success the # of functions enabled, +@@ -670,11 +672,13 @@ event_trigger_callback(struct event_command *cmd_ops, + * Consider no functions a failure too. + */ + if (!ret) { ++ cmd_ops->unreg(glob, trigger_ops, trigger_data, file); + ret = -ENOENT; +- goto out_free; +- } else if (ret < 0) +- goto out_free; +- ret = 0; ++ } else if (ret > 0) ++ ret = 0; ++ ++ /* Down the counter of trigger_data or free it if not used anymore */ ++ event_trigger_free(trigger_ops, trigger_data); + out: + return ret; + +@@ -1227,6 +1231,9 @@ event_enable_trigger_func(struct event_command *cmd_ops, + goto out; + } + ++ /* Up the trigger_data count to make sure nothing frees it on failure */ ++ event_trigger_init(trigger_ops, trigger_data); ++ + if (trigger) { + number = strsep(&trigger, ":"); + +@@ -1277,6 +1284,7 @@ event_enable_trigger_func(struct event_command *cmd_ops, + goto out_disable; + /* Just return zero, not the number of enabled functions */ + ret = 0; ++ event_trigger_free(trigger_ops, trigger_data); + out: + return ret; + +@@ -1287,7 +1295,7 @@ event_enable_trigger_func(struct event_command *cmd_ops, + out_free: + if (cmd_ops->set_filter) + cmd_ops->set_filter(NULL, trigger_data, NULL); +- kfree(trigger_data); ++ event_trigger_free(trigger_ops, trigger_data); + kfree(enable_data); + goto out; + } +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index f2682799c215..f0ee722be520 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -349,11 +349,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, + static int + enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) + { ++ struct event_file_link *link = NULL; + int ret = 0; + + if (file) { +- struct event_file_link *link; +- + link = kmalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + ret = -ENOMEM; +@@ -373,6 +372,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) + else + ret = enable_kprobe(&tk->rp.kp); + } ++ ++ if (ret) { ++ if (file) { ++ /* Notice the if is true on not WARN() */ ++ if (!WARN_ON_ONCE(!link)) ++ list_del_rcu(&link->list); ++ kfree(link); ++ tk->tp.flags &= ~TP_FLAG_TRACE; ++ } else { ++ tk->tp.flags &= ~TP_FLAG_PROFILE; ++ } ++ } + out: + return ret; + } +diff --git a/mm/slub.c b/mm/slub.c +index 4cf3a9c768b1..2284c4333857 100644 +--- a/mm/slub.c ++++ 
b/mm/slub.c +@@ -659,7 +659,7 @@ void object_err(struct kmem_cache *s, struct page *page, + print_trailer(s, page, object); + } + +-static void slab_err(struct kmem_cache *s, struct page *page, ++static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, + const char *fmt, ...) + { + va_list args; +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index 8e3c9c5a3042..de8e372ece04 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -1460,7 +1460,7 @@ static void __vunmap(const void *addr, int deallocate_pages) + addr)) + return; + +- area = remove_vm_area(addr); ++ area = find_vmap_area((unsigned long)addr)->vm; + if (unlikely(!area)) { + WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", + addr); +@@ -1470,6 +1470,7 @@ static void __vunmap(const void *addr, int deallocate_pages) + debug_check_no_locks_freed(addr, get_vm_area_size(area)); + debug_check_no_obj_freed(addr, get_vm_area_size(area)); + ++ remove_vm_area(addr); + if (deallocate_pages) { + int i; + +diff --git a/net/dsa/slave.c b/net/dsa/slave.c +index 554c2a961ad5..48b28a7ecc7a 100644 +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -1099,6 +1099,9 @@ int dsa_slave_suspend(struct net_device *slave_dev) + { + struct dsa_slave_priv *p = netdev_priv(slave_dev); + ++ if (!netif_running(slave_dev)) ++ return 0; ++ + netif_device_detach(slave_dev); + + if (p->phy) { +@@ -1116,6 +1119,9 @@ int dsa_slave_resume(struct net_device *slave_dev) + { + struct dsa_slave_priv *p = netdev_priv(slave_dev); + ++ if (!netif_running(slave_dev)) ++ return 0; ++ + netif_device_attach(slave_dev); + + if (p->phy) { +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 8f05816a8be2..015c33712803 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -289,19 +289,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) + return ip_hdr(skb)->daddr; + + in_dev = __in_dev_get_rcu(dev); +- BUG_ON(!in_dev); + + net = dev_net(dev); + + scope = RT_SCOPE_UNIVERSE; + if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { ++ bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev); + struct flowi4 fl4 = { + .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_oif = l3mdev_master_ifindex_rcu(dev), + .daddr = ip_hdr(skb)->saddr, + .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), + .flowi4_scope = scope, +- .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0, ++ .flowi4_mark = vmark ? 
skb->mark : 0, + }; + if (!fib_lookup(net, &fl4, &res, 0)) + return FIB_RES_PREFSRC(net, res); +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c +index b34fa1bb278f..b2001b20e029 100644 +--- a/net/ipv4/inet_fragment.c ++++ b/net/ipv4/inet_fragment.c +@@ -364,11 +364,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, + { + struct inet_frag_queue *q; + +- if (frag_mem_limit(nf) > nf->high_thresh) { +- inet_frag_schedule_worker(f); +- return NULL; +- } +- + q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); + if (!q) + return NULL; +@@ -405,6 +400,11 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, + struct inet_frag_queue *q; + int depth = 0; + ++ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) { ++ inet_frag_schedule_worker(f); ++ return NULL; ++ } ++ + if (frag_mem_limit(nf) > nf->low_thresh) + inet_frag_schedule_worker(f); + +diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c +index 9d6b9c4c5f82..60f564db25a3 100644 +--- a/net/ipv4/ipconfig.c ++++ b/net/ipv4/ipconfig.c +@@ -790,6 +790,11 @@ static void __init ic_bootp_init_ext(u8 *e) + */ + static inline void __init ic_bootp_init(void) + { ++ /* Re-initialise all name servers to NONE, in case any were set via the ++ * "ip=" or "nfsaddrs=" kernel command line parameters: any IP addresses ++ * specified there will already have been decoded but are no longer ++ * needed ++ */ + ic_nameservers_predef(); + + dev_add_pack(&bootp_packet_type); +@@ -1423,6 +1428,13 @@ static int __init ip_auto_config(void) + int err; + unsigned int i; + ++ /* Initialise all name servers to NONE (but only if the "ip=" or ++ * "nfsaddrs=" kernel command line parameters weren't decoded, otherwise ++ * we'll overwrite the IP addresses specified there) ++ */ ++ if (ic_set_manually == 0) ++ ic_nameservers_predef(); ++ + #ifdef CONFIG_PROC_FS + proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops); + #endif /* CONFIG_PROC_FS */ +@@ -1640,6 +1652,7 @@ static int __init ip_auto_config_setup(char *addrs) + return 1; + } + ++ /* Initialise all name servers to NONE */ + ic_nameservers_predef(); + + /* Parse string for static IP assignment. 
*/ +diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c +index e63b764e55ea..6300edf90e60 100644 +--- a/net/ipv4/tcp_dctcp.c ++++ b/net/ipv4/tcp_dctcp.c +@@ -138,7 +138,7 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); +- tcp_enter_quickack_mode(sk); ++ tcp_enter_quickack_mode(sk, 1); + } + + ca->prior_rcv_nxt = tp->rcv_nxt; +@@ -159,7 +159,7 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); +- tcp_enter_quickack_mode(sk); ++ tcp_enter_quickack_mode(sk, 1); + } + + ca->prior_rcv_nxt = tp->rcv_nxt; +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 5c645069a09a..4a261e078082 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -176,21 +176,23 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) + } + } + +-static void tcp_incr_quickack(struct sock *sk) ++static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks) + { + struct inet_connection_sock *icsk = inet_csk(sk); + unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); + + if (quickacks == 0) + quickacks = 2; ++ quickacks = min(quickacks, max_quickacks); + if (quickacks > icsk->icsk_ack.quick) +- icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); ++ icsk->icsk_ack.quick = quickacks; + } + +-void tcp_enter_quickack_mode(struct sock *sk) ++void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) + { + struct inet_connection_sock *icsk = inet_csk(sk); +- tcp_incr_quickack(sk); ++ ++ tcp_incr_quickack(sk, max_quickacks); + icsk->icsk_ack.pingpong = 0; + icsk->icsk_ack.ato = TCP_ATO_MIN; + } +@@ -226,8 +228,10 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) + tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; + } + +-static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) ++static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) + { ++ struct tcp_sock *tp = tcp_sk(sk); ++ + switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { + case INET_ECN_NOT_ECT: + /* Funny extension: if ECT is not set on a segment, +@@ -235,31 +239,31 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) + * it is probably a retransmit. 
+ */ + if (tp->ecn_flags & TCP_ECN_SEEN) +- tcp_enter_quickack_mode((struct sock *)tp); ++ tcp_enter_quickack_mode(sk, 2); + break; + case INET_ECN_CE: +- if (tcp_ca_needs_ecn((struct sock *)tp)) +- tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); ++ if (tcp_ca_needs_ecn(sk)) ++ tcp_ca_event(sk, CA_EVENT_ECN_IS_CE); + + if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { + /* Better not delay acks, sender can have a very low cwnd */ +- tcp_enter_quickack_mode((struct sock *)tp); ++ tcp_enter_quickack_mode(sk, 2); + tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + } + tp->ecn_flags |= TCP_ECN_SEEN; + break; + default: +- if (tcp_ca_needs_ecn((struct sock *)tp)) +- tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); ++ if (tcp_ca_needs_ecn(sk)) ++ tcp_ca_event(sk, CA_EVENT_ECN_NO_CE); + tp->ecn_flags |= TCP_ECN_SEEN; + break; + } + } + +-static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) ++static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) + { +- if (tp->ecn_flags & TCP_ECN_OK) +- __tcp_ecn_check_ce(tp, skb); ++ if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK) ++ __tcp_ecn_check_ce(sk, skb); + } + + static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) +@@ -651,7 +655,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) + /* The _first_ data packet received, initialize + * delayed ACK engine. + */ +- tcp_incr_quickack(sk); ++ tcp_incr_quickack(sk, TCP_MAX_QUICKACKS); + icsk->icsk_ack.ato = TCP_ATO_MIN; + } else { + int m = now - icsk->icsk_ack.lrcvtime; +@@ -667,13 +671,13 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) + /* Too long gap. Apparently sender failed to + * restart window, so that we send ACKs quickly. + */ +- tcp_incr_quickack(sk); ++ tcp_incr_quickack(sk, TCP_MAX_QUICKACKS); + sk_mem_reclaim(sk); + } + } + icsk->icsk_ack.lrcvtime = now; + +- tcp_ecn_check_ce(tp, skb); ++ tcp_ecn_check_ce(sk, skb); + + if (skb->len >= 128) + tcp_grow_window(sk, skb); +@@ -4136,7 +4140,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) + if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && + before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); +- tcp_enter_quickack_mode(sk); ++ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); + + if (tcp_is_sack(tp) && sysctl_tcp_dsack) { + u32 end_seq = TCP_SKB_CB(skb)->end_seq; +@@ -4364,7 +4368,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) + struct sk_buff *skb1; + u32 seq, end_seq; + +- tcp_ecn_check_ce(tp, skb); ++ tcp_ecn_check_ce(sk, skb); + + if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); +@@ -4638,7 +4642,7 @@ queue_and_out: + tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); + + out_of_window: +- tcp_enter_quickack_mode(sk); ++ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); + inet_csk_schedule_ack(sk); + drop: + __kfree_skb(skb); +@@ -4649,8 +4653,6 @@ drop: + if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) + goto out_of_window; + +- tcp_enter_quickack_mode(sk); +- + if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { + /* Partial packet, seq < rcv_next < end_seq */ + SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", +@@ -5676,7 +5678,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, + * to stand against the temptation 8) --ANK + */ + inet_csk_schedule_ack(sk); +- tcp_enter_quickack_mode(sk); ++ 
tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); + inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, + TCP_DELACK_MAX, TCP_RTO_MAX); + +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 818400fddc9b..9708fff318d5 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -62,6 +62,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -654,6 +655,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, + + if (protocol < 0 || protocol >= MAX_LINKS) + return -EPROTONOSUPPORT; ++ protocol = array_index_nospec(protocol, MAX_LINKS); + + netlink_lock_table(); + #ifdef CONFIG_MODULES +diff --git a/net/socket.c b/net/socket.c +index 5b31e5baf3b5..0c544ae48eac 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -89,6 +89,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -2324,6 +2325,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) + + if (call < 1 || call > SYS_SENDMMSG) + return -EINVAL; ++ call = array_index_nospec(call, SYS_SENDMMSG + 1); + + len = nargs[call]; + if (len > sizeof(a)) +diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c +index 14a305bd8a98..72e442d86bb1 100644 +--- a/sound/pci/emu10k1/emupcm.c ++++ b/sound/pci/emu10k1/emupcm.c +@@ -1850,7 +1850,9 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device) + if (!kctl) + return -ENOMEM; + kctl->id.device = device; +- snd_ctl_add(emu->card, kctl); ++ err = snd_ctl_add(emu->card, kctl); ++ if (err < 0) ++ return err; + + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024); + +diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c +index 4f1f69be1865..8c778fa33031 100644 +--- a/sound/pci/emu10k1/memory.c ++++ b/sound/pci/emu10k1/memory.c +@@ -237,13 +237,13 @@ __found_pages: + static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr) + { + if (addr & ~emu->dma_mask) { +- dev_err(emu->card->dev, ++ dev_err_ratelimited(emu->card->dev, + "max memory size is 0x%lx (addr = 0x%lx)!!\n", + emu->dma_mask, (unsigned long)addr); + return 0; + } + if (addr & (EMUPAGESIZE-1)) { +- dev_err(emu->card->dev, "page is not aligned\n"); ++ dev_err_ratelimited(emu->card->dev, "page is not aligned\n"); + return 0; + } + return 1; +@@ -334,7 +334,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst + else + addr = snd_pcm_sgbuf_get_addr(substream, ofs); + if (! 
is_valid_page(emu, addr)) { +- dev_err(emu->card->dev, ++ dev_err_ratelimited(emu->card->dev, + "emu: failure page = %d\n", idx); + mutex_unlock(&hdr->block_mutex); + return NULL; +diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c +index 1fdd92b6f18f..d6e89a6d0bb9 100644 +--- a/sound/pci/fm801.c ++++ b/sound/pci/fm801.c +@@ -1050,11 +1050,19 @@ static int snd_fm801_mixer(struct fm801 *chip) + if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec)) < 0) + return err; + } +- for (i = 0; i < FM801_CONTROLS; i++) +- snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls[i], chip)); ++ for (i = 0; i < FM801_CONTROLS; i++) { ++ err = snd_ctl_add(chip->card, ++ snd_ctl_new1(&snd_fm801_controls[i], chip)); ++ if (err < 0) ++ return err; ++ } + if (chip->multichannel) { +- for (i = 0; i < FM801_CONTROLS_MULTI; i++) +- snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls_multi[i], chip)); ++ for (i = 0; i < FM801_CONTROLS_MULTI; i++) { ++ err = snd_ctl_add(chip->card, ++ snd_ctl_new1(&snd_fm801_controls_multi[i], chip)); ++ if (err < 0) ++ return err; ++ } + } + return 0; + } +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c +index 29e1ce2263bc..c55c0131be0a 100644 +--- a/sound/pci/hda/patch_ca0132.c ++++ b/sound/pci/hda/patch_ca0132.c +@@ -38,6 +38,10 @@ + /* Enable this to see controls for tuning purpose. */ + /*#define ENABLE_TUNING_CONTROLS*/ + ++#ifdef ENABLE_TUNING_CONTROLS ++#include ++#endif ++ + #define FLOAT_ZERO 0x00000000 + #define FLOAT_ONE 0x3f800000 + #define FLOAT_TWO 0x40000000 +@@ -3067,8 +3071,8 @@ static int equalizer_ctl_put(struct snd_kcontrol *kcontrol, + return 1; + } + +-static const DECLARE_TLV_DB_SCALE(voice_focus_db_scale, 2000, 100, 0); +-static const DECLARE_TLV_DB_SCALE(eq_db_scale, -2400, 100, 0); ++static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(voice_focus_db_scale, 2000, 100, 0); ++static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(eq_db_scale, -2400, 100, 0); + + static int add_tuning_control(struct hda_codec *codec, + hda_nid_t pnid, hda_nid_t nid, +diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c +index 6147e86e9b0f..55ca9c9364b8 100644 +--- a/sound/soc/pxa/brownstone.c ++++ b/sound/soc/pxa/brownstone.c +@@ -136,3 +136,4 @@ module_platform_driver(mmp_driver); + MODULE_AUTHOR("Leo Yan "); + MODULE_DESCRIPTION("ALSA SoC Brownstone"); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:brownstone-audio"); +diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c +index 29bc60e85e92..6cd28f95d548 100644 +--- a/sound/soc/pxa/mioa701_wm9713.c ++++ b/sound/soc/pxa/mioa701_wm9713.c +@@ -203,3 +203,4 @@ module_platform_driver(mioa701_wm9713_driver); + MODULE_AUTHOR("Robert Jarzmik (rjarzmik@free.fr)"); + MODULE_DESCRIPTION("ALSA SoC WM9713 MIO A701"); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:mioa701-wm9713"); +diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c +index 51e790d006f5..96df9b2d8fc4 100644 +--- a/sound/soc/pxa/mmp-pcm.c ++++ b/sound/soc/pxa/mmp-pcm.c +@@ -248,3 +248,4 @@ module_platform_driver(mmp_pcm_driver); + MODULE_AUTHOR("Leo Yan "); + MODULE_DESCRIPTION("MMP Soc Audio DMA module"); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:mmp-pcm-audio"); +diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c +index eca60c29791a..ca8b23f8c525 100644 +--- a/sound/soc/pxa/mmp-sspa.c ++++ b/sound/soc/pxa/mmp-sspa.c +@@ -482,3 +482,4 @@ module_platform_driver(asoc_mmp_sspa_driver); + MODULE_AUTHOR("Leo Yan "); + MODULE_DESCRIPTION("MMP SSPA SoC Interface"); + 
MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:mmp-sspa-dai"); +diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c +index 4e74d9573f03..bcc81e920a67 100644 +--- a/sound/soc/pxa/palm27x.c ++++ b/sound/soc/pxa/palm27x.c +@@ -161,3 +161,4 @@ module_platform_driver(palm27x_wm9712_driver); + MODULE_AUTHOR("Marek Vasut "); + MODULE_DESCRIPTION("ALSA SoC Palm T|X, T5 and LifeDrive"); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:palm27x-asoc"); +diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c +index da03fad1b9cd..3cad990dad2c 100644 +--- a/sound/soc/pxa/pxa-ssp.c ++++ b/sound/soc/pxa/pxa-ssp.c +@@ -833,3 +833,4 @@ module_platform_driver(asoc_ssp_driver); + MODULE_AUTHOR("Mark Brown "); + MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface"); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:pxa-ssp-dai"); +diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c +index f3de615aacd7..9615e6de1306 100644 +--- a/sound/soc/pxa/pxa2xx-ac97.c ++++ b/sound/soc/pxa/pxa2xx-ac97.c +@@ -287,3 +287,4 @@ module_platform_driver(pxa2xx_ac97_driver); + MODULE_AUTHOR("Nicolas Pitre"); + MODULE_DESCRIPTION("AC97 driver for the Intel PXA2xx chip"); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:pxa2xx-ac97"); +diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c +index 9f390398d518..410d48b93031 100644 +--- a/sound/soc/pxa/pxa2xx-pcm.c ++++ b/sound/soc/pxa/pxa2xx-pcm.c +@@ -117,3 +117,4 @@ module_platform_driver(pxa_pcm_driver); + MODULE_AUTHOR("Nicolas Pitre"); + MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module"); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:pxa-pcm-audio"); +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 977066ba1769..43b80db952d1 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -1682,8 +1682,10 @@ int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream) + continue; + + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) && +- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) +- continue; ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) { ++ soc_pcm_hw_free(be_substream); ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE; ++ } + + dev_dbg(be->dev, "ASoC: close BE %s\n", + dpcm->fe->dai_link->name); +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c +index 8e8db4ddf365..a9079654107c 100644 +--- a/sound/usb/pcm.c ++++ b/sound/usb/pcm.c +@@ -1300,7 +1300,7 @@ static void retire_capture_urb(struct snd_usb_substream *subs, + if (bytes % (runtime->sample_bits >> 3) != 0) { + int oldbytes = bytes; + bytes = frames * stride; +- dev_warn(&subs->dev->dev, ++ dev_warn_ratelimited(&subs->dev->dev, + "Corrected urb data len. 
%d->%d\n", + oldbytes, bytes); + } +diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c +index 9db9d21bb2ec..6a8db858caa5 100644 +--- a/tools/usb/usbip/src/usbip_detach.c ++++ b/tools/usb/usbip/src/usbip_detach.c +@@ -43,7 +43,7 @@ void usbip_detach_usage(void) + + static int detach_port(char *port) + { +- int ret; ++ int ret = 0; + uint8_t portnum; + char path[PATH_MAX+1]; + +@@ -73,9 +73,12 @@ static int detach_port(char *port) + } + + ret = usbip_vhci_detach_device(portnum); +- if (ret < 0) +- return -1; ++ if (ret < 0) { ++ ret = -1; ++ goto call_driver_close; ++ } + ++call_driver_close: + usbip_vhci_driver_close(); + + return ret; diff --git a/patch/kernel/rk3328-default/04-patch-4.4.146-147.patch b/patch/kernel/rk3328-default/04-patch-4.4.146-147.patch new file mode 100644 index 000000000..57311537b --- /dev/null +++ b/patch/kernel/rk3328-default/04-patch-4.4.146-147.patch @@ -0,0 +1,254 @@ +diff --git a/Makefile b/Makefile +index 030f5af05f4e..ee92a12e3a4b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 146 ++SUBLEVEL = 147 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c +index d4d853680ae4..a4abf7dc9576 100644 +--- a/drivers/i2c/busses/i2c-imx.c ++++ b/drivers/i2c/busses/i2c-imx.c +@@ -382,6 +382,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx, + goto err_desc; + } + ++ reinit_completion(&dma->cmd_complete); + txdesc->callback = i2c_imx_dma_callback; + txdesc->callback_param = i2c_imx; + if (dma_submit_error(dmaengine_submit(txdesc))) { +@@ -631,7 +632,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, + * The first byte must be transmitted by the CPU. + */ + imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR); +- reinit_completion(&i2c_imx->dma->cmd_complete); + time_left = wait_for_completion_timeout( + &i2c_imx->dma->cmd_complete, + msecs_to_jiffies(DMA_TIMEOUT)); +@@ -690,7 +690,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, + if (result) + return result; + +- reinit_completion(&i2c_imx->dma->cmd_complete); + time_left = wait_for_completion_timeout( + &i2c_imx->dma->cmd_complete, + msecs_to_jiffies(DMA_TIMEOUT)); +diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c +index a32ba753e413..afaf13474796 100644 +--- a/drivers/pci/pci-acpi.c ++++ b/drivers/pci/pci-acpi.c +@@ -543,7 +543,7 @@ void acpi_pci_add_bus(struct pci_bus *bus) + union acpi_object *obj; + struct pci_host_bridge *bridge; + +- if (acpi_pci_disabled || !bus->bridge) ++ if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge)) + return; + + acpi_pci_slot_enumerate(bus); +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c +index a9eb3cd453be..41a646696bab 100644 +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -325,11 +325,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, + + wait_for_completion(&tm_iocb->u.tmf.comp); + +- rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? 
+- QLA_SUCCESS : QLA_FUNCTION_FAILED; ++ rval = tm_iocb->u.tmf.data; + +- if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { +- ql_dbg(ql_dbg_taskm, vha, 0x8030, ++ if (rval != QLA_SUCCESS) { ++ ql_log(ql_log_warn, vha, 0x8030, + "TM IOCB failed (%x).\n", rval); + } + +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c +index 5cbf20ab94aa..18b19744398a 100644 +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -4938,8 +4938,9 @@ qla2x00_do_dpc(void *data) + } + } + +- if (test_and_clear_bit(ISP_ABORT_NEEDED, +- &base_vha->dpc_flags)) { ++ if (test_and_clear_bit ++ (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && ++ !test_bit(UNLOADING, &base_vha->dpc_flags)) { + + ql_dbg(ql_dbg_dpc, base_vha, 0x4007, + "ISP abort scheduled.\n"); +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 49af3c50b263..3e4d8ac1974e 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2102,7 +2102,7 @@ static int ext4_check_descriptors(struct super_block *sb, + struct ext4_sb_info *sbi = EXT4_SB(sb); + ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); + ext4_fsblk_t last_block; +- ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1; ++ ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0); + ext4_fsblk_t block_bitmap; + ext4_fsblk_t inode_bitmap; + ext4_fsblk_t inode_table; +@@ -3777,13 +3777,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + goto failed_mount2; + } + } ++ sbi->s_gdb_count = db_count; + if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) { + ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); + ret = -EFSCORRUPTED; + goto failed_mount2; + } + +- sbi->s_gdb_count = db_count; + get_random_bytes(&sbi->s_next_generation, sizeof(u32)); + spin_lock_init(&sbi->s_next_gen_lock); + +diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c +index 48b15a6e5558..40a26a542341 100644 +--- a/fs/jfs/xattr.c ++++ b/fs/jfs/xattr.c +@@ -493,15 +493,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) + if (size > PSIZE) { + /* + * To keep the rest of the code simple. Allocate a +- * contiguous buffer to work with ++ * contiguous buffer to work with. Make the buffer large ++ * enough to make use of the whole extent. 
+ */ +- ea_buf->xattr = kmalloc(size, GFP_KERNEL); ++ ea_buf->max_size = (size + sb->s_blocksize - 1) & ++ ~(sb->s_blocksize - 1); ++ ++ ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL); + if (ea_buf->xattr == NULL) + return -ENOMEM; + + ea_buf->flag = EA_MALLOC; +- ea_buf->max_size = (size + sb->s_blocksize - 1) & +- ~(sb->s_blocksize - 1); + + if (ea_size == 0) + return 0; +diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h +index 4acc552e9279..19d0778ec382 100644 +--- a/include/linux/ring_buffer.h ++++ b/include/linux/ring_buffer.h +@@ -162,6 +162,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer); + void ring_buffer_record_off(struct ring_buffer *buffer); + void ring_buffer_record_on(struct ring_buffer *buffer); + int ring_buffer_record_is_on(struct ring_buffer *buffer); ++int ring_buffer_record_is_set_on(struct ring_buffer *buffer); + void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); + void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); + +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 5f55a8bf5264..0df2b44dac7c 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -1012,6 +1012,13 @@ static int irq_setup_forced_threading(struct irqaction *new) + if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) + return 0; + ++ /* ++ * No further action required for interrupts which are requested as ++ * threaded interrupts already ++ */ ++ if (new->handler == irq_default_primary_handler) ++ return 0; ++ + new->flags |= IRQF_ONESHOT; + + /* +@@ -1019,7 +1026,7 @@ static int irq_setup_forced_threading(struct irqaction *new) + * thread handler. We force thread them as well by creating a + * secondary action. + */ +- if (new->handler != irq_default_primary_handler && new->thread_fn) { ++ if (new->handler && new->thread_fn) { + /* Allocate the secondary action */ + new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); + if (!new->secondary) +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c +index e5d228f7224c..5ad2e852e9f6 100644 +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -570,7 +570,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) + + static inline bool local_timer_softirq_pending(void) + { +- return local_softirq_pending() & TIMER_SOFTIRQ; ++ return local_softirq_pending() & BIT(TIMER_SOFTIRQ); + } + + static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index d9cd6191760b..fdaa88f38aec 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -3141,6 +3141,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer) + return !atomic_read(&buffer->record_disabled); + } + ++/** ++ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable ++ * @buffer: The ring buffer to see if write is set enabled ++ * ++ * Returns true if the ring buffer is set writable by ring_buffer_record_on(). ++ * Note that this does NOT mean it is in a writable state. ++ * ++ * It may return true when the ring buffer has been disabled by ++ * ring_buffer_record_disable(), as that is a temporary disabling of ++ * the ring buffer. ++ */ ++int ring_buffer_record_is_set_on(struct ring_buffer *buffer) ++{ ++ return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); ++} ++ + /** + * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer + * @buffer: The ring buffer to stop writes to. 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 8aef4e63ac57..1b980a8ef791 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1088,6 +1088,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) + + arch_spin_lock(&tr->max_lock); + ++ /* Inherit the recordable setting from trace_buffer */ ++ if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) ++ ring_buffer_record_on(tr->max_buffer.buffer); ++ else ++ ring_buffer_record_off(tr->max_buffer.buffer); ++ + buf = tr->trace_buffer.buffer; + tr->trace_buffer.buffer = tr->max_buffer.buffer; + tr->max_buffer.buffer = buf; +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 9708fff318d5..bf292010760a 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -986,6 +986,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, + return err; + } + ++ if (nlk->ngroups == 0) ++ groups = 0; ++ else if (nlk->ngroups < 8*sizeof(groups)) ++ groups &= (1UL << nlk->ngroups) - 1; ++ + bound = nlk->bound; + if (bound) { + /* Ensure nlk->portid is up-to-date. */ diff --git a/patch/kernel/rk3328-default/04-patch-4.4.147-148.patch b/patch/kernel/rk3328-default/04-patch-4.4.147-148.patch new file mode 100644 index 000000000..ea24e4109 --- /dev/null +++ b/patch/kernel/rk3328-default/04-patch-4.4.147-148.patch @@ -0,0 +1,1873 @@ +diff --git a/Makefile b/Makefile +index ee92a12e3a4b..9b795164122e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 147 ++SUBLEVEL = 148 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi +index 167f77b3bd43..6963dff815dc 100644 +--- a/arch/arm/boot/dts/imx6sx.dtsi ++++ b/arch/arm/boot/dts/imx6sx.dtsi +@@ -1250,7 +1250,7 @@ + /* non-prefetchable memory */ + 0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; + num-lanes = <1>; +- interrupts = ; ++ interrupts = ; + clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>, + <&clks IMX6SX_CLK_PCIE_AXI>, + <&clks IMX6SX_CLK_LVDS1_OUT>, +diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig +index 729f89163bc3..210b3d675261 100644 +--- a/arch/parisc/Kconfig ++++ b/arch/parisc/Kconfig +@@ -177,7 +177,7 @@ config PREFETCH + + config MLONGCALLS + bool "Enable the -mlong-calls compiler option for big kernels" +- def_bool y if (!MODULES) ++ default y + depends on PA8X00 + help + If you configure the kernel to include many drivers built-in instead +diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h +new file mode 100644 +index 000000000000..dbaaca84f27f +--- /dev/null ++++ b/arch/parisc/include/asm/barrier.h +@@ -0,0 +1,32 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef __ASM_BARRIER_H ++#define __ASM_BARRIER_H ++ ++#ifndef __ASSEMBLY__ ++ ++/* The synchronize caches instruction executes as a nop on systems in ++ which all memory references are performed in order. 
*/ ++#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory") ++ ++#if defined(CONFIG_SMP) ++#define mb() do { synchronize_caches(); } while (0) ++#define rmb() mb() ++#define wmb() mb() ++#define dma_rmb() mb() ++#define dma_wmb() mb() ++#else ++#define mb() barrier() ++#define rmb() barrier() ++#define wmb() barrier() ++#define dma_rmb() barrier() ++#define dma_wmb() barrier() ++#endif ++ ++#define __smp_mb() mb() ++#define __smp_rmb() mb() ++#define __smp_wmb() mb() ++ ++#include ++ ++#endif /* !__ASSEMBLY__ */ ++#endif /* __ASM_BARRIER_H */ +diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S +index 5dc831955de5..13cb2461fef5 100644 +--- a/arch/parisc/kernel/entry.S ++++ b/arch/parisc/kernel/entry.S +@@ -481,6 +481,8 @@ + /* Release pa_tlb_lock lock without reloading lock address. */ + .macro tlb_unlock0 spc,tmp + #ifdef CONFIG_SMP ++ or,COND(=) %r0,\spc,%r0 ++ sync + or,COND(=) %r0,\spc,%r0 + stw \spc,0(\tmp) + #endif +diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S +index 16073f472118..b3434a7fd3c9 100644 +--- a/arch/parisc/kernel/pacache.S ++++ b/arch/parisc/kernel/pacache.S +@@ -354,6 +354,7 @@ ENDPROC(flush_data_cache_local) + .macro tlb_unlock la,flags,tmp + #ifdef CONFIG_SMP + ldi 1,\tmp ++ sync + stw \tmp,0(\la) + mtsm \flags + #endif +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S +index 9f22195b90ed..f68eedc72484 100644 +--- a/arch/parisc/kernel/syscall.S ++++ b/arch/parisc/kernel/syscall.S +@@ -631,6 +631,7 @@ cas_action: + sub,<> %r28, %r25, %r0 + 2: stw,ma %r24, 0(%r26) + /* Free lock */ ++ sync + stw,ma %r20, 0(%sr2,%r20) + #if ENABLE_LWS_DEBUG + /* Clear thread register indicator */ +@@ -645,6 +646,7 @@ cas_action: + 3: + /* Error occurred on load or store */ + /* Free lock */ ++ sync + stw %r20, 0(%sr2,%r20) + #if ENABLE_LWS_DEBUG + stw %r0, 4(%sr2,%r20) +@@ -846,6 +848,7 @@ cas2_action: + + cas2_end: + /* Free lock */ ++ sync + stw,ma %r20, 0(%sr2,%r20) + /* Enable interrupts */ + ssm PSW_SM_I, %r0 +@@ -856,6 +859,7 @@ cas2_end: + 22: + /* Error occurred on load or store */ + /* Free lock */ ++ sync + stw %r20, 0(%sr2,%r20) + ssm PSW_SM_I, %r0 + ldo 1(%r0),%r28 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index f4b175db70f4..dd2269dcbc47 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -193,12 +193,12 @@ + #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ + #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ + ++#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ ++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ ++ + #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ + #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ + +-#define X86_FEATURE_RETPOLINE ( 7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */ +-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */ +- + #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ + #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ + +@@ -214,7 +214,7 @@ + #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ + #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ + #define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is 
AMD family 0x17 (Zen) */ +- ++#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */ + + /* Virtualization flags: Linux defined, word 8 */ + #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ +@@ -310,6 +310,7 @@ + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ + #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ + #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */ + #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ + #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ + +@@ -331,5 +332,6 @@ + #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ + #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ + #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ ++#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h +index 0056bc945cd1..cb7f04981c6b 100644 +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -8,6 +8,8 @@ + * Interrupt control: + */ + ++/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */ ++extern inline unsigned long native_save_fl(void); + extern inline unsigned long native_save_fl(void) + { + unsigned long flags; +diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h +index 3a52ee0e726d..bfceb5cc6347 100644 +--- a/arch/x86/include/asm/page_32_types.h ++++ b/arch/x86/include/asm/page_32_types.h +@@ -27,8 +27,13 @@ + #define N_EXCEPTION_STACKS 1 + + #ifdef CONFIG_X86_PAE +-/* 44=32+12, the limit we can fit into an unsigned long pfn */ +-#define __PHYSICAL_MASK_SHIFT 44 ++/* ++ * This is beyond the 44 bit limit imposed by the 32bit long pfns, ++ * but we need the full mask to make sure inverted PROT_NONE ++ * entries have all the host bits set in a guest. ++ * The real limit is still 44 bits. 
++ */ ++#define __PHYSICAL_MASK_SHIFT 52 + #define __VIRTUAL_MASK_SHIFT 32 + + #else /* !CONFIG_X86_PAE */ +diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h +index fd74a11959de..89c50332a71e 100644 +--- a/arch/x86/include/asm/pgtable-2level.h ++++ b/arch/x86/include/asm/pgtable-2level.h +@@ -77,4 +77,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi + #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) + #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) + ++/* No inverted PFNs on 2 level page tables */ ++ ++static inline u64 protnone_mask(u64 val) ++{ ++ return 0; ++} ++ ++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask) ++{ ++ return val; ++} ++ ++static inline bool __pte_needs_invert(u64 val) ++{ ++ return false; ++} ++ + #endif /* _ASM_X86_PGTABLE_2LEVEL_H */ +diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h +index cdaa58c9b39e..5c686382d84b 100644 +--- a/arch/x86/include/asm/pgtable-3level.h ++++ b/arch/x86/include/asm/pgtable-3level.h +@@ -177,11 +177,44 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) + #endif + + /* Encode and de-code a swap entry */ ++#define SWP_TYPE_BITS 5 ++ ++#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) ++ ++/* We always extract/encode the offset by shifting it all the way up, and then down again */ ++#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS) ++ + #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5) + #define __swp_type(x) (((x).val) & 0x1f) + #define __swp_offset(x) ((x).val >> 5) + #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) +-#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) +-#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) ++ ++/* ++ * Normally, __swp_entry() converts from arch-independent swp_entry_t to ++ * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result ++ * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the ++ * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to ++ * __swp_entry_to_pte() through the following helper macro based on 64bit ++ * __swp_entry(). ++ */ ++#define __swp_pteval_entry(type, offset) ((pteval_t) { \ ++ (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \ ++ | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) }) ++ ++#define __swp_entry_to_pte(x) ((pte_t){ .pte = \ ++ __swp_pteval_entry(__swp_type(x), __swp_offset(x)) }) ++/* ++ * Analogically, __pte_to_swp_entry() doesn't just extract the arch-dependent ++ * swp_entry_t, but also has to convert it from 64bit to the 32bit ++ * intermediate representation, using the following macros based on 64bit ++ * __swp_type() and __swp_offset(). 
++ */ ++#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS))) ++#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)) ++ ++#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \ ++ __pteval_swp_offset(pte))) ++ ++#include + + #endif /* _ASM_X86_PGTABLE_3LEVEL_H */ +diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h +new file mode 100644 +index 000000000000..44b1203ece12 +--- /dev/null ++++ b/arch/x86/include/asm/pgtable-invert.h +@@ -0,0 +1,32 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _ASM_PGTABLE_INVERT_H ++#define _ASM_PGTABLE_INVERT_H 1 ++ ++#ifndef __ASSEMBLY__ ++ ++static inline bool __pte_needs_invert(u64 val) ++{ ++ return !(val & _PAGE_PRESENT); ++} ++ ++/* Get a mask to xor with the page table entry to get the correct pfn. */ ++static inline u64 protnone_mask(u64 val) ++{ ++ return __pte_needs_invert(val) ? ~0ull : 0; ++} ++ ++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask) ++{ ++ /* ++ * When a PTE transitions from NONE to !NONE or vice-versa ++ * invert the PFN part to stop speculation. ++ * pte_pfn undoes this when needed. ++ */ ++ if (__pte_needs_invert(oldval) != __pte_needs_invert(val)) ++ val = (val & ~mask) | (~val & mask); ++ return val; ++} ++ ++#endif /* __ASSEMBLY__ */ ++ ++#endif +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h +index 84c62d950023..4de6c282c02a 100644 +--- a/arch/x86/include/asm/pgtable.h ++++ b/arch/x86/include/asm/pgtable.h +@@ -148,19 +148,29 @@ static inline int pte_special(pte_t pte) + return pte_flags(pte) & _PAGE_SPECIAL; + } + ++/* Entries that were set to PROT_NONE are inverted */ ++ ++static inline u64 protnone_mask(u64 val); ++ + static inline unsigned long pte_pfn(pte_t pte) + { +- return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; ++ phys_addr_t pfn = pte_val(pte); ++ pfn ^= protnone_mask(pfn); ++ return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT; + } + + static inline unsigned long pmd_pfn(pmd_t pmd) + { +- return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; ++ phys_addr_t pfn = pmd_val(pmd); ++ pfn ^= protnone_mask(pfn); ++ return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; + } + + static inline unsigned long pud_pfn(pud_t pud) + { +- return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT; ++ phys_addr_t pfn = pud_val(pud); ++ pfn ^= protnone_mask(pfn); ++ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT; + } + + #define pte_page(pte) pfn_to_page(pte_pfn(pte)) +@@ -305,11 +315,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) + return pmd_set_flags(pmd, _PAGE_RW); + } + +-static inline pmd_t pmd_mknotpresent(pmd_t pmd) +-{ +- return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE); +-} +- + #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY + static inline int pte_soft_dirty(pte_t pte) + { +@@ -359,19 +364,58 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot) + + static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) + { +- return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | +- massage_pgprot(pgprot)); ++ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; ++ pfn ^= protnone_mask(pgprot_val(pgprot)); ++ pfn &= PTE_PFN_MASK; ++ return __pte(pfn | massage_pgprot(pgprot)); + } + + static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) + { +- return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) | +- massage_pgprot(pgprot)); ++ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; ++ pfn ^= protnone_mask(pgprot_val(pgprot)); ++ pfn &= 
PHYSICAL_PMD_PAGE_MASK; ++ return __pmd(pfn | massage_pgprot(pgprot)); ++} ++ ++static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot) ++{ ++ phys_addr_t pfn = page_nr << PAGE_SHIFT; ++ pfn ^= protnone_mask(pgprot_val(pgprot)); ++ pfn &= PHYSICAL_PUD_PAGE_MASK; ++ return __pud(pfn | massage_pgprot(pgprot)); ++} ++ ++static inline pmd_t pmd_mknotpresent(pmd_t pmd) ++{ ++ return pfn_pmd(pmd_pfn(pmd), ++ __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); + } + ++static inline pud_t pud_set_flags(pud_t pud, pudval_t set) ++{ ++ pudval_t v = native_pud_val(pud); ++ ++ return __pud(v | set); ++} ++ ++static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) ++{ ++ pudval_t v = native_pud_val(pud); ++ ++ return __pud(v & ~clear); ++} ++ ++static inline pud_t pud_mkhuge(pud_t pud) ++{ ++ return pud_set_flags(pud, _PAGE_PSE); ++} ++ ++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask); ++ + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) + { +- pteval_t val = pte_val(pte); ++ pteval_t val = pte_val(pte), oldval = val; + + /* + * Chop off the NX bit (if present), and add the NX portion of +@@ -379,17 +423,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) + */ + val &= _PAGE_CHG_MASK; + val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK; +- ++ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK); + return __pte(val); + } + + static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) + { +- pmdval_t val = pmd_val(pmd); ++ pmdval_t val = pmd_val(pmd), oldval = val; + + val &= _HPAGE_CHG_MASK; + val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK; +- ++ val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK); + return __pmd(val); + } + +@@ -926,6 +970,14 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) + } + #endif + ++#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1 ++extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot); ++ ++static inline bool arch_has_pfn_modify_check(void) ++{ ++ return boot_cpu_has_bug(X86_BUG_L1TF); ++} ++ + #include + #endif /* __ASSEMBLY__ */ + +diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h +index c810226e741a..221a32ed1372 100644 +--- a/arch/x86/include/asm/pgtable_64.h ++++ b/arch/x86/include/asm/pgtable_64.h +@@ -163,18 +163,52 @@ static inline int pgd_large(pgd_t pgd) { return 0; } + #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) + #define pte_unmap(pte) ((void)(pte))/* NOP */ + +-/* Encode and de-code a swap entry */ +-#define SWP_TYPE_BITS 5 +-#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1) ++/* ++ * Encode and de-code a swap entry ++ * ++ * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number ++ * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names ++ * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry ++ * ++ * G (8) is aliased and used as a PROT_NONE indicator for ++ * !present ptes. We need to start storing swap entries above ++ * there. We also need to avoid using A and D because of an ++ * erratum where they can be incorrectly set by hardware on ++ * non-present PTEs. ++ * ++ * SD (1) in swp entry is used to store soft dirty bit, which helps us ++ * remember soft dirty over page migration ++ * ++ * Bit 7 in swp entry should be 0 because pmd_present checks not only P, ++ * but also L and G. ++ * ++ * The offset is inverted by a binary not operation to make the high ++ * physical bits set. 
++ */ ++#define SWP_TYPE_BITS 5 ++ ++#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) ++ ++/* We always extract/encode the offset by shifting it all the way up, and then down again */ ++#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS) + + #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) + +-#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \ +- & ((1U << SWP_TYPE_BITS) - 1)) +-#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT) +-#define __swp_entry(type, offset) ((swp_entry_t) { \ +- ((type) << (_PAGE_BIT_PRESENT + 1)) \ +- | ((offset) << SWP_OFFSET_SHIFT) }) ++/* Extract the high bits for type */ ++#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS)) ++ ++/* Shift up (to get rid of type), then down to get value */ ++#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT) ++ ++/* ++ * Shift the offset up "too far" by TYPE bits, then down again ++ * The offset is inverted by a binary not operation to make the high ++ * physical bits set. ++ */ ++#define __swp_entry(type, offset) ((swp_entry_t) { \ ++ (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \ ++ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) }) ++ + #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) + #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) + +@@ -201,6 +235,8 @@ extern void cleanup_highmap(void); + extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); + extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); + ++#include ++ + #endif /* !__ASSEMBLY__ */ + + #endif /* _ASM_X86_PGTABLE_64_H */ +diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h +index 8dba273da25a..7572ce32055e 100644 +--- a/arch/x86/include/asm/pgtable_types.h ++++ b/arch/x86/include/asm/pgtable_types.h +@@ -70,15 +70,15 @@ + /* + * Tracking soft dirty bit when a page goes to a swap is tricky. + * We need a bit which can be stored in pte _and_ not conflict +- * with swap entry format. On x86 bits 6 and 7 are *not* involved +- * into swap entry computation, but bit 6 is used for nonlinear +- * file mapping, so we borrow bit 7 for soft dirty tracking. ++ * with swap entry format. On x86 bits 1-4 are *not* involved ++ * into swap entry computation, but bit 7 is used for thp migration, ++ * so we borrow bit 1 for soft dirty tracking. + * + * Please note that this bit must be treated as swap dirty page +- * mark if and only if the PTE has present bit clear! ++ * mark if and only if the PTE/PMD has present bit clear! 
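The inverted swap encoding a few hunks up is easiest to sanity-check with a round-trip. This stand-alone sketch restates the three macros (SWP_OFFSET_FIRST_BIT = 9 is an assumption, matching _PAGE_BIT_PROTNONE + 1 on x86-64) and confirms that type and offset survive while the stored offset field reads back inverted.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_BITS        5
#define SWP_OFFSET_FIRST_BIT 9 /* assumed: _PAGE_BIT_PROTNONE + 1 */
#define SWP_OFFSET_SHIFT     (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

static uint64_t swp_entry(uint64_t type, uint64_t offset)
{
        return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
               (type << (64 - SWP_TYPE_BITS));
}

static uint64_t swp_type(uint64_t val)
{
        return val >> (64 - SWP_TYPE_BITS);
}

static uint64_t swp_offset(uint64_t val)
{
        return ~val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT;
}

int main(void)
{
        uint64_t e = swp_entry(3, 0xabcdef);

        assert(swp_type(e) == 3);
        assert(swp_offset(e) == 0xabcdef);
        /* the offset is stored inverted, so the raw field is mostly 1s */
        printf("raw entry: %#llx\n", (unsigned long long)e);
        return 0;
}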
+ */ + #ifdef CONFIG_MEM_SOFT_DIRTY +-#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE ++#define _PAGE_SWP_SOFT_DIRTY _PAGE_RW + #else + #define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) + #endif +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index 8e415cf65457..a3a53955f01c 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -172,6 +172,11 @@ extern const struct seq_operations cpuinfo_op; + + extern void cpu_detect(struct cpuinfo_x86 *c); + ++static inline unsigned long l1tf_pfn_limit(void) ++{ ++ return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1; ++} ++ + extern void early_cpu_init(void); + extern void identify_boot_cpu(void); + extern void identify_secondary_cpu(struct cpuinfo_x86 *); +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 12a8867071f3..34e4aaaf03d2 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -26,9 +26,11 @@ + #include + #include + #include ++#include + + static void __init spectre_v2_select_mitigation(void); + static void __init ssb_select_mitigation(void); ++static void __init l1tf_select_mitigation(void); + + /* + * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any +@@ -80,6 +82,8 @@ void __init check_bugs(void) + */ + ssb_select_mitigation(); + ++ l1tf_select_mitigation(); ++ + #ifdef CONFIG_X86_32 + /* + * Check whether we are able to run this kernel safely on SMP. +@@ -309,23 +313,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) + return cmd; + } + +-/* Check for Skylake-like CPUs (for RSB handling) */ +-static bool __init is_skylake_era(void) +-{ +- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && +- boot_cpu_data.x86 == 6) { +- switch (boot_cpu_data.x86_model) { +- case INTEL_FAM6_SKYLAKE_MOBILE: +- case INTEL_FAM6_SKYLAKE_DESKTOP: +- case INTEL_FAM6_SKYLAKE_X: +- case INTEL_FAM6_KABYLAKE_MOBILE: +- case INTEL_FAM6_KABYLAKE_DESKTOP: +- return true; +- } +- } +- return false; +-} +- + static void __init spectre_v2_select_mitigation(void) + { + enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); +@@ -386,22 +373,15 @@ retpoline_auto: + pr_info("%s\n", spectre_v2_strings[mode]); + + /* +- * If neither SMEP nor PTI are available, there is a risk of +- * hitting userspace addresses in the RSB after a context switch +- * from a shallow call stack to a deeper one. To prevent this fill +- * the entire RSB, even when using IBRS. ++ * If spectre v2 protection has been enabled, unconditionally fill ++ * RSB during a context switch; this protects against two independent ++ * issues: + * +- * Skylake era CPUs have a separate issue with *underflow* of the +- * RSB, when they will predict 'ret' targets from the generic BTB. +- * The proper mitigation for this is IBRS. If IBRS is not supported +- * or deactivated in favour of retpolines the RSB fill on context +- * switch is required. 
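The l1tf_pfn_limit() helper added to processor.h above caps safe PFNs just below half of the CPU's reported physical address space. A worked example; x86_phys_bits is a plain parameter here, whereas the kernel reads it from boot_cpu_data.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* highest PFN strictly below MAX_PA/2, mirroring the hunk above */
static uint64_t l1tf_pfn_limit(unsigned x86_phys_bits)
{
        return (1ULL << (x86_phys_bits - 1 - PAGE_SHIFT)) - 1;
}

int main(void)
{
        /* e.g. a CPU reporting 36 physical address bits (64 GB reach) */
        uint64_t limit = l1tf_pfn_limit(36);

        printf("pfn limit %#llx, i.e. the %llu GB boundary\n",
               (unsigned long long)limit,
               (unsigned long long)(((limit + 1) << PAGE_SHIFT) >> 30));
        return 0;
}

For 36 physical bits this prints the 32 GB boundary: everything the CPU could speculatively read through a stale L1D line sits below MAX_PA, so only the lower half can safely back inverted entries.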
++ * - RSB underflow (and switch to BTB) on Skylake+ ++ * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs + */ +- if ((!boot_cpu_has(X86_FEATURE_KAISER) && +- !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { +- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); +- pr_info("Spectre v2 mitigation: Filling RSB on context switch\n"); +- } ++ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); ++ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); + + /* Initialize Indirect Branch Prediction Barrier if supported */ + if (boot_cpu_has(X86_FEATURE_IBPB)) { +@@ -652,6 +632,35 @@ void x86_spec_ctrl_setup_ap(void) + x86_amd_ssb_disable(); + } + ++#undef pr_fmt ++#define pr_fmt(fmt) "L1TF: " fmt ++static void __init l1tf_select_mitigation(void) ++{ ++ u64 half_pa; ++ ++ if (!boot_cpu_has_bug(X86_BUG_L1TF)) ++ return; ++ ++#if CONFIG_PGTABLE_LEVELS == 2 ++ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); ++ return; ++#endif ++ ++ /* ++ * This is extremely unlikely to happen because almost all ++ * systems have far more MAX_PA/2 than RAM can be fit into ++ * DIMM slots. ++ */ ++ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; ++ if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) { ++ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); ++ return; ++ } ++ ++ setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); ++} ++#undef pr_fmt ++ + #ifdef CONFIG_SYSFS + + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, +@@ -679,6 +688,11 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr + case X86_BUG_SPEC_STORE_BYPASS: + return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); + ++ case X86_BUG_L1TF: ++ if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) ++ return sprintf(buf, "Mitigation: Page Table Inversion\n"); ++ break; ++ + default: + break; + } +@@ -705,4 +719,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute * + { + return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); + } ++ ++ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); ++} + #endif +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 3d21b28f9826..4d3fa79c0f09 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -880,6 +880,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { + {} + }; + ++static const __initconst struct x86_cpu_id cpu_no_l1tf[] = { ++ /* in addition to cpu_no_speculation */ ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, ++ {} ++}; ++ + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + { + u64 ia32_cap = 0; +@@ -905,6 +920,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + return; + + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); ++ ++ if (x86_match_cpu(cpu_no_l1tf)) ++ return; ++ ++ setup_force_cpu_bug(X86_BUG_L1TF); + } + + /* +diff --git 
a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index 1f5c47a49e35..c6f466d6cc57 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -393,7 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src) + newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; + if ((s64) (s32) newdisp != newdisp) { + pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp); +- pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value); + return 0; + } + disp = (u8 *) dest + insn_offset_displacement(&insn); +@@ -609,8 +608,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs, + * Raise a BUG or we'll continue in an endless reentering loop + * and eventually a stack overflow. + */ +- printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", +- p->addr); ++ pr_err("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); + BUG(); + default: +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c +index f534a0e3af53..632195b41688 100644 +--- a/arch/x86/kernel/paravirt.c ++++ b/arch/x86/kernel/paravirt.c +@@ -97,10 +97,12 @@ unsigned paravirt_patch_call(void *insnbuf, + struct branch *b = insnbuf; + unsigned long delta = (unsigned long)target - (addr+5); + +- if (tgt_clobbers & ~site_clobbers) +- return len; /* target would clobber too much for this site */ +- if (len < 5) ++ if (len < 5) { ++#ifdef CONFIG_RETPOLINE ++ WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr); ++#endif + return len; /* call too long for patch site */ ++ } + + b->opcode = 0xe8; /* call */ + b->delta = delta; +@@ -115,8 +117,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target, + struct branch *b = insnbuf; + unsigned long delta = (unsigned long)target - (addr+5); + +- if (len < 5) ++ if (len < 5) { ++#ifdef CONFIG_RETPOLINE ++ WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr); ++#endif + return len; /* call too long for patch site */ ++ } + + b->opcode = 0xe9; /* jmp */ + b->delta = delta; +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c +index bbaae4cf9e8e..31c4bc0d3372 100644 +--- a/arch/x86/kernel/setup.c ++++ b/arch/x86/kernel/setup.c +@@ -851,6 +851,12 @@ void __init setup_arch(char **cmdline_p) + memblock_reserve(__pa_symbol(_text), + (unsigned long)__bss_stop - (unsigned long)_text); + ++ /* ++ * Make sure page 0 is always reserved because on systems with ++ * L1TF its contents can be leaked to user processes. ++ */ ++ memblock_reserve(0, PAGE_SIZE); ++ + early_reserve_initrd(); + + /* +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 151fd33e9043..4954a6cef50a 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -4,6 +4,8 @@ + #include + #include + #include /* for max_low_pfn */ ++#include ++#include + + #include + #include +@@ -767,3 +769,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) + __cachemode2pte_tbl[cache] = __cm_idx2pte(entry); + __pte2cachemode_tbl[entry] = cache; + } ++ ++#ifdef CONFIG_SWAP ++unsigned long max_swapfile_size(void) ++{ ++ unsigned long pages; ++ ++ pages = generic_max_swapfile_size(); ++ ++ if (boot_cpu_has_bug(X86_BUG_L1TF)) { ++ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ ++ unsigned long l1tf_limit = l1tf_pfn_limit() + 1; ++ /* ++ * We encode swap offsets also with 3 bits below those for pfn ++ * which makes the usable limit higher. 
++ */ ++#if CONFIG_PGTABLE_LEVELS > 2 ++ l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; ++#endif ++ pages = min_t(unsigned long, l1tf_limit, pages); ++ } ++ return pages; ++} ++#endif +diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c +index 76604c8a2a48..7bf14e74fc8f 100644 +--- a/arch/x86/mm/kmmio.c ++++ b/arch/x86/mm/kmmio.c +@@ -125,24 +125,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr) + + static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old) + { ++ pmd_t new_pmd; + pmdval_t v = pmd_val(*pmd); + if (clear) { +- *old = v & _PAGE_PRESENT; +- v &= ~_PAGE_PRESENT; +- } else /* presume this has been called with clear==true previously */ +- v |= *old; +- set_pmd(pmd, __pmd(v)); ++ *old = v; ++ new_pmd = pmd_mknotpresent(*pmd); ++ } else { ++ /* Presume this has been called with clear==true previously */ ++ new_pmd = __pmd(*old); ++ } ++ set_pmd(pmd, new_pmd); + } + + static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old) + { + pteval_t v = pte_val(*pte); + if (clear) { +- *old = v & _PAGE_PRESENT; +- v &= ~_PAGE_PRESENT; +- } else /* presume this has been called with clear==true previously */ +- v |= *old; +- set_pte_atomic(pte, __pte(v)); ++ *old = v; ++ /* Nothing should care about address */ ++ pte_clear(&init_mm, 0, pte); ++ } else { ++ /* Presume this has been called with clear==true previously */ ++ set_pte_atomic(pte, __pte(*old)); ++ } + } + + static int clear_page_presence(struct kmmio_fault_page *f, bool clear) +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 307f60ecfc6d..9a055ea279eb 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -121,3 +121,24 @@ const char *arch_vma_name(struct vm_area_struct *vma) + return "[mpx]"; + return NULL; + } ++ ++/* ++ * Only allow root to set high MMIO mappings to PROT_NONE. ++ * This prevents an unpriv. user to set them to PROT_NONE and invert ++ * them, then pointing to valid memory for L1TF speculation. ++ * ++ * Note: for locked down kernels may want to disable the root override. 
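The policy described in that comment (the function body follows below) reduces to three early-allow checks and one root-only refusal. A condensed user-space model, with boot_cpu_has_bug(), __pte_needs_invert(), pfn_valid() and capable() folded into boolean parameters for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool pfn_modify_allowed(uint64_t pfn, bool cpu_has_l1tf,
                               bool prot_is_none, bool pfn_is_ram,
                               bool is_admin, uint64_t l1tf_limit)
{
        if (!cpu_has_l1tf)
                return true;
        if (!prot_is_none)
                return true;    /* entry stays present; nothing is inverted */
        if (pfn_is_ram)
                return true;    /* real memory is always allowed */
        /* high (non-RAM) PFNs under PROT_NONE are root-only */
        return pfn <= l1tf_limit || is_admin;
}

int main(void)
{
        uint64_t limit = (1ULL << 23) - 1;

        /* unprivileged PROT_NONE over high MMIO space: refused */
        printf("%d\n", pfn_modify_allowed(1ULL << 40, true, true,
                                          false, false, limit));
        /* same mapping with CAP_SYS_ADMIN: allowed */
        printf("%d\n", pfn_modify_allowed(1ULL << 40, true, true,
                                          false, true, limit));
        return 0;
}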
++ */ ++bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_L1TF)) ++ return true; ++ if (!__pte_needs_invert(pgprot_val(prot))) ++ return true; ++ /* If it's real memory always allow */ ++ if (pfn_valid(pfn)) ++ return true; ++ if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) ++ return false; ++ return true; ++} +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c +index 79377e2a7bcd..27610c2d1821 100644 +--- a/arch/x86/mm/pageattr.c ++++ b/arch/x86/mm/pageattr.c +@@ -1006,8 +1006,8 @@ static int populate_pmd(struct cpa_data *cpa, + + pmd = pmd_offset(pud, start); + +- set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | +- massage_pgprot(pmd_pgprot))); ++ set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn, ++ canon_pgprot(pmd_pgprot)))); + + start += PMD_SIZE; + cpa->pfn += PMD_SIZE; +@@ -1079,8 +1079,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, + * Map everything starting from the Gb boundary, possibly with 1G pages + */ + while (end - start >= PUD_SIZE) { +- set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | +- massage_pgprot(pud_pgprot))); ++ set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn, ++ canon_pgprot(pud_pgprot)))); + + start += PUD_SIZE; + cpa->pfn += PUD_SIZE; +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c +index f9e0d09f7c66..8a0f77fb5181 100644 +--- a/drivers/acpi/acpi_lpss.c ++++ b/drivers/acpi/acpi_lpss.c +@@ -154,10 +154,12 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = { + + static const struct lpss_device_desc byt_pwm_dev_desc = { + .flags = LPSS_SAVE_CTX, ++ .prv_offset = 0x800, + }; + + static const struct lpss_device_desc bsw_pwm_dev_desc = { + .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, ++ .prv_offset = 0x800, + }; + + static const struct lpss_device_desc byt_uart_dev_desc = { +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index 143edea1076f..41090ef5facb 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -524,16 +524,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, + return sprintf(buf, "Not affected\n"); + } + ++ssize_t __weak cpu_show_l1tf(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Not affected\n"); ++} ++ + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); + static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); + static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); ++static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, + &dev_attr_spectre_v1.attr, + &dev_attr_spectre_v2.attr, + &dev_attr_spec_store_bypass.attr, ++ &dev_attr_l1tf.attr, + NULL + }; + +diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c +index 912ad30be585..4719aa781bf2 100644 +--- a/drivers/char/tpm/tpm-dev.c ++++ b/drivers/char/tpm/tpm-dev.c +@@ -25,7 +25,7 @@ struct file_priv { + struct tpm_chip *chip; + + /* Data passed to and from the tpm via the read/write calls */ +- atomic_t data_pending; ++ size_t data_pending; + struct mutex buffer_mutex; + + struct timer_list user_read_timer; /* user needs to claim result */ +@@ -46,7 +46,7 @@ static void timeout_work(struct work_struct *work) + struct file_priv *priv = container_of(work, struct file_priv, work); + + mutex_lock(&priv->buffer_mutex); +- atomic_set(&priv->data_pending, 0); ++ priv->data_pending = 0; + memset(priv->data_buffer, 0, 
sizeof(priv->data_buffer)); + mutex_unlock(&priv->buffer_mutex); + } +@@ -72,7 +72,6 @@ static int tpm_open(struct inode *inode, struct file *file) + } + + priv->chip = chip; +- atomic_set(&priv->data_pending, 0); + mutex_init(&priv->buffer_mutex); + setup_timer(&priv->user_read_timer, user_reader_timeout, + (unsigned long)priv); +@@ -86,28 +85,24 @@ static ssize_t tpm_read(struct file *file, char __user *buf, + size_t size, loff_t *off) + { + struct file_priv *priv = file->private_data; +- ssize_t ret_size; ++ ssize_t ret_size = 0; + int rc; + + del_singleshot_timer_sync(&priv->user_read_timer); + flush_work(&priv->work); +- ret_size = atomic_read(&priv->data_pending); +- if (ret_size > 0) { /* relay data */ +- ssize_t orig_ret_size = ret_size; +- if (size < ret_size) +- ret_size = size; ++ mutex_lock(&priv->buffer_mutex); + +- mutex_lock(&priv->buffer_mutex); ++ if (priv->data_pending) { ++ ret_size = min_t(ssize_t, size, priv->data_pending); + rc = copy_to_user(buf, priv->data_buffer, ret_size); +- memset(priv->data_buffer, 0, orig_ret_size); ++ memset(priv->data_buffer, 0, priv->data_pending); + if (rc) + ret_size = -EFAULT; + +- mutex_unlock(&priv->buffer_mutex); ++ priv->data_pending = 0; + } + +- atomic_set(&priv->data_pending, 0); +- ++ mutex_unlock(&priv->buffer_mutex); + return ret_size; + } + +@@ -118,18 +113,20 @@ static ssize_t tpm_write(struct file *file, const char __user *buf, + size_t in_size = size; + ssize_t out_size; + +- /* cannot perform a write until the read has cleared +- either via tpm_read or a user_read_timer timeout. +- This also prevents splitted buffered writes from blocking here. +- */ +- if (atomic_read(&priv->data_pending) != 0) +- return -EBUSY; +- + if (in_size > TPM_BUFSIZE) + return -E2BIG; + + mutex_lock(&priv->buffer_mutex); + ++ /* Cannot perform a write until the read has cleared either via ++ * tpm_read or a user_read_timer timeout. This also prevents split ++ * buffered writes from blocking here. ++ */ ++ if (priv->data_pending != 0) { ++ mutex_unlock(&priv->buffer_mutex); ++ return -EBUSY; ++ } ++ + if (copy_from_user + (priv->data_buffer, (void __user *) buf, in_size)) { + mutex_unlock(&priv->buffer_mutex); +@@ -153,7 +150,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf, + return out_size; + } + +- atomic_set(&priv->data_pending, out_size); ++ priv->data_pending = out_size; + mutex_unlock(&priv->buffer_mutex); + + /* Set a timeout by which the reader must come claim the result */ +@@ -172,7 +169,7 @@ static int tpm_release(struct inode *inode, struct file *file) + del_singleshot_timer_sync(&priv->user_read_timer); + flush_work(&priv->work); + file->private_data = NULL; +- atomic_set(&priv->data_pending, 0); ++ priv->data_pending = 0; + clear_bit(0, &priv->chip->is_open); + kfree(priv); + return 0; +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c +index 6790ebb366dd..98fd9a594841 100644 +--- a/drivers/infiniband/core/umem.c ++++ b/drivers/infiniband/core/umem.c +@@ -122,16 +122,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, + umem->address = addr; + umem->page_size = PAGE_SIZE; + umem->pid = get_task_pid(current, PIDTYPE_PID); +- /* +- * We ask for writable memory if any of the following +- * access flags are set. "Local write" and "remote write" +- * obviously require write access. "Remote atomic" can do +- * things like fetch and add, which will modify memory, and +- * "MW bind" can change permissions by binding a window. 
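The flag test spelled out in the comment above is what this patch later centralizes as ib_access_writable() in ib_verbs.h. A stand-alone restatement; the flag values mirror the ordering of the kernel's ib_access_flags enum but are redefined here, so treat them as illustrative:

#include <stdbool.h>
#include <stdio.h>

#define IB_ACCESS_LOCAL_WRITE   (1 << 0)
#define IB_ACCESS_REMOTE_WRITE  (1 << 1)
#define IB_ACCESS_REMOTE_READ   (1 << 2)
#define IB_ACCESS_REMOTE_ATOMIC (1 << 3)
#define IB_ACCESS_MW_BIND       (1 << 4)

/* writable iff any flag implies the HCA may modify the memory */
static bool ib_access_writable(int access_flags)
{
        return access_flags &
               (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
                IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}

int main(void)
{
        printf("%d\n", ib_access_writable(IB_ACCESS_REMOTE_READ));   /* 0 */
        printf("%d\n", ib_access_writable(IB_ACCESS_REMOTE_ATOMIC)); /* 1 */
        return 0;
}

The non-obvious cases are the last two flags: a remote atomic is a read-modify-write, and binding a memory window can widen permissions, so both count as write access even though neither names "write".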
+- */ +- umem->writable = !!(access & +- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | +- IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); ++ umem->writable = ib_access_writable(access); + + if (access & IB_ACCESS_ON_DEMAND) { + put_pid(umem->pid); +diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c +index ce87e9cc7eff..bf52e35dd506 100644 +--- a/drivers/infiniband/hw/mlx4/mr.c ++++ b/drivers/infiniband/hw/mlx4/mr.c +@@ -130,6 +130,40 @@ out: + return err; + } + ++static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start, ++ u64 length, u64 virt_addr, ++ int access_flags) ++{ ++ /* ++ * Force registering the memory as writable if the underlying pages ++ * are writable. This is so rereg can change the access permissions ++ * from readable to writable without having to run through ib_umem_get ++ * again ++ */ ++ if (!ib_access_writable(access_flags)) { ++ struct vm_area_struct *vma; ++ ++ down_read(&current->mm->mmap_sem); ++ /* ++ * FIXME: Ideally this would iterate over all the vmas that ++ * cover the memory, but for now it requires a single vma to ++ * entirely cover the MR to support RO mappings. ++ */ ++ vma = find_vma(current->mm, start); ++ if (vma && vma->vm_end >= start + length && ++ vma->vm_start <= start) { ++ if (vma->vm_flags & VM_WRITE) ++ access_flags |= IB_ACCESS_LOCAL_WRITE; ++ } else { ++ access_flags |= IB_ACCESS_LOCAL_WRITE; ++ } ++ ++ up_read(&current->mm->mmap_sem); ++ } ++ ++ return ib_umem_get(context, start, length, access_flags, 0); ++} ++ + struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata) +@@ -144,10 +178,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + if (!mr) + return ERR_PTR(-ENOMEM); + +- /* Force registering the memory as writable. */ +- /* Used for memory re-registeration. 
HCA protects the access */ +- mr->umem = ib_umem_get(pd->uobject->context, start, length, +- access_flags | IB_ACCESS_LOCAL_WRITE, 0); ++ mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length, ++ virt_addr, access_flags); + if (IS_ERR(mr->umem)) { + err = PTR_ERR(mr->umem); + goto err_free; +@@ -214,6 +246,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, + } + + if (flags & IB_MR_REREG_ACCESS) { ++ if (ib_access_writable(mr_access_flags) && !mmr->umem->writable) ++ return -EPERM; ++ + err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry, + convert_access(mr_access_flags)); + +@@ -227,10 +262,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, + + mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); + ib_umem_release(mmr->umem); +- mmr->umem = ib_umem_get(mr->uobject->context, start, length, +- mr_access_flags | +- IB_ACCESS_LOCAL_WRITE, +- 0); ++ mmr->umem = ++ mlx4_get_umem_mr(mr->uobject->context, start, length, ++ virt_addr, mr_access_flags); + if (IS_ERR(mmr->umem)) { + err = PTR_ERR(mmr->umem); + /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */ +diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +index 748b63b86cbc..40242ead096f 100644 +--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c ++++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +@@ -643,7 +643,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp, + struct ocrdma_stats *pstats = filp->private_data; + struct ocrdma_dev *dev = pstats->dev; + +- if (count > 32) ++ if (*ppos != 0 || count == 0 || count > sizeof(tmp_str)) + goto err; + + if (copy_from_user(tmp_str, buffer, count)) +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index bec9f099573b..68d0a5c9d437 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -879,7 +879,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, + struct sk_buff *skb, + struct sk_buff_head *list) + { +- struct skb_shared_info *shinfo = skb_shinfo(skb); + RING_IDX cons = queue->rx.rsp_cons; + struct sk_buff *nskb; + +@@ -888,15 +887,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, + RING_GET_RESPONSE(&queue->rx, ++cons); + skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; + +- if (shinfo->nr_frags == MAX_SKB_FRAGS) { ++ if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { + unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; + + BUG_ON(pull_to <= skb_headlen(skb)); + __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); + } +- BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); ++ BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); + +- skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), ++ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, ++ skb_frag_page(nfrag), + rx->offset, rx->status, PAGE_SIZE); + + skb_shinfo(nskb)->nr_frags = 0; +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c +index de53c9694b68..5dc288fecace 100644 +--- a/drivers/scsi/sr.c ++++ b/drivers/scsi/sr.c +@@ -520,18 +520,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) + static int sr_block_open(struct block_device *bdev, fmode_t mode) + { + struct scsi_cd *cd; ++ struct scsi_device *sdev; + int ret = -ENXIO; + ++ cd = scsi_cd_get(bdev->bd_disk); ++ if (!cd) ++ goto out; ++ ++ sdev = cd->device; ++ scsi_autopm_get_device(sdev); + check_disk_change(bdev); + + mutex_lock(&sr_mutex); +- cd = scsi_cd_get(bdev->bd_disk); +- if (cd) { +- ret = cdrom_open(&cd->cdi, bdev, mode); +- if (ret) +- scsi_cd_put(cd); +- } ++ ret = cdrom_open(&cd->cdi, bdev, mode); 
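Two hunks up, ocrdma_dbgfs_ops_write() gains the standard guard for a debugfs write handler: validate *ppos and count against the destination buffer before copying anything. A user-space sketch, with memcpy() standing in for copy_from_user() and the error value chosen for illustration:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static char tmp_str[32];

static long ops_write(const char *buffer, size_t count, long ppos)
{
        /* reject partial writes, empty writes, and anything overlong,
         * all before user data is copied anywhere */
        if (ppos != 0 || count == 0 || count > sizeof(tmp_str))
                return -EFAULT;
        memcpy(tmp_str, buffer, count);    /* stands in for copy_from_user() */
        return (long)count;
}

int main(void)
{
        printf("%ld\n", ops_write("reset", 5, 0));    /* accepted: 5 */
        printf("%ld\n", ops_write("reset", 5, 4));    /* rejected: ppos != 0 */
        printf("%ld\n", ops_write("", 64, 0));        /* rejected: overlong */
        return 0;
}

The original check ("count > 32") let a write of exactly sizeof(tmp_str) bytes through and ignored the file position entirely; the reworked condition closes both holes.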
+ mutex_unlock(&sr_mutex); ++ ++ scsi_autopm_put_device(sdev); ++ if (ret) ++ scsi_cd_put(cd); ++ ++out: + return ret; + } + +@@ -559,6 +567,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, + if (ret) + goto out; + ++ scsi_autopm_get_device(sdev); ++ + /* + * Send SCSI addressing ioctls directly to mid level, send other + * ioctls to cdrom/block level. +@@ -567,15 +577,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + ret = scsi_ioctl(sdev, cmd, argp); +- goto out; ++ goto put; + } + + ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); + if (ret != -ENOSYS) +- goto out; ++ goto put; + + ret = scsi_ioctl(sdev, cmd, argp); + ++put: ++ scsi_autopm_put_device(sdev); ++ + out: + mutex_unlock(&sr_mutex); + return ret; +diff --git a/fs/dcache.c b/fs/dcache.c +index 250c1222e30c..807efaab838e 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -1954,10 +1954,12 @@ struct dentry *d_make_root(struct inode *root_inode) + static const struct qstr name = QSTR_INIT("/", 1); + + res = __d_alloc(root_inode->i_sb, &name); +- if (res) ++ if (res) { ++ res->d_flags |= DCACHE_RCUACCESS; + d_instantiate(res, root_inode); +- else ++ } else { + iput(root_inode); ++ } + } + return res; + } +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c +index 041117fd8fd7..0963213e9cd3 100644 +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -1308,7 +1308,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, + ext4_itable_unused_count(sb, gdp)), + sbi->s_inodes_per_block); + +- if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) { ++ if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) || ++ ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) - ++ ext4_itable_unused_count(sb, gdp)) < ++ EXT4_FIRST_INO(sb)))) { + ext4_error(sb, "Something is wrong with group %u: " + "used itable blocks: %d; " + "itable unused count: %u", +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 3e4d8ac1974e..8d18f6142da5 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2875,14 +2875,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) + if (!gdp) + continue; + +- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) +- continue; +- if (group != 0) ++ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) + break; +- ext4_error(sb, "Inode table for bg 0 marked as " +- "needing zeroing"); +- if (sb->s_flags & MS_RDONLY) +- return ngroups; + } + + return group; +diff --git a/fs/namespace.c b/fs/namespace.c +index a879560ea144..b56b50e3da11 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -603,12 +603,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) + return 0; + mnt = real_mount(bastard); + mnt_add_count(mnt, 1); ++ smp_mb(); // see mntput_no_expire() + if (likely(!read_seqretry(&mount_lock, seq))) + return 0; + if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { + mnt_add_count(mnt, -1); + return 1; + } ++ lock_mount_hash(); ++ if (unlikely(bastard->mnt_flags & MNT_DOOMED)) { ++ mnt_add_count(mnt, -1); ++ unlock_mount_hash(); ++ return 1; ++ } ++ unlock_mount_hash(); ++ /* caller will mntput() */ + return -1; + } + +@@ -1124,12 +1133,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput); + static void mntput_no_expire(struct mount *mnt) + { + rcu_read_lock(); +- mnt_add_count(mnt, -1); +- if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ ++ if (likely(READ_ONCE(mnt->mnt_ns))) { ++ /* ++ * Since we don't do 
lock_mount_hash() here, ++ * ->mnt_ns can change under us. However, if it's ++ * non-NULL, then there's a reference that won't ++ * be dropped until after an RCU delay done after ++ * turning ->mnt_ns NULL. So if we observe it ++ * non-NULL under rcu_read_lock(), the reference ++ * we are dropping is not the final one. ++ */ ++ mnt_add_count(mnt, -1); + rcu_read_unlock(); + return; + } + lock_mount_hash(); ++ /* ++ * make sure that if __legitimize_mnt() has not seen us grab ++ * mount_lock, we'll see their refcount increment here. ++ */ ++ smp_mb(); ++ mnt_add_count(mnt, -1); + if (mnt_get_count(mnt)) { + rcu_read_unlock(); + unlock_mount_hash(); +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h +index 25b793325b09..dabecb661264 100644 +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -799,6 +799,18 @@ static inline int pmd_free_pte_page(pmd_t *pmd) + } + #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ + ++#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED ++static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) ++{ ++ return true; ++} ++ ++static inline bool arch_has_pfn_modify_check(void) ++{ ++ return false; ++} ++#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */ ++ + #endif /* !__ASSEMBLY__ */ + + #ifndef io_remap_pfn_range +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index 2f9d12022100..063c73ed6d78 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -48,6 +48,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf); + extern ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_l1tf(struct device *dev, ++ struct device_attribute *attr, char *buf); + + extern __printf(4, 5) + struct device *cpu_device_create(struct device *parent, void *drvdata, +diff --git a/include/linux/mm.h b/include/linux/mm.h +index a100946607a5..1f4366567e7d 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -2083,6 +2083,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr, + int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); + int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn); ++int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, ++ unsigned long pfn, pgprot_t pgprot); + int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn); + int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); +diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h +index 388293a91e8c..e4594de79bc4 100644 +--- a/include/linux/swapfile.h ++++ b/include/linux/swapfile.h +@@ -9,5 +9,7 @@ extern spinlock_t swap_lock; + extern struct plist_head swap_active_head; + extern struct swap_info_struct *swap_info[]; + extern int try_to_unuse(unsigned int, bool, unsigned long); ++extern unsigned long generic_max_swapfile_size(void); ++extern unsigned long max_swapfile_size(void); + + #endif /* _LINUX_SWAPFILE_H */ +diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h +index ff307b548ed3..646891f3bc1e 100644 +--- a/include/linux/thread_info.h ++++ b/include/linux/thread_info.h +@@ -55,11 +55,7 @@ extern long do_no_restart_syscall(struct restart_block *parm); + + #ifdef __KERNEL__ + +-#ifdef CONFIG_DEBUG_STACK_USAGE +-# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +-#else +-# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK) +-#endif ++#define 
THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) + + /* + * flag set/clear/test wrappers +diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h +index 120da1d7f57e..10fefb0dc640 100644 +--- a/include/rdma/ib_verbs.h ++++ b/include/rdma/ib_verbs.h +@@ -3007,6 +3007,20 @@ static inline int ib_check_mr_access(int flags) + return 0; + } + ++static inline bool ib_access_writable(int access_flags) ++{ ++ /* ++ * We have writable memory backing the MR if any of the following ++ * access flags are set. "Local write" and "remote write" obviously ++ * require write access. "Remote atomic" can do things like fetch and ++ * add, which will modify memory, and "MW bind" can change permissions ++ * by binding a window. ++ */ ++ return access_flags & ++ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | ++ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND); ++} ++ + /** + * ib_check_mr_status: lightweight check of MR status. + * This routine may provide status checks on a selected +diff --git a/mm/memory.c b/mm/memory.c +index 177cb7d111a9..d5bb1465d30c 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -1604,9 +1604,30 @@ out: + */ + int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn) ++{ ++ return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); ++} ++EXPORT_SYMBOL(vm_insert_pfn); ++ ++/** ++ * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot ++ * @vma: user vma to map to ++ * @addr: target user address of this page ++ * @pfn: source kernel pfn ++ * @pgprot: pgprot flags for the inserted page ++ * ++ * This is exactly like vm_insert_pfn, except that it allows drivers to ++ * to override pgprot on a per-page basis. ++ * ++ * This only makes sense for IO mappings, and it makes no sense for ++ * cow mappings. In general, using multiple vmas is preferable; ++ * vm_insert_pfn_prot should only be used if using multiple VMAs is ++ * impractical. ++ */ ++int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, ++ unsigned long pfn, pgprot_t pgprot) + { + int ret; +- pgprot_t pgprot = vma->vm_page_prot; + /* + * Technically, architectures with pte_special can avoid all these + * restrictions (same for remap_pfn_range). 
However we would like +@@ -1624,19 +1645,29 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, + if (track_pfn_insert(vma, &pgprot, pfn)) + return -EINVAL; + ++ if (!pfn_modify_allowed(pfn, pgprot)) ++ return -EACCES; ++ + ret = insert_pfn(vma, addr, pfn, pgprot); + + return ret; + } +-EXPORT_SYMBOL(vm_insert_pfn); ++EXPORT_SYMBOL(vm_insert_pfn_prot); + + int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn) + { ++ pgprot_t pgprot = vma->vm_page_prot; ++ + BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); + + if (addr < vma->vm_start || addr >= vma->vm_end) + return -EFAULT; ++ if (track_pfn_insert(vma, &pgprot, pfn)) ++ return -EINVAL; ++ ++ if (!pfn_modify_allowed(pfn, pgprot)) ++ return -EACCES; + + /* + * If we don't have pte special, then we have to use the pfn_valid() +@@ -1649,9 +1680,9 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, + struct page *page; + + page = pfn_to_page(pfn); +- return insert_page(vma, addr, page, vma->vm_page_prot); ++ return insert_page(vma, addr, page, pgprot); + } +- return insert_pfn(vma, addr, pfn, vma->vm_page_prot); ++ return insert_pfn(vma, addr, pfn, pgprot); + } + EXPORT_SYMBOL(vm_insert_mixed); + +@@ -1666,6 +1697,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, + { + pte_t *pte; + spinlock_t *ptl; ++ int err = 0; + + pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + if (!pte) +@@ -1673,12 +1705,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, + arch_enter_lazy_mmu_mode(); + do { + BUG_ON(!pte_none(*pte)); ++ if (!pfn_modify_allowed(pfn, prot)) { ++ err = -EACCES; ++ break; ++ } + set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); + pfn++; + } while (pte++, addr += PAGE_SIZE, addr != end); + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(pte - 1, ptl); +- return 0; ++ return err; + } + + static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, +@@ -1687,6 +1723,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, + { + pmd_t *pmd; + unsigned long next; ++ int err; + + pfn -= addr >> PAGE_SHIFT; + pmd = pmd_alloc(mm, pud, addr); +@@ -1695,9 +1732,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, + VM_BUG_ON(pmd_trans_huge(*pmd)); + do { + next = pmd_addr_end(addr, end); +- if (remap_pte_range(mm, pmd, addr, next, +- pfn + (addr >> PAGE_SHIFT), prot)) +- return -ENOMEM; ++ err = remap_pte_range(mm, pmd, addr, next, ++ pfn + (addr >> PAGE_SHIFT), prot); ++ if (err) ++ return err; + } while (pmd++, addr = next, addr != end); + return 0; + } +@@ -1708,6 +1746,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, + { + pud_t *pud; + unsigned long next; ++ int err; + + pfn -= addr >> PAGE_SHIFT; + pud = pud_alloc(mm, pgd, addr); +@@ -1715,9 +1754,10 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, + return -ENOMEM; + do { + next = pud_addr_end(addr, end); +- if (remap_pmd_range(mm, pud, addr, next, +- pfn + (addr >> PAGE_SHIFT), prot)) +- return -ENOMEM; ++ err = remap_pmd_range(mm, pud, addr, next, ++ pfn + (addr >> PAGE_SHIFT), prot); ++ if (err) ++ return err; + } while (pud++, addr = next, addr != end); + return 0; + } +diff --git a/mm/mprotect.c b/mm/mprotect.c +index c0b4b2a49462..a277f3412a5d 100644 +--- a/mm/mprotect.c ++++ b/mm/mprotect.c +@@ -255,6 +255,42 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, + return pages; + } + ++static int prot_none_pte_entry(pte_t *pte, unsigned long addr, ++ unsigned long 
next, struct mm_walk *walk) ++{ ++ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? ++ 0 : -EACCES; ++} ++ ++static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask, ++ unsigned long addr, unsigned long next, ++ struct mm_walk *walk) ++{ ++ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? ++ 0 : -EACCES; ++} ++ ++static int prot_none_test(unsigned long addr, unsigned long next, ++ struct mm_walk *walk) ++{ ++ return 0; ++} ++ ++static int prot_none_walk(struct vm_area_struct *vma, unsigned long start, ++ unsigned long end, unsigned long newflags) ++{ ++ pgprot_t new_pgprot = vm_get_page_prot(newflags); ++ struct mm_walk prot_none_walk = { ++ .pte_entry = prot_none_pte_entry, ++ .hugetlb_entry = prot_none_hugetlb_entry, ++ .test_walk = prot_none_test, ++ .mm = current->mm, ++ .private = &new_pgprot, ++ }; ++ ++ return walk_page_range(start, end, &prot_none_walk); ++} ++ + int + mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, + unsigned long start, unsigned long end, unsigned long newflags) +@@ -272,6 +308,19 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, + return 0; + } + ++ /* ++ * Do PROT_NONE PFN permission checks here when we can still ++ * bail out without undoing a lot of state. This is a rather ++ * uncommon case, so doesn't need to be very optimized. ++ */ ++ if (arch_has_pfn_modify_check() && ++ (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && ++ (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) { ++ error = prot_none_walk(vma, start, end, newflags); ++ if (error) ++ return error; ++ } ++ + /* + * If we make a private mapping writable we increase our commit; + * but (without finer accounting) cannot reduce our commit if we +diff --git a/mm/swapfile.c b/mm/swapfile.c +index 674bf177ce44..8e25ff2b693a 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -2206,6 +2206,35 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) + return 0; + } + ++ ++/* ++ * Find out how many pages are allowed for a single swap device. There ++ * are two limiting factors: ++ * 1) the number of bits for the swap offset in the swp_entry_t type, and ++ * 2) the number of bits in the swap pte, as defined by the different ++ * architectures. ++ * ++ * In order to find the largest possible bit mask, a swap entry with ++ * swap type 0 and swap offset ~0UL is created, encoded to a swap pte, ++ * decoded to a swp_entry_t again, and finally the swap offset is ++ * extracted. ++ * ++ * This will mask all the bits from the initial ~0UL mask that can't ++ * be encoded in either the swp_entry_t or the architecture definition ++ * of a swap pte. ++ */ ++unsigned long generic_max_swapfile_size(void) ++{ ++ return swp_offset(pte_to_swp_entry( ++ swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; ++} ++ ++/* Can be overridden by an architecture for additional checks. */ ++__weak unsigned long max_swapfile_size(void) ++{ ++ return generic_max_swapfile_size(); ++} ++ + static unsigned long read_swap_header(struct swap_info_struct *p, + union swap_header *swap_header, + struct inode *inode) +@@ -2241,22 +2270,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p, + p->cluster_next = 1; + p->cluster_nr = 0; + +- /* +- * Find out how many pages are allowed for a single swap +- * device. There are two limiting factors: 1) the number +- * of bits for the swap offset in the swp_entry_t type, and +- * 2) the number of bits in the swap pte as defined by the +- * different architectures. 
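generic_max_swapfile_size() above turns that comment's probing idea into code: encode the all-ones offset, decode it back, and whatever survives the round-trip is the format's limit. The same probe works stand-alone against the inverted x86-64 encoding shown earlier (SWP_OFFSET_FIRST_BIT = 9 assumed), and prints 2^50 pages:

#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_BITS    5
#define SWP_OFFSET_SHIFT (9 + SWP_TYPE_BITS)    /* assumed first offset bit */

static uint64_t swp_entry_to_pte(uint64_t type, uint64_t offset)
{
        return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
               (type << (64 - SWP_TYPE_BITS));
}

static uint64_t pte_to_swp_offset(uint64_t pte)
{
        return ~pte << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT;
}

int main(void)
{
        /* encode type 0 / offset ~0UL, decode, count what survived */
        uint64_t max = pte_to_swp_offset(swp_entry_to_pte(0, ~0ULL)) + 1;

        printf("max swapfile pages: %#llx\n", (unsigned long long)max);
        return 0;
}

On L1TF-affected hardware the x86 override added in mm/init.c then clamps this further to the MAX_PA/2 PFN limit, shifted up by the three offset bits sitting below the PFN field.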
In order to find the +- * largest possible bit mask, a swap entry with swap type 0 +- * and swap offset ~0UL is created, encoded to a swap pte, +- * decoded to a swp_entry_t again, and finally the swap +- * offset is extracted. This will mask all the bits from +- * the initial ~0UL mask that can't be encoded in either +- * the swp_entry_t or the architecture definition of a +- * swap pte. +- */ +- maxpages = swp_offset(pte_to_swp_entry( +- swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; ++ maxpages = max_swapfile_size(); + last_page = swap_header->info.last_page; + if (!last_page) { + pr_warn("Empty swap-file\n"); +diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig +index 93581bba8643..09d6c4a6b53d 100644 +--- a/net/ipv4/Kconfig ++++ b/net/ipv4/Kconfig +@@ -354,6 +354,7 @@ config INET_ESP + select CRYPTO_CBC + select CRYPTO_SHA1 + select CRYPTO_DES ++ select CRYPTO_ECHAINIV + ---help--- + Support for IPsec ESP. + +diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig +index 851d5c9e3ecc..0f50248bad17 100644 +--- a/net/ipv6/Kconfig ++++ b/net/ipv6/Kconfig +@@ -69,6 +69,7 @@ config INET6_ESP + select CRYPTO_CBC + select CRYPTO_SHA1 + select CRYPTO_DES ++ select CRYPTO_ECHAINIV + ---help--- + Support for IPsec ESP. + diff --git a/patch/kernel/rk3328-default/04-patch-4.4.148-149.patch b/patch/kernel/rk3328-default/04-patch-4.4.148-149.patch new file mode 100644 index 000000000..ec14d48fa --- /dev/null +++ b/patch/kernel/rk3328-default/04-patch-4.4.148-149.patch @@ -0,0 +1,1201 @@ +diff --git a/Documentation/Changes b/Documentation/Changes +index ec97b77c8b00..f25649ffb892 100644 +--- a/Documentation/Changes ++++ b/Documentation/Changes +@@ -25,7 +25,7 @@ o GNU C 3.2 # gcc --version + o GNU make 3.80 # make --version + o binutils 2.12 # ld -v + o util-linux 2.10o # fdformat --version +-o module-init-tools 0.9.10 # depmod -V ++o kmod 13 # depmod -V + o e2fsprogs 1.41.4 # e2fsck -V + o jfsutils 1.1.3 # fsck.jfs -V + o reiserfsprogs 3.6.3 # reiserfsck -V +@@ -132,12 +132,6 @@ is not build with CONFIG_KALLSYMS and you have no way to rebuild and + reproduce the Oops with that option, then you can still decode that Oops + with ksymoops. + +-Module-Init-Tools +------------------ +- +-A new module loader is now in the kernel that requires module-init-tools +-to use. It is backward compatible with the 2.4.x series kernels. 
+- + Mkinitrd + -------- + +@@ -319,14 +313,15 @@ Util-linux + ---------- + o + ++Kmod ++---- ++o ++o ++ + Ksymoops + -------- + o + +-Module-Init-Tools +------------------ +-o +- + Mkinitrd + -------- + o +diff --git a/Makefile b/Makefile +index 9b795164122e..e7c46ece5f27 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 148 ++SUBLEVEL = 149 + EXTRAVERSION = + NAME = Blurry Fish Butt + +@@ -443,7 +443,7 @@ export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS + + export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS + export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN +-export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE ++export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE CFLAGS_KASAN_NOSANITIZE + export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE + export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL + export KBUILD_ARFLAGS +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c +index 51ac84e0812d..e9d96b028766 100644 +--- a/arch/arm64/mm/mmu.c ++++ b/arch/arm64/mm/mmu.c +@@ -699,12 +699,12 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) + } + + #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +-int pud_free_pmd_page(pud_t *pud) ++int pud_free_pmd_page(pud_t *pud, unsigned long addr) + { + return pud_none(*pud); + } + +-int pmd_free_pte_page(pmd_t *pmd) ++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) + { + return pmd_none(*pmd); + } +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c +index 08e94b6139ab..55c7446311a7 100644 +--- a/arch/x86/mm/pgtable.c ++++ b/arch/x86/mm/pgtable.c +@@ -676,28 +676,50 @@ int pmd_clear_huge(pmd_t *pmd) + return 0; + } + ++#ifdef CONFIG_X86_64 + /** + * pud_free_pmd_page - Clear pud entry and free pmd page. + * @pud: Pointer to a PUD. ++ * @addr: Virtual address associated with pud. + * +- * Context: The pud range has been unmaped and TLB purged. ++ * Context: The pud range has been unmapped and TLB purged. + * Return: 1 if clearing the entry succeeded. 0 otherwise. ++ * ++ * NOTE: Callers must allow a single page allocation. + */ +-int pud_free_pmd_page(pud_t *pud) ++int pud_free_pmd_page(pud_t *pud, unsigned long addr) + { +- pmd_t *pmd; ++ pmd_t *pmd, *pmd_sv; ++ pte_t *pte; + int i; + + if (pud_none(*pud)) + return 1; + + pmd = (pmd_t *)pud_page_vaddr(*pud); ++ pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL); ++ if (!pmd_sv) ++ return 0; + +- for (i = 0; i < PTRS_PER_PMD; i++) +- if (!pmd_free_pte_page(&pmd[i])) +- return 0; ++ for (i = 0; i < PTRS_PER_PMD; i++) { ++ pmd_sv[i] = pmd[i]; ++ if (!pmd_none(pmd[i])) ++ pmd_clear(&pmd[i]); ++ } + + pud_clear(pud); ++ ++ /* INVLPG to clear all paging-structure caches */ ++ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1); ++ ++ for (i = 0; i < PTRS_PER_PMD; i++) { ++ if (!pmd_none(pmd_sv[i])) { ++ pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]); ++ free_page((unsigned long)pte); ++ } ++ } ++ ++ free_page((unsigned long)pmd_sv); + free_page((unsigned long)pmd); + + return 1; +@@ -706,11 +728,12 @@ int pud_free_pmd_page(pud_t *pud) + /** + * pmd_free_pte_page - Clear pmd entry and free pte page. + * @pmd: Pointer to a PMD. ++ * @addr: Virtual address associated with pmd. + * +- * Context: The pmd range has been unmaped and TLB purged. ++ * Context: The pmd range has been unmapped and TLB purged. + * Return: 1 if clearing the entry succeeded. 0 otherwise. 
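The x86-64 pud_free_pmd_page() rework above is all about ordering: snapshot the PMD entries, clear the PUD, flush the paging-structure caches, and only then free the pages the snapshot still names. A user-space model of that sequence, with malloc()/free() standing in for page allocation and an empty flush_tlb() marking where flush_tlb_kernel_range() sits:

#include <stdlib.h>
#include <string.h>

#define PTRS_PER_PMD 512

static void flush_tlb(void)
{
        /* placeholder for flush_tlb_kernel_range(addr, addr + PAGE_SIZE - 1) */
}

static int pud_free_pmd_page(void **pudp)
{
        void **pmd = *pudp, **pmd_sv;
        int i;

        if (!pmd)
                return 1;
        pmd_sv = malloc(PTRS_PER_PMD * sizeof(*pmd_sv));    /* snapshot page */
        if (!pmd_sv)
                return 0;
        memcpy(pmd_sv, pmd, PTRS_PER_PMD * sizeof(*pmd_sv));
        memset(pmd, 0, PTRS_PER_PMD * sizeof(*pmd));        /* pmd_clear()s */
        *pudp = NULL;                                       /* pud_clear() */

        flush_tlb();    /* nothing may reach the old PTE pages past this */

        for (i = 0; i < PTRS_PER_PMD; i++)    /* now freeing is safe */
                free(pmd_sv[i]);
        free(pmd_sv);
        free(pmd);
        return 1;
}

int main(void)
{
        void **pmd = calloc(PTRS_PER_PMD, sizeof(*pmd));
        void *pud = pmd;

        pmd[0] = malloc(64);    /* one fake PTE page */
        return !pud_free_pmd_page(&pud);
}

Freeing before the flush would let a concurrent walker follow a cached intermediate entry into a recycled page, which is why the old code's free-as-you-go loop had to be replaced by the snapshot.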
+ */ +-int pmd_free_pte_page(pmd_t *pmd) ++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) + { + pte_t *pte; + +@@ -719,8 +742,30 @@ int pmd_free_pte_page(pmd_t *pmd) + + pte = (pte_t *)pmd_page_vaddr(*pmd); + pmd_clear(pmd); ++ ++ /* INVLPG to clear all paging-structure caches */ ++ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1); ++ + free_page((unsigned long)pte); + + return 1; + } ++ ++#else /* !CONFIG_X86_64 */ ++ ++int pud_free_pmd_page(pud_t *pud, unsigned long addr) ++{ ++ return pud_none(*pud); ++} ++ ++/* ++ * Disable free page handling on x86-PAE. This assures that ioremap() ++ * does not update sync'd pmd entries. See vmalloc_sync_one(). ++ */ ++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) ++{ ++ return pmd_none(*pmd); ++} ++ ++#endif /* CONFIG_X86_64 */ + #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ +diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c +index e5b5721809e2..149e7a7f04fe 100644 +--- a/crypto/ablkcipher.c ++++ b/crypto/ablkcipher.c +@@ -73,11 +73,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len) + return max(start, end_page); + } + +-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, +- unsigned int bsize) ++static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk, ++ unsigned int n) + { +- unsigned int n = bsize; +- + for (;;) { + unsigned int len_this_page = scatterwalk_pagelen(&walk->out); + +@@ -89,17 +87,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, + n -= len_this_page; + scatterwalk_start(&walk->out, sg_next(walk->out.sg)); + } +- +- return bsize; + } + +-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk, +- unsigned int n) ++static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk, ++ unsigned int n) + { + scatterwalk_advance(&walk->in, n); + scatterwalk_advance(&walk->out, n); +- +- return n; + } + + static int ablkcipher_walk_next(struct ablkcipher_request *req, +@@ -109,39 +103,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req, + struct ablkcipher_walk *walk, int err) + { + struct crypto_tfm *tfm = req->base.tfm; +- unsigned int nbytes = 0; ++ unsigned int n; /* bytes processed */ ++ bool more; + +- if (likely(err >= 0)) { +- unsigned int n = walk->nbytes - err; ++ if (unlikely(err < 0)) ++ goto finish; + +- if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) +- n = ablkcipher_done_fast(walk, n); +- else if (WARN_ON(err)) { +- err = -EINVAL; +- goto err; +- } else +- n = ablkcipher_done_slow(walk, n); ++ n = walk->nbytes - err; ++ walk->total -= n; ++ more = (walk->total != 0); + +- nbytes = walk->total - n; +- err = 0; ++ if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) { ++ ablkcipher_done_fast(walk, n); ++ } else { ++ if (WARN_ON(err)) { ++ /* unexpected case; didn't process all bytes */ ++ err = -EINVAL; ++ goto finish; ++ } ++ ablkcipher_done_slow(walk, n); + } + +- scatterwalk_done(&walk->in, 0, nbytes); +- scatterwalk_done(&walk->out, 1, nbytes); +- +-err: +- walk->total = nbytes; +- walk->nbytes = nbytes; ++ scatterwalk_done(&walk->in, 0, more); ++ scatterwalk_done(&walk->out, 1, more); + +- if (nbytes) { ++ if (more) { + crypto_yield(req->base.flags); + return ablkcipher_walk_next(req, walk); + } +- ++ err = 0; ++finish: ++ walk->nbytes = 0; + if (walk->iv != req->info) + memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize); + kfree(walk->iv_buffer); +- + return err; + } + EXPORT_SYMBOL_GPL(ablkcipher_walk_done); +diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c +index 
dca7bc87dad9..2d08e59b3212 100644 +--- a/crypto/blkcipher.c ++++ b/crypto/blkcipher.c +@@ -71,19 +71,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len) + return max(start, end_page); + } + +-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk, +- unsigned int bsize) ++static inline void blkcipher_done_slow(struct blkcipher_walk *walk, ++ unsigned int bsize) + { + u8 *addr; + + addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); + addr = blkcipher_get_spot(addr, bsize); + scatterwalk_copychunks(addr, &walk->out, bsize, 1); +- return bsize; + } + +-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, +- unsigned int n) ++static inline void blkcipher_done_fast(struct blkcipher_walk *walk, ++ unsigned int n) + { + if (walk->flags & BLKCIPHER_WALK_COPY) { + blkcipher_map_dst(walk); +@@ -97,49 +96,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, + + scatterwalk_advance(&walk->in, n); + scatterwalk_advance(&walk->out, n); +- +- return n; + } + + int blkcipher_walk_done(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, int err) + { +- unsigned int nbytes = 0; ++ unsigned int n; /* bytes processed */ ++ bool more; + +- if (likely(err >= 0)) { +- unsigned int n = walk->nbytes - err; ++ if (unlikely(err < 0)) ++ goto finish; + +- if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) +- n = blkcipher_done_fast(walk, n); +- else if (WARN_ON(err)) { +- err = -EINVAL; +- goto err; +- } else +- n = blkcipher_done_slow(walk, n); ++ n = walk->nbytes - err; ++ walk->total -= n; ++ more = (walk->total != 0); + +- nbytes = walk->total - n; +- err = 0; ++ if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) { ++ blkcipher_done_fast(walk, n); ++ } else { ++ if (WARN_ON(err)) { ++ /* unexpected case; didn't process all bytes */ ++ err = -EINVAL; ++ goto finish; ++ } ++ blkcipher_done_slow(walk, n); + } + +- scatterwalk_done(&walk->in, 0, nbytes); +- scatterwalk_done(&walk->out, 1, nbytes); +- +-err: +- walk->total = nbytes; +- walk->nbytes = nbytes; ++ scatterwalk_done(&walk->in, 0, more); ++ scatterwalk_done(&walk->out, 1, more); + +- if (nbytes) { ++ if (more) { + crypto_yield(desc->flags); + return blkcipher_walk_next(desc, walk); + } +- ++ err = 0; ++finish: ++ walk->nbytes = 0; + if (walk->iv != desc->info) + memcpy(desc->info, walk->iv, walk->ivsize); + if (walk->buffer != walk->page) + kfree(walk->buffer); + if (walk->page) + free_page((unsigned long)walk->page); +- + return err; + } + EXPORT_SYMBOL_GPL(blkcipher_walk_done); +diff --git a/crypto/vmac.c b/crypto/vmac.c +index df76a816cfb2..bb2fc787d615 100644 +--- a/crypto/vmac.c ++++ b/crypto/vmac.c +@@ -1,6 +1,10 @@ + /* +- * Modified to interface to the Linux kernel ++ * VMAC: Message Authentication Code using Universal Hashing ++ * ++ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01 ++ * + * Copyright (c) 2009, Intel Corporation. ++ * Copyright (c) 2018, Google Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, +@@ -16,14 +20,15 @@ + * Place - Suite 330, Boston, MA 02111-1307 USA. + */ + +-/* -------------------------------------------------------------------------- +- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. +- * This implementation is herby placed in the public domain. +- * The authors offers no warranty. Use at your own risk. +- * Please send bug reports to the authors. 
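Backing up to the {a,}blkcipher completion rework above: walk_done() now bails straight to cleanup on a negative error and otherwise subtracts the processed bytes from walk->total, so the walk always ends with nbytes == 0 instead of the old code's mangled totals on the error path. A toy model of that control flow; the struct, the 64-byte chunking, and the re-entry convention are all illustrative:

#include <stdbool.h>
#include <stdio.h>

struct walk { unsigned total, nbytes; };

static int walk_done(struct walk *w, int err)
{
        unsigned n;
        bool more;

        if (err < 0)
                goto finish;    /* the old code fell through and kept going */

        n = w->nbytes - (unsigned)err;    /* err >= 0: bytes *not* processed */
        w->total -= n;
        more = (w->total != 0);
        if (more) {
                /* set up the next chunk; the kernel re-enters the walk here */
                w->nbytes = w->total < 64 ? w->total : 64;
                return 1;
        }
        err = 0;
finish:
        w->nbytes = 0;    /* always leave the walk terminated */
        return err;
}

int main(void)
{
        struct walk w = { .total = 100, .nbytes = 64 };

        while (walk_done(&w, 0) == 1)
                ;
        printf("total left: %u, nbytes: %u\n", w.total, w.nbytes);  /* 0, 0 */
        return 0;
}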
+- * Last modified: 17 APR 08, 1700 PDT +- * ----------------------------------------------------------------------- */ ++/* ++ * Derived from: ++ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. ++ * This implementation is herby placed in the public domain. ++ * The authors offers no warranty. Use at your own risk. ++ * Last modified: 17 APR 08, 1700 PDT ++ */ + ++#include + #include + #include + #include +@@ -31,9 +36,35 @@ + #include + #include + #include +-#include + #include + ++/* ++ * User definable settings. ++ */ ++#define VMAC_TAG_LEN 64 ++#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ ++#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) ++#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/ ++ ++/* per-transform (per-key) context */ ++struct vmac_tfm_ctx { ++ struct crypto_cipher *cipher; ++ u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; ++ u64 polykey[2*VMAC_TAG_LEN/64]; ++ u64 l3key[2*VMAC_TAG_LEN/64]; ++}; ++ ++/* per-request context */ ++struct vmac_desc_ctx { ++ union { ++ u8 partial[VMAC_NHBYTES]; /* partial block */ ++ __le64 partial_words[VMAC_NHBYTES / 8]; ++ }; ++ unsigned int partial_size; /* size of the partial block */ ++ bool first_block_processed; ++ u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */ ++}; ++ + /* + * Constants and masks + */ +@@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64 *alo, + } while (0) + #endif + +-static void vhash_abort(struct vmac_ctx *ctx) +-{ +- ctx->polytmp[0] = ctx->polykey[0] ; +- ctx->polytmp[1] = ctx->polykey[1] ; +- ctx->first_block_processed = 0; +-} +- + static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) + { + u64 rh, rl, t, z = 0; +@@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) + return rl; + } + +-static void vhash_update(const unsigned char *m, +- unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */ +- struct vmac_ctx *ctx) ++/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */ ++static void vhash_blocks(const struct vmac_tfm_ctx *tctx, ++ struct vmac_desc_ctx *dctx, ++ const __le64 *mptr, unsigned int blocks) + { +- u64 rh, rl, *mptr; +- const u64 *kptr = (u64 *)ctx->nhkey; +- int i; +- u64 ch, cl; +- u64 pkh = ctx->polykey[0]; +- u64 pkl = ctx->polykey[1]; +- +- if (!mbytes) +- return; +- +- BUG_ON(mbytes % VMAC_NHBYTES); +- +- mptr = (u64 *)m; +- i = mbytes / VMAC_NHBYTES; /* Must be non-zero */ +- +- ch = ctx->polytmp[0]; +- cl = ctx->polytmp[1]; +- +- if (!ctx->first_block_processed) { +- ctx->first_block_processed = 1; ++ const u64 *kptr = tctx->nhkey; ++ const u64 pkh = tctx->polykey[0]; ++ const u64 pkl = tctx->polykey[1]; ++ u64 ch = dctx->polytmp[0]; ++ u64 cl = dctx->polytmp[1]; ++ u64 rh, rl; ++ ++ if (!dctx->first_block_processed) { ++ dctx->first_block_processed = true; + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); + rh &= m62; + ADD128(ch, cl, rh, rl); + mptr += (VMAC_NHBYTES/sizeof(u64)); +- i--; ++ blocks--; + } + +- while (i--) { ++ while (blocks--) { + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); + rh &= m62; + poly_step(ch, cl, pkh, pkl, rh, rl); + mptr += (VMAC_NHBYTES/sizeof(u64)); + } + +- ctx->polytmp[0] = ch; +- ctx->polytmp[1] = cl; ++ dctx->polytmp[0] = ch; ++ dctx->polytmp[1] = cl; + } + +-static u64 vhash(unsigned char m[], unsigned int mbytes, +- u64 *tagl, struct vmac_ctx *ctx) ++static int vmac_setkey(struct crypto_shash *tfm, ++ const u8 *key, unsigned int keylen) + { +- u64 rh, rl, *mptr; +- const u64 *kptr = (u64 *)ctx->nhkey; +- int i, remaining; +- u64 
ch, cl; +- u64 pkh = ctx->polykey[0]; +- u64 pkl = ctx->polykey[1]; +- +- mptr = (u64 *)m; +- i = mbytes / VMAC_NHBYTES; +- remaining = mbytes % VMAC_NHBYTES; +- +- if (ctx->first_block_processed) { +- ch = ctx->polytmp[0]; +- cl = ctx->polytmp[1]; +- } else if (i) { +- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl); +- ch &= m62; +- ADD128(ch, cl, pkh, pkl); +- mptr += (VMAC_NHBYTES/sizeof(u64)); +- i--; +- } else if (remaining) { +- nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl); +- ch &= m62; +- ADD128(ch, cl, pkh, pkl); +- mptr += (VMAC_NHBYTES/sizeof(u64)); +- goto do_l3; +- } else {/* Empty String */ +- ch = pkh; cl = pkl; +- goto do_l3; +- } +- +- while (i--) { +- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); +- rh &= m62; +- poly_step(ch, cl, pkh, pkl, rh, rl); +- mptr += (VMAC_NHBYTES/sizeof(u64)); +- } +- if (remaining) { +- nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl); +- rh &= m62; +- poly_step(ch, cl, pkh, pkl, rh, rl); +- } +- +-do_l3: +- vhash_abort(ctx); +- remaining *= 8; +- return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining); +-} ++ struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm); ++ __be64 out[2]; ++ u8 in[16] = { 0 }; ++ unsigned int i; ++ int err; + +-static u64 vmac(unsigned char m[], unsigned int mbytes, +- const unsigned char n[16], u64 *tagl, +- struct vmac_ctx_t *ctx) +-{ +- u64 *in_n, *out_p; +- u64 p, h; +- int i; +- +- in_n = ctx->__vmac_ctx.cached_nonce; +- out_p = ctx->__vmac_ctx.cached_aes; +- +- i = n[15] & 1; +- if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) { +- in_n[0] = *(u64 *)(n); +- in_n[1] = *(u64 *)(n+8); +- ((unsigned char *)in_n)[15] &= 0xFE; +- crypto_cipher_encrypt_one(ctx->child, +- (unsigned char *)out_p, (unsigned char *)in_n); +- +- ((unsigned char *)in_n)[15] |= (unsigned char)(1-i); ++ if (keylen != VMAC_KEY_LEN) { ++ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); ++ return -EINVAL; + } +- p = be64_to_cpup(out_p + i); +- h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); +- return le64_to_cpu(p + h); +-} + +-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) +-{ +- u64 in[2] = {0}, out[2]; +- unsigned i; +- int err = 0; +- +- err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN); ++ err = crypto_cipher_setkey(tctx->cipher, key, keylen); + if (err) + return err; + + /* Fill nh key */ +- ((unsigned char *)in)[0] = 0x80; +- for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) { +- crypto_cipher_encrypt_one(ctx->child, +- (unsigned char *)out, (unsigned char *)in); +- ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out); +- ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1); +- ((unsigned char *)in)[15] += 1; ++ in[0] = 0x80; ++ for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) { ++ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); ++ tctx->nhkey[i] = be64_to_cpu(out[0]); ++ tctx->nhkey[i+1] = be64_to_cpu(out[1]); ++ in[15]++; + } + + /* Fill poly key */ +- ((unsigned char *)in)[0] = 0xC0; +- in[1] = 0; +- for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) { +- crypto_cipher_encrypt_one(ctx->child, +- (unsigned char *)out, (unsigned char *)in); +- ctx->__vmac_ctx.polytmp[i] = +- ctx->__vmac_ctx.polykey[i] = +- be64_to_cpup(out) & mpoly; +- ctx->__vmac_ctx.polytmp[i+1] = +- ctx->__vmac_ctx.polykey[i+1] = +- be64_to_cpup(out+1) & mpoly; +- ((unsigned char *)in)[15] += 1; ++ in[0] = 0xC0; ++ in[15] = 0; ++ for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) { ++ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); ++ tctx->polykey[i] = 
be64_to_cpu(out[0]) & mpoly; ++ tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly; ++ in[15]++; + } + + /* Fill ip key */ +- ((unsigned char *)in)[0] = 0xE0; +- in[1] = 0; +- for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) { ++ in[0] = 0xE0; ++ in[15] = 0; ++ for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) { + do { +- crypto_cipher_encrypt_one(ctx->child, +- (unsigned char *)out, (unsigned char *)in); +- ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out); +- ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1); +- ((unsigned char *)in)[15] += 1; +- } while (ctx->__vmac_ctx.l3key[i] >= p64 +- || ctx->__vmac_ctx.l3key[i+1] >= p64); ++ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); ++ tctx->l3key[i] = be64_to_cpu(out[0]); ++ tctx->l3key[i+1] = be64_to_cpu(out[1]); ++ in[15]++; ++ } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64); + } + +- /* Invalidate nonce/aes cache and reset other elements */ +- ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */ +- ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */ +- ctx->__vmac_ctx.first_block_processed = 0; +- +- return err; ++ return 0; + } + +-static int vmac_setkey(struct crypto_shash *parent, +- const u8 *key, unsigned int keylen) ++static int vmac_init(struct shash_desc *desc) + { +- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); ++ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); ++ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); + +- if (keylen != VMAC_KEY_LEN) { +- crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); +- return -EINVAL; +- } +- +- return vmac_set_key((u8 *)key, ctx); +-} +- +-static int vmac_init(struct shash_desc *pdesc) +-{ ++ dctx->partial_size = 0; ++ dctx->first_block_processed = false; ++ memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp)); + return 0; + } + +-static int vmac_update(struct shash_desc *pdesc, const u8 *p, +- unsigned int len) ++static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len) + { +- struct crypto_shash *parent = pdesc->tfm; +- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); +- int expand; +- int min; +- +- expand = VMAC_NHBYTES - ctx->partial_size > 0 ? +- VMAC_NHBYTES - ctx->partial_size : 0; +- +- min = len < expand ? 
len : expand; +- +- memcpy(ctx->partial + ctx->partial_size, p, min); +- ctx->partial_size += min; +- +- if (len < expand) +- return 0; +- +- vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx); +- ctx->partial_size = 0; +- +- len -= expand; +- p += expand; ++ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); ++ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); ++ unsigned int n; ++ ++ if (dctx->partial_size) { ++ n = min(len, VMAC_NHBYTES - dctx->partial_size); ++ memcpy(&dctx->partial[dctx->partial_size], p, n); ++ dctx->partial_size += n; ++ p += n; ++ len -= n; ++ if (dctx->partial_size == VMAC_NHBYTES) { ++ vhash_blocks(tctx, dctx, dctx->partial_words, 1); ++ dctx->partial_size = 0; ++ } ++ } + +- if (len % VMAC_NHBYTES) { +- memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES), +- len % VMAC_NHBYTES); +- ctx->partial_size = len % VMAC_NHBYTES; ++ if (len >= VMAC_NHBYTES) { ++ n = round_down(len, VMAC_NHBYTES); ++ /* TODO: 'p' may be misaligned here */ ++ vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES); ++ p += n; ++ len -= n; + } + +- vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx); ++ if (len) { ++ memcpy(dctx->partial, p, len); ++ dctx->partial_size = len; ++ } + + return 0; + } + +-static int vmac_final(struct shash_desc *pdesc, u8 *out) ++static u64 vhash_final(const struct vmac_tfm_ctx *tctx, ++ struct vmac_desc_ctx *dctx) + { +- struct crypto_shash *parent = pdesc->tfm; +- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); +- vmac_t mac; +- u8 nonce[16] = {}; +- +- /* vmac() ends up accessing outside the array bounds that +- * we specify. In appears to access up to the next 2-word +- * boundary. We'll just be uber cautious and zero the +- * unwritten bytes in the buffer. +- */ +- if (ctx->partial_size) { +- memset(ctx->partial + ctx->partial_size, 0, +- VMAC_NHBYTES - ctx->partial_size); ++ unsigned int partial = dctx->partial_size; ++ u64 ch = dctx->polytmp[0]; ++ u64 cl = dctx->polytmp[1]; ++ ++ /* L1 and L2-hash the final block if needed */ ++ if (partial) { ++ /* Zero-pad to next 128-bit boundary */ ++ unsigned int n = round_up(partial, 16); ++ u64 rh, rl; ++ ++ memset(&dctx->partial[partial], 0, n - partial); ++ nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl); ++ rh &= m62; ++ if (dctx->first_block_processed) ++ poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1], ++ rh, rl); ++ else ++ ADD128(ch, cl, rh, rl); + } +- mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); +- memcpy(out, &mac, sizeof(vmac_t)); +- memzero_explicit(&mac, sizeof(vmac_t)); +- memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); +- ctx->partial_size = 0; ++ ++ /* L3-hash the 128-bit output of L2-hash */ ++ return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8); ++} ++ ++static int vmac_final(struct shash_desc *desc, u8 *out) ++{ ++ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); ++ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); ++ static const u8 nonce[16] = {}; /* TODO: this is insecure */ ++ union { ++ u8 bytes[16]; ++ __be64 pads[2]; ++ } block; ++ int index; ++ u64 hash, pad; ++ ++ /* Finish calculating the VHASH of the message */ ++ hash = vhash_final(tctx, dctx); ++ ++ /* Generate pseudorandom pad by encrypting the nonce */ ++ memcpy(&block, nonce, 16); ++ index = block.bytes[15] & 1; ++ block.bytes[15] &= ~1; ++ crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes); ++ pad = be64_to_cpu(block.pads[index]); ++ ++ /* The VMAC is the sum of VHASH and the pseudorandom pad */ ++ 
put_unaligned_le64(hash + pad, out); + return 0; + } + + static int vmac_init_tfm(struct crypto_tfm *tfm) + { +- struct crypto_cipher *cipher; +- struct crypto_instance *inst = (void *)tfm->__crt_alg; ++ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); + struct crypto_spawn *spawn = crypto_instance_ctx(inst); +- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); ++ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); ++ struct crypto_cipher *cipher; + + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + +- ctx->child = cipher; ++ tctx->cipher = cipher; + return 0; + } + + static void vmac_exit_tfm(struct crypto_tfm *tfm) + { +- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); +- crypto_free_cipher(ctx->child); ++ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); ++ ++ crypto_free_cipher(tctx->cipher); + } + + static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) +@@ -655,6 +608,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) + if (IS_ERR(alg)) + return PTR_ERR(alg); + ++ err = -EINVAL; ++ if (alg->cra_blocksize != 16) ++ goto out_put_alg; ++ + inst = shash_alloc_instance("vmac", alg); + err = PTR_ERR(inst); + if (IS_ERR(inst)) +@@ -670,11 +627,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) + inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_alignmask = alg->cra_alignmask; + +- inst->alg.digestsize = sizeof(vmac_t); +- inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t); ++ inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx); + inst->alg.base.cra_init = vmac_init_tfm; + inst->alg.base.cra_exit = vmac_exit_tfm; + ++ inst->alg.descsize = sizeof(struct vmac_desc_ctx); ++ inst->alg.digestsize = VMAC_TAG_LEN / 8; + inst->alg.init = vmac_init; + inst->alg.update = vmac_update; + inst->alg.final = vmac_final; +diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c +index 1111cb966a44..fa2b58142cde 100644 +--- a/drivers/i2c/busses/i2c-ismt.c ++++ b/drivers/i2c/busses/i2c-ismt.c +@@ -587,7 +587,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, + + /* unmap the data buffer */ + if (dma_size != 0) +- dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction); ++ dma_unmap_single(dev, dma_addr, dma_size, dma_direction); + + if (unlikely(!time_left)) { + dev_err(dev, "completion wait timed out\n"); +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h +index dabecb661264..53a47d75cc43 100644 +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -770,8 +770,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); + int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); + int pud_clear_huge(pud_t *pud); + int pmd_clear_huge(pmd_t *pmd); +-int pud_free_pmd_page(pud_t *pud); +-int pmd_free_pte_page(pmd_t *pmd); ++int pud_free_pmd_page(pud_t *pud, unsigned long addr); ++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); + #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ + static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) + { +@@ -789,11 +789,11 @@ static inline int pmd_clear_huge(pmd_t *pmd) + { + return 0; + } +-static inline int pud_free_pmd_page(pud_t *pud) ++static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) + { + return 0; + } +-static inline int pmd_free_pte_page(pmd_t *pmd) ++static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) + { + return 0; + } +diff --git a/include/crypto/vmac.h 
b/include/crypto/vmac.h +deleted file mode 100644 +index 6b700c7b2fe1..000000000000 +--- a/include/crypto/vmac.h ++++ /dev/null +@@ -1,63 +0,0 @@ +-/* +- * Modified to interface to the Linux kernel +- * Copyright (c) 2009, Intel Corporation. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms and conditions of the GNU General Public License, +- * version 2, as published by the Free Software Foundation. +- * +- * This program is distributed in the hope it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +- * more details. +- * +- * You should have received a copy of the GNU General Public License along with +- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple +- * Place - Suite 330, Boston, MA 02111-1307 USA. +- */ +- +-#ifndef __CRYPTO_VMAC_H +-#define __CRYPTO_VMAC_H +- +-/* -------------------------------------------------------------------------- +- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. +- * This implementation is herby placed in the public domain. +- * The authors offers no warranty. Use at your own risk. +- * Please send bug reports to the authors. +- * Last modified: 17 APR 08, 1700 PDT +- * ----------------------------------------------------------------------- */ +- +-/* +- * User definable settings. +- */ +-#define VMAC_TAG_LEN 64 +-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ +-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) +-#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/ +- +-/* +- * This implementation uses u32 and u64 as names for unsigned 32- +- * and 64-bit integer types. These are defined in C99 stdint.h. The +- * following may need adaptation if you are not running a C99 or +- * Microsoft C environment. 
+- */ +-struct vmac_ctx { +- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; +- u64 polykey[2*VMAC_TAG_LEN/64]; +- u64 l3key[2*VMAC_TAG_LEN/64]; +- u64 polytmp[2*VMAC_TAG_LEN/64]; +- u64 cached_nonce[2]; +- u64 cached_aes[2]; +- int first_block_processed; +-}; +- +-typedef u64 vmac_t; +- +-struct vmac_ctx_t { +- struct crypto_cipher *child; +- struct vmac_ctx __vmac_ctx; +- u8 partial[VMAC_NHBYTES]; /* partial block */ +- int partial_size; /* size of the partial block */ +-}; +- +-#endif /* __CRYPTO_VMAC_H */ +diff --git a/lib/ioremap.c b/lib/ioremap.c +index 5323b59ca393..b9462037868d 100644 +--- a/lib/ioremap.c ++++ b/lib/ioremap.c +@@ -84,7 +84,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, + if (ioremap_pmd_enabled() && + ((next - addr) == PMD_SIZE) && + IS_ALIGNED(phys_addr + addr, PMD_SIZE) && +- pmd_free_pte_page(pmd)) { ++ pmd_free_pte_page(pmd, addr)) { + if (pmd_set_huge(pmd, phys_addr + addr, prot)) + continue; + } +@@ -111,7 +111,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, + if (ioremap_pud_enabled() && + ((next - addr) == PUD_SIZE) && + IS_ALIGNED(phys_addr + addr, PUD_SIZE) && +- pud_free_pmd_page(pud)) { ++ pud_free_pmd_page(pud, addr)) { + if (pud_set_huge(pud, phys_addr + addr, prot)) + continue; + } +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c +index 1fc076420d1e..1811f8e7ddf4 100644 +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session) + del_timer(&session->timer); + } + +-static void hidp_process_report(struct hidp_session *session, +- int type, const u8 *data, int len, int intr) ++static void hidp_process_report(struct hidp_session *session, int type, ++ const u8 *data, unsigned int len, int intr) + { + if (len > HID_MAX_BUFFER_SIZE) + len = HID_MAX_BUFFER_SIZE; +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 4a261e078082..9c4c6cd0316e 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -4835,6 +4835,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk) + end = TCP_SKB_CB(skb)->end_seq; + range_truesize = skb->truesize; + } else { ++ range_truesize += skb->truesize; + if (before(TCP_SKB_CB(skb)->seq, start)) + start = TCP_SKB_CB(skb)->seq; + if (after(TCP_SKB_CB(skb)->end_seq, end)) +diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan +index 37323b0df374..2624d4bf9a45 100644 +--- a/scripts/Makefile.kasan ++++ b/scripts/Makefile.kasan +@@ -28,4 +28,7 @@ else + CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL) + endif + endif ++ ++CFLAGS_KASAN_NOSANITIZE := -fno-builtin ++ + endif +diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib +index 24914e7de944..a2d0e6d32659 100644 +--- a/scripts/Makefile.lib ++++ b/scripts/Makefile.lib +@@ -126,7 +126,7 @@ endif + ifeq ($(CONFIG_KASAN),y) + _c_flags += $(if $(patsubst n%,, \ + $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \ +- $(CFLAGS_KASAN)) ++ $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE)) + endif + + # If building the kernel in a separate objtree expand all occurrences +diff --git a/scripts/depmod.sh b/scripts/depmod.sh +index 122599b1c13b..ea1e96921e3b 100755 +--- a/scripts/depmod.sh ++++ b/scripts/depmod.sh +@@ -10,10 +10,16 @@ DEPMOD=$1 + KERNELRELEASE=$2 + SYMBOL_PREFIX=$3 + +-if ! test -r System.map -a -x "$DEPMOD"; then ++if ! test -r System.map ; then + exit 0 + fi + ++if [ -z $(command -v $DEPMOD) ]; then ++ echo "'make modules_install' requires $DEPMOD. Please install it." 
>&2 ++ echo "This is probably in the kmod package." >&2 ++ exit 1 ++fi ++ + # older versions of depmod don't support -P + # support was added in module-init-tools 3.13 + if test -n "$SYMBOL_PREFIX"; then +diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c +index 4e2fcf188dd1..01a573a063d1 100644 +--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c ++++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c +@@ -131,23 +131,19 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime) + struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card); + struct snd_soc_jack *jack = &ctx->jack; + +- /** +- * TI supports 4 butons headset detection +- * KEY_MEDIA +- * KEY_VOICECOMMAND +- * KEY_VOLUMEUP +- * KEY_VOLUMEDOWN +- */ +- if (ctx->ts3a227e_present) +- jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE | +- SND_JACK_BTN_0 | SND_JACK_BTN_1 | +- SND_JACK_BTN_2 | SND_JACK_BTN_3; +- else +- jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE; ++ if (ctx->ts3a227e_present) { ++ /* ++ * The jack has already been created in the ++ * cht_max98090_headset_init() function. ++ */ ++ snd_soc_jack_notifier_register(jack, &cht_jack_nb); ++ return 0; ++ } ++ ++ jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE; + + ret = snd_soc_card_jack_new(runtime->card, "Headset Jack", + jack_type, jack, NULL, 0); +- + if (ret) { + dev_err(runtime->dev, "Headset Jack creation failed %d\n", ret); + return ret; +@@ -203,6 +199,27 @@ static int cht_max98090_headset_init(struct snd_soc_component *component) + { + struct snd_soc_card *card = component->card; + struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card); ++ struct snd_soc_jack *jack = &ctx->jack; ++ int jack_type; ++ int ret; ++ ++ /* ++ * TI supports 4 butons headset detection ++ * KEY_MEDIA ++ * KEY_VOICECOMMAND ++ * KEY_VOLUMEUP ++ * KEY_VOLUMEDOWN ++ */ ++ jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE | ++ SND_JACK_BTN_0 | SND_JACK_BTN_1 | ++ SND_JACK_BTN_2 | SND_JACK_BTN_3; ++ ++ ret = snd_soc_card_jack_new(card, "Headset Jack", jack_type, ++ jack, NULL, 0); ++ if (ret) { ++ dev_err(card->dev, "Headset Jack creation failed %d\n", ret); ++ return ret; ++ } + + return ts3a227e_enable_jack_detect(component, &ctx->jack); + } diff --git a/patch/kernel/rk3328-default/04-patch-4.4.149-150.patch b/patch/kernel/rk3328-default/04-patch-4.4.149-150.patch new file mode 100644 index 000000000..2446bbe3c --- /dev/null +++ b/patch/kernel/rk3328-default/04-patch-4.4.149-150.patch @@ -0,0 +1,36 @@ +diff --git a/Makefile b/Makefile +index e7c46ece5f27..7789195c6a59 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 149 ++SUBLEVEL = 150 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h +index 44b1203ece12..a0c1525f1b6f 100644 +--- a/arch/x86/include/asm/pgtable-invert.h ++++ b/arch/x86/include/asm/pgtable-invert.h +@@ -4,9 +4,18 @@ + + #ifndef __ASSEMBLY__ + ++/* ++ * A clear pte value is special, and doesn't get inverted. ++ * ++ * Note that even users that only pass a pgprot_t (rather ++ * than a full pte) won't trigger the special zero case, ++ * because even PAGE_NONE has _PAGE_PROTNONE | _PAGE_ACCESSED ++ * set. So the all zero case really is limited to just the ++ * cleared page table entry case. 
++ */ + static inline bool __pte_needs_invert(u64 val) + { +- return !(val & _PAGE_PRESENT); ++ return val && !(val & _PAGE_PRESENT); + } + + /* Get a mask to xor with the page table entry to get the correct pfn. */
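
A note on the reworked vmac_update() in the VMAC hunks above: it now follows the usual shash buffering pattern, i.e. top up any previously buffered partial block, hash as many whole VMAC_NHBYTES blocks as possible straight from the input, then stash the tail for the next call or for finalization. The standalone C sketch below illustrates only that pattern, outside the kernel; buf_ctx, buf_update, handle_blocks and BLOCK_SIZE are made-up stand-ins for the patch's vmac_desc_ctx, vmac_update(), vhash_blocks() and VMAC_NHBYTES, and the block handler merely counts blocks instead of running the NH/L2 hash.

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 128 /* stands in for VMAC_NHBYTES */

struct buf_ctx {
	unsigned char partial[BLOCK_SIZE];	/* partial block */
	unsigned int partial_size;		/* bytes buffered so far */
	unsigned long blocks_hashed;		/* stand-in for real hash state */
};

/* stand-in for vhash_blocks(): just counts the blocks it is handed */
static void handle_blocks(struct buf_ctx *ctx, const unsigned char *p,
			  unsigned int nblocks)
{
	(void)p;
	ctx->blocks_hashed += nblocks;
}

static void buf_update(struct buf_ctx *ctx, const unsigned char *p,
		       unsigned int len)
{
	unsigned int n;

	/* 1. Top up a previously buffered partial block. */
	if (ctx->partial_size) {
		n = BLOCK_SIZE - ctx->partial_size;
		if (n > len)
			n = len;
		memcpy(ctx->partial + ctx->partial_size, p, n);
		ctx->partial_size += n;
		p += n;
		len -= n;
		if (ctx->partial_size == BLOCK_SIZE) {
			handle_blocks(ctx, ctx->partial, 1);
			ctx->partial_size = 0;
		}
	}

	/* 2. Hash whole blocks straight from the input. */
	if (len >= BLOCK_SIZE) {
		n = len - (len % BLOCK_SIZE);
		handle_blocks(ctx, p, n / BLOCK_SIZE);
		p += n;
		len -= n;
	}

	/* 3. Stash the tail for the next call or for finalization. */
	if (len) {
		memcpy(ctx->partial, p, len);
		ctx->partial_size = len;
	}
}

int main(void)
{
	struct buf_ctx ctx = { .partial_size = 0, .blocks_hashed = 0 };
	unsigned char msg[300] = { 0 };

	buf_update(&ctx, msg, 300); /* 2 whole blocks, 44-byte tail */
	buf_update(&ctx, msg, 100); /* tail tops up to 128 and is hashed, 16 left */
	printf("blocks=%lu tail=%u\n", ctx.blocks_hashed, ctx.partial_size);
	return 0;
}

This structure is also why the patch can drop the old code's zero-padding dance in vmac_final(): the partial block lives in a fixed-size union alongside partial_words, so the final-block padding is bounded and explicit.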
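The new vmac_final() computes the tag as a 64-bit sum: VHASH(message) plus a pseudorandom pad taken from the encrypted nonce, where the nonce's low bit selects which half of the 128-bit cipher output is used (the bit is cleared before encrypting). The sketch below shows only that selection arithmetic; toy_encrypt is a dummy permutation standing in for the AES call (crypto_cipher_encrypt_one in the patch) purely so the example runs on its own, and toy_vmac_tag and be64_load are invented helper names.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/*
 * Dummy 128-bit "block cipher" used only so this sketch is runnable;
 * the real code encrypts the nonce with AES under the VMAC key.
 */
static void toy_encrypt(uint8_t out[16], const uint8_t in[16])
{
	for (int i = 0; i < 16; i++)
		out[i] = (uint8_t)((in[i] ^ 0xA5) + i);
}

/* big-endian 64-bit load, like be64_to_cpu() on a byte buffer */
static uint64_t be64_load(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static uint64_t toy_vmac_tag(uint64_t vhash, const uint8_t nonce[16])
{
	uint8_t block[16];
	int index;

	memcpy(block, nonce, 16);
	index = block[15] & 1;	/* low nonce bit picks an output half */
	block[15] &= ~1;	/* encrypt with that bit cleared */
	toy_encrypt(block, block);

	/* tag = VHASH + selected 64-bit half of E_K(nonce) */
	return vhash + be64_load(block + 8 * index);
}

int main(void)
{
	uint8_t nonce[16] = { 0 };

	printf("tag=%016llx\n",
	       (unsigned long long)toy_vmac_tag(0x123456789abcdef0ULL, nonce));
	return 0;
}

Note the patch itself flags the remaining weakness here: the shash interface gives vmac_final() no per-message nonce, so it uses a constant one (the "TODO: this is insecure" comment), which is why the result is stored with put_unaligned_le64() and treated as a plain 64-bit digest.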
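Finally, the __pte_needs_invert() fix in the 4.4.149-150 patch above is easy to exercise in isolation: per the new comment, an all-zero (cleared) entry must never be inverted, while a non-zero entry without _PAGE_PRESENT must be. Below is a minimal user-space rendition of the fixed predicate; it models only the _PAGE_PRESENT bit (the real x86 header defines many more flag bits) and pte_needs_invert is a local name, not the kernel symbol.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define _PAGE_PRESENT 0x001ULL	/* bit 0, as in the x86 headers */

/* the fixed predicate: zero (a cleared pte) is never inverted */
static bool pte_needs_invert(uint64_t val)
{
	return val && !(val & _PAGE_PRESENT);
}

int main(void)
{
	printf("cleared pte     -> %d\n", pte_needs_invert(0));      /* 0 */
	printf("present pte     -> %d\n", pte_needs_invert(0x63));   /* 0 */
	printf("non-present pte -> %d\n", pte_needs_invert(0x1000)); /* 1 */
	return 0;
}

The one-character change (adding "val &&") matters because inverting an all-zero entry would turn an empty page-table slot into a pattern with high address bits set, defeating the L1TF mitigation's intent for cleared entries.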