mirror of https://github.com/Fishwaldo/build.git (synced 2025-07-23 05:18:55 +00:00)
diff --git a/Makefile b/Makefile
index cf28e431b68a..a057ec583552 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 11
-SUBLEVEL = 11
+SUBLEVEL = 12
EXTRAVERSION =
NAME = Fearless Coyote

diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index d2315ffd8f12..f13ae153fb24 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	4096

-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader. We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
+/* This is the base location for PIE (ET_DYN with INTERP) loads. */
+#define ELF_ET_DYN_BASE		0x400000UL

/* When the program starts, a1 contains a pointer to a function to be
   registered with atexit, as per the SVR4 ABI.  A value of 0 means we
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index b48d668a6ab6..684ace85dc15 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -75,14 +75,10 @@

	timer {
		compatible = "arm,armv8-timer";
-		interrupts = <GIC_PPI 13
-			      (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-			     <GIC_PPI 14
-			      (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-			     <GIC_PPI 11
-			      (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-			     <GIC_PPI 10
-			      (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
	};

	soc {
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 5d1700425efe..acae781f7359 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -113,12 +113,11 @@
#define ELF_EXEC_PAGESIZE	PAGE_SIZE

/*
- * This is the location that an ET_DYN program is loaded if exec'ed. Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader. We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
 */
-#define ELF_ET_DYN_BASE	(2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE		0x100000000UL

#ifndef __ASSEMBLY__

@@ -142,6 +141,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
({						\
	clear_bit(TIF_32BIT, &current->mm->context.flags);	\
	clear_thread_flag(TIF_32BIT);			\
+	current->personality &= ~READ_IMPLIES_EXEC;	\
})

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
@@ -173,7 +173,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,

#ifdef CONFIG_COMPAT

-#define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE		0x000400000UL

/* AArch32 registers. */
#define COMPAT_ELF_NGREG		18
@@ -187,6 +188,11 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
	 ((x)->e_flags & EF_ARM_EABI_MASK))

#define compat_start_thread		compat_start_thread
+/*
+ * Unlike the native SET_PERSONALITY macro, the compat version inherits
+ * READ_IMPLIES_EXEC across a fork() since this is the behaviour on
+ * arch/arm/.
+ */
#define COMPAT_SET_PERSONALITY(ex)	\
({						\
	set_bit(TIF_32BIT, &current->mm->context.flags); \
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 5404c6a726b2..9a2a8956a695 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -20,6 +20,8 @@
** flush/purge and allocate "regular" cacheable pages for everything.
*/

+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
+
#ifdef CONFIG_PA11
extern const struct dma_map_ops pcxl_dma_ops;
extern const struct dma_map_ops pcx_dma_ops;
@@ -54,12 +56,13 @@ parisc_walk_tree(struct device *dev)
			break;
		}
	}
-	BUG_ON(!dev->platform_data);
	return dev->platform_data;
}
-
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
-
+
+#define GET_IOC(dev) ({					\
+	void *__pdata = parisc_walk_tree(dev);		\
+	__pdata ? HBA_DATA(__pdata)->iommu : NULL;	\
+})

#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 59be25764433..a81226257878 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context)
	mtctl(__space_to_prot(context), 8);
}

-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
{
-
	if (prev != next) {
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}

+static inline void switch_mm(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 44aeaa9c039f..6308749359e4 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -361,7 +361,7 @@
	ENTRY_SAME(ni_syscall)	/* 263: reserved for vserver */
	ENTRY_SAME(add_key)
	ENTRY_SAME(request_key)		/* 265 */
-	ENTRY_SAME(keyctl)
+	ENTRY_COMP(keyctl)
	ENTRY_SAME(ioprio_set)
	ENTRY_SAME(ioprio_get)
	ENTRY_SAME(inotify_init)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 32ec22146141..9fd95fec9717 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -367,7 +367,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
	case 15:	/* Data TLB miss fault/Data page fault */
		/* send SIGSEGV when outside of vma */
		if (!vma ||
-		    address < vma->vm_start || address > vma->vm_end) {
+		    address < vma->vm_start || address >= vma->vm_end) {
			si.si_signo = SIGSEGV;
			si.si_code = SEGV_MAPERR;
			break;
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 09bde6e34f5d..548d9a411a0d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -23,12 +23,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	PAGE_SIZE

-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE	0x20000000
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE	(is_32bit_task() ? 0x000400000UL : \
+				   0x100000000UL)

#define ELF_CORE_EFLAGS	(is_elf2_task() ? 2 : 0)

diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c119044cad0d..8ac0bd2bddb0 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -614,6 +614,18 @@ _GLOBAL(kexec_sequence)
	li	r0,0
	std	r0,16(r1)

+BEGIN_FTR_SECTION
+	/*
+	 * This is the best time to turn AMR/IAMR off.
+	 * key 0 is used in radix for supervisor<->user
+	 * protection, but on hash key 0 is reserved
+	 * ideally we want to enter with a clean state.
+	 * NOTE, we rely on r0 being 0 from above.
+	 */
+	mtspr	SPRN_IAMR,r0
+	mtspr	SPRN_AMOR,r0
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1d48880b3cc1..baf7622d5a34 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -160,14 +160,13 @@ extern unsigned int vdso_enabled;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	4096

-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. 64-bit
-   tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_compat_task() ? \
-				(STACK_TOP / 3 * 2) : \
-				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE		(is_compat_task() ? 0x000400000UL : \
+					 0x100000000UL)

/* This yields a mask that user programs can use to figure out what
   instruction set this CPU supports. */
diff --git a/arch/sparc/include/asm/asm-prototypes.h b/arch/sparc/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..d381e11c5dbb
--- /dev/null
+++ b/arch/sparc/include/asm/asm-prototypes.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <asm/xor.h>
+#include <asm/checksum.h>
+#include <asm/trap_block.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+#include <asm/ftrace.h>
+#include <asm/cacheflush.h>
+#include <asm/oplib.h>
+#include <linux/atomic.h>
+
+void *__memscan_zero(void *, size_t);
+void *__memscan_generic(void *, int, size_t);
+void *__bzero(void *, size_t);
+void VISenter(void); /* Dummy prototype to supress warning */
+#undef memcpy
+#undef memset
+void *memcpy(void *dest, const void *src, size_t n);
+void *memset(void *s, int c, size_t n);
+typedef int TItype __attribute__((mode(TI)));
+TItype __multi3(TItype a, TItype b);
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 1c6a1bde5138..ce17c3094ba6 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -62,19 +62,23 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */	\
ENDPROC(atomic_fetch_##op);					\
EXPORT_SYMBOL(atomic_fetch_##op);

-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
+ATOMIC_OP(add)
+ATOMIC_OP_RETURN(add)
+ATOMIC_FETCH_OP(add)

-ATOMIC_OPS(add)
-ATOMIC_OPS(sub)
+ATOMIC_OP(sub)
+ATOMIC_OP_RETURN(sub)
+ATOMIC_FETCH_OP(sub)

-#undef ATOMIC_OPS
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+ATOMIC_OP(and)
+ATOMIC_FETCH_OP(and)

-ATOMIC_OPS(and)
-ATOMIC_OPS(or)
-ATOMIC_OPS(xor)
+ATOMIC_OP(or)
+ATOMIC_FETCH_OP(or)
+
+ATOMIC_OP(xor)
+ATOMIC_FETCH_OP(xor)

-#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -124,19 +128,23 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */	\
ENDPROC(atomic64_fetch_##op);					\
EXPORT_SYMBOL(atomic64_fetch_##op);

-#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
+ATOMIC64_OP(add)
+ATOMIC64_OP_RETURN(add)
+ATOMIC64_FETCH_OP(add)
+
+ATOMIC64_OP(sub)
+ATOMIC64_OP_RETURN(sub)
+ATOMIC64_FETCH_OP(sub)

-ATOMIC64_OPS(add)
-ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_FETCH_OP(and)

-#undef ATOMIC64_OPS
-#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
+ATOMIC64_OP(or)
+ATOMIC64_FETCH_OP(or)

-ATOMIC64_OPS(and)
-ATOMIC64_OPS(or)
-ATOMIC64_OPS(xor)
+ATOMIC64_OP(xor)
+ATOMIC64_FETCH_OP(xor)

-#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S
index f6732174fe6b..6cfa521f444d 100644
--- a/arch/sparc/lib/checksum_64.S
+++ b/arch/sparc/lib/checksum_64.S
@@ -38,6 +38,7 @@ csum_partial_fix_alignment:

	.align	32
	.globl	csum_partial
+	.type	csum_partial,#function
	EXPORT_SYMBOL(csum_partial)
csum_partial:		/* %o0=buff, %o1=len, %o2=sum */
	prefetch	[%o0 + 0x000], #n_reads
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
index 0ecbafc30fd0..b1051e77c49a 100644
--- a/arch/sparc/lib/csum_copy.S
+++ b/arch/sparc/lib/csum_copy.S
@@ -65,6 +65,7 @@
	add		%o5, %o4, %o4

	.globl	FUNC_NAME
+	.type	FUNC_NAME,#function
	EXPORT_SYMBOL(FUNC_NAME)
FUNC_NAME:	/* %o0=src, %o1=dst, %o2=len, %o3=sum */
	LOAD(prefetch, %o0 + 0x000, #n_reads)
diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S
index daa96f4b03e6..5efee1f4be36 100644
--- a/arch/sparc/lib/memscan_64.S
+++ b/arch/sparc/lib/memscan_64.S
@@ -14,6 +14,8 @@
	.text
	.align	32
	.globl		__memscan_zero, __memscan_generic
+	.type		__memscan_zero,#function
+	.type		__memscan_generic,#function
	.globl		memscan
	EXPORT_SYMBOL(__memscan_zero)
	EXPORT_SYMBOL(__memscan_generic)
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index bb539b42b088..e23338dbfc43 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -63,6 +63,7 @@
__bzero_begin:

	.globl	__bzero
+	.type	__bzero,#function
	.globl	memset
	EXPORT_SYMBOL(__bzero)
	EXPORT_SYMBOL(memset)
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index cd0e32bbcb1d..f80cfc64c55b 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -78,8 +78,8 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		return 0;

	refs = 0;
-	head = pmd_page(pmd);
-	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	page = pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	head = compound_head(page);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739150e7..f960a043cdeb 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,

static bool avx2_usable(void)
{
-	if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+	if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
		&& boot_cpu_has(X86_FEATURE_BMI1)
		&& boot_cpu_has(X86_FEATURE_BMI2))
		return true;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 3762536619f8..c72ec880ba7d 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -245,12 +245,13 @@ extern int force_personality32;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	4096

-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
+						  0x100000000UL)

/* This yields a mask that user programs can use to figure out what
   instruction set this CPU supports.  This could be done in user space,
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index d8b5f8ab8ef9..526d0606bd72 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -417,6 +417,8 @@
#define MSR_IA32_TSC_ADJUST             0x0000003b
#define MSR_IA32_BNDCFGS		0x00000d90

+#define MSR_IA32_BNDCFGS_RSVD		0x00000ffc
+
#define MSR_IA32_XSS			0x00000da0

#define FEATURE_CONTROL_LOCKED				(1<<0)
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 35058c2c0eea..9368fecca3ee 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -144,6 +144,14 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
+
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e06ec5333da1..9e1656611df3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3200,7 +3200,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_IA32_BNDCFGS:
-		if (!kvm_mpx_supported())
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
			return 1;
		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
		break;
@@ -3282,7 +3283,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_BNDCFGS:
-		if (!kvm_mpx_supported())
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+			return 1;
+		if (is_noncanonical_address(data & PAGE_MASK) ||
+		    (data & MSR_IA32_BNDCFGS_RSVD))
			return 1;
		vmcs_write64(GUEST_BNDCFGS, data);
		break;
@@ -6588,7 +6593,6 @@ static __init int hardware_setup(void)
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);

	memcpy(vmx_msr_bitmap_legacy_x2apic_apicv,
			vmx_msr_bitmap_legacy, PAGE_SIZE);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6a13d0924a66..fd9aa7091562 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -791,24 +791,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
}

/**
- * blk_release_queue: - release a &struct request_queue when it is no longer needed
- * @kobj:    the kobj belonging to the request queue to be released
+ * __blk_release_queue - release a request queue when it is no longer needed
+ * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
- *     blk_release_queue is the pair to blk_init_queue() or
- *     blk_queue_make_request().  It should be called when a request queue is
- *     being released; typically when a block device is being de-registered.
- *     Currently, its primary task it to free all the &struct request
- *     structures that were allocated to the queue and the queue itself.
+ *     blk_release_queue is the counterpart of blk_init_queue(). It should be
+ *     called when a request queue is being released; typically when a block
+ *     device is being de-registered. Its primary task it to free the queue
+ *     itself.
 *
- * Note:
+ * Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
- **/
-static void blk_release_queue(struct kobject *kobj)
+ *
+ *     Although blk_release_queue() may be called with preemption disabled,
+ *     __blk_release_queue() may sleep.
+ */
+static void __blk_release_queue(struct work_struct *work)
{
-	struct request_queue *q =
-		container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q = container_of(work, typeof(*q), release_work);

	wbt_exit(q);
	bdi_put(q->backing_dev_info);
@@ -844,6 +845,15 @@ static void blk_release_queue(struct kobject *kobj)
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

+static void blk_release_queue(struct kobject *kobj)
+{
+	struct request_queue *q =
+		container_of(kobj, struct request_queue, kobj);
+
+	INIT_WORK(&q->release_work, __blk_release_queue);
+	schedule_work(&q->release_work);
+}
+
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 33b4b902741a..2df45ec8d935 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -272,6 +272,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
		else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
			value = PM_QOS_LATENCY_ANY;
+		else
+			return -EINVAL;
	}
	ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
	return ret < 0 ? ret : n;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 136854970489..a2f36a2a30fd 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -60,6 +60,8 @@ static LIST_HEAD(wakeup_sources);

static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);

+DEFINE_STATIC_SRCU(wakeup_srcu);
+
static struct wakeup_source deleted_ws = {
	.name = "deleted",
	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
@@ -198,7 +200,7 @@ void wakeup_source_remove(struct wakeup_source *ws)
	spin_lock_irqsave(&events_lock, flags);
	list_del_rcu(&ws->entry);
	spin_unlock_irqrestore(&events_lock, flags);
-	synchronize_rcu();
+	synchronize_srcu(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

@@ -332,12 +334,12 @@ void device_wakeup_detach_irq(struct device *dev)
void device_wakeup_arm_wake_irqs(void)
{
	struct wakeup_source *ws;
+	int srcuidx;

-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
		dev_pm_arm_wake_irq(ws->wakeirq);
-
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
}

/**
@@ -348,12 +350,12 @@ void device_wakeup_arm_wake_irqs(void)
void device_wakeup_disarm_wake_irqs(void)
{
	struct wakeup_source *ws;
+	int srcuidx;

-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
		dev_pm_disarm_wake_irq(ws->wakeirq);
-
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
}

/**
@@ -805,10 +807,10 @@ EXPORT_SYMBOL_GPL(pm_wakeup_event);
void pm_print_active_wakeup_sources(void)
{
	struct wakeup_source *ws;
-	int active = 0;
+	int srcuidx, active = 0;
	struct wakeup_source *last_activity_ws = NULL;

-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		if (ws->active) {
			pr_debug("active wakeup source: %s\n", ws->name);
@@ -824,7 +826,7 @@ void pm_print_active_wakeup_sources(void)
	if (!active && last_activity_ws)
		pr_debug("last active wakeup source: %s\n",
			last_activity_ws->name);
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);

@@ -951,8 +953,9 @@ void pm_wakep_autosleep_enabled(bool set)
{
	struct wakeup_source *ws;
	ktime_t now = ktime_get();
+	int srcuidx;

-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		spin_lock_irq(&ws->lock);
		if (ws->autosleep_enabled != set) {
@@ -966,7 +969,7 @@ void pm_wakep_autosleep_enabled(bool set)
		}
		spin_unlock_irq(&ws->lock);
	}
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */

@@ -1027,15 +1030,16 @@ static int print_wakeup_source_stats(struct seq_file *m,
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
	struct wakeup_source *ws;
+	int srcuidx;

	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
		"expire_count\tactive_since\ttotal_time\tmax_time\t"
		"last_change\tprevent_suspend_time\n");

-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
		print_wakeup_source_stats(m, ws);
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);

	print_wakeup_source_stats(m, &deleted_ws);

diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index a9482023d7d3..dad4e5bad827 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1204,7 +1204,9 @@ static int atmel_sha_finup(struct ahash_request *req)
	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
-	if (err1 == -EINPROGRESS || err1 == -EBUSY)
+	if (err1 == -EINPROGRESS ||
+	    (err1 == -EBUSY && (ahash_request_flags(req) &
+				CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err1;

	/*
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b4e3f4ef5c05..20772c8abf48 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -881,10 +881,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

+#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

@@ -903,6 +903,14 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
#endif

	ablkcipher_unmap(jrdev, edesc, req);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block. This is used e.g. by the CTS mode.
+	 */
+	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+				 ivsize, 0);
+
	kfree(edesc);

	ablkcipher_request_complete(req, err);
@@ -913,10 +921,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

+#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

@@ -934,6 +942,14 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
#endif

	ablkcipher_unmap(jrdev, edesc, req);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block.
+	 */
+	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+				 ivsize, 0);
+
	kfree(edesc);

	ablkcipher_request_complete(req, err);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index da4f94eab3da..718a03293ab6 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -396,7 +396,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
-		wait_for_completion_interruptible(&result.completion);
+		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 1bb2816a9b4d..c425d4adaf2a 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -149,7 +149,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
-		wait_for_completion_interruptible(&result.completion);
+		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0bba6a19d36a..79791c690858 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -816,7 +816,7 @@ static void talitos_unregister_rng(struct device *dev)
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
-#define TALITOS_MAX_KEY_SIZE		96
+#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
@@ -1495,6 +1495,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

+	if (keylen > TALITOS_MAX_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index b372aad3b449..045d6d311bde 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -528,7 +528,8 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
		}
	}

-	efi_memattr_init();
+	if (efi_enabled(EFI_MEMMAP))
+		efi_memattr_init();

	/* Parse the EFI Properties table if it exists */
	if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 2086e7e68de4..116ea4bb31cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1685,7 +1685,8 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
-	mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
+	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
+	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 665a64f1611e..a87bdc22f82c 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -82,10 +82,15 @@ int hv_post_message(union hv_connection_id connection_id,
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);
-	put_cpu_ptr(hv_cpu);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

+	/* Preemption must remain disabled until after the hypercall
+	 * so some other thread can't get scheduled onto this cpu and
+	 * corrupt the per-cpu post_msg_page
+	 */
+	put_cpu_ptr(hv_cpu);
+
	return status & 0xFFFF;
}

diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index c132f29322cc..dbffb7ab6203 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -645,6 +645,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
	int enabled;
	u64 val;

+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
	if (gic_irq_in_rdist(d))
		return -EINVAL;

diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 962dcbcef8b5..0dcda0b9b0cc 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -221,7 +221,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
	/* Wait for 100ms as Octeon resets. */
	mdelay(100);

-	if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
+	if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
		dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
			oct->octeon_id);
		return 1;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index bdec051107a6..d62a5096768e 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -44,7 +44,7 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct)
	/* Wait for 10ms as Octeon resets. */
	mdelay(100);

-	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
+	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
		return 1;
	}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index fb0951929be9..10c85d3525a4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2076,12 +2076,12 @@ static void detach_ulds(struct adapter *adap)

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
+
	for (i = 0; i < CXGB4_ULD_MAX; i++)
-		if (adap->uld && adap->uld[i].handle) {
+		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
					     CXGB4_STATE_DETACH);
-			adap->uld[i].handle = NULL;
-		}
+
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
@@ -5089,8 +5089,10 @@ static void remove_one(struct pci_dev *pdev)
		 */
		destroy_workqueue(adapter->workq);

-		if (is_uld(adapter))
+		if (is_uld(adapter)) {
			detach_ulds(adapter);
+			t4_uld_clean_up(adapter);
+		}

		disable_interrupts(adapter);

@@ -5167,7 +5169,11 @@ static void shutdown_one(struct pci_dev *pdev)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

-		t4_uld_clean_up(adapter);
+		if (is_uld(adapter)) {
+			detach_ulds(adapter);
+			t4_uld_clean_up(adapter);
+		}
+
		disable_interrupts(adapter);
		disable_msi(adapter);

diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index d0868c2320da..dbbc2b7f0d46 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -589,22 +589,37 @@ void t4_uld_mem_free(struct adapter *adap)
	kfree(adap->uld);
}

+/* This function should be called with uld_mutex taken. */
+static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
+{
+	if (adap->uld[type].handle) {
+		adap->uld[type].handle = NULL;
+		adap->uld[type].add = NULL;
+		release_sge_txq_uld(adap, type);
+
+		if (adap->flags & FULL_INIT_DONE)
+			quiesce_rx_uld(adap, type);
+
+		if (adap->flags & USING_MSIX)
+			free_msix_queue_irqs_uld(adap, type);
+
+		free_sge_queues_uld(adap, type);
+		free_queues_uld(adap, type);
+	}
+}
+
void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

-	if (!adap->uld)
-		return;
+	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;
-		if (adap->flags & FULL_INIT_DONE)
-			quiesce_rx_uld(adap, i);
-		if (adap->flags & USING_MSIX)
-			free_msix_queue_irqs_uld(adap, i);
-		free_sge_queues_uld(adap, i);
-		free_queues_uld(adap, i);
+
+		cxgb4_shutdown_uld_adapter(adap, i);
	}
+	mutex_unlock(&uld_mutex);
}

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)

@@ -782,15 +797,8 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
-		adap->uld[type].handle = NULL;
-		adap->uld[type].add = NULL;
-		release_sge_txq_uld(adap, type);
-		if (adap->flags & FULL_INIT_DONE)
-			quiesce_rx_uld(adap, type);
-		if (adap->flags & USING_MSIX)
-			free_msix_queue_irqs_uld(adap, type);
-		free_sge_queues_uld(adap, type);
-		free_queues_uld(adap, type);
+
+		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8fa23f6a1f67..2eb54d36e16e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -464,6 +464,8 @@ static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
	if (!perm_addr)
		return;

+	memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
	mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f778436a2d28..00104de79d0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2771,8 +2771,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
-	stats->tx_carrier_errors =
-		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 44f59b1d6f0f..9c4b74d44234 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -67,6 +67,7 @@ enum {

enum {
	MLX5_DROP_NEW_HEALTH_WORK,
+	MLX5_DROP_NEW_RECOVERY_WORK,
};

static u8 get_nic_state(struct mlx5_core_dev *dev)
@@ -193,7 +194,7 @@ static void health_care(struct work_struct *work)
	mlx5_handle_bad_state(dev);

	spin_lock(&health->wq_lock);
-	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+	if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
		schedule_delayed_work(&health->recover_work, recover_delay);
	else
		dev_err(&dev->pdev->dev,
@@ -314,6 +315,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
	init_timer(&health->timer);
	health->sick = 0;
	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+	clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

@@ -336,11 +338,22 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)

	spin_lock(&health->wq_lock);
	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
	spin_unlock(&health->wq_lock);
	cancel_delayed_work_sync(&health->recover_work);
	cancel_work_sync(&health->work);
}

+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+
+	spin_lock(&health->wq_lock);
+	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+	spin_unlock(&health->wq_lock);
+	cancel_delayed_work_sync(&dev->priv.health.recover_work);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 9862a741b32a..23173be1cbc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1236,7 +1236,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
	int err = 0;

	if (cleanup)
-		mlx5_drain_health_wq(dev);
+		mlx5_drain_health_recovery(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 16484f24b7db..5995ca1a43f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3829,6 +3829,9 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

+	if (netif_is_bridge_port(vlan_dev))
+		return 0;
+
	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 2ae852454780..a9ce82d3e9cf 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1505,8 +1505,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
		*index = entry->index;
		resolved = false;
	} else if (removing) {
-		ofdpa_neigh_del(trans, found);
		*index = found->index;
+		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		ofdpa_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index c60c2d4c646a..5a1b9b219a42 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4171,7 +4171,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
	 * recipients
	 */
	if (is_mc_recip) {
-		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
		unsigned int depth, i;

		memset(inbuf, 0, sizeof(inbuf));
@@ -4319,7 +4319,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
		efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
	} else {
		efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
-				       MC_CMD_FILTER_OP_IN_LEN,
+				       MC_CMD_FILTER_OP_EXT_IN_LEN,
				       NULL, 0, rc);
	}
}
@@ -4452,7 +4452,7 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
				      struct efx_filter_spec *spec)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
-	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
	struct efx_filter_spec *saved_spec;
	unsigned int hash, i, depth = 1;
	bool replacing = false;
@@ -4939,7 +4939,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
-	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	int rc;
@@ -5033,12 +5033,9 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *uc;
-	int addr_count;
	unsigned int i;

-	addr_count = netdev_uc_count(net_dev);
	table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
-	table->dev_uc_count = 1 + addr_count;
	ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
	i = 1;
	netdev_for_each_uc_addr(uc, net_dev) {
@@ -5049,6 +5046,8 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
		ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
		i++;
	}
+
+	table->dev_uc_count = i;
}

static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
@@ -5056,11 +5055,10 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *mc;
-	unsigned int i, addr_count;
+	unsigned int i;

	table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));

-	addr_count = netdev_mc_count(net_dev);
	i = 0;
	netdev_for_each_mc_addr(mc, net_dev) {
		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 199459bd6961..6ec8fc9aad8f 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -45,9 +45,17 @@ struct geneve_net {

static unsigned int geneve_net_id;

+struct geneve_dev_node {
+	struct hlist_node hlist;
+	struct geneve_dev *geneve;
+};
+
/* Pseudo network device */
struct geneve_dev {
-	struct hlist_node  hlist;	/* vni hash table */
+	struct geneve_dev_node hlist4;	/* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+	struct geneve_dev_node hlist6;	/* vni hash table for IPv6 socket */
+#endif
	struct net	   *net;	/* netns for packet i/o */
	struct net_device  *dev;	/* netdev for geneve tunnel */
	struct ip_tunnel_info info;
@@ -123,16 +131,16 @@ static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
					__be32 addr, u8 vni[])
{
	struct hlist_head *vni_list_head;
-	struct geneve_dev *geneve;
+	struct geneve_dev_node *node;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
-	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
-		if (eq_tun_id_and_vni((u8 *)&geneve->info.key.tun_id, vni) &&
-		    addr == geneve->info.key.u.ipv4.dst)
-			return geneve;
+	hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
+		if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) &&
+		    addr == node->geneve->info.key.u.ipv4.dst)
+			return node->geneve;
	}
	return NULL;
}
@@ -142,16 +150,16 @@ static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
					 struct in6_addr addr6, u8 vni[])
{
	struct hlist_head *vni_list_head;
-	struct geneve_dev *geneve;
+	struct geneve_dev_node *node;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
-	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
-		if (eq_tun_id_and_vni((u8 *)&geneve->info.key.tun_id, vni) &&
-		    ipv6_addr_equal(&addr6, &geneve->info.key.u.ipv6.dst))
-			return geneve;
+	hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
+		if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) &&
+		    ipv6_addr_equal(&addr6, &node->geneve->info.key.u.ipv6.dst))
+			return node->geneve;
	}
	return NULL;
}
@@ -579,6 +587,7 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
{
	struct net *net = geneve->net;
	struct geneve_net *gn = net_generic(net, geneve_net_id);
+	struct geneve_dev_node *node;
	struct geneve_sock *gs;
	__u8 vni[3];
	__u32 hash;
@@ -597,15 +606,20 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
out:
	gs->collect_md = geneve->collect_md;
#if IS_ENABLED(CONFIG_IPV6)
-	if (ipv6)
+	if (ipv6) {
		rcu_assign_pointer(geneve->sock6, gs);
-	else
+		node = &geneve->hlist6;
+	} else
#endif
+	{
		rcu_assign_pointer(geneve->sock4, gs);
+		node = &geneve->hlist4;
+	}
+	node->geneve = geneve;

	tunnel_id_to_vni(geneve->info.key.tun_id, vni);
	hash = geneve_net_vni_hash(vni);
-	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
+	hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]);
	return 0;
}

@@ -632,8 +646,10 @@ static int geneve_stop(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

-	if (!hlist_unhashed(&geneve->hlist))
-		hlist_del_rcu(&geneve->hlist);
+	hlist_del_init_rcu(&geneve->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+	hlist_del_init_rcu(&geneve->hlist6.hlist);
+#endif
	geneve_sock_release(geneve);
	return 0;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5ede87f30463..09b0becb3843 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -753,7 +753,7 @@ static int netvsc_set_channels(struct net_device *net,
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

-	if (count > net->num_tx_queues || count > net->num_rx_queues)
+	if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX)
		return -EINVAL;

	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
@@ -1142,7 +1142,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,

	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
-			if (indir[i] >= dev->num_rx_queues)
+			if (indir[i] >= VRSS_CHANNEL_MAX)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index ed0d10f54f26..c3065236ffcc 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640,
	if (overflow) {
		pr_debug("tx timestamp queue overflow, count %d\n", overflow);
		while (skb) {
-			skb_complete_tx_timestamp(skb, NULL);
+			kfree_skb(skb);
			skb = skb_dequeue(&dp83640->tx_queue);
		}
		return;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index da5b39268370..4b7a6e0d4c39 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -611,6 +611,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
	if ((regval & 0xFF) == 0xFF) {
		phy_init_hw(phydev);
		phydev->link = 0;
+		if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+			phydev->drv->config_intr(phydev);
	}

	return 0;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 4d4173d25dd0..d88ae3c2edbf 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -106,7 +106,7 @@ struct major_info {
	struct rcu_head rcu;
	dev_t major;
	struct idr minor_idr;
-	struct mutex minor_lock;
+	spinlock_t minor_lock;
	const char *device_name;
	struct list_head next;
};
@@ -416,15 +416,15 @@ int tap_get_minor(dev_t major, struct tap_dev *tap)
		goto unlock;
	}

-	mutex_lock(&tap_major->minor_lock);
-	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL);
+	spin_lock(&tap_major->minor_lock);
+	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
	if (retval >= 0) {
		tap->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(tap->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
-	mutex_unlock(&tap_major->minor_lock);
+	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
@@ -442,12 +442,12 @@ void tap_free_minor(dev_t major, struct tap_dev *tap)
		goto unlock;
	}

-	mutex_lock(&tap_major->minor_lock);
+	spin_lock(&tap_major->minor_lock);
	if (tap->minor) {
		idr_remove(&tap_major->minor_idr, tap->minor);
		tap->minor = 0;
	}
-	mutex_unlock(&tap_major->minor_lock);
+	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
@@ -467,13 +467,13 @@ static struct tap_dev *dev_get_by_tap_file(int major, int minor)
		goto unlock;
	}

-	mutex_lock(&tap_major->minor_lock);
+	spin_lock(&tap_major->minor_lock);
	tap = idr_find(&tap_major->minor_idr, minor);
	if (tap) {
		dev = tap->dev;
		dev_hold(dev);
	}
-	mutex_unlock(&tap_major->minor_lock);
+	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
@@ -1227,7 +1227,7 @@ static int tap_list_add(dev_t major, const char *device_name)
	tap_major->major = MAJOR(major);

	idr_init(&tap_major->minor_idr);
-	mutex_init(&tap_major->minor_lock);
+	spin_lock_init(&tap_major->minor_lock);

	tap_major->device_name = device_name;

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d9d8f4f43f90..7e171d7aed66 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1709,6 +1709,7 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
	flush_work(&vi->config_work);

	netif_device_detach(vi->dev);
+	netif_tx_disable(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev)) {
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 2e62c4d1b220..608ecaf16fe4 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -788,15 +788,10 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
-	struct net_device *port_dev;
-	struct list_head *iter;

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

-	netdev_for_each_lower_dev(dev, port_dev, iter)
-		vrf_del_slave(dev, port_dev);
-
	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
@@ -1247,6 +1242,12 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
+	struct net_device *port_dev;
+	struct list_head *iter;
+
+	netdev_for_each_lower_dev(dev, port_dev, iter)
+		vrf_del_slave(dev, port_dev);
+
	unregister_netdevice_queue(dev, head);
}

diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 4574b95c7938..3911fd19635d 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -228,15 +228,15 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
-	struct vxlan_dev *vxlan;
+	struct vxlan_dev_node *node;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

-	hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
-		if (vxlan->default_dst.remote_vni == vni)
-			return vxlan;
+	hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
+		if (node->vxlan->default_dst.remote_vni == vni)
+			return node->vxlan;
	}

	return NULL;
@@ -2361,17 +2361,22 @@ static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
-	hlist_del_init_rcu(&vxlan->hlist);
+	hlist_del_init_rcu(&vxlan->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+	hlist_del_init_rcu(&vxlan->hlist6.hlist);
+#endif
	spin_unlock(&vn->sock_lock);
}

-static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
+			     struct vxlan_dev_node *node)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__be32 vni = vxlan->default_dst.remote_vni;

+	node->vxlan = vxlan;
	spin_lock(&vn->sock_lock);
-	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+	hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}

@@ -2817,6 +2822,7 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = NULL;
+	struct vxlan_dev_node *node;

	if (!vxlan->cfg.no_share) {
		spin_lock(&vn->sock_lock);
@@ -2834,12 +2840,16 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
	if (IS_ERR(vs))
		return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
-	if (ipv6)
+	if (ipv6) {
		rcu_assign_pointer(vxlan->vn6_sock, vs);
-	else
+		node = &vxlan->hlist6;
+	} else
#endif
+	{
		rcu_assign_pointer(vxlan->vn4_sock, vs);
-	vxlan_vs_add_dev(vs, vxlan);
+		node = &vxlan->hlist4;
+	}
+	vxlan_vs_add_dev(vs, vxlan, node);
	return 0;
}

diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
|
|
index 5bc2ba214735..3722f230334a 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
|
|
@@ -705,7 +705,7 @@ int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
|
|
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
|
|
struct sk_buff_head *pktq, uint totlen)
|
|
{
|
|
- struct sk_buff *glom_skb;
|
|
+ struct sk_buff *glom_skb = NULL;
|
|
struct sk_buff *skb;
|
|
u32 addr = sdiodev->sbwad;
|
|
int err = 0;
|
|
@@ -726,10 +726,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
|
|
return -ENOMEM;
|
|
err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
|
|
glom_skb);
|
|
- if (err) {
|
|
- brcmu_pkt_buf_free_skb(glom_skb);
|
|
+ if (err)
|
|
goto done;
|
|
- }
|
|
|
|
skb_queue_walk(pktq, skb) {
|
|
memcpy(skb->data, glom_skb->data, skb->len);
|
|
@@ -740,6 +738,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
|
|
pktq);
|
|
|
|
done:
|
|
+ brcmu_pkt_buf_free_skb(glom_skb);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
|
index 017e20b34304..9d0cee91eb83 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
|
@@ -4835,6 +4835,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
|
|
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
|
|
GFP_KERNEL);
|
|
} else if (ieee80211_is_action(mgmt->frame_control)) {
|
|
+ if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
|
|
+ brcmf_err("invalid action frame length\n");
|
|
+ err = -EINVAL;
|
|
+ goto exit;
|
|
+ }
|
|
af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
|
|
if (af_params == NULL) {
|
|
brcmf_err("unable to allocate frame\n");
|
|
@@ -6822,7 +6827,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
|
|
wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
|
|
if (!wiphy) {
|
|
brcmf_err("Could not allocate wiphy device\n");
|
|
- return NULL;
|
|
+ goto ops_out;
|
|
}
|
|
memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
|
|
set_wiphy_dev(wiphy, busdev);
|
|
@@ -6965,6 +6970,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
|
|
ifp->vif = NULL;
|
|
wiphy_out:
|
|
brcmf_free_wiphy(wiphy);
|
|
+ops_out:
|
|
kfree(ops);
|
|
return NULL;
|
|
}
|
|
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
|
|
index 408b521ee520..1094cab9e859 100644
|
|
--- a/drivers/nvmem/core.c
|
|
+++ b/drivers/nvmem/core.c
|
|
@@ -488,21 +488,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
|
|
|
|
rval = device_add(&nvmem->dev);
|
|
if (rval)
|
|
- goto out;
|
|
+ goto err_put_device;
|
|
|
|
if (config->compat) {
|
|
rval = nvmem_setup_compat(nvmem, config);
|
|
if (rval)
|
|
- goto out;
|
|
+ goto err_device_del;
|
|
}
|
|
|
|
if (config->cells)
|
|
nvmem_add_cells(nvmem, config);
|
|
|
|
return nvmem;
|
|
-out:
|
|
- ida_simple_remove(&nvmem_ida, nvmem->id);
|
|
- kfree(nvmem);
|
|
+
|
|
+err_device_del:
|
|
+ device_del(&nvmem->dev);
|
|
+err_put_device:
|
|
+ put_device(&nvmem->dev);
|
|
+
|
|
return ERR_PTR(rval);
|
|
}
|
|
EXPORT_SYMBOL_GPL(nvmem_register);
|
|
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
|
|
index e32ca2ef9e54..56c93f096de9 100644
|
|
--- a/drivers/parisc/ccio-dma.c
|
|
+++ b/drivers/parisc/ccio-dma.c
|
|
@@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
|
|
|
|
BUG_ON(!dev);
|
|
ioc = GET_IOC(dev);
|
|
+ if (!ioc)
|
|
+ return DMA_ERROR_CODE;
|
|
|
|
BUG_ON(size <= 0);
|
|
|
|
@@ -814,6 +816,10 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
|
|
|
|
BUG_ON(!dev);
|
|
ioc = GET_IOC(dev);
|
|
+ if (!ioc) {
|
|
+ WARN_ON(!ioc);
|
|
+ return;
|
|
+ }
|
|
|
|
DBG_RUN("%s() iovp 0x%lx/%x\n",
|
|
__func__, (long)iova, size);
|
|
@@ -918,6 +924,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
|
|
|
|
BUG_ON(!dev);
|
|
ioc = GET_IOC(dev);
|
|
+ if (!ioc)
|
|
+ return 0;
|
|
|
|
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
|
|
|
|
@@ -990,6 +998,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
|
|
|
|
BUG_ON(!dev);
|
|
ioc = GET_IOC(dev);
|
|
+ if (!ioc) {
|
|
+ WARN_ON(!ioc);
|
|
+ return;
|
|
+ }
|
|
|
|
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
|
|
__func__, nents, sg_virt(sglist), sglist->length);
|
|
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
|
|
index 1133b5cc88ca..5c63b920b471 100644
|
|
--- a/drivers/parisc/dino.c
|
|
+++ b/drivers/parisc/dino.c
|
|
@@ -154,7 +154,10 @@ struct dino_device
|
|
};
|
|
|
|
/* Looks nice and keeps the compiler happy */
|
|
-#define DINO_DEV(d) ((struct dino_device *) d)
|
|
+#define DINO_DEV(d) ({ \
|
|
+ void *__pdata = d; \
|
|
+ BUG_ON(!__pdata); \
|
|
+ (struct dino_device *)__pdata; })
|
|
|
|
|
|
/*
|
|
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
|
|
index 2ec2aef4d211..bc286cbbbc9b 100644
|
|
--- a/drivers/parisc/lba_pci.c
|
|
+++ b/drivers/parisc/lba_pci.c
|
|
@@ -111,8 +111,10 @@ static u32 lba_t32;
|
|
|
|
|
|
/* Looks nice and keeps the compiler happy */
|
|
-#define LBA_DEV(d) ((struct lba_device *) (d))
|
|
-
|
|
+#define LBA_DEV(d) ({ \
|
|
+ void *__pdata = d; \
|
|
+ BUG_ON(!__pdata); \
|
|
+ (struct lba_device *)__pdata; })
|
|
|
|
/*
|
|
** Only allow 8 subsidiary busses per LBA
|
|
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 33385e574433..87ad5fd6a7a2 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
return 0;

ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;

/*
* check if mask is >= than the current max IO Virt Address
@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
int pide;

ioc = GET_IOC(dev);
+ if (!ioc)
+ return DMA_ERROR_CODE;

/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -813,6 +817,10 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
offset = iova & ~IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;
@@ -952,6 +960,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;

/* Fast path single entry scatterlists. */
if (nents == 1) {
@@ -1037,6 +1047,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
__func__, nents, sg_virt(sglist), sglist->length);

ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }

#ifdef SBA_COLLECT_STATS
ioc->usg_calls++;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5c4933bb4b53..7fca928d894d 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2709,13 +2709,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
* related to the kernel should not use this.
*/
data = vt_get_shift_state();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_GETMOUSEREPORTING:
console_lock(); /* May be overkill */
data = mouse_reporting();
console_unlock();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_SETVESABLANK:
console_lock();
@@ -2724,7 +2724,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
break;
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 5075fd5c62c8..7465c3ea5dd5 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -927,17 +927,60 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

vaddr = elf_ppnt->p_vaddr;
+ /*
+ * If we are loading ET_EXEC or we have already performed
+ * the ET_DYN load_addr calculations, proceed normally.
+ */
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
- /* Try and get dynamic programs out of the way of the
- * default mmap base, as well as whatever program they
- * might try to exec. This is because the brk will
- * follow the loader, and is not movable. */
- load_bias = ELF_ET_DYN_BASE - vaddr;
- if (current->flags & PF_RANDOMIZE)
- load_bias += arch_mmap_rnd();
- load_bias = ELF_PAGESTART(load_bias);
+ /*
+ * This logic is run once for the first LOAD Program
+ * Header for ET_DYN binaries to calculate the
+ * randomization (load_bias) for all the LOAD
+ * Program Headers, and to calculate the entire
+ * size of the ELF mapping (total_size). (Note that
+ * load_addr_set is set to true later once the
+ * initial mapping is performed.)
+ *
+ * There are effectively two types of ET_DYN
+ * binaries: programs (i.e. PIE: ET_DYN with INTERP)
+ * and loaders (ET_DYN without INTERP, since they
+ * _are_ the ELF interpreter). The loaders must
+ * be loaded away from programs since the program
+ * may otherwise collide with the loader (especially
+ * for ET_EXEC which does not have a randomized
+ * position). For example to handle invocations of
+ * "./ld.so someprog" to test out a new version of
+ * the loader, the subsequent program that the
+ * loader loads must avoid the loader itself, so
+ * they cannot share the same load range. Sufficient
+ * room for the brk must be allocated with the
+ * loader as well, since brk must be available with
+ * the loader.
+ *
+ * Therefore, programs are loaded offset from
+ * ELF_ET_DYN_BASE and loaders are loaded into the
+ * independently randomized mmap region (0 load_bias
+ * without MAP_FIXED).
+ */
+ if (elf_interpreter) {
+ load_bias = ELF_ET_DYN_BASE;
+ if (current->flags & PF_RANDOMIZE)
+ load_bias += arch_mmap_rnd();
+ elf_flags |= MAP_FIXED;
+ } else
+ load_bias = 0;
+
+ /*
+ * Since load_bias is used for all subsequent loading
+ * calculations, we must lower it by the first vaddr
+ * so that the remaining calculations based on the
+ * ELF vaddrs will be correctly offset. The result
+ * is then page aligned.
+ */
+ load_bias = ELF_PAGESTART(load_bias - vaddr);
+
total_size = total_mapping_size(elf_phdata,
loc->elf_ex.e_phnum);
if (!total_size) {
diff --git a/fs/dcache.c b/fs/dcache.c
index 95d71eda8142..6dc078da0838 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1133,11 +1133,12 @@ void shrink_dcache_sb(struct super_block *sb)
LIST_HEAD(dispose);

freed = list_lru_walk(&sb->s_dentry_lru,
- dentry_lru_isolate_shrink, &dispose, UINT_MAX);
+ dentry_lru_isolate_shrink, &dispose, 1024);

this_cpu_sub(nr_dentry_unused, freed);
shrink_dentry_list(&dispose);
- } while (freed > 0);
+ cond_resched();
+ } while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

diff --git a/fs/exec.c b/fs/exec.c
index ce0901e65c40..e9908c3f0227 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -220,8 +220,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,

if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
- unsigned long ptr_size;
- struct rlimit *rlim;
+ unsigned long ptr_size, limit;

/*
* Since the stack will hold pointers to the strings, we
@@ -250,14 +249,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return page;

/*
- * Limit to 1/4-th the stack size for the argv+env strings.
+ * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
+ * (whichever is smaller) for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
- rlim = current->signal->rlim;
- if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+ limit = _STK_LIM / 4 * 3;
+ limit = min(limit, rlimit(RLIMIT_STACK) / 4);
+ if (size > limit)
goto fail;
}

diff --git a/fs/mount.h b/fs/mount.h
index 2826543a131d..20390e6357c9 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -58,6 +58,7 @@ struct mount {
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
+ struct list_head mnt_umounting; /* list entry for umount propagation */
#ifdef CONFIG_FSNOTIFY
struct hlist_head mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
diff --git a/fs/namespace.c b/fs/namespace.c
index cc1375eff88c..f6f7e57e2192 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -236,6 +236,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
+ INIT_LIST_HEAD(&mnt->mnt_umounting);
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
diff --git a/fs/pnode.c b/fs/pnode.c
index 5bc7896d122a..53d411a371ce 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p)
return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}

+static inline struct mount *last_slave(struct mount *p)
+{
+ return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
+}
+
static inline struct mount *next_slave(struct mount *p)
{
return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
@@ -162,6 +167,19 @@ static struct mount *propagation_next(struct mount *m,
}
}

+static struct mount *skip_propagation_subtree(struct mount *m,
+ struct mount *origin)
+{
+ /*
+ * Advance m such that propagation_next will not return
+ * the slaves of m.
+ */
+ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+ m = last_slave(m);
+
+ return m;
+}
+
static struct mount *next_group(struct mount *m, struct mount *origin)
{
while (1) {
@@ -413,65 +431,104 @@ void propagate_mount_unlock(struct mount *mnt)
}
}

-/*
- * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
- */
-static void mark_umount_candidates(struct mount *mnt)
+static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
-
- BUG_ON(parent == mnt);
-
- for (m = propagation_next(parent, parent); m;
- m = propagation_next(m, parent)) {
- struct mount *child = __lookup_mnt(&m->mnt,
- mnt->mnt_mountpoint);
- if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
- continue;
- if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
- SET_MNT_MARK(child);
- }
- }
+ CLEAR_MNT_MARK(mnt);
+ mnt->mnt.mnt_flags |= MNT_UMOUNT;
+ list_del_init(&mnt->mnt_child);
+ list_del_init(&mnt->mnt_umounting);
+ list_move_tail(&mnt->mnt_list, to_umount);
}

/*
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
-static void __propagate_umount(struct mount *mnt)
+static bool __propagate_umount(struct mount *mnt,
+ struct list_head *to_umount,
+ struct list_head *to_restore)
{
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
+ bool progress = false;
+ struct mount *child;

- BUG_ON(parent == mnt);
+ /*
+ * The state of the parent won't change if this mount is
+ * already unmounted or marked as without children.
+ */
+ if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
+ goto out;

- for (m = propagation_next(parent, parent); m;
- m = propagation_next(m, parent)) {
- struct mount *topper;
- struct mount *child = __lookup_mnt(&m->mnt,
- mnt->mnt_mountpoint);
- /*
- * umount the child only if the child has no children
- * and the child is marked safe to unmount.
- */
- if (!child || !IS_MNT_MARKED(child))
+ /* Verify topper is the only grandchild that has not been
+ * speculatively unmounted.
+ */
+ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
continue;
- CLEAR_MNT_MARK(child);
+ if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
+ continue;
+ /* Found a mounted child */
+ goto children;
+ }

- /* If there is exactly one mount covering all of child
- * replace child with that mount.
- */
- topper = find_topper(child);
- if (topper)
- mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
- topper);
+ /* Mark mounts that can be unmounted if not locked */
+ SET_MNT_MARK(mnt);
+ progress = true;
+
+ /* If a mount is without children and not locked umount it. */
+ if (!IS_MNT_LOCKED(mnt)) {
+ umount_one(mnt, to_umount);
+ } else {
+children:
+ list_move_tail(&mnt->mnt_umounting, to_restore);
+ }
+out:
+ return progress;
+}
+
+static void umount_list(struct list_head *to_umount,
+ struct list_head *to_restore)
+{
+ struct mount *mnt, *child, *tmp;
+ list_for_each_entry(mnt, to_umount, mnt_list) {
+ list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
+ /* topper? */
+ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
+ list_move_tail(&child->mnt_umounting, to_restore);
+ else
+ umount_one(child, to_umount);
+ }
+ }
+}

- if (list_empty(&child->mnt_mounts)) {
- list_del_init(&child->mnt_child);
- child->mnt.mnt_flags |= MNT_UMOUNT;
- list_move_tail(&child->mnt_list, &mnt->mnt_list);
+static void restore_mounts(struct list_head *to_restore)
+{
+ /* Restore mounts to a clean working state */
+ while (!list_empty(to_restore)) {
+ struct mount *mnt, *parent;
+ struct mountpoint *mp;
+
+ mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
+ CLEAR_MNT_MARK(mnt);
+ list_del_init(&mnt->mnt_umounting);
+
+ /* Should this mount be reparented? */
+ mp = mnt->mnt_mp;
+ parent = mnt->mnt_parent;
+ while (parent->mnt.mnt_flags & MNT_UMOUNT) {
+ mp = parent->mnt_mp;
+ parent = parent->mnt_parent;
}
+ if (parent != mnt->mnt_parent)
+ mnt_change_mountpoint(parent, mp, mnt);
+ }
+}
+
+static void cleanup_umount_visitations(struct list_head *visited)
+{
+ while (!list_empty(visited)) {
+ struct mount *mnt =
+ list_first_entry(visited, struct mount, mnt_umounting);
+ list_del_init(&mnt->mnt_umounting);
}
}

@@ -485,11 +542,68 @@ static void __propagate_umount(struct mount *mnt)
int propagate_umount(struct list_head *list)
{
struct mount *mnt;
+ LIST_HEAD(to_restore);
+ LIST_HEAD(to_umount);
+ LIST_HEAD(visited);
+
+ /* Find candidates for unmounting */
+ list_for_each_entry_reverse(mnt, list, mnt_list) {
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
+
+ /*
+ * If this mount has already been visited it is known that its
+ * entire peer group and all of their slaves in the propagation
+ * tree for the mountpoint have already been visited and there is
+ * no need to visit them again.
+ */
+ if (!list_empty(&mnt->mnt_umounting))
+ continue;
+
+ list_add_tail(&mnt->mnt_umounting, &visited);
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+ struct mount *child = __lookup_mnt(&m->mnt,
+ mnt->mnt_mountpoint);
+ if (!child)
+ continue;
+
+ if (!list_empty(&child->mnt_umounting)) {
+ /*
+ * If the child has already been visited it is
+ * known that its entire peer group and all of
+ * their slaves in the propagation tree for the
+ * mountpoint have already been visited and there
+ * is no need to visit this subtree again.
+ */
+ m = skip_propagation_subtree(m, parent);
+ continue;
+ } else if (child->mnt.mnt_flags & MNT_UMOUNT) {
+ /*
+ * We have come across a partially unmounted
+ * mount in the list that has not been visited
+ * yet. Remember it has been visited and continue
+ * about our merry way.
+ */
+ list_add_tail(&child->mnt_umounting, &visited);
+ continue;
+ }
+
+ /* Check the child and parents while progress is made */
+ while (__propagate_umount(child,
+ &to_umount, &to_restore)) {
+ /* Is the parent a umount candidate? */
+ child = child->mnt_parent;
+ if (list_empty(&child->mnt_umounting))
+ break;
+ }
+ }
+ }

- list_for_each_entry_reverse(mnt, list, mnt_list)
- mark_umount_candidates(mnt);
+ umount_list(&to_umount, &to_restore);
+ restore_mounts(&to_restore);
+ cleanup_umount_visitations(&visited);
+ list_splice_tail(&to_umount, list);

- list_for_each_entry(mnt, list, mnt_list)
- __propagate_umount(mnt);
return 0;
}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 01a696b0a4d3..3f03d0fc4250 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -580,6 +580,8 @@ struct request_queue {

size_t cmd_size;
void *rq_alloc_data;
+
+ struct work_struct release_work;
};

#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 96f1e88b767c..713753554895 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
(cpu) = cpumask_next_zero((cpu), (mask)), \
(cpu) < nr_cpu_ids;)

+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
+ (cpu) < nr_cpumask_bits; \
+ (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
/**
* for_each_cpu_and - iterate over every cpu in both masks
* @cpu: the (optionally unsigned) integer iterator
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index cb0ba9f2a9a2..fa7fd03cb5f9 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -44,6 +44,7 @@ struct list_lru_node {
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
struct list_lru_memcg *memcg_lrus;
#endif
+ long nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index c965d1165df6..77bdfcdd5a04 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -928,6 +928,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f5e625f53367..4341731f39a5 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -22,6 +22,7 @@ struct route_info {
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
+#include <net/lwtunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/route.h>
@@ -233,4 +234,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
return daddr;
}

+static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
+{
+ return a->dst.dev == b->dst.dev &&
+ a->rt6i_idev == b->rt6i_idev &&
+ ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
+ !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
+}
#endif
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 49a59202f85e..da7d6b89df77 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -221,9 +221,17 @@ struct vxlan_config {
bool no_share;
};

+struct vxlan_dev_node {
+ struct hlist_node hlist;
+ struct vxlan_dev *vxlan;
+};
+
/* Pseudo network device */
struct vxlan_dev {
- struct hlist_node hlist; /* vni hash table */
+ struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */
+#endif
struct list_head next; /* vxlan's per namespace list */
struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 904decd32783..7a6462b7f72a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -951,6 +951,11 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (err)
return err;

+ if (is_pointer_value(env, insn->src_reg)) {
+ verbose("R%d leaks addr into mem\n", insn->src_reg);
+ return -EACCES;
+ }
+
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
diff --git a/kernel/extable.c b/kernel/extable.c
index 2676d7f8baf6..4efaf26d7def 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -69,7 +69,7 @@ static inline int init_kernel_text(unsigned long addr)
return 0;
}

-int core_kernel_text(unsigned long addr)
+int notrace core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr < (unsigned long)_etext)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0a62a8f1caac..6de6eb5da71e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1769,6 +1769,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
/* Prior smp_mb__after_atomic() orders against prior enqueue. */
WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
+ smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
swake_up(&rdp_leader->nocb_wq);
}
}
@@ -2023,6 +2024,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
* nocb_gp_head, where they await a grace period.
*/
gotcbs = false;
+ smp_mb(); /* wakeup before ->nocb_head reads. */
for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
if (!rdp->nocb_gp_head)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dea138964b91..358c4aa921e7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5615,43 +5615,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}

-/*
- * Implement a for_each_cpu() variant that starts the scan at a given cpu
- * (@start), and wraps around.
- *
- * This is used to scan for idle CPUs; such that not all CPUs looking for an
- * idle CPU find the same CPU. The down-side is that tasks tend to cycle
- * through the LLC domain.
- *
- * Especially tbench is found sensitive to this.
- */
-
-static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
-{
- int next;
-
-again:
- next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
-
- if (*wrapped) {
- if (next >= start)
- return nr_cpumask_bits;
- } else {
- if (next >= nr_cpumask_bits) {
- *wrapped = 1;
- n = -1;
- goto again;
- }
- }
-
- return next;
-}
-
-#define for_each_cpu_wrap(cpu, mask, start, wrap) \
- for ((wrap) = 0, (cpu) = (start)-1; \
- (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)), \
- (cpu) < nr_cpumask_bits; )
-
#ifdef CONFIG_SCHED_SMT

static inline void set_idle_cores(int cpu, int val)
@@ -5711,7 +5674,7 @@ void __update_idle_core(struct rq *rq)
static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
{
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
- int core, cpu, wrap;
+ int core, cpu;

if (!static_branch_likely(&sched_smt_present))
return -1;
@@ -5721,7 +5684,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int

cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);

- for_each_cpu_wrap(core, cpus, target, wrap) {
+ for_each_cpu_wrap(core, cpus, target) {
bool idle = true;

for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -5787,7 +5750,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
u64 avg_cost, avg_idle = this_rq()->avg_idle;
u64 time, cost;
s64 delta;
- int cpu, wrap;
+ int cpu;

this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
if (!this_sd)
@@ -5804,7 +5767,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t

time = local_clock();

- for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+ for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
continue;
if (idle_cpu(cpu))
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 1b0b4fb12837..f463b6b7b378 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -480,6 +480,9 @@ enum s_alloc {
* Build an iteration mask that can exclude certain CPUs from the upwards
* domain traversal.
*
+ * Only CPUs that can arrive at this group should be considered to continue
+ * balancing.
+ *
* Asymmetric node setups can result in situations where the domain tree is of
* unequal depth, make sure to skip domains that already cover the entire
* range.
@@ -490,18 +493,31 @@ enum s_alloc {
*/
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
- const struct cpumask *span = sched_domain_span(sd);
+ const struct cpumask *sg_span = sched_group_cpus(sg);
struct sd_data *sdd = sd->private;
struct sched_domain *sibling;
int i;

- for_each_cpu(i, span) {
+ for_each_cpu(i, sg_span) {
sibling = *per_cpu_ptr(sdd->sd, i);
- if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+ /*
+ * Can happen in the asymmetric case, where these siblings are
+ * unused. The mask will not be empty because those CPUs that
+ * do have the top domain _should_ span the domain.
+ */
+ if (!sibling->child)
+ continue;
+
+ /* If we would not end up here, we can't continue from here */
+ if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
continue;

cpumask_set_cpu(i, sched_group_mask(sg));
}
+
+ /* We must not have empty masks here */
+ WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
}

/*
@@ -525,7 +541,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)

cpumask_clear(covered);

- for_each_cpu(i, span) {
+ for_each_cpu_wrap(i, span, cpu) {
struct cpumask *sg_span;

if (cpumask_test_cpu(i, covered))
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 81dedaab36cc..4731a0895760 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
}
EXPORT_SYMBOL(cpumask_any_but);

+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ int next;
+
+again:
+ next = cpumask_next(n, mask);
+
+ if (wrap && n < start && next >= start) {
+ return nr_cpumask_bits;
+
+ } else if (next >= nr_cpumask_bits) {
+ wrap = true;
+ n = -1;
+ goto again;
+ }
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 209b33e8c247..6560174edf2a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1561,8 +1561,8 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
get_page(page);
spin_unlock(ptl);
split_huge_page(page);
- put_page(page);
unlock_page(page);
+ put_page(page);
goto out_unlocked;
}

diff --git a/mm/list_lru.c b/mm/list_lru.c
index 234676e31edd..7a40fa2be858 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
l = list_lru_from_kmem(nlru, item);
list_add_tail(item, &l->list);
l->nr_items++;
+ nlru->nr_items++;
spin_unlock(&nlru->lock);
return true;
}
@@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
l = list_lru_from_kmem(nlru, item);
list_del_init(item);
l->nr_items--;
+ nlru->nr_items--;
spin_unlock(&nlru->lock);
return true;
}
@@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
- long count = 0;
- int memcg_idx;
+ struct list_lru_node *nlru;

- count += __list_lru_count_one(lru, nid, -1);
- if (list_lru_memcg_aware(lru)) {
- for_each_memcg_cache_index(memcg_idx)
- count += __list_lru_count_one(lru, nid, memcg_idx);
- }
- return count;
+ nlru = &lru->node[nid];
+ return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

@@ -226,6 +223,7 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
assert_spin_locked(&nlru->lock);
case LRU_REMOVED:
isolated++;
+ nlru->nr_items--;
/*
* If the lru lock has been dropped, our list
* traversal is now invalid and so we have to
diff --git a/mm/mmap.c b/mm/mmap.c
index cde1262776dd..6355fcf253fd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2232,7 +2232,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)

/* Guard against exceeding limits of the address space. */
address &= PAGE_MASK;
- if (address >= TASK_SIZE)
+ if (address >= (TASK_SIZE & PAGE_MASK))
return -ENOMEM;
address += PAGE_SIZE;

diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 056e6ac49d8f..57e94a1b57e1 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -323,7 +323,8 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
__mdb_entry_to_br_ip(entry, &complete_info->ip);
mdb.obj.complete_priv = complete_info;
mdb.obj.complete = br_mdb_complete;
- switchdev_port_obj_add(port_dev, &mdb.obj);
+ if (switchdev_port_obj_add(port_dev, &mdb.obj))
+ kfree(complete_info);
}
} else if (port_dev && type == RTM_DELMDB) {
switchdev_port_obj_del(port_dev, &mdb.obj);
diff --git a/net/core/dev.c b/net/core/dev.c
index 9debc1b26ce9..84d27d3145f9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4627,6 +4627,13 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
}
EXPORT_SYMBOL(gro_find_complete_by_type);

+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+ skb_dst_drop(skb);
+ secpath_reset(skb);
+ kmem_cache_free(skbuff_head_cache, skb);
+}
+
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
switch (ret) {
@@ -4640,13 +4647,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
break;

case GRO_MERGED_FREE:
- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
- skb_dst_drop(skb);
- secpath_reset(skb);
- kmem_cache_free(skbuff_head_cache, skb);
- } else {
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else
__kfree_skb(skb);
- }
break;

case GRO_HELD:
@@ -4718,10 +4722,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
break;

case GRO_DROP:
- case GRO_MERGED_FREE:
napi_reuse_skb(napi, skb);
break;

+ case GRO_MERGED_FREE:
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else
+ napi_reuse_skb(napi, skb);
+ break;
+
case GRO_MERGED:
case GRO_CONSUMED:
break;
@@ -7581,7 +7591,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
{
#if BITS_PER_LONG == 64
BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
- memcpy(stats64, netdev_stats, sizeof(*stats64));
+ memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
/* zero out counters that only exist in rtnl_link_stats64 */
memset((char *)stats64 + sizeof(*netdev_stats), 0,
sizeof(*stats64) - sizeof(*netdev_stats));
@@ -7623,9 +7633,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
- storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
+ storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+ storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
+ storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
return storage;
}
EXPORT_SYMBOL(dev_get_stats);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7a3fd25e8913..532b36e9ce2a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -964,7 +964,8 @@ static int __ip_append_data(struct sock *sk,
csummode = CHECKSUM_PARTIAL;

cork->length += length;
- if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
+ if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
+ (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
(sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 651f1f058a64..2391a07e4566 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2325,6 +2325,8 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
+ dst_release(sk->sk_rx_dst);
+ sk->sk_rx_dst = NULL;
tcp_saved_syn_free(tp);

/* Clean up fastopen related fields */
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9725e8faf56d..682ea6417db3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1888,15 +1888,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
if (dad_failed)
ifp->flags |= IFA_F_DADFAILED;

- if (ifp->flags&IFA_F_PERMANENT) {
- spin_lock_bh(&ifp->lock);
- addrconf_del_dad_work(ifp);
- ifp->flags |= IFA_F_TENTATIVE;
- spin_unlock_bh(&ifp->lock);
- if (dad_failed)
- ipv6_ifa_notify(0, ifp);
- in6_ifa_put(ifp);
- } else if (ifp->flags&IFA_F_TEMPORARY) {
+ if (ifp->flags&IFA_F_TEMPORARY) {
struct inet6_ifaddr *ifpub;
spin_lock_bh(&ifp->lock);
ifpub = ifp->ifpub;
@@ -1909,6 +1901,14 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
spin_unlock_bh(&ifp->lock);
}
ipv6_del_addr(ifp);
+ } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
+ spin_lock_bh(&ifp->lock);
+ addrconf_del_dad_work(ifp);
+ ifp->flags |= IFA_F_TENTATIVE;
+ spin_unlock_bh(&ifp->lock);
+ if (dad_failed)
+ ipv6_ifa_notify(0, ifp);
+ in6_ifa_put(ifp);
} else {
ipv6_del_addr(ifp);
}
@@ -3334,6 +3334,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
struct inet6_dev *idev = __in6_dev_get(dev);
+ struct net *net = dev_net(dev);
int run_pending = 0;
int err;

@@ -3349,7 +3350,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
case NETDEV_CHANGEMTU:
/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
if (dev->mtu < IPV6_MIN_MTU) {
- addrconf_ifdown(dev, 1);
+ addrconf_ifdown(dev, dev != net->loopback_dev);
break;
}

@@ -3465,7 +3466,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
* IPV6_MIN_MTU stop IPv6 on this interface.
*/
if (dev->mtu < IPV6_MIN_MTU)
- addrconf_ifdown(dev, 1);
+ addrconf_ifdown(dev, dev != net->loopback_dev);
}
break;

diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e6b78ba0e636..e4e9f752ebbf 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -784,10 +784,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
goto next_iter;
}

- if (iter->dst.dev == rt->dst.dev &&
- iter->rt6i_idev == rt->rt6i_idev &&
- ipv6_addr_equal(&iter->rt6i_gateway,
- &rt->rt6i_gateway)) {
+ if (rt6_duplicate_nexthop(iter, rt)) {
if (rt->rt6i_nsiblings)
rt->rt6i_nsiblings = 0;
if (!(iter->rt6i_flags & RTF_EXPIRES))
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index bf8a58a1c32d..1699acb2fa2c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1390,7 +1390,7 @@ static int __ip6_append_data(struct sock *sk,
*/

cork->length += length;
- if ((((length + fragheaderlen) > mtu) ||
+ if ((((length + (skb ? skb->len : headersize)) > mtu) ||
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1072fc189708..78a54eedf739 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3047,17 +3047,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list,
struct rt6_info *rt, struct fib6_config *r_cfg)
{
struct rt6_nh *nh;
- struct rt6_info *rtnh;
int err = -EEXIST;

list_for_each_entry(nh, rt6_nh_list, next) {
/* check if rt6_info already exists */
- rtnh = nh->rt6_info;
-
- if (rtnh->dst.dev == rt->dst.dev &&
- rtnh->rt6i_idev == rt->rt6i_idev &&
- ipv6_addr_equal(&rtnh->rt6i_gateway,
- &rt->rt6i_gateway))
+ if (rt6_duplicate_nexthop(nh->rt6_info, rt))
return err;
}

diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 507678853e6c..9a1798d6ae94 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -125,7 +125,7 @@ int rds_tcp_accept_one(struct socket *sock)
if (!sock) /* module unload or netns delete in progress */
return -ENETUNREACH;

- ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+ ret = sock_create_lite(sock->sk->sk_family,
sock->sk->sk_type, sock->sk->sk_protocol,
&new_sock);
if (ret)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bcf49cd22786..6ca1db73d1d1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1008,7 +1008,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
return sch;
}
/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
- ops->destroy(sch);
+ if (ops->destroy)
+ ops->destroy(sch);
err_out3:
dev_put(dev);
kfree((char *) sch - sch->padded);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2312dc2ffdb9..06c4ebe9b654 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -291,8 +291,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
- [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
- .len = WLAN_PMKID_LEN },
+ [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
[NL80211_ATTR_DURATION] = { .type = NLA_U32 },
[NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
[NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -348,6 +347,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
[NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
[NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
+ [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
[NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
[NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -509,7 +509,7 @@ nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
static const struct nla_policy
nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
[NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 },
- [NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY,
+ [NL80211_NAN_FUNC_SERVICE_ID] = {
.len = NL80211_NAN_FUNC_SERVICE_ID_LEN },
[NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 },
[NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG },
@@ -6421,6 +6421,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
struct nlattr *attr1, *attr2;
int n_channels = 0, tmp1, tmp2;

+ nla_for_each_nested(attr1, freqs, tmp1)
+ if (nla_len(attr1) != sizeof(u32))
+ return 0;
+
nla_for_each_nested(attr1, freqs, tmp1) {
n_channels++;
/*
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index baa3c7be04ad..8a92d73b6ce8 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3523,7 +3523,7 @@ sub process {
$fixedline =~ s/\s*=\s*$/ = {/;
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $line;
- $fixedline =~ s/^(.\s*){\s*/$1/;
+ $fixedline =~ s/^(.\s*)\{\s*/$1/;
fix_insert_line($fixlinenr, $fixedline);
}
}
@@ -3864,7 +3864,7 @@ sub process {
my $fixedline = rtrim($prevrawline) . " {";
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $rawline;
- $fixedline =~ s/^(.\s*){\s*/$1\t/;
+ $fixedline =~ s/^(.\s*)\{\s*/$1\t/;
if ($fixedline !~ /^\+\s*$/) {
fix_insert_line($fixlinenr, $fixedline);
}
@@ -4353,7 +4353,7 @@ sub process {
if (ERROR("SPACING",
"space required before the open brace '{'\n" . $herecurr) &&
$fix) {
- $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
+ $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
}
}

diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index bfac6f21ae5e..5b89662493c9 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1665,6 +1665,11 @@ static int __maybe_unused hdmi_lpe_audio_resume(struct device *dev)
static void hdmi_lpe_audio_free(struct snd_card *card)
{
struct snd_intelhad *ctx = card->private_data;
+ struct intel_hdmi_lpe_audio_pdata *pdata = ctx->dev->platform_data;
+
+ spin_lock_irq(&pdata->lpe_audio_slock);
+ pdata->notify_audio_lpe = NULL;
+ spin_unlock_irq(&pdata->lpe_audio_slock);

cancel_work_sync(&ctx->hdmi_audio_wq);

diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
index c808c7d02d21..d30214221920 100644
--- a/tools/lib/lockdep/uinclude/linux/lockdep.h
+++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
@@ -8,7 +8,7 @@
#include <linux/utsname.h>
#include <linux/compiler.h>

-#define MAX_LOCK_DEPTH 2000UL
+#define MAX_LOCK_DEPTH 63UL

#define asmlinkage
#define __visible
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 8b433bf3fdd7..5238d0a1398c 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -3518,6 +3518,72 @@ static struct bpf_test tests[] = {
.errstr = "invalid bpf_context access",
},
{
+ "leak pointer into ctx 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 2 },
+ .errstr_unpriv = "R2 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "leak pointer into ctx 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "leak pointer into ctx 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 1 },
+ .errstr_unpriv = "R2 leaks addr into ctx",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "leak pointer into map val",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 4 },
+ .errstr_unpriv = "R6 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
"helper access to map: full range",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 10a21a958aaf..763f37fecfb8 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void)

if (chdir(cwd) != 0)
err(1, "chdir to private tmpfs");
-
- if (umount2(".", MNT_DETACH) != 0)
- err(1, "detach private tmpfs");
}

static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
@@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path)
err(1, "chown");
if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
err(1, "chmod");
-}
+ }

capng_get_caps_process();

@@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path)
} else {
printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
exec_other_validate_cap("./validate_cap_sgidnonroot",
- false, false, true, false);
+ false, false, true, false);

if (fork_wait()) {
printf("[RUN]\tNon-root +ia, sgidroot => i\n");