mirror of
https://github.com/Fishwaldo/build.git
synced 2025-07-23 21:39:02 +00:00
3752 lines
128 KiB
Diff
3752 lines
128 KiB
Diff
diff --git a/Makefile b/Makefile
|
|
index 401d58b35e61..b77b4332a41a 100644
|
|
--- a/Makefile
|
|
+++ b/Makefile
|
|
@@ -1,7 +1,7 @@
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
VERSION = 5
|
|
PATCHLEVEL = 7
|
|
-SUBLEVEL = 12
|
|
+SUBLEVEL = 13
|
|
EXTRAVERSION =
|
|
NAME = Kleptomaniac Octopus
|
|
|
|
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
|
|
index e038abc0c6b4..420ae26e846b 100644
|
|
--- a/arch/arm/boot/dts/armada-38x.dtsi
|
|
+++ b/arch/arm/boot/dts/armada-38x.dtsi
|
|
@@ -344,7 +344,8 @@
|
|
|
|
comphy: phy@18300 {
|
|
compatible = "marvell,armada-380-comphy";
|
|
- reg = <0x18300 0x100>;
|
|
+ reg-names = "comphy", "conf";
|
|
+ reg = <0x18300 0x100>, <0x18460 4>;
|
|
#address-cells = <1>;
|
|
#size-cells = <0>;
|
|
|
|
diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi
|
|
index 756f3a9f1b4f..12997dae35d9 100644
|
|
--- a/arch/arm/boot/dts/imx6qdl-icore.dtsi
|
|
+++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi
|
|
@@ -397,7 +397,7 @@
|
|
|
|
pinctrl_usbotg: usbotggrp {
|
|
fsl,pins = <
|
|
- MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
|
|
+ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
|
|
>;
|
|
};
|
|
|
|
@@ -409,6 +409,7 @@
|
|
MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
|
|
MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
|
|
MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
|
|
+ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
|
|
>;
|
|
};
|
|
|
|
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
|
|
index 825924448ab4..14fd1de52a68 100644
|
|
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
|
|
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
|
|
@@ -99,7 +99,7 @@
|
|
&fec2 {
|
|
pinctrl-names = "default";
|
|
pinctrl-0 = <&pinctrl_enet2>;
|
|
- phy-mode = "rgmii";
|
|
+ phy-mode = "rgmii-id";
|
|
phy-handle = <ðphy0>;
|
|
fsl,magic-packet;
|
|
status = "okay";
|
|
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
|
|
index 3e5fb72f21fc..c99aa273c296 100644
|
|
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
|
|
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
|
|
@@ -213,7 +213,7 @@
|
|
&fec2 {
|
|
pinctrl-names = "default";
|
|
pinctrl-0 = <&pinctrl_enet2>;
|
|
- phy-mode = "rgmii";
|
|
+ phy-mode = "rgmii-id";
|
|
phy-handle = <ðphy2>;
|
|
status = "okay";
|
|
};
|
|
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
|
|
index bf531efc0610..0f95a6ef8543 100644
|
|
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
|
|
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
|
|
@@ -198,7 +198,7 @@
|
|
default-pool {
|
|
compatible = "shared-dma-pool";
|
|
size = <0x6000000>;
|
|
- alloc-ranges = <0x4a000000 0x6000000>;
|
|
+ alloc-ranges = <0x40000000 0x10000000>;
|
|
reusable;
|
|
linux,cma-default;
|
|
};
|
|
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
|
|
index e6b036734a64..c2b4fbf552a3 100644
|
|
--- a/arch/arm/boot/dts/sun5i.dtsi
|
|
+++ b/arch/arm/boot/dts/sun5i.dtsi
|
|
@@ -117,7 +117,7 @@
|
|
default-pool {
|
|
compatible = "shared-dma-pool";
|
|
size = <0x6000000>;
|
|
- alloc-ranges = <0x4a000000 0x6000000>;
|
|
+ alloc-ranges = <0x40000000 0x10000000>;
|
|
reusable;
|
|
linux,cma-default;
|
|
};
|
|
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
|
|
index ffe1d10a1a84..6d6a37940db2 100644
|
|
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
|
|
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
|
|
@@ -181,7 +181,7 @@
|
|
default-pool {
|
|
compatible = "shared-dma-pool";
|
|
size = <0x6000000>;
|
|
- alloc-ranges = <0x4a000000 0x6000000>;
|
|
+ alloc-ranges = <0x40000000 0x10000000>;
|
|
reusable;
|
|
linux,cma-default;
|
|
};
|
|
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
|
|
index 02ca7adf5375..7fff88e61252 100644
|
|
--- a/arch/arm/kernel/hw_breakpoint.c
|
|
+++ b/arch/arm/kernel/hw_breakpoint.c
|
|
@@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp)
|
|
arch_install_hw_breakpoint(bp);
|
|
}
|
|
|
|
+static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
|
|
+ struct arch_hw_breakpoint *info)
|
|
+{
|
|
+ return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
|
|
+}
|
|
+
|
|
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
|
|
struct pt_regs *regs)
|
|
{
|
|
@@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
|
|
}
|
|
|
|
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
|
|
+
|
|
+ /*
|
|
+ * If we triggered a user watchpoint from a uaccess routine,
|
|
+ * then handle the stepping ourselves since userspace really
|
|
+ * can't help us with this.
|
|
+ */
|
|
+ if (watchpoint_fault_on_uaccess(regs, info))
|
|
+ goto step;
|
|
+
|
|
perf_bp_event(wp, regs);
|
|
|
|
/*
|
|
- * If no overflow handler is present, insert a temporary
|
|
- * mismatch breakpoint so we can single-step over the
|
|
- * watchpoint trigger.
|
|
+ * Defer stepping to the overflow handler if one is installed.
|
|
+ * Otherwise, insert a temporary mismatch breakpoint so that
|
|
+ * we can single-step over the watchpoint trigger.
|
|
*/
|
|
- if (is_default_overflow_handler(wp))
|
|
- enable_single_step(wp, instruction_pointer(regs));
|
|
+ if (!is_default_overflow_handler(wp))
|
|
+ goto unlock;
|
|
|
|
+step:
|
|
+ enable_single_step(wp, instruction_pointer(regs));
|
|
unlock:
|
|
rcu_read_unlock();
|
|
}
|
|
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
|
|
index e0330a25e1c6..28cfe7bad1bf 100644
|
|
--- a/arch/arm/kernel/vdso.c
|
|
+++ b/arch/arm/kernel/vdso.c
|
|
@@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr)
|
|
if (!cntvct_ok) {
|
|
vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
|
|
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
|
|
+ vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
|
|
}
|
|
}
|
|
|
|
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
|
|
index 12f0eb56a1cc..619db9b4c9d5 100644
|
|
--- a/arch/arm64/include/asm/alternative.h
|
|
+++ b/arch/arm64/include/asm/alternative.h
|
|
@@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
|
|
"663:\n\t" \
|
|
newinstr "\n" \
|
|
"664:\n\t" \
|
|
- ".previous\n\t" \
|
|
".org . - (664b-663b) + (662b-661b)\n\t" \
|
|
- ".org . - (662b-661b) + (664b-663b)\n" \
|
|
+ ".org . - (662b-661b) + (664b-663b)\n\t" \
|
|
+ ".previous\n" \
|
|
".endif\n"
|
|
|
|
#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
|
|
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
|
|
index b6f7bc6da5fb..93a161b3bf3f 100644
|
|
--- a/arch/arm64/include/asm/checksum.h
|
|
+++ b/arch/arm64/include/asm/checksum.h
|
|
@@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
|
|
{
|
|
__uint128_t tmp;
|
|
u64 sum;
|
|
+ int n = ihl; /* we want it signed */
|
|
|
|
tmp = *(const __uint128_t *)iph;
|
|
iph += 16;
|
|
- ihl -= 4;
|
|
+ n -= 4;
|
|
tmp += ((tmp >> 64) | (tmp << 64));
|
|
sum = tmp >> 64;
|
|
do {
|
|
sum += *(const u32 *)iph;
|
|
iph += 4;
|
|
- } while (--ihl);
|
|
+ } while (--n > 0);
|
|
|
|
sum += ((sum >> 32) | (sum << 32));
|
|
return csum_fold((__force u32)(sum >> 32));
|
|
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
|
|
index ab5c215cf46c..068958575871 100644
|
|
--- a/arch/parisc/include/asm/cmpxchg.h
|
|
+++ b/arch/parisc/include/asm/cmpxchg.h
|
|
@@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
|
|
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
|
|
unsigned int new_);
|
|
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
|
|
+extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
|
|
|
|
/* don't worry...optimizer will get rid of most of this */
|
|
static inline unsigned long
|
|
@@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
|
|
#endif
|
|
case 4: return __cmpxchg_u32((unsigned int *)ptr,
|
|
(unsigned int)old, (unsigned int)new_);
|
|
+ case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
|
|
}
|
|
__cmpxchg_called_with_bad_pointer();
|
|
return old;
|
|
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
|
|
index 70ffbcf889b8..2e4d1f05a926 100644
|
|
--- a/arch/parisc/lib/bitops.c
|
|
+++ b/arch/parisc/lib/bitops.c
|
|
@@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
|
|
_atomic_spin_unlock_irqrestore(ptr, flags);
|
|
return (unsigned long)prev;
|
|
}
|
|
+
|
|
+u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ u8 prev;
|
|
+
|
|
+ _atomic_spin_lock_irqsave(ptr, flags);
|
|
+ if ((prev = *ptr) == old)
|
|
+ *ptr = new;
|
|
+ _atomic_spin_unlock_irqrestore(ptr, flags);
|
|
+ return prev;
|
|
+}
|
|
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
|
|
index 81493cee0a16..115fb9245f16 100644
|
|
--- a/arch/riscv/mm/init.c
|
|
+++ b/arch/riscv/mm/init.c
|
|
@@ -146,33 +146,36 @@ void __init setup_bootmem(void)
|
|
{
|
|
struct memblock_region *reg;
|
|
phys_addr_t mem_size = 0;
|
|
+ phys_addr_t total_mem = 0;
|
|
+ phys_addr_t mem_start, end = 0;
|
|
phys_addr_t vmlinux_end = __pa_symbol(&_end);
|
|
phys_addr_t vmlinux_start = __pa_symbol(&_start);
|
|
|
|
/* Find the memory region containing the kernel */
|
|
for_each_memblock(memory, reg) {
|
|
- phys_addr_t end = reg->base + reg->size;
|
|
-
|
|
- if (reg->base <= vmlinux_start && vmlinux_end <= end) {
|
|
- mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
|
|
-
|
|
- /*
|
|
- * Remove memblock from the end of usable area to the
|
|
- * end of region
|
|
- */
|
|
- if (reg->base + mem_size < end)
|
|
- memblock_remove(reg->base + mem_size,
|
|
- end - reg->base - mem_size);
|
|
- }
|
|
+ end = reg->base + reg->size;
|
|
+ if (!total_mem)
|
|
+ mem_start = reg->base;
|
|
+ if (reg->base <= vmlinux_start && vmlinux_end <= end)
|
|
+ BUG_ON(reg->size == 0);
|
|
+ total_mem = total_mem + reg->size;
|
|
}
|
|
- BUG_ON(mem_size == 0);
|
|
+
|
|
+ /*
|
|
+ * Remove memblock from the end of usable area to the
|
|
+ * end of region
|
|
+ */
|
|
+ mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
|
|
+ if (mem_start + mem_size < end)
|
|
+ memblock_remove(mem_start + mem_size,
|
|
+ end - mem_start - mem_size);
|
|
|
|
/* Reserve from the start of the kernel to the end of the kernel */
|
|
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
|
|
|
|
- set_max_mapnr(PFN_DOWN(mem_size));
|
|
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
|
|
max_low_pfn = max_pfn;
|
|
+ set_max_mapnr(max_low_pfn);
|
|
|
|
#ifdef CONFIG_BLK_DEV_INITRD
|
|
setup_initrd();
|
|
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
|
|
index ec0ca90dd900..7a580c8ad603 100644
|
|
--- a/arch/riscv/mm/kasan_init.c
|
|
+++ b/arch/riscv/mm/kasan_init.c
|
|
@@ -44,7 +44,7 @@ asmlinkage void __init kasan_early_init(void)
|
|
(__pa(((uintptr_t) kasan_early_shadow_pmd))),
|
|
__pgprot(_PAGE_TABLE)));
|
|
|
|
- flush_tlb_all();
|
|
+ local_flush_tlb_all();
|
|
}
|
|
|
|
static void __init populate(void *start, void *end)
|
|
@@ -79,7 +79,7 @@ static void __init populate(void *start, void *end)
|
|
pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
|
|
__pgprot(_PAGE_TABLE)));
|
|
|
|
- flush_tlb_all();
|
|
+ local_flush_tlb_all();
|
|
memset(start, 0, end - start);
|
|
}
|
|
|
|
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
|
|
index 22d968bfe9bb..d770da3f8b6f 100644
|
|
--- a/arch/sh/include/asm/pgalloc.h
|
|
+++ b/arch/sh/include/asm/pgalloc.h
|
|
@@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
|
|
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
|
|
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
|
|
extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
|
|
+#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp))
|
|
#endif
|
|
|
|
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
|
|
@@ -33,13 +34,4 @@ do { \
|
|
tlb_remove_page((tlb), (pte)); \
|
|
} while (0)
|
|
|
|
-#if CONFIG_PGTABLE_LEVELS > 2
|
|
-#define __pmd_free_tlb(tlb, pmdp, addr) \
|
|
-do { \
|
|
- struct page *page = virt_to_page(pmdp); \
|
|
- pgtable_pmd_page_dtor(page); \
|
|
- tlb_remove_page((tlb), page); \
|
|
-} while (0);
|
|
-#endif
|
|
-
|
|
#endif /* __ASM_SH_PGALLOC_H */
|
|
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
|
|
index 956a7a03b0c8..9bac5bbb67f3 100644
|
|
--- a/arch/sh/kernel/entry-common.S
|
|
+++ b/arch/sh/kernel/entry-common.S
|
|
@@ -199,7 +199,7 @@ syscall_trace_entry:
|
|
mov.l @(OFF_R7,r15), r7 ! arg3
|
|
mov.l @(OFF_R3,r15), r3 ! syscall_nr
|
|
!
|
|
- mov.l 2f, r10 ! Number of syscalls
|
|
+ mov.l 6f, r10 ! Number of syscalls
|
|
cmp/hs r10, r3
|
|
bf syscall_call
|
|
mov #-ENOSYS, r0
|
|
@@ -353,7 +353,7 @@ ENTRY(system_call)
|
|
tst r9, r8
|
|
bf syscall_trace_entry
|
|
!
|
|
- mov.l 2f, r8 ! Number of syscalls
|
|
+ mov.l 6f, r8 ! Number of syscalls
|
|
cmp/hs r8, r3
|
|
bt syscall_badsys
|
|
!
|
|
@@ -392,7 +392,7 @@ syscall_exit:
|
|
#if !defined(CONFIG_CPU_SH2)
|
|
1: .long TRA
|
|
#endif
|
|
-2: .long NR_syscalls
|
|
+6: .long NR_syscalls
|
|
3: .long sys_call_table
|
|
7: .long do_syscall_trace_enter
|
|
8: .long do_syscall_trace_leave
|
|
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
|
|
index 519649ddf100..fe522691ac71 100644
|
|
--- a/arch/x86/kernel/i8259.c
|
|
+++ b/arch/x86/kernel/i8259.c
|
|
@@ -207,7 +207,7 @@ spurious_8259A_irq:
|
|
* lets ACK and report it. [once per IRQ]
|
|
*/
|
|
if (!(spurious_irq_mask & irqmask)) {
|
|
- printk(KERN_DEBUG
|
|
+ printk_deferred(KERN_DEBUG
|
|
"spurious 8259A interrupt: IRQ%d.\n", irq);
|
|
spurious_irq_mask |= irqmask;
|
|
}
|
|
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
|
|
index 6ad43fc44556..2fd698e28e4d 100644
|
|
--- a/arch/x86/kernel/stacktrace.c
|
|
+++ b/arch/x86/kernel/stacktrace.c
|
|
@@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
|
|
* or a page fault), which can make frame pointers
|
|
* unreliable.
|
|
*/
|
|
-
|
|
if (IS_ENABLED(CONFIG_FRAME_POINTER))
|
|
return -EINVAL;
|
|
}
|
|
@@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
|
|
if (unwind_error(&state))
|
|
return -EINVAL;
|
|
|
|
- /* Success path for non-user tasks, i.e. kthreads and idle tasks */
|
|
- if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
|
|
- return -EINVAL;
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
|
|
index 7f969b2d240f..ec88bbe08a32 100644
|
|
--- a/arch/x86/kernel/unwind_orc.c
|
|
+++ b/arch/x86/kernel/unwind_orc.c
|
|
@@ -440,8 +440,11 @@ bool unwind_next_frame(struct unwind_state *state)
|
|
/*
|
|
* Find the orc_entry associated with the text address.
|
|
*
|
|
- * Decrement call return addresses by one so they work for sibling
|
|
- * calls and calls to noreturn functions.
|
|
+ * For a call frame (as opposed to a signal frame), state->ip points to
|
|
+ * the instruction after the call. That instruction's stack layout
|
|
+ * could be different from the call instruction's layout, for example
|
|
+ * if the call was to a noreturn function. So get the ORC data for the
|
|
+ * call instruction itself.
|
|
*/
|
|
orc = orc_find(state->signal ? state->ip : state->ip - 1);
|
|
if (!orc) {
|
|
@@ -662,6 +665,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
|
|
state->sp = task->thread.sp;
|
|
state->bp = READ_ONCE_NOCHECK(frame->bp);
|
|
state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
|
|
+ state->signal = (void *)state->ip == ret_from_fork;
|
|
}
|
|
|
|
if (get_stack_info((unsigned long *)state->sp, state->task,
|
|
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
|
|
index 8967e320a978..6b26deccedfd 100644
|
|
--- a/arch/x86/kvm/lapic.c
|
|
+++ b/arch/x86/kvm/lapic.c
|
|
@@ -2136,7 +2136,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
|
|
{
|
|
struct kvm_lapic *apic = vcpu->arch.apic;
|
|
|
|
- if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
|
|
+ if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
|
|
apic_lvtt_period(apic))
|
|
return;
|
|
|
|
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
|
|
index a862c768fd54..7dbfc0bc738c 100644
|
|
--- a/arch/x86/kvm/svm/svm.c
|
|
+++ b/arch/x86/kvm/svm/svm.c
|
|
@@ -1105,7 +1105,7 @@ static void init_vmcb(struct vcpu_svm *svm)
|
|
svm->nested.vmcb = 0;
|
|
svm->vcpu.arch.hflags = 0;
|
|
|
|
- if (pause_filter_count) {
|
|
+ if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
|
|
control->pause_filter_count = pause_filter_count;
|
|
if (pause_filter_thresh)
|
|
control->pause_filter_thresh = pause_filter_thresh;
|
|
@@ -2682,7 +2682,7 @@ static int pause_interception(struct vcpu_svm *svm)
|
|
struct kvm_vcpu *vcpu = &svm->vcpu;
|
|
bool in_kernel = (svm_get_cpl(vcpu) == 0);
|
|
|
|
- if (pause_filter_thresh)
|
|
+ if (!kvm_pause_in_guest(vcpu->kvm))
|
|
grow_ple_window(vcpu);
|
|
|
|
kvm_vcpu_on_spin(vcpu, in_kernel);
|
|
@@ -3727,7 +3727,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
|
|
|
|
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
|
|
{
|
|
- if (pause_filter_thresh)
|
|
+ if (!kvm_pause_in_guest(vcpu->kvm))
|
|
shrink_ple_window(vcpu);
|
|
}
|
|
|
|
@@ -3892,6 +3892,9 @@ static void svm_vm_destroy(struct kvm *kvm)
|
|
|
|
static int svm_vm_init(struct kvm *kvm)
|
|
{
|
|
+ if (!pause_filter_count || !pause_filter_thresh)
|
|
+ kvm->arch.pause_in_guest = true;
|
|
+
|
|
if (avic) {
|
|
int ret = avic_vm_init(kvm);
|
|
if (ret)
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
|
|
index fd1dc3236eca..81f83ee4b12b 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
|
|
@@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
|
|
return n ? -EFAULT : 0;
|
|
}
|
|
case AMDGPU_INFO_DEV_INFO: {
|
|
- struct drm_amdgpu_info_device dev_info = {};
|
|
+ struct drm_amdgpu_info_device dev_info;
|
|
uint64_t vm_size;
|
|
|
|
+ memset(&dev_info, 0, sizeof(dev_info));
|
|
dev_info.device_id = dev->pdev->device;
|
|
dev_info.chip_rev = adev->rev_id;
|
|
dev_info.external_rev = adev->external_rev_id;
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
|
|
index b14b0b4ffeb2..96b8feb77b15 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
|
|
@@ -775,7 +775,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
|
|
tmp_str++;
|
|
while (isspace(*++tmp_str));
|
|
|
|
- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
|
|
+ while (tmp_str[0]) {
|
|
+ sub_str = strsep(&tmp_str, delimiter);
|
|
ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
|
|
if (ret)
|
|
return -EINVAL;
|
|
@@ -1035,7 +1036,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
|
|
memcpy(buf_cpy, buf, bytes);
|
|
buf_cpy[bytes] = '\0';
|
|
tmp = buf_cpy;
|
|
- while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
|
|
+ while (tmp[0]) {
|
|
+ sub_str = strsep(&tmp, delimiter);
|
|
if (strlen(sub_str)) {
|
|
ret = kstrtol(sub_str, 0, &level);
|
|
if (ret)
|
|
@@ -1632,7 +1634,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
|
|
i++;
|
|
memcpy(buf_cpy, buf, count-i);
|
|
tmp_str = buf_cpy;
|
|
- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
|
|
+ while (tmp_str[0]) {
|
|
+ sub_str = strsep(&tmp_str, delimiter);
|
|
ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
|
|
if (ret)
|
|
return -EINVAL;
|
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
index 837a286469ec..d50751ae73f1 100644
|
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
@@ -8489,20 +8489,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
|
* the same resource. If we have a new DC context as part of
|
|
* the DM atomic state from validation we need to free it and
|
|
* retain the existing one instead.
|
|
+ *
|
|
+ * Furthermore, since the DM atomic state only contains the DC
|
|
+ * context and can safely be annulled, we can free the state
|
|
+ * and clear the associated private object now to free
|
|
+ * some memory and avoid a possible use-after-free later.
|
|
*/
|
|
- struct dm_atomic_state *new_dm_state, *old_dm_state;
|
|
|
|
- new_dm_state = dm_atomic_get_new_state(state);
|
|
- old_dm_state = dm_atomic_get_old_state(state);
|
|
+ for (i = 0; i < state->num_private_objs; i++) {
|
|
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
|
|
|
|
- if (new_dm_state && old_dm_state) {
|
|
- if (new_dm_state->context)
|
|
- dc_release_state(new_dm_state->context);
|
|
+ if (obj->funcs == adev->dm.atomic_obj.funcs) {
|
|
+ int j = state->num_private_objs-1;
|
|
|
|
- new_dm_state->context = old_dm_state->context;
|
|
+ dm_atomic_destroy_state(obj,
|
|
+ state->private_objs[i].state);
|
|
+
|
|
+ /* If i is not at the end of the array then the
|
|
+ * last element needs to be moved to where i was
|
|
+ * before the array can safely be truncated.
|
|
+ */
|
|
+ if (i != j)
|
|
+ state->private_objs[i] =
|
|
+ state->private_objs[j];
|
|
|
|
- if (old_dm_state->context)
|
|
- dc_retain_state(old_dm_state->context);
|
|
+ state->private_objs[j].ptr = NULL;
|
|
+ state->private_objs[j].state = NULL;
|
|
+ state->private_objs[j].old_state = NULL;
|
|
+ state->private_objs[j].new_state = NULL;
|
|
+
|
|
+ state->num_private_objs = j;
|
|
+ break;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
|
|
index 37627d06fb06..3087aa710e8d 100644
|
|
--- a/drivers/gpu/drm/drm_gem.c
|
|
+++ b/drivers/gpu/drm/drm_gem.c
|
|
@@ -872,9 +872,6 @@ err:
|
|
* @file_priv: drm file-private structure
|
|
*
|
|
* Open an object using the global name, returning a handle and the size.
|
|
- *
|
|
- * This handle (of course) holds a reference to the object, so the object
|
|
- * will not go away until the handle is deleted.
|
|
*/
|
|
int
|
|
drm_gem_open_ioctl(struct drm_device *dev, void *data,
|
|
@@ -899,14 +896,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
|
|
|
|
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
|
|
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
|
|
- drm_gem_object_put_unlocked(obj);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err;
|
|
|
|
args->handle = handle;
|
|
args->size = obj->size;
|
|
|
|
- return 0;
|
|
+err:
|
|
+ drm_gem_object_put_unlocked(obj);
|
|
+ return ret;
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
|
|
index 558baf989f5a..7d2211016eda 100644
|
|
--- a/drivers/gpu/drm/drm_mipi_dbi.c
|
|
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
|
|
@@ -938,7 +938,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
|
|
}
|
|
}
|
|
|
|
- tr.len = chunk;
|
|
+ tr.len = chunk * 2;
|
|
len -= chunk;
|
|
|
|
ret = spi_sync(spi, &m);
|
|
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
|
|
index b50b44e76279..8fc3f67e3e76 100644
|
|
--- a/drivers/gpu/drm/drm_of.c
|
|
+++ b/drivers/gpu/drm/drm_of.c
|
|
@@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type(
|
|
* configurations by passing the endpoints explicitly to
|
|
* drm_of_lvds_get_dual_link_pixel_order().
|
|
*/
|
|
- if (!current_pt || pixels_type != current_pt) {
|
|
- of_node_put(remote_port);
|
|
+ if (!current_pt || pixels_type != current_pt)
|
|
return -EINVAL;
|
|
- }
|
|
}
|
|
|
|
return pixels_type;
|
|
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
|
|
index e59907e68854..d72ac23cd110 100644
|
|
--- a/drivers/gpu/drm/mcde/mcde_display.c
|
|
+++ b/drivers/gpu/drm/mcde/mcde_display.c
|
|
@@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
|
|
*/
|
|
if (fb) {
|
|
mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
|
|
- if (!mcde->video_mode)
|
|
- /* Send a single frame using software sync */
|
|
- mcde_display_send_one_frame(mcde);
|
|
+ if (!mcde->video_mode) {
|
|
+ /*
|
|
+ * Send a single frame using software sync if the flow
|
|
+ * is not active yet.
|
|
+ */
|
|
+ if (mcde->flow_active == 0)
|
|
+ mcde_display_send_one_frame(mcde);
|
|
+ }
|
|
dev_info_once(mcde->dev, "sent first display update\n");
|
|
} else {
|
|
/*
|
|
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
|
|
index 89d58f7d2a25..1efdabb5adca 100644
|
|
--- a/drivers/i2c/busses/i2c-cadence.c
|
|
+++ b/drivers/i2c/busses/i2c-cadence.c
|
|
@@ -230,20 +230,21 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
|
|
/* Read data if receive data valid is set */
|
|
while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
|
|
CDNS_I2C_SR_RXDV) {
|
|
- /*
|
|
- * Clear hold bit that was set for FIFO control if
|
|
- * RX data left is less than FIFO depth, unless
|
|
- * repeated start is selected.
|
|
- */
|
|
- if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
|
|
- !id->bus_hold_flag)
|
|
- cdns_i2c_clear_bus_hold(id);
|
|
-
|
|
if (id->recv_count > 0) {
|
|
*(id->p_recv_buf)++ =
|
|
cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
|
|
id->recv_count--;
|
|
id->curr_recv_count--;
|
|
+
|
|
+ /*
|
|
+ * Clear hold bit that was set for FIFO control
|
|
+ * if RX data left is less than or equal to
|
|
+ * FIFO DEPTH unless repeated start is selected
|
|
+ */
|
|
+ if (id->recv_count <= CDNS_I2C_FIFO_DEPTH &&
|
|
+ !id->bus_hold_flag)
|
|
+ cdns_i2c_clear_bus_hold(id);
|
|
+
|
|
} else {
|
|
dev_err(id->adap.dev.parent,
|
|
"xfer_size reg rollover. xfer aborted!\n");
|
|
@@ -382,10 +383,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
|
|
* Check for the message size against FIFO depth and set the
|
|
* 'hold bus' bit if it is greater than FIFO depth.
|
|
*/
|
|
- if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
|
|
+ if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
|
|
ctrl_reg |= CDNS_I2C_CR_HOLD;
|
|
- else
|
|
- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
|
|
|
|
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
|
|
|
|
@@ -442,11 +441,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
|
|
* Check for the message size against FIFO depth and set the
|
|
* 'hold bus' bit if it is greater than FIFO depth.
|
|
*/
|
|
- if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
|
|
+ if (id->send_count > CDNS_I2C_FIFO_DEPTH)
|
|
ctrl_reg |= CDNS_I2C_CR_HOLD;
|
|
- else
|
|
- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
|
|
-
|
|
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
|
|
|
|
/* Clear the interrupts in interrupt status register. */
|
|
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
|
|
index 4f25b2400694..6bb62d04030a 100644
|
|
--- a/drivers/infiniband/core/cq.c
|
|
+++ b/drivers/infiniband/core/cq.c
|
|
@@ -68,6 +68,15 @@ static void rdma_dim_init(struct ib_cq *cq)
|
|
INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
|
|
}
|
|
|
|
+static void rdma_dim_destroy(struct ib_cq *cq)
|
|
+{
|
|
+ if (!cq->dim)
|
|
+ return;
|
|
+
|
|
+ cancel_work_sync(&cq->dim->work);
|
|
+ kfree(cq->dim);
|
|
+}
|
|
+
|
|
static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
|
|
{
|
|
int rc;
|
|
@@ -261,6 +270,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
|
|
return cq;
|
|
|
|
out_destroy_cq:
|
|
+ rdma_dim_destroy(cq);
|
|
rdma_restrack_del(&cq->res);
|
|
cq->device->ops.destroy_cq(cq, udata);
|
|
out_free_wc:
|
|
@@ -324,12 +334,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
|
|
WARN_ON_ONCE(1);
|
|
}
|
|
|
|
+ rdma_dim_destroy(cq);
|
|
trace_cq_free(cq);
|
|
rdma_restrack_del(&cq->res);
|
|
cq->device->ops.destroy_cq(cq, udata);
|
|
- if (cq->dim)
|
|
- cancel_work_sync(&cq->dim->work);
|
|
- kfree(cq->dim);
|
|
kfree(cq->wc);
|
|
kfree(cq);
|
|
}
|
|
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
|
|
index bdeb6500a919..b56d812b8a7b 100644
|
|
--- a/drivers/infiniband/hw/mlx5/odp.c
|
|
+++ b/drivers/infiniband/hw/mlx5/odp.c
|
|
@@ -1798,9 +1798,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
|
|
work->frags[i].mr =
|
|
get_prefetchable_mr(pd, advice, sg_list[i].lkey);
|
|
if (!work->frags[i].mr) {
|
|
- work->num_sge = i - 1;
|
|
- if (i)
|
|
- destroy_prefetch_work(work);
|
|
+ work->num_sge = i;
|
|
return false;
|
|
}
|
|
|
|
@@ -1866,6 +1864,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
|
|
srcu_key = srcu_read_lock(&dev->odp_srcu);
|
|
if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
|
|
srcu_read_unlock(&dev->odp_srcu, srcu_key);
|
|
+ destroy_prefetch_work(work);
|
|
return -EINVAL;
|
|
}
|
|
queue_work(system_unbound_wq, &work->work);
|
|
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
|
|
index ca29954a54ac..94372408cb5e 100644
|
|
--- a/drivers/infiniband/sw/rdmavt/qp.c
|
|
+++ b/drivers/infiniband/sw/rdmavt/qp.c
|
|
@@ -898,8 +898,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
|
|
qp->s_tail_ack_queue = 0;
|
|
qp->s_acked_ack_queue = 0;
|
|
qp->s_num_rd_atomic = 0;
|
|
- if (qp->r_rq.kwq)
|
|
- qp->r_rq.kwq->count = qp->r_rq.size;
|
|
qp->r_sge.num_sge = 0;
|
|
atomic_set(&qp->s_reserved_used, 0);
|
|
}
|
|
@@ -2352,31 +2350,6 @@ bad_lkey:
|
|
return 0;
|
|
}
|
|
|
|
-/**
|
|
- * get_count - count numbers of request work queue entries
|
|
- * in circular buffer
|
|
- * @rq: data structure for request queue entry
|
|
- * @tail: tail indices of the circular buffer
|
|
- * @head: head indices of the circular buffer
|
|
- *
|
|
- * Return - total number of entries in the circular buffer
|
|
- */
|
|
-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
|
|
-{
|
|
- u32 count;
|
|
-
|
|
- count = head;
|
|
-
|
|
- if (count >= rq->size)
|
|
- count = 0;
|
|
- if (count < tail)
|
|
- count += rq->size - tail;
|
|
- else
|
|
- count -= tail;
|
|
-
|
|
- return count;
|
|
-}
|
|
-
|
|
/**
|
|
* get_rvt_head - get head indices of the circular buffer
|
|
* @rq: data structure for request queue entry
|
|
@@ -2451,7 +2424,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
|
|
|
|
if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
|
|
head = get_rvt_head(rq, ip);
|
|
- kwq->count = get_count(rq, tail, head);
|
|
+ kwq->count = rvt_get_rq_count(rq, head, tail);
|
|
}
|
|
if (unlikely(kwq->count == 0)) {
|
|
ret = 0;
|
|
@@ -2486,7 +2459,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
|
|
* the number of remaining WQEs.
|
|
*/
|
|
if (kwq->count < srq->limit) {
|
|
- kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
|
|
+ kwq->count =
|
|
+ rvt_get_rq_count(rq,
|
|
+ get_rvt_head(rq, ip), tail);
|
|
if (kwq->count < srq->limit) {
|
|
struct ib_event ev;
|
|
|
|
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
|
|
index 977906cc0d11..c58735f4c94a 100644
|
|
--- a/drivers/infiniband/sw/rdmavt/rc.c
|
|
+++ b/drivers/infiniband/sw/rdmavt/rc.c
|
|
@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
|
|
* not atomic, which is OK, since the fuzziness is
|
|
* resolved as further ACKs go out.
|
|
*/
|
|
- credits = head - tail;
|
|
- if ((int)credits < 0)
|
|
- credits += qp->r_rq.size;
|
|
+ credits = rvt_get_rq_count(&qp->r_rq, head, tail);
|
|
}
|
|
/*
|
|
* Binary search the credit table to find the code to
|
|
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
|
|
index 409276b6374d..e7c8e7473226 100644
|
|
--- a/drivers/misc/habanalabs/command_submission.c
|
|
+++ b/drivers/misc/habanalabs/command_submission.c
|
|
@@ -425,11 +425,19 @@ static int validate_queue_index(struct hl_device *hdev,
|
|
struct asic_fixed_properties *asic = &hdev->asic_prop;
|
|
struct hw_queue_properties *hw_queue_prop;
|
|
|
|
+ /* This must be checked here to prevent out-of-bounds access to
|
|
+ * hw_queues_props array
|
|
+ */
|
|
+ if (chunk->queue_index >= HL_MAX_QUEUES) {
|
|
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
|
|
+ chunk->queue_index);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
|
|
|
|
- if ((chunk->queue_index >= HL_MAX_QUEUES) ||
|
|
- (hw_queue_prop->type == QUEUE_TYPE_NA)) {
|
|
- dev_err(hdev->dev, "Queue index %d is invalid\n",
|
|
+ if (hw_queue_prop->type == QUEUE_TYPE_NA) {
|
|
+ dev_err(hdev->dev, "Queue index %d is not applicable\n",
|
|
chunk->queue_index);
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
|
|
index 3dd46cd55114..88e7900853db 100644
|
|
--- a/drivers/net/bareudp.c
|
|
+++ b/drivers/net/bareudp.c
|
|
@@ -407,19 +407,34 @@ free_dst:
|
|
return err;
|
|
}
|
|
|
|
+static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
|
|
+{
|
|
+ if (bareudp->ethertype == proto)
|
|
+ return true;
|
|
+
|
|
+ if (!bareudp->multi_proto_mode)
|
|
+ return false;
|
|
+
|
|
+ if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
|
|
+ proto == htons(ETH_P_MPLS_MC))
|
|
+ return true;
|
|
+
|
|
+ if (bareudp->ethertype == htons(ETH_P_IP) &&
|
|
+ proto == htons(ETH_P_IPV6))
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
{
|
|
struct bareudp_dev *bareudp = netdev_priv(dev);
|
|
struct ip_tunnel_info *info = NULL;
|
|
int err;
|
|
|
|
- if (skb->protocol != bareudp->ethertype) {
|
|
- if (!bareudp->multi_proto_mode ||
|
|
- (skb->protocol != htons(ETH_P_MPLS_MC) &&
|
|
- skb->protocol != htons(ETH_P_IPV6))) {
|
|
- err = -EINVAL;
|
|
- goto tx_error;
|
|
- }
|
|
+ if (!bareudp_proto_valid(bareudp, skb->protocol)) {
|
|
+ err = -EINVAL;
|
|
+ goto tx_error;
|
|
}
|
|
|
|
info = skb_tunnel_info(skb);
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
|
|
index 28ce9856a078..0f5ca68c9854 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
|
|
@@ -2925,6 +2925,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
|
|
txq_info = adap->sge.uld_txq_info[tx_uld_type];
|
|
if (unlikely(!txq_info)) {
|
|
WARN_ON(true);
|
|
+ kfree_skb(skb);
|
|
return NET_XMIT_DROP;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
|
|
index 5bff5c2be88b..5359fb40578d 100644
|
|
--- a/drivers/net/ethernet/cortina/gemini.c
|
|
+++ b/drivers/net/ethernet/cortina/gemini.c
|
|
@@ -2445,6 +2445,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
|
|
port->reset = devm_reset_control_get_exclusive(dev, NULL);
|
|
if (IS_ERR(port->reset)) {
|
|
dev_err(dev, "no reset\n");
|
|
+ clk_disable_unprepare(port->pclk);
|
|
return PTR_ERR(port->reset);
|
|
}
|
|
reset_control_reset(port->reset);
|
|
@@ -2500,8 +2501,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
|
|
IRQF_SHARED,
|
|
port_names[port->id],
|
|
port);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ clk_disable_unprepare(port->pclk);
|
|
return ret;
|
|
+ }
|
|
|
|
ret = register_netdev(netdev);
|
|
if (!ret) {
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
|
|
index df1cb0441183..6e186aea7a2f 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
|
|
@@ -1098,16 +1098,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
|
|
int k, sizeoflast;
|
|
dma_addr_t dma;
|
|
|
|
- if (type == DESC_TYPE_SKB) {
|
|
- struct sk_buff *skb = (struct sk_buff *)priv;
|
|
- int ret;
|
|
-
|
|
- ret = hns3_fill_skb_desc(ring, skb, desc);
|
|
- if (unlikely(ret < 0))
|
|
- return ret;
|
|
-
|
|
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
|
|
- } else if (type == DESC_TYPE_FRAGLIST_SKB) {
|
|
+ if (type == DESC_TYPE_FRAGLIST_SKB ||
|
|
+ type == DESC_TYPE_SKB) {
|
|
struct sk_buff *skb = (struct sk_buff *)priv;
|
|
|
|
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
|
|
@@ -1452,6 +1444,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
|
|
next_to_use_head = ring->next_to_use;
|
|
|
|
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
|
|
+ if (unlikely(ret < 0))
|
|
+ goto fill_err;
|
|
+
|
|
ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
|
|
if (unlikely(ret < 0))
|
|
goto fill_err;
|
|
@@ -4174,8 +4170,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
|
|
return;
|
|
|
|
if (linkup) {
|
|
- netif_carrier_on(netdev);
|
|
netif_tx_wake_all_queues(netdev);
|
|
+ netif_carrier_on(netdev);
|
|
if (netif_msg_link(handle))
|
|
netdev_info(netdev, "link up\n");
|
|
} else {
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
|
|
index b66b93f320b4..dfe247ad8475 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
|
|
@@ -5737,9 +5737,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
|
|
/* to avoid rule conflict, when user configure rule by ethtool,
|
|
* we need to clear all arfs rules
|
|
*/
|
|
+ spin_lock_bh(&hdev->fd_rule_lock);
|
|
hclge_clear_arfs_rules(handle);
|
|
|
|
- spin_lock_bh(&hdev->fd_rule_lock);
|
|
ret = hclge_fd_config_rule(hdev, rule);
|
|
|
|
spin_unlock_bh(&hdev->fd_rule_lock);
|
|
@@ -5782,6 +5782,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
|
|
return ret;
|
|
}
|
|
|
|
+/* make sure being called after lock up with fd_rule_lock */
|
|
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
|
|
bool clear_list)
|
|
{
|
|
@@ -5794,7 +5795,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
|
|
if (!hnae3_dev_fd_supported(hdev))
|
|
return;
|
|
|
|
- spin_lock_bh(&hdev->fd_rule_lock);
|
|
for_each_set_bit(location, hdev->fd_bmap,
|
|
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
|
|
hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
|
|
@@ -5811,8 +5811,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
|
|
bitmap_zero(hdev->fd_bmap,
|
|
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
|
|
}
|
|
-
|
|
- spin_unlock_bh(&hdev->fd_rule_lock);
|
|
}
|
|
|
|
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
|
|
@@ -6179,7 +6177,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
|
|
u16 flow_id, struct flow_keys *fkeys)
|
|
{
|
|
struct hclge_vport *vport = hclge_get_vport(handle);
|
|
- struct hclge_fd_rule_tuples new_tuples;
|
|
+ struct hclge_fd_rule_tuples new_tuples = {};
|
|
struct hclge_dev *hdev = vport->back;
|
|
struct hclge_fd_rule *rule;
|
|
u16 tmp_queue_id;
|
|
@@ -6189,20 +6187,18 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
|
|
if (!hnae3_dev_fd_supported(hdev))
|
|
return -EOPNOTSUPP;
|
|
|
|
- memset(&new_tuples, 0, sizeof(new_tuples));
|
|
- hclge_fd_get_flow_tuples(fkeys, &new_tuples);
|
|
-
|
|
- spin_lock_bh(&hdev->fd_rule_lock);
|
|
-
|
|
/* when there is already fd rule existed add by user,
|
|
* arfs should not work
|
|
*/
|
|
+ spin_lock_bh(&hdev->fd_rule_lock);
|
|
if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
|
|
spin_unlock_bh(&hdev->fd_rule_lock);
|
|
|
|
return -EOPNOTSUPP;
|
|
}
|
|
|
|
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
|
|
+
|
|
/* check is there flow director filter existed for this flow,
|
|
* if not, create a new filter for it;
|
|
* if filter exist with different queue id, modify the filter;
|
|
@@ -6287,6 +6283,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
|
|
#endif
|
|
}
|
|
|
|
+/* make sure being called after lock up with fd_rule_lock */
|
|
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
|
|
{
|
|
#ifdef CONFIG_RFS_ACCEL
|
|
@@ -6331,10 +6328,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
|
|
|
|
hdev->fd_en = enable;
|
|
clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
|
|
- if (!enable)
|
|
+
|
|
+ if (!enable) {
|
|
+ spin_lock_bh(&hdev->fd_rule_lock);
|
|
hclge_del_all_fd_entries(handle, clear);
|
|
- else
|
|
+ spin_unlock_bh(&hdev->fd_rule_lock);
|
|
+ } else {
|
|
hclge_restore_fd_entries(handle);
|
|
+ }
|
|
}
|
|
|
|
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
|
|
@@ -6799,8 +6800,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
|
|
int i;
|
|
|
|
set_bit(HCLGE_STATE_DOWN, &hdev->state);
|
|
-
|
|
+ spin_lock_bh(&hdev->fd_rule_lock);
|
|
hclge_clear_arfs_rules(handle);
|
|
+ spin_unlock_bh(&hdev->fd_rule_lock);
|
|
|
|
/* If it is not PF reset, the firmware will disable the MAC,
|
|
* so it only need to stop phy here.
|
|
@@ -8532,11 +8534,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
|
|
bool writen_to_tbl = false;
|
|
int ret = 0;
|
|
|
|
- /* When device is resetting, firmware is unable to handle
|
|
- * mailbox. Just record the vlan id, and remove it after
|
|
+ /* When device is resetting or reset failed, firmware is unable to
|
|
+ * handle mailbox. Just record the vlan id, and remove it after
|
|
* reset finished.
|
|
*/
|
|
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
|
|
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
|
|
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
|
|
set_bit(vlan_id, vport->vlan_del_fail_bmap);
|
|
return -EBUSY;
|
|
}
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
|
|
index e6cdd06925e6..0060fa643d0e 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
|
|
@@ -1322,11 +1322,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
|
|
if (proto != htons(ETH_P_8021Q))
|
|
return -EPROTONOSUPPORT;
|
|
|
|
- /* When device is resetting, firmware is unable to handle
|
|
- * mailbox. Just record the vlan id, and remove it after
|
|
+ /* When device is resetting or reset failed, firmware is unable to
|
|
+ * handle mailbox. Just record the vlan id, and remove it after
|
|
* reset finished.
|
|
*/
|
|
- if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
|
|
+ if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
|
|
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
|
|
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
|
|
return -EBUSY;
|
|
}
|
|
@@ -3142,23 +3143,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
|
|
{
|
|
struct hnae3_handle *nic = &hdev->nic;
|
|
struct hclge_vf_to_pf_msg send_msg;
|
|
+ int ret;
|
|
|
|
rtnl_lock();
|
|
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
|
|
- rtnl_unlock();
|
|
+
|
|
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
|
|
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
|
|
+ dev_warn(&hdev->pdev->dev,
|
|
+ "is resetting when updating port based vlan info\n");
|
|
+ rtnl_unlock();
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
|
|
+ if (ret) {
|
|
+ rtnl_unlock();
|
|
+ return;
|
|
+ }
|
|
|
|
/* send msg to PF and wait update port based vlan info */
|
|
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
|
|
HCLGE_MBX_PORT_BASE_VLAN_CFG);
|
|
memcpy(send_msg.data, port_base_vlan_info, data_size);
|
|
- hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
|
|
-
|
|
- if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
|
|
- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
|
|
- else
|
|
- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
|
|
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
|
|
+ if (!ret) {
|
|
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
|
|
+ nic->port_base_vlan_state = state;
|
|
+ else
|
|
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
|
|
+ }
|
|
|
|
- rtnl_lock();
|
|
hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
|
|
rtnl_unlock();
|
|
}
|
|
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
|
|
index 0fd7eae25fe9..5afb3c9c52d2 100644
|
|
--- a/drivers/net/ethernet/ibm/ibmvnic.c
|
|
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
|
|
@@ -3206,7 +3206,7 @@ req_rx_irq_failed:
|
|
req_tx_irq_failed:
|
|
for (j = 0; j < i; j++) {
|
|
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
|
|
- irq_dispose_mapping(adapter->rx_scrq[j]->irq);
|
|
+ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
|
|
}
|
|
release_sub_crqs(adapter, 1);
|
|
return rc;
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
|
|
index 64786568af0d..75a8c407e815 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
|
|
@@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work)
|
|
if (!netif_running(pf->netdev))
|
|
return;
|
|
|
|
+ rtnl_lock();
|
|
otx2_stop(pf->netdev);
|
|
pf->reset_count++;
|
|
otx2_open(pf->netdev);
|
|
netif_trans_update(pf->netdev);
|
|
+ rtnl_unlock();
|
|
}
|
|
|
|
static const struct net_device_ops otx2_netdev_ops = {
|
|
@@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev)
|
|
|
|
pf = netdev_priv(netdev);
|
|
|
|
+ cancel_work_sync(&pf->reset_task);
|
|
/* Disable link notifications */
|
|
otx2_cgx_config_linkevents(pf, false);
|
|
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
|
|
index f4227517dc8e..92a3db69a6cd 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
|
|
@@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
|
|
|
|
vf = netdev_priv(netdev);
|
|
|
|
+ cancel_work_sync(&vf->reset_task);
|
|
+ unregister_netdev(netdev);
|
|
otx2vf_disable_mbox_intr(vf);
|
|
|
|
otx2_detach_resources(&vf->mbox);
|
|
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
|
index 09047109d0da..b743d8b56c84 100644
|
|
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
|
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
|
@@ -2882,6 +2882,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
|
|
eth->netdev[id]->irq = eth->irq[0];
|
|
eth->netdev[id]->dev.of_node = np;
|
|
|
|
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
|
|
+
|
|
return 0;
|
|
|
|
free_netdev:
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
|
|
index c72c4e1ea383..598e222e0b90 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
|
|
@@ -4358,12 +4358,14 @@ end:
|
|
static void mlx4_shutdown(struct pci_dev *pdev)
|
|
{
|
|
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
|
|
+ struct mlx4_dev *dev = persist->dev;
|
|
|
|
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
|
|
mutex_lock(&persist->interface_state_mutex);
|
|
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
|
|
mlx4_unload_one(pdev);
|
|
mutex_unlock(&persist->interface_state_mutex);
|
|
+ mlx4_pci_disable_device(dev);
|
|
}
|
|
|
|
static const struct pci_error_handlers mlx4_err_handler = {
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
|
|
index 951ea26d96bc..e472ed0eacfb 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
|
|
@@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
|
|
MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
|
|
}
|
|
|
|
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
|
|
index 58b13192df23..2805416c32a3 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
|
|
@@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
|
|
gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
|
|
}
|
|
|
|
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
|
|
index 37b176801bcc..038a0f1cecec 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
|
|
@@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
|
|
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
|
|
be32_to_cpu(enc_keyid.key->keyid));
|
|
|
|
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
|
|
index bc54913c5861..9861c9e42c0a 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
|
|
@@ -422,7 +422,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
|
|
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
|
|
&rq->wq_ctrl);
|
|
if (err)
|
|
- return err;
|
|
+ goto err_rq_wq_destroy;
|
|
|
|
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
|
|
|
|
@@ -475,7 +475,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
|
|
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
|
|
&rq->wq_ctrl);
|
|
if (err)
|
|
- return err;
|
|
+ goto err_rq_wq_destroy;
|
|
|
|
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
|
|
|
|
@@ -3041,6 +3041,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
|
|
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
|
|
}
|
|
|
|
+static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
|
|
+ enum mlx5_port_status state)
|
|
+{
|
|
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
|
|
+ int vport_admin_state;
|
|
+
|
|
+ mlx5_set_port_admin_status(mdev, state);
|
|
+
|
|
+ if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
|
|
+ return;
|
|
+
|
|
+ if (state == MLX5_PORT_UP)
|
|
+ vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
|
|
+ else
|
|
+ vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
|
|
+
|
|
+ mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
|
|
+}
|
|
+
|
|
int mlx5e_open_locked(struct net_device *netdev)
|
|
{
|
|
struct mlx5e_priv *priv = netdev_priv(netdev);
|
|
@@ -3073,7 +3092,7 @@ int mlx5e_open(struct net_device *netdev)
|
|
mutex_lock(&priv->state_lock);
|
|
err = mlx5e_open_locked(netdev);
|
|
if (!err)
|
|
- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
|
|
+ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
|
|
mutex_unlock(&priv->state_lock);
|
|
|
|
return err;
|
|
@@ -3107,7 +3126,7 @@ int mlx5e_close(struct net_device *netdev)
|
|
return -ENODEV;
|
|
|
|
mutex_lock(&priv->state_lock);
|
|
- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
|
|
+ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
|
|
err = mlx5e_close_locked(netdev);
|
|
mutex_unlock(&priv->state_lock);
|
|
|
|
@@ -5185,7 +5204,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
|
|
|
|
/* Marking the link as currently not needed by the Driver */
|
|
if (!netif_running(netdev))
|
|
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
|
|
+ mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
|
|
|
|
mlx5e_set_netdev_mtu_boundaries(priv);
|
|
mlx5e_set_dev_port_mtu(priv);
|
|
@@ -5395,6 +5414,8 @@ err_cleanup_tx:
|
|
profile->cleanup_tx(priv);
|
|
|
|
out:
|
|
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
|
|
+ cancel_work_sync(&priv->update_stats_work);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
|
|
index 4a8e0dfdc5f2..e93d7430c1a3 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
|
|
@@ -1922,6 +1922,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
|
|
INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
|
|
mlx5e_tc_reoffload_flows_work);
|
|
|
|
+ mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
|
|
+ 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
|
|
mlx5_lag_add(mdev, netdev);
|
|
priv->events_nb.notifier_call = uplink_rep_async_event;
|
|
mlx5_notifier_register(mdev, &priv->events_nb);
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
index 10f705761666..c0f54d2d4925 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
@@ -2256,6 +2256,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
|
|
match.key->vlan_priority);
|
|
|
|
*match_level = MLX5_MATCH_L2;
|
|
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
|
|
index 7f618a443bfd..77a1ac1b1cc1 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
|
|
@@ -2161,7 +2161,7 @@ abort:
|
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
|
}
|
|
-
|
|
+ esw_destroy_tsar(esw);
|
|
return err;
|
|
}
|
|
|
|
@@ -2206,8 +2206,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
|
|
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
|
|
esw_offloads_disable(esw);
|
|
|
|
- esw_destroy_tsar(esw);
|
|
-
|
|
old_mode = esw->mode;
|
|
esw->mode = MLX5_ESWITCH_NONE;
|
|
|
|
@@ -2217,6 +2215,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
|
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
|
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
|
}
|
|
+ esw_destroy_tsar(esw);
|
|
+
|
|
if (clear_vf)
|
|
mlx5_eswitch_clear_vf_vports_info(esw);
|
|
}
|
|
@@ -2374,6 +2374,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
|
|
u16 vport, int link_state)
|
|
{
|
|
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
|
|
+ int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
|
|
+ int other_vport = 1;
|
|
int err = 0;
|
|
|
|
if (!ESW_ALLOWED(esw))
|
|
@@ -2381,15 +2383,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
|
|
if (IS_ERR(evport))
|
|
return PTR_ERR(evport);
|
|
|
|
+ if (vport == MLX5_VPORT_UPLINK) {
|
|
+ opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
|
|
+ other_vport = 0;
|
|
+ vport = 0;
|
|
+ }
|
|
mutex_lock(&esw->state_lock);
|
|
|
|
- err = mlx5_modify_vport_admin_state(esw->dev,
|
|
- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
|
|
- vport, 1, link_state);
|
|
+ err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
|
|
if (err) {
|
|
- mlx5_core_warn(esw->dev,
|
|
- "Failed to set vport %d link state, err = %d",
|
|
- vport, err);
|
|
+ mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
|
|
+ vport, opmod, err);
|
|
goto unlock;
|
|
}
|
|
|
|
@@ -2431,8 +2435,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
|
|
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
|
|
int err = 0;
|
|
|
|
- if (!ESW_ALLOWED(esw))
|
|
- return -EPERM;
|
|
if (IS_ERR(evport))
|
|
return PTR_ERR(evport);
|
|
if (vlan > 4095 || qos > 7)
|
|
@@ -2460,6 +2462,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
|
|
u8 set_flags = 0;
|
|
int err;
|
|
|
|
+ if (!ESW_ALLOWED(esw))
|
|
+ return -EPERM;
|
|
+
|
|
if (vlan || qos)
|
|
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
|
|
index c1848b57f61c..56d2a1ab9378 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -684,6 +684,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
+static inline
+int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 5d9def18ae3a..cfc52521d775 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -264,9 +264,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
@@ -381,6 +378,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.modify_hdr = attr->modify_hdr;
if (split) {
+ if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
+ attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
fdb = esw_vport_tbl_get(esw, attr);
} else {
if (attr->chain || attr->prio)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 43f97601b500..1d9a5117f90b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -252,17 +252,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
+ pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+
if (on) {
- pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
- if (pin < 0)
- return -EBUSY;
pin_mode = MLX5_PIN_MODE_IN;
pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
field_select = MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_ENABLE;
} else {
- pin = rq->extts.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -310,12 +310,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
- if (on) {
- pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
- rq->perout.index);
- if (pin < 0)
- return -EBUSY;
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
ts.tv_sec = rq->perout.period.sec;
@@ -341,7 +341,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_MTPPS_FS_ENABLE |
MLX5_MTPPS_FS_TIME_STAMP;
} else {
- pin = rq->perout.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -388,10 +387,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
return 0;
}
+enum {
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
+};
+
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
- return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+
+ switch (func) {
+ case PTP_PF_NONE:
+ return 0;
+ case PTP_PF_EXTTS:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
+ case PTP_PF_PEROUT:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
}
static const struct ptp_clock_info mlx5_ptp_clock_info = {
@@ -411,6 +431,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.verify = NULL,
};
+static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
+ u32 *mtpps, u32 mtpps_size)
+{
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+ mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+{
+ struct mlx5_core_dev *mdev = clock->mdev;
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+ u8 mode;
+ int err;
+
+ err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
+ if (err || !MLX5_GET(mtpps_reg, out, enable))
+ return PTP_PF_NONE;
+
+ mode = MLX5_GET(mtpps_reg, out, pin_mode);
+
+ if (mode == MLX5_PIN_MODE_IN)
+ return PTP_PF_EXTTS;
+ else if (mode == MLX5_PIN_MODE_OUT)
+ return PTP_PF_PEROUT;
+
+ return PTP_PF_NONE;
+}
+
static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
int i;
@@ -430,8 +482,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
- clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
- clock->ptp_info.pin_config[i].chan = i;
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+ clock->ptp_info.pin_config[i].chan = 0;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index d6d6fe64887b..71b6185b4904 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1814,7 +1814,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
bulk_list, cb, cb_priv, tid);
if (err) {
- kfree(trans);
+ kfree_rcu(trans, rcu);
return err;
}
return 0;
@@ -2051,11 +2051,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
break;
}
}
- rcu_read_unlock();
- if (!found)
+ if (!found) {
+ rcu_read_unlock();
goto drop;
+ }
rxl->func(skb, local_port, rxl_item->priv);
+ rcu_read_unlock();
return;
drop:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 84b3d78a9dd8..ac1a63fe0899 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8072,16 +8072,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
- router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
- err = register_inetaddr_notifier(&router->inetaddr_nb);
- if (err)
- goto err_register_inetaddr_notifier;
-
- router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
- err = register_inet6addr_notifier(&router->inet6addr_nb);
- if (err)
- goto err_register_inet6addr_notifier;
-
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -8122,12 +8112,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_neigh_init;
- mlxsw_sp->router->netevent_nb.notifier_call =
- mlxsw_sp_router_netevent_event;
- err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
- if (err)
- goto err_register_netevent_notifier;
-
err = mlxsw_sp_mp_hash_init(mlxsw_sp);
if (err)
goto err_mp_hash_init;
@@ -8136,6 +8120,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
+ router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
+ err = register_inet6addr_notifier(&router->inet6addr_nb);
+ if (err)
+ goto err_register_inet6addr_notifier;
+
+ mlxsw_sp->router->netevent_nb.notifier_call =
+ mlxsw_sp_router_netevent_event;
+ err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb,
@@ -8146,10 +8146,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_register_fib_notifier:
-err_dscp_init:
-err_mp_hash_init:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
+ unregister_inet6addr_notifier(&router->inet6addr_nb);
+err_register_inet6addr_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
+ mlxsw_core_flush_owq();
+err_dscp_init:
+err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -8168,10 +8173,6 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
- unregister_inet6addr_notifier(&router->inet6addr_nb);
-err_register_inet6addr_notifier:
- unregister_inetaddr_notifier(&router->inetaddr_nb);
-err_register_inetaddr_notifier:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
@@ -8182,6 +8183,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ mlxsw_core_flush_owq();
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_mr_fini(mlxsw_sp);
@@ -8191,8 +8195,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
- unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
- unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
|
|
index 2fdd0753b3af..0e776131a3ef 100644
|
|
--- a/drivers/net/ethernet/ni/nixge.c
|
|
+++ b/drivers/net/ethernet/ni/nixge.c
|
|
@@ -1298,19 +1298,21 @@ static int nixge_probe(struct platform_device *pdev)
|
|
netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
|
|
err = nixge_of_get_resources(pdev);
|
|
if (err)
|
|
- return err;
|
|
+ goto free_netdev;
|
|
__nixge_hw_set_mac_address(ndev);
|
|
|
|
priv->tx_irq = platform_get_irq_byname(pdev, "tx");
|
|
if (priv->tx_irq < 0) {
|
|
netdev_err(ndev, "could not find 'tx' irq");
|
|
- return priv->tx_irq;
|
|
+ err = priv->tx_irq;
|
|
+ goto free_netdev;
|
|
}
|
|
|
|
priv->rx_irq = platform_get_irq_byname(pdev, "rx");
|
|
if (priv->rx_irq < 0) {
|
|
netdev_err(ndev, "could not find 'rx' irq");
|
|
- return priv->rx_irq;
|
|
+ err = priv->rx_irq;
|
|
+ goto free_netdev;
|
|
}
|
|
|
|
priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
|
|
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
|
|
index 2c3e9ef22129..337d971ffd92 100644
|
|
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
|
|
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
|
|
@@ -1959,7 +1959,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
|
|
netif_device_detach(lif->netdev);
|
|
err = ionic_stop(lif->netdev);
|
|
if (err)
|
|
- return err;
|
|
+ goto reset_out;
|
|
}
|
|
|
|
if (cb)
|
|
@@ -1969,6 +1969,8 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
|
|
err = ionic_open(lif->netdev);
|
|
netif_device_attach(lif->netdev);
|
|
}
|
|
+
|
|
+reset_out:
|
|
mutex_unlock(&lif->queue_lock);
|
|
|
|
return err;
|
|
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
|
|
index 8d106063e927..666e43748a5f 100644
|
|
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
|
|
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
|
|
@@ -1180,7 +1180,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
|
|
index, attn_bits, attn_acks, asserted_bits,
|
|
deasserted_bits, p_sb_attn_sw->known_attn);
|
|
} else if (asserted_bits == 0x100) {
|
|
- DP_INFO(p_hwfn, "MFW indication via attention\n");
|
|
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
|
|
+ "MFW indication via attention\n");
|
|
} else {
|
|
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
|
|
"MFW indication [deassertion]\n");
|
|
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
|
|
index 067ad25553b9..ab335f7dab82 100644
|
|
--- a/drivers/net/ethernet/renesas/ravb_main.c
|
|
+++ b/drivers/net/ethernet/renesas/ravb_main.c
|
|
@@ -1444,6 +1444,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
|
|
struct ravb_private *priv = container_of(work, struct ravb_private,
|
|
work);
|
|
struct net_device *ndev = priv->ndev;
|
|
+ int error;
|
|
|
|
netif_tx_stop_all_queues(ndev);
|
|
|
|
@@ -1452,15 +1453,36 @@ static void ravb_tx_timeout_work(struct work_struct *work)
|
|
ravb_ptp_stop(ndev);
|
|
|
|
/* Wait for DMA stopping */
|
|
- ravb_stop_dma(ndev);
|
|
+ if (ravb_stop_dma(ndev)) {
|
|
+ /* If ravb_stop_dma() fails, the hardware is still operating
|
|
+ * for TX and/or RX. So, this should not call the following
|
|
+ * functions because ravb_dmac_init() is possible to fail too.
|
|
+ * Also, this should not retry ravb_stop_dma() again and again
|
|
+ * here because it's possible to wait forever. So, this just
|
|
+ * re-enables the TX and RX and skip the following
|
|
+ * re-initialization procedure.
|
|
+ */
|
|
+ ravb_rcv_snd_enable(ndev);
|
|
+ goto out;
|
|
+ }
|
|
|
|
ravb_ring_free(ndev, RAVB_BE);
|
|
ravb_ring_free(ndev, RAVB_NC);
|
|
|
|
/* Device init */
|
|
- ravb_dmac_init(ndev);
|
|
+ error = ravb_dmac_init(ndev);
|
|
+ if (error) {
|
|
+ /* If ravb_dmac_init() fails, descriptors are freed. So, this
|
|
+ * should return here to avoid re-enabling the TX and RX in
|
|
+ * ravb_emac_init().
|
|
+ */
|
|
+ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
|
|
+ __func__, error);
|
|
+ return;
|
|
+ }
|
|
ravb_emac_init(ndev);
|
|
|
|
+out:
|
|
/* Initialise PTP Clock driver */
|
|
if (priv->chip_id == RCAR_GEN2)
|
|
ravb_ptp_init(ndev, priv->pdev);
|
|
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
|
|
index bb8c34d746ab..5f123a8cf68e 100644
|
|
--- a/drivers/net/usb/hso.c
|
|
+++ b/drivers/net/usb/hso.c
|
|
@@ -1390,8 +1390,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
|
|
unsigned long flags;
|
|
|
|
if (old)
|
|
- hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n",
|
|
- tty->termios.c_cflag, old->c_cflag);
|
|
+ hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n",
|
|
+ (unsigned int)tty->termios.c_cflag,
|
|
+ (unsigned int)old->c_cflag);
|
|
|
|
/* the actual setup */
|
|
spin_lock_irqsave(&serial->serial_lock, flags);
|
|
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
|
|
index eccbf4cd7149..ee062b27cfa7 100644
|
|
--- a/drivers/net/usb/lan78xx.c
|
|
+++ b/drivers/net/usb/lan78xx.c
|
|
@@ -3759,6 +3759,11 @@ static int lan78xx_probe(struct usb_interface *intf,
|
|
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
|
|
netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
|
|
|
|
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
|
|
+ ret = -ENODEV;
|
|
+ goto out3;
|
|
+ }
|
|
+
|
|
dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
|
|
dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
|
|
dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
|
|
@@ -3783,6 +3788,7 @@ static int lan78xx_probe(struct usb_interface *intf,
|
|
usb_fill_int_urb(dev->urb_intr, dev->udev,
|
|
dev->pipe_intr, buf, maxp,
|
|
intr_complete, dev, period);
|
|
+ dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
|
|
index 779e56c43d27..6e64bc8d601f 100644
|
|
--- a/drivers/net/vxlan.c
|
|
+++ b/drivers/net/vxlan.c
|
|
@@ -2863,8 +2863,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
|
|
if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
|
|
continue;
|
|
/* the all_zeros_mac entry is deleted at vxlan_uninit */
|
|
- if (!is_zero_ether_addr(f->eth_addr))
|
|
- vxlan_fdb_destroy(vxlan, f, true, true);
|
|
+ if (is_zero_ether_addr(f->eth_addr) &&
|
|
+ f->vni == vxlan->cfg.vni)
|
|
+ continue;
|
|
+ vxlan_fdb_destroy(vxlan, f, true, true);
|
|
}
|
|
spin_unlock_bh(&vxlan->hash_lock[h]);
|
|
}
|
|
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
|
|
index c84536b03aa8..f70336bb6f52 100644
|
|
--- a/drivers/net/wan/hdlc_x25.c
|
|
+++ b/drivers/net/wan/hdlc_x25.c
|
|
@@ -71,8 +71,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
|
|
{
|
|
unsigned char *ptr;
|
|
|
|
- if (skb_cow(skb, 1))
|
|
+ if (skb_cow(skb, 1)) {
|
|
+ kfree_skb(skb);
|
|
return NET_RX_DROP;
|
|
+ }
|
|
|
|
skb_push(skb, 1);
|
|
skb_reset_network_header(skb);
|
|
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
|
|
index 284832314f31..b2868433718f 100644
|
|
--- a/drivers/net/wan/lapbether.c
|
|
+++ b/drivers/net/wan/lapbether.c
|
|
@@ -128,10 +128,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
|
|
{
|
|
unsigned char *ptr;
|
|
|
|
- skb_push(skb, 1);
|
|
-
|
|
- if (skb_cow(skb, 1))
|
|
+ if (skb_cow(skb, 1)) {
|
|
+ kfree_skb(skb);
|
|
return NET_RX_DROP;
|
|
+ }
|
|
+
|
|
+ skb_push(skb, 1);
|
|
|
|
ptr = skb->data;
|
|
*ptr = X25_IFACE_DATA;
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
|
|
index bf2f00b89214..85b132a77787 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
|
|
@@ -263,6 +263,8 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
|
|
{
|
|
struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
|
|
u32 tp = le32_to_cpu(trig->time_point);
|
|
+ struct iwl_ucode_tlv *dup = NULL;
|
|
+ int ret;
|
|
|
|
if (le32_to_cpu(tlv->length) < sizeof(*trig))
|
|
return -EINVAL;
|
|
@@ -275,10 +277,20 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (!le32_to_cpu(trig->occurrences))
|
|
+ if (!le32_to_cpu(trig->occurrences)) {
|
|
+ dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
|
|
+ GFP_KERNEL);
|
|
+ if (!dup)
|
|
+ return -ENOMEM;
|
|
+ trig = (void *)dup->data;
|
|
trig->occurrences = cpu_to_le32(-1);
|
|
+ tlv = dup;
|
|
+ }
|
|
+
|
|
+ ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
|
|
+ kfree(dup);
|
|
|
|
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
|
|
+ return ret;
|
|
}
|
|
|
|
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
|
|
index b4d0795154e3..a2afd1a3c51b 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
|
|
@@ -206,10 +206,11 @@ mt7615_queues_acq(struct seq_file *s, void *data)
|
|
int i;
|
|
|
|
for (i = 0; i < 16; i++) {
|
|
- int j, acs = i / 4, index = i % 4;
|
|
+ int j, wmm_idx = i % MT7615_MAX_WMM_SETS;
|
|
+ int acs = i / MT7615_MAX_WMM_SETS;
|
|
u32 ctrl, val, qlen = 0;
|
|
|
|
- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
|
|
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx));
|
|
ctrl = BIT(31) | BIT(15) | (acs << 8);
|
|
|
|
for (j = 0; j < 32; j++) {
|
|
@@ -217,11 +218,11 @@ mt7615_queues_acq(struct seq_file *s, void *data)
|
|
continue;
|
|
|
|
mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
|
|
- ctrl | (j + (index << 5)));
|
|
+ ctrl | (j + (wmm_idx << 5)));
|
|
qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
|
|
GENMASK(11, 0));
|
|
}
|
|
- seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
|
|
+ seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen);
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
|
|
index 482c6c8b0fb7..88280057e032 100644
|
|
--- a/drivers/net/xen-netfront.c
|
|
+++ b/drivers/net/xen-netfront.c
|
|
@@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
|
|
MODULE_PARM_DESC(max_queues,
|
|
"Maximum number of queues per virtual interface");
|
|
|
|
+#define XENNET_TIMEOUT (5 * HZ)
|
|
+
|
|
static const struct ethtool_ops xennet_ethtool_ops;
|
|
|
|
struct netfront_cb {
|
|
@@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
|
|
|
|
netif_carrier_off(netdev);
|
|
|
|
- xenbus_switch_state(dev, XenbusStateInitialising);
|
|
- wait_event(module_wq,
|
|
- xenbus_read_driver_state(dev->otherend) !=
|
|
- XenbusStateClosed &&
|
|
- xenbus_read_driver_state(dev->otherend) !=
|
|
- XenbusStateUnknown);
|
|
+ do {
|
|
+ xenbus_switch_state(dev, XenbusStateInitialising);
|
|
+ err = wait_event_timeout(module_wq,
|
|
+ xenbus_read_driver_state(dev->otherend) !=
|
|
+ XenbusStateClosed &&
|
|
+ xenbus_read_driver_state(dev->otherend) !=
|
|
+ XenbusStateUnknown, XENNET_TIMEOUT);
|
|
+ } while (!err);
|
|
+
|
|
return netdev;
|
|
|
|
exit:
|
|
@@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = {
|
|
};
|
|
#endif /* CONFIG_SYSFS */
|
|
|
|
-static int xennet_remove(struct xenbus_device *dev)
|
|
+static void xennet_bus_close(struct xenbus_device *dev)
|
|
{
|
|
- struct netfront_info *info = dev_get_drvdata(&dev->dev);
|
|
-
|
|
- dev_dbg(&dev->dev, "%s\n", dev->nodename);
|
|
+ int ret;
|
|
|
|
- if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
|
|
+ if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
|
|
+ return;
|
|
+ do {
|
|
xenbus_switch_state(dev, XenbusStateClosing);
|
|
- wait_event(module_wq,
|
|
- xenbus_read_driver_state(dev->otherend) ==
|
|
- XenbusStateClosing ||
|
|
- xenbus_read_driver_state(dev->otherend) ==
|
|
- XenbusStateUnknown);
|
|
+ ret = wait_event_timeout(module_wq,
|
|
+ xenbus_read_driver_state(dev->otherend) ==
|
|
+ XenbusStateClosing ||
|
|
+ xenbus_read_driver_state(dev->otherend) ==
|
|
+ XenbusStateClosed ||
|
|
+ xenbus_read_driver_state(dev->otherend) ==
|
|
+ XenbusStateUnknown,
|
|
+ XENNET_TIMEOUT);
|
|
+ } while (!ret);
|
|
+
|
|
+ if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
|
|
+ return;
|
|
|
|
+ do {
|
|
xenbus_switch_state(dev, XenbusStateClosed);
|
|
- wait_event(module_wq,
|
|
- xenbus_read_driver_state(dev->otherend) ==
|
|
- XenbusStateClosed ||
|
|
- xenbus_read_driver_state(dev->otherend) ==
|
|
- XenbusStateUnknown);
|
|
- }
|
|
+ ret = wait_event_timeout(module_wq,
|
|
+ xenbus_read_driver_state(dev->otherend) ==
|
|
+ XenbusStateClosed ||
|
|
+ xenbus_read_driver_state(dev->otherend) ==
|
|
+ XenbusStateUnknown,
|
|
+ XENNET_TIMEOUT);
|
|
+ } while (!ret);
|
|
+}
|
|
+
|
|
+static int xennet_remove(struct xenbus_device *dev)
|
|
+{
|
|
+ struct netfront_info *info = dev_get_drvdata(&dev->dev);
|
|
|
|
+ xennet_bus_close(dev);
|
|
xennet_disconnect_backend(info);
|
|
|
|
if (info->netdev->reg_state == NETREG_REGISTERED)
|
|
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
|
|
index 91d4d5b28a7d..ba6c486d6465 100644
|
|
--- a/drivers/nfc/s3fwrn5/core.c
|
|
+++ b/drivers/nfc/s3fwrn5/core.c
|
|
@@ -198,6 +198,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
|
|
case S3FWRN5_MODE_FW:
|
|
return s3fwrn5_fw_recv_frame(ndev, skb);
|
|
default:
|
|
+ kfree_skb(skb);
|
|
return -ENODEV;
|
|
}
|
|
}
|
|
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
|
|
index 137d7bcc1358..f7540a9e54fd 100644
|
|
--- a/drivers/nvme/host/core.c
|
|
+++ b/drivers/nvme/host/core.c
|
|
@@ -1106,6 +1106,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
|
|
int pos;
|
|
int len;
|
|
|
|
+ if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
|
|
+ return 0;
|
|
+
|
|
c.identify.opcode = nvme_admin_identify;
|
|
c.identify.nsid = cpu_to_le32(nsid);
|
|
c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
|
|
@@ -1119,18 +1122,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
|
|
if (status) {
|
|
dev_warn(ctrl->device,
|
|
"Identify Descriptors failed (%d)\n", status);
|
|
- /*
|
|
- * Don't treat non-retryable errors as fatal, as we potentially
|
|
- * already have a NGUID or EUI-64. If we failed with DNR set,
|
|
- * we want to silently ignore the error as we can still
|
|
- * identify the device, but if the status has DNR set, we want
|
|
- * to propagate the error back specifically for the disk
|
|
- * revalidation flow to make sure we don't abandon the
|
|
- * device just because of a temporal retry-able error (such
|
|
- * as path of transport errors).
|
|
- */
|
|
- if (status > 0 && (status & NVME_SC_DNR))
|
|
- status = 0;
|
|
goto free_data;
|
|
}
|
|
|
|
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
|
|
index 46f965f8c9bc..8f1b0a30fd2a 100644
|
|
--- a/drivers/nvme/host/nvme.h
|
|
+++ b/drivers/nvme/host/nvme.h
|
|
@@ -126,6 +126,13 @@ enum nvme_quirks {
|
|
* Don't change the value of the temperature threshold feature
|
|
*/
|
|
NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
|
|
+
|
|
+ /*
|
|
+ * The controller doesn't handle the Identify Namespace
|
|
+ * Identification Descriptor list subcommand despite claiming
|
|
+ * NVMe 1.3 compliance.
|
|
+ */
|
|
+ NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
|
|
};
|
|
|
|
/*
|
|
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
|
|
index 4ad629eb3bc6..10d65f27879f 100644
|
|
--- a/drivers/nvme/host/pci.c
|
|
+++ b/drivers/nvme/host/pci.c
|
|
@@ -3105,6 +3105,8 @@ static const struct pci_device_id nvme_id_table[] = {
|
|
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
|
|
.driver_data = NVME_QUIRK_IDENTIFY_CNS |
|
|
NVME_QUIRK_DISABLE_WRITE_ZEROES, },
|
|
+ { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
|
|
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
|
|
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
|
|
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
|
|
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
|
|
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
|
|
index 4862fa962011..26461bf3fdcc 100644
|
|
--- a/drivers/nvme/host/tcp.c
|
|
+++ b/drivers/nvme/host/tcp.c
|
|
@@ -1392,6 +1392,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
|
|
}
|
|
}
|
|
|
|
+ /* Set 10 seconds timeout for icresp recvmsg */
|
|
+ queue->sock->sk->sk_rcvtimeo = 10 * HZ;
|
|
+
|
|
queue->sock->sk->sk_allocation = GFP_ATOMIC;
|
|
nvme_tcp_set_queue_io_cpu(queue);
|
|
queue->request = NULL;
|
|
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
|
|
index 5067562924f0..cd522dd3dd58 100644
|
|
--- a/drivers/pci/quirks.c
|
|
+++ b/drivers/pci/quirks.c
|
|
@@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
|
|
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
|
|
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
|
|
|
|
+static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
|
|
+{
|
|
+ pci_info(dev, "Disabling ASPM L0s/L1\n");
|
|
+ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
|
|
+ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
|
|
+ * disable both L0s and L1 for now to be safe.
|
|
+ */
|
|
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
|
|
+
|
|
/*
|
|
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
|
|
* Link bit cleared after starting the link retrain process to allow this
|
|
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
|
|
index c5d4428f1f94..2a1233b41aa4 100644
|
|
--- a/drivers/pinctrl/qcom/Kconfig
|
|
+++ b/drivers/pinctrl/qcom/Kconfig
|
|
@@ -7,6 +7,8 @@ config PINCTRL_MSM
|
|
select PINCONF
|
|
select GENERIC_PINCONF
|
|
select GPIOLIB_IRQCHIP
|
|
+ select IRQ_DOMAIN_HIERARCHY
|
|
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
|
|
|
|
config PINCTRL_APQ8064
|
|
tristate "Qualcomm APQ8064 pin controller driver"
|
|
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
|
|
index 85858c1d56d0..4ebce5b73845 100644
|
|
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
|
|
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
|
|
@@ -833,6 +833,52 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
|
|
msm_gpio_irq_clear_unmask(d, false);
|
|
}
|
|
|
|
+/**
|
|
+ * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
|
|
+ * @d: The irq dta.
|
|
+ *
|
|
+ * This is much like msm_gpio_update_dual_edge_pos() but for IRQs that are
|
|
+ * normally handled by the parent irqchip. The logic here is slightly
|
|
+ * different due to what's easy to do with our parent, but in principle it's
|
|
+ * the same.
|
|
+ */
|
|
+static void msm_gpio_update_dual_edge_parent(struct irq_data *d)
|
|
+{
|
|
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
|
|
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
|
|
+ const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq];
|
|
+ int loop_limit = 100;
|
|
+ unsigned int val;
|
|
+ unsigned int type;
|
|
+
|
|
+ /* Read the value and make a guess about what edge we need to catch */
|
|
+ val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
|
|
+ type = val ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
|
|
+
|
|
+ do {
|
|
+ /* Set the parent to catch the next edge */
|
|
+ irq_chip_set_type_parent(d, type);
|
|
+
|
|
+ /*
|
|
+ * Possibly the line changed between when we last read "val"
|
|
+ * (and decided what edge we needed) and when set the edge.
|
|
+ * If the value didn't change (or changed and then changed
|
|
+ * back) then we're done.
|
|
+ */
|
|
+ val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
|
|
+ if (type == IRQ_TYPE_EDGE_RISING) {
|
|
+ if (!val)
|
|
+ return;
|
|
+ type = IRQ_TYPE_EDGE_FALLING;
|
|
+ } else if (type == IRQ_TYPE_EDGE_FALLING) {
|
|
+ if (val)
|
|
+ return;
|
|
+ type = IRQ_TYPE_EDGE_RISING;
|
|
+ }
|
|
+ } while (loop_limit-- > 0);
|
|
+ dev_warn_once(pctrl->dev, "dual-edge irq failed to stabilize\n");
|
|
+}
|
|
+
|
|
static void msm_gpio_irq_ack(struct irq_data *d)
|
|
{
|
|
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
|
|
@@ -841,8 +887,11 @@ static void msm_gpio_irq_ack(struct irq_data *d)
|
|
unsigned long flags;
|
|
u32 val;
|
|
|
|
- if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
|
|
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
|
|
+ if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
|
|
+ msm_gpio_update_dual_edge_parent(d);
|
|
return;
|
|
+ }
|
|
|
|
g = &pctrl->soc->groups[d->hwirq];
|
|
|
|
@@ -861,6 +910,17 @@ static void msm_gpio_irq_ack(struct irq_data *d)
|
|
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
|
|
}
|
|
|
|
+static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d,
|
|
+ unsigned int type)
|
|
+{
|
|
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
|
|
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
|
|
+
|
|
+ return type == IRQ_TYPE_EDGE_BOTH &&
|
|
+ pctrl->soc->wakeirq_dual_edge_errata && d->parent_data &&
|
|
+ test_bit(d->hwirq, pctrl->skip_wake_irqs);
|
|
+}
|
|
+
|
|
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
|
|
{
|
|
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
|
|
@@ -869,11 +929,21 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
|
|
unsigned long flags;
|
|
u32 val;
|
|
|
|
+ if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
|
|
+ set_bit(d->hwirq, pctrl->dual_edge_irqs);
|
|
+ irq_set_handler_locked(d, handle_fasteoi_ack_irq);
|
|
+ msm_gpio_update_dual_edge_parent(d);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
if (d->parent_data)
|
|
irq_chip_set_type_parent(d, type);
|
|
|
|
- if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
|
|
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
|
|
+ clear_bit(d->hwirq, pctrl->dual_edge_irqs);
|
|
+ irq_set_handler_locked(d, handle_fasteoi_irq);
|
|
return 0;
|
|
+ }
|
|
|
|
g = &pctrl->soc->groups[d->hwirq];
|
|
|
|
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
|
|
index 9452da18a78b..7486fe08eb9b 100644
|
|
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
|
|
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
|
|
@@ -113,6 +113,9 @@ struct msm_gpio_wakeirq_map {
|
|
* @pull_no_keeper: The SoC does not support keeper bias.
|
|
* @wakeirq_map: The map of wakeup capable GPIOs and the pin at PDC/MPM
|
|
* @nwakeirq_map: The number of entries in @wakeirq_map
|
|
+ * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
|
|
+ * to be aware that their parent can't handle dual
|
|
+ * edge interrupts.
|
|
*/
|
|
struct msm_pinctrl_soc_data {
|
|
const struct pinctrl_pin_desc *pins;
|
|
@@ -128,6 +131,7 @@ struct msm_pinctrl_soc_data {
|
|
const int *reserved_gpios;
|
|
const struct msm_gpio_wakeirq_map *wakeirq_map;
|
|
unsigned int nwakeirq_map;
|
|
+ bool wakeirq_dual_edge_errata;
|
|
};
|
|
|
|
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
|
|
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
|
|
index 1b6465a882f2..1d9acad3c1ce 100644
|
|
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
|
|
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
|
|
@@ -1147,6 +1147,7 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
|
|
.ntiles = ARRAY_SIZE(sc7180_tiles),
|
|
.wakeirq_map = sc7180_pdc_map,
|
|
.nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map),
|
|
+ .wakeirq_dual_edge_errata = true,
|
|
};
|
|
|
|
static int sc7180_pinctrl_probe(struct platform_device *pdev)
|
|
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
|
|
index b8b4366f1200..887b6a47f5da 100644
|
|
--- a/drivers/scsi/scsi_lib.c
|
|
+++ b/drivers/scsi/scsi_lib.c
|
|
@@ -564,6 +564,15 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
|
|
scsi_uninit_cmd(cmd);
|
|
}
|
|
|
|
+static void scsi_run_queue_async(struct scsi_device *sdev)
|
|
+{
|
|
+ if (scsi_target(sdev)->single_lun ||
|
|
+ !list_empty(&sdev->host->starved_list))
|
|
+ kblockd_schedule_work(&sdev->requeue_work);
|
|
+ else
|
|
+ blk_mq_run_hw_queues(sdev->request_queue, true);
|
|
+}
|
|
+
|
|
/* Returns false when no more bytes to process, true if there are more */
|
|
static bool scsi_end_request(struct request *req, blk_status_t error,
|
|
unsigned int bytes)
|
|
@@ -608,11 +617,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
|
|
|
|
__blk_mq_end_request(req, error);
|
|
|
|
- if (scsi_target(sdev)->single_lun ||
|
|
- !list_empty(&sdev->host->starved_list))
|
|
- kblockd_schedule_work(&sdev->requeue_work);
|
|
- else
|
|
- blk_mq_run_hw_queues(q, true);
|
|
+ scsi_run_queue_async(sdev);
|
|
|
|
percpu_ref_put(&q->q_usage_counter);
|
|
return false;
|
|
@@ -1706,6 +1711,7 @@ out_put_budget:
|
|
*/
|
|
if (req->rq_flags & RQF_DONTPREP)
|
|
scsi_mq_uninit_cmd(cmd);
|
|
+ scsi_run_queue_async(sdev);
|
|
break;
|
|
}
|
|
return ret;
|
|
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
|
|
index 8b104f76f324..675a83659c98 100644
|
|
--- a/drivers/vhost/scsi.c
|
|
+++ b/drivers/vhost/scsi.c
|
|
@@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
|
|
continue;
|
|
}
|
|
|
|
- switch (v_req.type) {
|
|
+ switch (vhost32_to_cpu(vq, v_req.type)) {
|
|
case VIRTIO_SCSI_T_TMF:
|
|
vc.req = &v_req.tmf;
|
|
vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
|
|
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
|
|
index 1f157d2f4952..67b002ade3e7 100644
|
|
--- a/drivers/virtio/virtio_balloon.c
|
|
+++ b/drivers/virtio/virtio_balloon.c
|
|
@@ -578,10 +578,14 @@ static int init_vqs(struct virtio_balloon *vb)
|
|
static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
|
|
{
|
|
if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
|
|
- &vb->config_read_bitmap))
|
|
+ &vb->config_read_bitmap)) {
|
|
virtio_cread(vb->vdev, struct virtio_balloon_config,
|
|
free_page_hint_cmd_id,
|
|
&vb->cmd_id_received_cache);
|
|
+ /* Legacy balloon config space is LE, unlike all other devices. */
|
|
+ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
|
|
+ vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
|
|
+ }
|
|
|
|
return vb->cmd_id_received_cache;
|
|
}
|
|
diff --git a/fs/io_uring.c b/fs/io_uring.c
|
|
index d0d3efaaa4d4..4e09af1d5d22 100644
|
|
--- a/fs/io_uring.c
|
|
+++ b/fs/io_uring.c
|
|
@@ -4808,7 +4808,9 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
|
|
{
|
|
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
|
|
return -EINVAL;
|
|
- if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
|
|
+ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
|
|
+ return -EINVAL;
|
|
+ if (sqe->ioprio || sqe->buf_index || sqe->len)
|
|
return -EINVAL;
|
|
|
|
req->timeout.addr = READ_ONCE(sqe->addr);
|
|
@@ -5014,8 +5016,9 @@ static int io_async_cancel_prep(struct io_kiocb *req,
|
|
{
|
|
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
|
|
return -EINVAL;
|
|
- if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
|
|
- sqe->cancel_flags)
|
|
+ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
|
|
+ return -EINVAL;
|
|
+ if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
|
|
return -EINVAL;
|
|
|
|
req->cancel.addr = READ_ONCE(sqe->addr);
|
|
@@ -5033,7 +5036,9 @@ static int io_async_cancel(struct io_kiocb *req)
|
|
static int io_files_update_prep(struct io_kiocb *req,
|
|
const struct io_uring_sqe *sqe)
|
|
{
|
|
- if (sqe->flags || sqe->ioprio || sqe->rw_flags)
|
|
+ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
|
|
+ return -EINVAL;
|
|
+ if (sqe->ioprio || sqe->rw_flags)
|
|
return -EINVAL;
|
|
|
|
req->files_update.offset = READ_ONCE(sqe->off);
|
|
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
|
|
index 69b27c7dfc3e..fb7fa1fc8e01 100644
|
|
--- a/include/linux/mlx5/mlx5_ifc.h
|
|
+++ b/include/linux/mlx5/mlx5_ifc.h
|
|
@@ -4347,6 +4347,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
|
|
enum {
|
|
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
|
|
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
|
|
+ MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2,
|
|
};
|
|
|
|
struct mlx5_ifc_arm_monitor_counter_in_bits {
|
|
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
|
|
index 70ebef866cc8..e3def7bbe932 100644
|
|
--- a/include/linux/rhashtable.h
|
|
+++ b/include/linux/rhashtable.h
|
|
@@ -349,11 +349,11 @@ static inline void rht_unlock(struct bucket_table *tbl,
|
|
local_bh_enable();
|
|
}
|
|
|
|
-static inline struct rhash_head __rcu *__rht_ptr(
|
|
- struct rhash_lock_head *const *bkt)
|
|
+static inline struct rhash_head *__rht_ptr(
|
|
+ struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
|
|
{
|
|
- return (struct rhash_head __rcu *)
|
|
- ((unsigned long)*bkt & ~BIT(0) ?:
|
|
+ return (struct rhash_head *)
|
|
+ ((unsigned long)p & ~BIT(0) ?:
|
|
(unsigned long)RHT_NULLS_MARKER(bkt));
|
|
}
|
|
|
|
@@ -365,25 +365,26 @@ static inline struct rhash_head __rcu *__rht_ptr(
|
|
* access is guaranteed, such as when destroying the table.
|
|
*/
|
|
static inline struct rhash_head *rht_ptr_rcu(
|
|
- struct rhash_lock_head *const *bkt)
|
|
+ struct rhash_lock_head *const *p)
|
|
{
|
|
- struct rhash_head __rcu *p = __rht_ptr(bkt);
|
|
-
|
|
- return rcu_dereference(p);
|
|
+ struct rhash_lock_head __rcu *const *bkt = (void *)p;
|
|
+ return __rht_ptr(rcu_dereference(*bkt), bkt);
|
|
}
|
|
|
|
static inline struct rhash_head *rht_ptr(
|
|
- struct rhash_lock_head *const *bkt,
|
|
+ struct rhash_lock_head *const *p,
|
|
struct bucket_table *tbl,
|
|
unsigned int hash)
|
|
{
|
|
- return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);
|
|
+ struct rhash_lock_head __rcu *const *bkt = (void *)p;
|
|
+ return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
|
|
}
|
|
|
|
static inline struct rhash_head *rht_ptr_exclusive(
|
|
- struct rhash_lock_head *const *bkt)
|
|
+ struct rhash_lock_head *const *p)
|
|
{
|
|
- return rcu_dereference_protected(__rht_ptr(bkt), 1);
|
|
+ struct rhash_lock_head __rcu *const *bkt = (void *)p;
|
|
+ return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
|
|
}
|
|
|
|
static inline void rht_assign_locked(struct rhash_lock_head **bkt,
|
|
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
|
|
index 03024701c79f..7b616e45fbfc 100644
|
|
--- a/include/net/xfrm.h
|
|
+++ b/include/net/xfrm.h
|
|
@@ -946,7 +946,7 @@ struct xfrm_dst {
|
|
static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
|
|
{
|
|
#ifdef CONFIG_XFRM
|
|
- if (dst->xfrm) {
|
|
+ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
|
|
const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
|
|
|
|
return xdst->path;
|
|
@@ -958,7 +958,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
|
|
static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
|
|
{
|
|
#ifdef CONFIG_XFRM
|
|
- if (dst->xfrm) {
|
|
+ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
|
|
struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
|
|
return xdst->child;
|
|
}
|
|
@@ -1633,13 +1633,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
|
|
void *);
|
|
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
|
|
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
|
|
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
|
|
- u8 type, int dir,
|
|
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
|
|
+ const struct xfrm_mark *mark,
|
|
+ u32 if_id, u8 type, int dir,
|
|
struct xfrm_selector *sel,
|
|
struct xfrm_sec_ctx *ctx, int delete,
|
|
int *err);
|
|
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
|
|
- int dir, u32 id, int delete, int *err);
|
|
+struct xfrm_policy *xfrm_policy_byid(struct net *net,
|
|
+ const struct xfrm_mark *mark, u32 if_id,
|
|
+ u8 type, int dir, u32 id, int delete,
|
|
+ int *err);
|
|
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
|
|
void xfrm_policy_hash_rebuild(struct net *net);
|
|
u32 xfrm_get_acqseq(void);
|
|
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
|
|
index 5fc10108703a..4814f1771120 100644
|
|
--- a/include/rdma/rdmavt_qp.h
|
|
+++ b/include/rdma/rdmavt_qp.h
|
|
@@ -278,6 +278,25 @@ struct rvt_rq {
|
|
spinlock_t lock ____cacheline_aligned_in_smp;
|
|
};
|
|
|
|
+/**
|
|
+ * rvt_get_rq_count - count numbers of request work queue entries
|
|
+ * in circular buffer
|
|
+ * @rq: data structure for request queue entry
|
|
+ * @head: head indices of the circular buffer
|
|
+ * @tail: tail indices of the circular buffer
|
|
+ *
|
|
+ * Return - total number of entries in the Receive Queue
|
|
+ */
|
|
+
|
|
+static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
|
|
+{
|
|
+ u32 count = head - tail;
|
|
+
|
|
+ if ((s32)count < 0)
|
|
+ count += rq->size;
|
|
+ return count;
|
|
+}
|
|
+
|
|
/*
|
|
* This structure holds the information that the send tasklet needs
|
|
* to send a RDMA read response or atomic operation.
|
|
diff --git a/kernel/audit.c b/kernel/audit.c
|
|
index f711f424a28a..0aa0e00e4f83 100644
|
|
--- a/kernel/audit.c
|
|
+++ b/kernel/audit.c
|
|
@@ -1811,7 +1811,6 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
|
|
}
|
|
|
|
audit_get_stamp(ab->ctx, &t, &serial);
|
|
- audit_clear_dummy(ab->ctx);
|
|
audit_log_format(ab, "audit(%llu.%03lu:%u): ",
|
|
(unsigned long long)t.tv_sec, t.tv_nsec/1000000, serial);
|
|
|
|
diff --git a/kernel/audit.h b/kernel/audit.h
|
|
index f0233dc40b17..ddc22878433d 100644
|
|
--- a/kernel/audit.h
|
|
+++ b/kernel/audit.h
|
|
@@ -290,13 +290,6 @@ extern int audit_signal_info_syscall(struct task_struct *t);
|
|
extern void audit_filter_inodes(struct task_struct *tsk,
|
|
struct audit_context *ctx);
|
|
extern struct list_head *audit_killed_trees(void);
|
|
-
|
|
-static inline void audit_clear_dummy(struct audit_context *ctx)
|
|
-{
|
|
- if (ctx)
|
|
- ctx->dummy = 0;
|
|
-}
|
|
-
|
|
#else /* CONFIG_AUDITSYSCALL */
|
|
#define auditsc_get_stamp(c, t, s) 0
|
|
#define audit_put_watch(w) {}
|
|
@@ -330,7 +323,6 @@ static inline int audit_signal_info_syscall(struct task_struct *t)
|
|
}
|
|
|
|
#define audit_filter_inodes(t, c) AUDIT_DISABLED
|
|
-#define audit_clear_dummy(c) {}
|
|
#endif /* CONFIG_AUDITSYSCALL */
|
|
|
|
extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len);
|
|
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
|
|
index 814406a35db1..4effe01ebbe2 100644
|
|
--- a/kernel/auditsc.c
|
|
+++ b/kernel/auditsc.c
|
|
@@ -1406,6 +1406,9 @@ static void audit_log_proctitle(void)
|
|
struct audit_context *context = audit_context();
|
|
struct audit_buffer *ab;
|
|
|
|
+ if (!context || context->dummy)
|
|
+ return;
|
|
+
|
|
ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
|
|
if (!ab)
|
|
return; /* audit_panic or being filtered */
|
|
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
|
|
index d541c8486c95..5e1ac22adf7a 100644
|
|
--- a/kernel/bpf/hashtab.c
|
|
+++ b/kernel/bpf/hashtab.c
|
|
@@ -779,15 +779,20 @@ static void htab_elem_free_rcu(struct rcu_head *head)
|
|
htab_elem_free(htab, l);
|
|
}
|
|
|
|
-static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
|
|
+static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
|
|
{
|
|
struct bpf_map *map = &htab->map;
|
|
+ void *ptr;
|
|
|
|
if (map->ops->map_fd_put_ptr) {
|
|
- void *ptr = fd_htab_map_get_ptr(map, l);
|
|
-
|
|
+ ptr = fd_htab_map_get_ptr(map, l);
|
|
map->ops->map_fd_put_ptr(ptr);
|
|
}
|
|
+}
|
|
+
|
|
+static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
|
|
+{
|
|
+ htab_put_fd_value(htab, l);
|
|
|
|
if (htab_is_prealloc(htab)) {
|
|
__pcpu_freelist_push(&htab->freelist, &l->fnode);
|
|
@@ -839,6 +844,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
|
|
*/
|
|
pl_new = this_cpu_ptr(htab->extra_elems);
|
|
l_new = *pl_new;
|
|
+ htab_put_fd_value(htab, old_elem);
|
|
*pl_new = old_elem;
|
|
} else {
|
|
struct pcpu_freelist_node *l;
|
|
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
|
|
index 13cd683a658a..3f67803123be 100644
|
|
--- a/net/9p/trans_fd.c
|
|
+++ b/net/9p/trans_fd.c
|
|
@@ -362,6 +362,10 @@ static void p9_read_work(struct work_struct *work)
|
|
if (m->rreq->status == REQ_STATUS_SENT) {
|
|
list_del(&m->rreq->req_list);
|
|
p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
|
|
+ } else if (m->rreq->status == REQ_STATUS_FLSHD) {
|
|
+ /* Ignore replies associated with a cancelled request. */
|
|
+ p9_debug(P9_DEBUG_TRANS,
|
|
+ "Ignore replies associated with a cancelled request\n");
|
|
} else {
|
|
spin_unlock(&m->client->lock);
|
|
p9_debug(P9_DEBUG_ERROR,
|
|
@@ -703,11 +707,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
|
|
{
|
|
p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
|
|
|
|
+ spin_lock(&client->lock);
|
|
+ /* Ignore cancelled request if message has been received
|
|
+ * before lock.
|
|
+ */
|
|
+ if (req->status == REQ_STATUS_RCVD) {
|
|
+ spin_unlock(&client->lock);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
/* we haven't received a response for oldreq,
|
|
* remove it from the list.
|
|
*/
|
|
- spin_lock(&client->lock);
|
|
list_del(&req->req_list);
|
|
+ req->status = REQ_STATUS_FLSHD;
|
|
spin_unlock(&client->lock);
|
|
p9_req_put(req);
|
|
|
|
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
|
|
index b11f8d391ad8..fe75f435171c 100644
|
|
--- a/net/bluetooth/hci_event.c
|
|
+++ b/net/bluetooth/hci_event.c
|
|
@@ -1305,6 +1305,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
|
|
{
|
|
struct discovery_state *d = &hdev->discovery;
|
|
|
|
+ if (len > HCI_MAX_AD_LENGTH)
|
|
+ return;
|
|
+
|
|
bacpy(&d->last_adv_addr, bdaddr);
|
|
d->last_adv_addr_type = bdaddr_type;
|
|
d->last_adv_rssi = rssi;
|
|
@@ -5317,7 +5320,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
|
|
|
|
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
|
|
u8 bdaddr_type, bdaddr_t *direct_addr,
|
|
- u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
|
|
+ u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
|
|
+ bool ext_adv)
|
|
{
|
|
struct discovery_state *d = &hdev->discovery;
|
|
struct smp_irk *irk;
|
|
@@ -5339,6 +5343,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
|
|
return;
|
|
}
|
|
|
|
+ if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
|
|
+ bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
|
|
+ return;
|
|
+ }
|
|
+
|
|
/* Find the end of the data in case the report contains padded zero
|
|
* bytes at the end causing an invalid length value.
|
|
*
|
|
@@ -5398,7 +5407,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
|
|
*/
|
|
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
|
|
direct_addr);
|
|
- if (conn && type == LE_ADV_IND) {
|
|
+ if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
|
|
/* Store report for later inclusion by
|
|
* mgmt_device_connected
|
|
*/
|
|
@@ -5452,7 +5461,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
|
|
* event or send an immediate device found event if the data
|
|
* should not be stored for later.
|
|
*/
|
|
- if (!has_pending_adv_report(hdev)) {
|
|
+ if (!ext_adv && !has_pending_adv_report(hdev)) {
|
|
/* If the report will trigger a SCAN_REQ store it for
|
|
* later merging.
|
|
*/
|
|
@@ -5487,7 +5496,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
|
|
/* If the new report will trigger a SCAN_REQ store it for
|
|
* later merging.
|
|
*/
|
|
- if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
|
|
+ if (!ext_adv && (type == LE_ADV_IND ||
|
|
+ type == LE_ADV_SCAN_IND)) {
|
|
store_pending_adv_report(hdev, bdaddr, bdaddr_type,
|
|
rssi, flags, data, len);
|
|
return;
|
|
@@ -5527,7 +5537,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
|
rssi = ev->data[ev->length];
|
|
process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
|
|
ev->bdaddr_type, NULL, 0, rssi,
|
|
- ev->data, ev->length);
|
|
+ ev->data, ev->length, false);
|
|
} else {
|
|
bt_dev_err(hdev, "Dropping invalid advertising data");
|
|
}
|
|
@@ -5599,7 +5609,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
|
if (legacy_evt_type != LE_ADV_INVALID) {
|
|
process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
|
|
ev->bdaddr_type, NULL, 0, ev->rssi,
|
|
- ev->data, ev->length);
|
|
+ ev->data, ev->length,
|
|
+ !(evt_type & LE_EXT_ADV_LEGACY_PDU));
|
|
}
|
|
|
|
ptr += sizeof(*ev) + ev->length;
|
|
@@ -5797,7 +5808,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
|
|
|
|
process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
|
|
ev->bdaddr_type, &ev->direct_addr,
|
|
- ev->direct_addr_type, ev->rssi, NULL, 0);
|
|
+ ev->direct_addr_type, ev->rssi, NULL, 0,
|
|
+ false);
|
|
|
|
ptr += sizeof(*ev);
|
|
}
|
|
diff --git a/net/key/af_key.c b/net/key/af_key.c
|
|
index b67ed3a8486c..979c579afc63 100644
|
|
--- a/net/key/af_key.c
|
|
+++ b/net/key/af_key.c
|
|
@@ -2400,7 +2400,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
|
|
return err;
|
|
}
|
|
|
|
- xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
|
|
+ xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
|
|
pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
|
|
1, &err);
|
|
security_xfrm_policy_free(pol_ctx);
|
|
@@ -2651,7 +2651,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
|
|
return -EINVAL;
|
|
|
|
delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
|
|
- xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
|
|
+ xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
|
|
dir, pol->sadb_x_policy_id, delete, &err);
|
|
if (xp == NULL)
|
|
return -ENOENT;
|
|
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
|
|
index 0f72813fed53..4230b483168a 100644
|
|
--- a/net/mac80211/cfg.c
|
|
+++ b/net/mac80211/cfg.c
|
|
@@ -2140,6 +2140,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
|
|
ieee80211_stop_mesh(sdata);
|
|
mutex_lock(&sdata->local->mtx);
|
|
ieee80211_vif_release_channel(sdata);
|
|
+ kfree(sdata->u.mesh.ie);
|
|
mutex_unlock(&sdata->local->mtx);
|
|
|
|
return 0;
|
|
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
|
|
index 117519bf33d6..aca608ae313f 100644
|
|
--- a/net/mac80211/mesh_pathtbl.c
|
|
+++ b/net/mac80211/mesh_pathtbl.c
|
|
@@ -521,6 +521,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
|
|
del_timer_sync(&mpath->timer);
|
|
atomic_dec(&sdata->u.mesh.mpaths);
|
|
atomic_dec(&tbl->entries);
|
|
+ mesh_path_flush_pending(mpath);
|
|
kfree_rcu(mpath, rcu);
|
|
}
|
|
|
|
diff --git a/net/rds/recv.c b/net/rds/recv.c
|
|
index c8404971d5ab..aba4afe4dfed 100644
|
|
--- a/net/rds/recv.c
|
|
+++ b/net/rds/recv.c
|
|
@@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
|
|
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
|
|
{
|
|
struct rds_notifier *notifier;
|
|
- struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
|
|
+ struct rds_rdma_notify cmsg;
|
|
unsigned int count = 0, max_messages = ~0U;
|
|
unsigned long flags;
|
|
LIST_HEAD(copy);
|
|
int err = 0;
|
|
|
|
+ memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */
|
|
|
|
/* put_cmsg copies to user space and thus may sleep. We can't do this
|
|
* with rs_lock held, so first grab as many notifications as we can stuff
|
|
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
|
|
index 47a756503d11..f6fe2e6cd65a 100644
|
|
--- a/net/sunrpc/sunrpc.h
|
|
+++ b/net/sunrpc/sunrpc.h
|
|
@@ -52,4 +52,5 @@ static inline int sock_is_loopback(struct sock *sk)
|
|
|
|
int rpc_clients_notifier_register(void);
|
|
void rpc_clients_notifier_unregister(void);
|
|
+void auth_domain_cleanup(void);
|
|
#endif /* _NET_SUNRPC_SUNRPC_H */
|
|
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
|
|
index f9edaa9174a4..236fadc4a439 100644
|
|
--- a/net/sunrpc/sunrpc_syms.c
|
|
+++ b/net/sunrpc/sunrpc_syms.c
|
|
@@ -23,6 +23,7 @@
|
|
#include <linux/sunrpc/rpc_pipe_fs.h>
|
|
#include <linux/sunrpc/xprtsock.h>
|
|
|
|
+#include "sunrpc.h"
|
|
#include "netns.h"
|
|
|
|
unsigned int sunrpc_net_id;
|
|
@@ -131,6 +132,7 @@ cleanup_sunrpc(void)
|
|
unregister_rpc_pipefs();
|
|
rpc_destroy_mempool();
|
|
unregister_pernet_subsys(&sunrpc_net_ops);
|
|
+ auth_domain_cleanup();
|
|
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
|
|
rpc_unregister_sysctl();
|
|
#endif
|
|
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 552617e3467b..998b196b6176 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -21,6 +21,8 @@

#include <trace/events/sunrpc.h>

+#include "sunrpc.h"
+
#define RPCDBG_FACILITY RPCDBG_AUTH


@@ -205,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name)
return NULL;
}
EXPORT_SYMBOL_GPL(auth_domain_find);
+
+/**
+ * auth_domain_cleanup - check that the auth_domain table is empty
+ *
+ * On module unload the auth_domain_table must be empty. To make it
+ * easier to catch bugs which don't clean up domains properly, we
+ * warn if anything remains in the table at cleanup time.
+ *
+ * Note that we cannot proactively remove the domains at this stage.
+ * The ->release() function might be in a module that has already been
+ * unloaded.
+ */
+
+void auth_domain_cleanup(void)
+{
+ int h;
+ struct auth_domain *hp;
+
+ for (h = 0; h < DN_HASHMAX; h++)
+ hlist_for_each_entry(hp, &auth_domain_table[h], hash)
+ pr_warn("svc: domain %s still present at module unload.\n",
+ hp->name);
+}
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 0285aaa1e93c..3d424e80f16d 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -363,6 +363,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
x25->neighbour = NULL;
read_unlock_bh(&x25_list_lock);
}
+ if (x25->neighbour) {
+ read_lock_bh(&x25_list_lock);
+ x25_neigh_put(x25->neighbour);
+ x25->neighbour = NULL;
+ read_unlock_bh(&x25_list_lock);
+ }
}

/*
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index 5a0ff665b71a..19396f3655c0 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -41,9 +41,32 @@ static void espintcp_rcv(struct strparser *strp, struct sk_buff *skb)
struct espintcp_ctx *ctx = container_of(strp, struct espintcp_ctx,
strp);
struct strp_msg *rxm = strp_msg(skb);
+ int len = rxm->full_len - 2;
u32 nonesp_marker;
int err;

+ /* keepalive packet? */
+ if (unlikely(len == 1)) {
+ u8 data;
+
+ err = skb_copy_bits(skb, rxm->offset + 2, &data, 1);
+ if (err < 0) {
+ kfree_skb(skb);
+ return;
+ }
+
+ if (data == 0xff) {
+ kfree_skb(skb);
+ return;
+ }
+ }
+
+ /* drop other short messages */
+ if (unlikely(len <= sizeof(nonesp_marker))) {
+ kfree_skb(skb);
+ return;
+ }
+
err = skb_copy_bits(skb, rxm->offset + 2, &nonesp_marker,
sizeof(nonesp_marker));
if (err < 0) {
@@ -83,7 +106,7 @@ static int espintcp_parse(struct strparser *strp, struct sk_buff *skb)
return err;

len = be16_to_cpu(blen);
- if (len < 6)
+ if (len < 2)
return -EINVAL;

return len;
@@ -101,8 +124,11 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
flags |= nonblock ? MSG_DONTWAIT : 0;

skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);
- if (!skb)
+ if (!skb) {
+ if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN)
+ return 0;
return err;
+ }

copied = len;
if (copied > skb->len)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 564aa6492e7c..6847b3579f54 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1433,14 +1433,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
spin_unlock_bh(&pq->hold_queue.lock);
}

-static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
- struct xfrm_policy *pol)
+static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
+ struct xfrm_policy *pol)
{
- if (policy->mark.v == pol->mark.v &&
- policy->priority == pol->priority)
- return true;
-
- return false;
+ return mark->v == pol->mark.v && mark->m == pol->mark.m;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
@@ -1503,7 +1499,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
if (pol->type == policy->type &&
pol->if_id == policy->if_id &&
!selector_cmp(&pol->selector, &policy->selector) &&
- xfrm_policy_mark_match(policy, pol) &&
+ xfrm_policy_mark_match(&policy->mark, pol) &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
delpol = pol;
@@ -1538,7 +1534,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
if (pol->type == policy->type &&
pol->if_id == policy->if_id &&
!selector_cmp(&pol->selector, &policy->selector) &&
- xfrm_policy_mark_match(policy, pol) &&
+ xfrm_policy_mark_match(&policy->mark, pol) &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
if (excl)
@@ -1610,9 +1606,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
EXPORT_SYMBOL(xfrm_policy_insert);

static struct xfrm_policy *
-__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
- u8 type, int dir,
- struct xfrm_selector *sel,
+__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
+ u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx)
{
struct xfrm_policy *pol;
@@ -1623,7 +1618,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == type &&
pol->if_id == if_id &&
- (mark & pol->mark.m) == pol->mark.v &&
+ xfrm_policy_mark_match(mark, pol) &&
!selector_cmp(sel, &pol->selector) &&
xfrm_sec_ctx_match(ctx, pol->security))
return pol;
@@ -1632,11 +1627,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
return NULL;
}

-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
- u8 type, int dir,
- struct xfrm_selector *sel,
- struct xfrm_sec_ctx *ctx, int delete,
- int *err)
+struct xfrm_policy *
+xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
+ u8 type, int dir, struct xfrm_selector *sel,
+ struct xfrm_sec_ctx *ctx, int delete, int *err)
{
struct xfrm_pol_inexact_bin *bin = NULL;
struct xfrm_policy *pol, *ret = NULL;
@@ -1703,9 +1697,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
- u8 type, int dir, u32 id, int delete,
- int *err)
+struct xfrm_policy *
+xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
+ u8 type, int dir, u32 id, int delete, int *err)
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
@@ -1720,8 +1714,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
ret = NULL;
hlist_for_each_entry(pol, chain, byidx) {
if (pol->type == type && pol->index == id &&
- pol->if_id == if_id &&
- (mark & pol->mark.m) == pol->mark.v) {
+ pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
xfrm_pol_hold(pol);
if (delete) {
*err = security_xfrm_policy_delete(
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index e6cfaa680ef3..fbb7d9d06478 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct km_event c;
int delete;
struct xfrm_mark m;
- u32 mark = xfrm_mark_get(attrs, &m);
u32 if_id = 0;

p = nlmsg_data(nlh);
@@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
if (attrs[XFRMA_IF_ID])
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

+ xfrm_mark_get(attrs, &m);
+
if (p->index)
- xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err);
+ xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
+ p->index, delete, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
@@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
}
- xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel,
- ctx, delete, &err);
+ xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
+ &p->sel, ctx, delete, &err);
security_xfrm_policy_free(ctx);
}
if (xp == NULL)
@@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
u8 type = XFRM_POLICY_TYPE_MAIN;
int err = -ENOENT;
struct xfrm_mark m;
- u32 mark = xfrm_mark_get(attrs, &m);
u32 if_id = 0;

err = copy_from_user_policy_type(&type, attrs);
@@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (attrs[XFRMA_IF_ID])
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

+ xfrm_mark_get(attrs, &m);
+
if (p->index)
- xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err);
+ xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
+ 0, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
@@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
}
- xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir,
+ xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
&p->sel, ctx, 0, &err);
security_xfrm_policy_free(ctx);
}
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 82e26442724b..a356fb0e5773 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -41,7 +41,7 @@
/* 24 unused */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
-/* 27 unused */
+#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */
#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 11ec5c56c80e..9d14c40c07ea 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -298,7 +298,8 @@ enum {
/* PCH for HSW/BDW; with runtime PM */
/* no i915 binding for this as HSW/BDW has another controller for HDMI */
#define AZX_DCAPS_INTEL_PCH \
- (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME)
+ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+ AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)

/* HSW HDMI */
#define AZX_DCAPS_INTEL_HASWELL \
@@ -1028,7 +1029,14 @@ static int azx_suspend(struct device *dev)
chip = card->private_data;
bus = azx_bus(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- pm_runtime_force_suspend(dev);
+ /* An ugly workaround: direct call of __azx_runtime_suspend() and
+ * __azx_runtime_resume() for old Intel platforms that suffer from
+ * spurious wakeups after S3 suspend
+ */
+ if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+ __azx_runtime_suspend(chip);
+ else
+ pm_runtime_force_suspend(dev);
if (bus->irq >= 0) {
free_irq(bus->irq, chip);
bus->irq = -1;
@@ -1057,7 +1065,10 @@ static int azx_resume(struct device *dev)
if (azx_acquire_irq(chip, 1) < 0)
return -EIO;

- pm_runtime_force_resume(dev);
+ if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+ __azx_runtime_resume(chip, false);
+ else
+ pm_runtime_force_resume(dev);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);

trace_azx_resume(chip);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index e821c9df8107..37391c3d2f47 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2439,6 +2439,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
mutex_lock(&spec->bind_lock);
spec->use_acomp_notifier = use_acomp;
spec->codec->relaxed_resume = use_acomp;
+ spec->codec->bus->keep_power = 0;
/* reprogram each jack detection logic depending on the notifier */
for (i = 0; i < spec->num_pins; i++)
reprogram_jack_detect(spec->codec,
@@ -2533,7 +2534,6 @@ static void generic_acomp_init(struct hda_codec *codec,
if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops,
match_bound_vga, 0)) {
spec->acomp_registered = true;
- codec->bus->keep_power = 0;
}
}

diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 27dd8945d6e6..d8d018536484 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5940,6 +5940,16 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
}

+static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action != HDA_FIXUP_ACT_INIT)
+ return;
+
+ msleep(100);
+ alc_write_coef_idx(codec, 0x65, 0x0);
+}
+
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"

@@ -6117,8 +6127,10 @@ enum {
ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
ALC269VC_FIXUP_ACER_HEADSET_MIC,
ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE,
- ALC289_FIXUP_ASUS_G401,
+ ALC289_FIXUP_ASUS_GA401,
+ ALC289_FIXUP_ASUS_GA502,
ALC256_FIXUP_ACER_MIC_NO_PRESENCE,
+ ALC285_FIXUP_HP_GPIO_AMP_INIT,
};

static const struct hda_fixup alc269_fixups[] = {
@@ -7328,7 +7340,14 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MIC
},
- [ALC289_FIXUP_ASUS_G401] = {
+ [ALC289_FIXUP_ASUS_GA401] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11020 }, /* headset mic with jack detect */
+ { }
+ },
+ },
+ [ALC289_FIXUP_ASUS_GA502] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{ 0x19, 0x03a11020 }, /* headset mic with jack detect */
@@ -7344,6 +7363,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
},
+ [ALC285_FIXUP_HP_GPIO_AMP_INIT] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_hp_gpio_amp_init,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_HP_GPIO_LED
+ },
};

static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7494,7 +7519,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
@@ -7526,7 +7551,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
- SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_G401),
+ SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -7546,7 +7572,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
- SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
+ SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 9702c4311b91..0247162a9fbf 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -367,6 +367,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ifnum = 0;
goto add_sync_ep_from_ifnum;
case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
+ case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */
case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
ep = 0x81;
diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile
index 349bb81482ab..680d883efe05 100644
--- a/tools/lib/traceevent/plugins/Makefile
+++ b/tools/lib/traceevent/plugins/Makefile
@@ -197,7 +197,7 @@ define do_generate_dynamic_list_file
xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
if [ "$$symbol_type" = "U W" ];then \
(echo '{'; \
- $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
+ $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\
echo '};'; \
) > $2; \
else \
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index 0a6e75b8777a..28a5d0c18b1d 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -56,7 +56,7 @@ struct auxtrace_record
struct perf_pmu *cs_etm_pmu;
struct evsel *evsel;
bool found_etm = false;
- bool found_spe = false;
+ struct perf_pmu *found_spe = NULL;
static struct perf_pmu **arm_spe_pmus = NULL;
static int nr_spes = 0;
int i = 0;
@@ -74,12 +74,12 @@ struct auxtrace_record
evsel->core.attr.type == cs_etm_pmu->type)
found_etm = true;

- if (!nr_spes)
+ if (!nr_spes || found_spe)
continue;

for (i = 0; i < nr_spes; i++) {
if (evsel->core.attr.type == arm_spe_pmus[i]->type) {
- found_spe = true;
+ found_spe = arm_spe_pmus[i];
break;
}
}
@@ -96,7 +96,7 @@ struct auxtrace_record

#if defined(__aarch64__)
if (found_spe)
- return arm_spe_recording_init(err, arm_spe_pmus[i]);
+ return arm_spe_recording_init(err, found_spe);
#endif

/*
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index 8294ae3ffb3c..43c9cda199b8 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -318,6 +318,9 @@ class DebugfsDir:
continue

if os.path.isfile(p):
+ # We need to init trap_flow_action_cookie before read it
+ if f == "trap_flow_action_cookie":
+ cmd('echo deadbeef > %s/%s' % (path, f))
_, out = cmd('cat %s/%s' % (path, f))
dfs[f] = out.strip()
elif os.path.isdir(p):
diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
index 9dc35a16e415..51df5e305855 100755
--- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
@@ -144,7 +144,7 @@ setup()

cleanup()
{
- for n in h1 r1 h2 h3 h4
+ for n in h0 r1 h1 h2 h3
do
ip netns del ${n} 2>/dev/null
done
diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh
index eb8e2a23bbb4..43a948feed26 100755
--- a/tools/testing/selftests/net/forwarding/ethtool.sh
+++ b/tools/testing/selftests/net/forwarding/ethtool.sh
@@ -252,8 +252,6 @@ check_highest_speed_is_chosen()
fi

local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1))
- # Remove the first speed, h1 does not advertise this speed.
- unset speeds_arr[0]

max_speed=${speeds_arr[0]}
for current in ${speeds_arr[@]}; do
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
index 15d3489ecd9c..ceb7ad4dbd94 100755
--- a/tools/testing/selftests/net/ip_defrag.sh
+++ b/tools/testing/selftests/net/ip_defrag.sh
@@ -6,6 +6,8 @@
set +x
set -e

+modprobe -q nf_defrag_ipv6
+
readonly NETNS="ns-$(mktemp -u XXXXXX)"

setup() {
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 8c8c7d79c38d..2c522f7a0aec 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
int fds[2], fds_udp[2][2], ret;

fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n",
- typeflags, PORT_BASE, PORT_BASE + port_off);
+ typeflags, (uint16_t)PORT_BASE,
+ (uint16_t)(PORT_BASE + port_off));

fds[0] = sock_fanout_open(typeflags, 0);
fds[1] = sock_fanout_open(typeflags, 0);
diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c
index 422e7761254d..bcb79ba1f214 100644
--- a/tools/testing/selftests/net/rxtimestamp.c
+++ b/tools/testing/selftests/net/rxtimestamp.c
@@ -329,8 +329,7 @@ int main(int argc, char **argv)
bool all_tests = true;
int arg_index = 0;
int failures = 0;
- int s, t;
- char opt;
+ int s, t, opt;

while ((opt = getopt_long(argc, argv, "", long_options,
&arg_index)) != -1) {
diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
index ceaad78e9667..3155fbbf644b 100644
--- a/tools/testing/selftests/net/so_txtime.c
+++ b/tools/testing/selftests/net/so_txtime.c
@@ -121,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts)
if (rbuf[0] != ts->data)
error(1, 0, "payload mismatch. expected %c", ts->data);

- if (labs(tstop - texpect) > cfg_variance_us)
+ if (llabs(tstop - texpect) > cfg_variance_us)
error(1, 0, "exceeds variance (%d us)", cfg_variance_us);

return false;
diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
index 4555f88252ba..a61b7b3da549 100644
--- a/tools/testing/selftests/net/tcp_mmap.c
+++ b/tools/testing/selftests/net/tcp_mmap.c
@@ -344,7 +344,7 @@ int main(int argc, char *argv[])
{
struct sockaddr_storage listenaddr, addr;
unsigned int max_pacing_rate = 0;
- size_t total = 0;
+ uint64_t total = 0;
char *host = NULL;
int fd, c, on = 1;
char *buffer;
@@ -473,12 +473,12 @@ int main(int argc, char *argv[])
zflg = 0;
}
while (total < FILE_SZ) {
- ssize_t wr = FILE_SZ - total;
+ int64_t wr = FILE_SZ - total;

if (wr > chunk_size)
wr = chunk_size;
/* Note : we just want to fill the pipe with 0 bytes */
- wr = send(fd, buffer, wr, zflg ? MSG_ZEROCOPY : 0);
+ wr = send(fd, buffer, (size_t)wr, zflg ? MSG_ZEROCOPY : 0);
if (wr <= 0)
break;
total += wr;
diff --git a/tools/testing/selftests/net/txtimestamp.sh b/tools/testing/selftests/net/txtimestamp.sh
index eea6f5193693..31637769f59f 100755
--- a/tools/testing/selftests/net/txtimestamp.sh
+++ b/tools/testing/selftests/net/txtimestamp.sh
@@ -75,7 +75,7 @@ main() {
fi
}

-if [[ "$(ip netns identify)" == "root" ]]; then
+if [[ -z "$(ip netns identify)" ]]; then
./in_netns.sh $0 $@
else
main $@
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index e3b9ee268823..8a9d13e8e904 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1198,7 +1198,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
return true;
}

-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
{
pud_t *pudp;
pmd_t *pmdp;
@@ -1210,11 +1210,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
return false;

if (pudp)
- return kvm_s2pud_exec(pudp);
+ return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
else if (pmdp)
- return kvm_s2pmd_exec(pmdp);
+ return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
else
- return kvm_s2pte_exec(ptep);
+ return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
}

static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1801,7 +1801,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* execute permissions, and we preserve whatever we have.
*/
needs_exec = exec_fault ||
- (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
+ (fault_status == FSC_PERM &&
+ stage2_is_exec(kvm, fault_ipa, vma_pagesize));

if (vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);