diff --git a/Makefile b/Makefile
index 2caa131ff306b..b71076cecba9c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 25
+SUBLEVEL = 26
EXTRAVERSION =
NAME = "People's Front"

diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index ff7d3232764a2..db681cf4959c8 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -52,6 +52,17 @@
#define cache_line_size() SMP_CACHE_BYTES
#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES

+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit
+ * alignment for any atomic64_t embedded in buffer.
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
+ * value of 4 (and not 8) in ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25a15cca..1f945d0f40daa 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
+#include <asm/irqflags.h>

.macro CPU_EARLY_SETUP

@@ -47,6 +48,15 @@
sr r5, [ARC_REG_DC_CTRL]

1:
+
+#ifdef CONFIG_ISA_ARCV2
+ ; Unaligned access is disabled at reset, so re-enable early as
+ ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+ ; by default
+ lr r5, [status32]
+ bset r5, r5, STATUS_AD_BIT
+ kflag r5
+#endif
.endm

.section .init.text, "ax",@progbits
@@ -93,9 +103,9 @@ ENTRY(stext)
#ifdef CONFIG_ARC_UBOOT_SUPPORT
; Uboot - kernel ABI
; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
- ; r1 = magic number (board identity, unused as of now
+ ; r1 = magic number (always zero as of now)
; r2 = pointer to uboot provided cmdline or external DTB in mem
- ; These are handled later in setup_arch()
+ ; These are handled later in handle_uboot_args()
st r0, [@uboot_tag]
st r2, [@uboot_arg]
#endif
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index b2cae79a25d71..62a30e58441c5 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -449,43 +449,80 @@ void setup_processor(void)
arc_chk_core_config();
}

-static inline int is_kernel(unsigned long addr)
+static inline bool uboot_arg_invalid(unsigned long addr)
{
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
- return 1;
- return 0;
+ /*
+ * Check that it is a untranslated address (although MMU is not enabled
+ * yet, it being a high address ensures this is not by fluke)
+ */
+ if (addr < PAGE_OFFSET)
+ return true;
+
+ /* Check that address doesn't clobber resident kernel image */
+ return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
}

-void __init setup_arch(char **cmdline_p)
+#define IGNORE_ARGS "Ignore U-boot args: "
+
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
+#define UBOOT_TAG_NONE 0
+#define UBOOT_TAG_CMDLINE 1
+#define UBOOT_TAG_DTB 2
+
+void __init handle_uboot_args(void)
{
+ bool use_embedded_dtb = true;
+ bool append_cmdline = false;
+
#ifdef CONFIG_ARC_UBOOT_SUPPORT
- /* make sure that uboot passed pointer to cmdline/dtb is valid */
- if (uboot_tag && is_kernel((unsigned long)uboot_arg))
- panic("Invalid uboot arg\n");
+ /* check that we know this tag */
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_tag != UBOOT_TAG_CMDLINE &&
+ uboot_tag != UBOOT_TAG_DTB) {
+ pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
+ goto ignore_uboot_args;
+ }
+
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_arg_invalid((unsigned long)uboot_arg)) {
+ pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+ goto ignore_uboot_args;
+ }
+
+ /* see if U-boot passed an external Device Tree blob */
+ if (uboot_tag == UBOOT_TAG_DTB) {
+ machine_desc = setup_machine_fdt((void *)uboot_arg);

- /* See if u-boot passed an external Device Tree blob */
- machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
- if (!machine_desc)
+ /* external Device Tree blob is invalid - use embedded one */
+ use_embedded_dtb = !machine_desc;
+ }
+
+ if (uboot_tag == UBOOT_TAG_CMDLINE)
+ append_cmdline = true;
+
+ignore_uboot_args:
#endif
- {
- /* No, so try the embedded one */
+
+ if (use_embedded_dtb) {
machine_desc = setup_machine_fdt(__dtb_start);
if (!machine_desc)
panic("Embedded DT invalid\n");
+ }

- /*
- * If we are here, it is established that @uboot_arg didn't
- * point to DT blob. Instead if u-boot says it is cmdline,
- * append to embedded DT cmdline.
- * setup_machine_fdt() would have populated @boot_command_line
- */
- if (uboot_tag == 1) {
- /* Ensure a whitespace between the 2 cmdlines */
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
- strlcat(boot_command_line, uboot_arg,
- COMMAND_LINE_SIZE);
- }
+ /*
+ * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
+ * append processing can only happen after.
+ */
+ if (append_cmdline) {
+ /* Ensure a whitespace between the 2 cmdlines */
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
}
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ handle_uboot_args();

/* Save unparsed command line copy for /proc/cmdline */
*cmdline_p = boot_command_line;
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 2c118a6ab3587..0dc23fc227ed2 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
}

/* Copy arch-dep-instance from template. */
- memcpy(code, (unsigned char *)optprobe_template_entry,
+ memcpy(code, (unsigned long *)&optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));

/* Adjust buffer according to instruction. */
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 951c4231bdb85..4c47b3fd958b6 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -71,6 +71,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index d31bc2f012088..fb2b6d0b77c36 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -74,14 +74,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
+ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);

/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();

- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
+ CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);

diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index aeb7b1b0f2024..252c00985c973 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -343,12 +343,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
const struct bpf_prog *prog = ctx->skf;
int stack_adjust = ctx->stack_size;
int store_offset = stack_adjust - 8;
+ enum reg_val_type td;
int r0 = MIPS_R_V0;

- if (dest_reg == MIPS_R_RA &&
- get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+ if (dest_reg == MIPS_R_RA) {
/* Don't let zero extended value escape. */
- emit_instr(ctx, sll, r0, r0, 0);
+ td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+ emit_instr(ctx, sll, r0, r0, 0);
+ }

if (ctx->flags & EBPF_SAVE_RA) {
emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 2582df1c529bb..0964c236e3e5a 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,

long do_syscall_trace_enter(struct pt_regs *regs)
{
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs)) {
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ int rc = tracehook_report_syscall_entry(regs);
+
/*
- * Tracing decided this syscall should not happen or the
- * debugger stored an invalid system call number. Skip
- * the system call and the system call restart handling.
+ * As tracesys_next does not set %r28 to -ENOSYS
+ * when %r20 is set to -1, initialize it here.
*/
- regs->gr[20] = -1UL;
- goto out;
+ regs->gr[28] = -ENOSYS;
+
+ if (rc) {
+ /*
+ * A nonzero return code from
+ * tracehook_report_syscall_entry() tells us
+ * to prevent the syscall execution. Skip
+ * the syscall call and the syscall restart handling.
+ *
+ * Note that the tracer may also just change
+ * regs->gr[20] to an invalid syscall number,
+ * that is handled by tracesys_next.
+ */
+ regs->gr[20] = -1UL;
+ return -1;
+ }
}

/* Do the secure computing check after ptrace. */
@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);

-out:
/*
* Sign extend the syscall number to 64bit since it may have been
* modified by a compat ptrace call
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 81d4574d1f377..9fd2ff28b8ff2 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -919,11 +919,12 @@ start_here:

/* set up the PTE pointers for the Abatron bdiGDB.
*/
- tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
+ lis r6, swapper_pg_dir@h
+ ori r6, r6, swapper_pg_dir@l
stw r6, 0(r5)

/* Now turn on the MMU for real! */
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7bcfa61375c09..98d13c6a64be0 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -337,6 +337,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
+ unsigned f_la57 = 0;

/* cpuid 1.edx */
const u32 kvm_cpuid_1_edx_x86_features =
@@ -491,7 +492,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
// TSC_ADJUST is emulated
entry->ebx |= F(TSC_ADJUST);
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+ f_la57 = entry->ecx & F(LA57);
cpuid_mask(&entry->ecx, CPUID_7_ECX);
+ /* Set LA57 based on hardware capability. */
+ entry->ecx |= f_la57;
entry->ecx |= f_umip;
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 52a7c3faee0cc..782f98b332f05 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -899,10 +899,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
val = native_read_msr_safe(msr, err);
switch (msr) {
case MSR_IA32_APICBASE:
-#ifdef CONFIG_X86_X2APIC
- if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
-#endif
- val &= ~X2APIC_ENABLE;
+ val &= ~X2APIC_ENABLE;
break;
}
return val;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ea59c01ce8db0..f530d35412428 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -719,6 +719,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
struct acpi_nfit_memory_map *memdev;
struct acpi_nfit_desc *acpi_desc;
struct nfit_mem *nfit_mem;
+ u16 physical_id;

mutex_lock(&acpi_desc_lock);
list_for_each_entry(acpi_desc, &acpi_descs, list) {
@@ -726,10 +727,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
memdev = __to_nfit_memdev(nfit_mem);
if (memdev->device_handle == device_handle) {
+ *flags = memdev->flags;
+ physical_id = memdev->physical_id;
mutex_unlock(&acpi_desc->init_mutex);
mutex_unlock(&acpi_desc_lock);
- *flags = memdev->flags;
- return memdev->physical_id;
+ return physical_id;
}
}
mutex_unlock(&acpi_desc->init_mutex);
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 29f102dcfec49..329ce9072ee9f 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
- rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
+ rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;

if (rate_cps < 10)
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 00e954f22bc92..74401e0adb29c 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -30,6 +30,7 @@
#define GPIO_REG_EDGE 0xA0

struct mtk_gc {
+ struct irq_chip irq_chip;
struct gpio_chip chip;
spinlock_t lock;
int bank;
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}

-static struct irq_chip mediatek_gpio_irq_chip = {
- .irq_unmask = mediatek_gpio_irq_unmask,
- .irq_mask = mediatek_gpio_irq_mask,
- .irq_mask_ack = mediatek_gpio_irq_mask,
- .irq_set_type = mediatek_gpio_irq_type,
-};
-
static int
mediatek_gpio_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec, u32 *flags)
@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}

+ rg->irq_chip.name = dev_name(dev);
+ rg->irq_chip.parent_device = dev;
+ rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
+ rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
+ rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
+ rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
+
if (mtk->gpio_irq) {
/*
* Manually request the irq here instead of passing
@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}

- ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
+ ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
0, handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add gpiochip_irqchip\n");
return ret;
}

- gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
+ gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
mtk->gpio_irq, NULL);
}

@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
mtk->gpio_irq = irq_of_parse_and_map(np, 0);
mtk->dev = dev;
platform_set_drvdata(pdev, mtk);
- mediatek_gpio_irq_chip.name = dev_name(dev);

for (i = 0; i < MTK_BANK_CNT; i++) {
ret = mediatek_gpio_bank_probe(dev, np, i);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 9f3f166f17608..eb27fa76e8fc7 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
{
switch (gpio_type) {
case PXA3XX_GPIO:
+ case MMP2_GPIO:
return false;

default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index fd825d30edf13..c0396e83f3526 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -159,6 +159,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}

if (amdgpu_device_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 80f5db4ef75fd..0805c423a5ce0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1072,8 +1072,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
* the GPU device is not already present in the topology device
* list then return NULL. This means a new topology device has to
* be created for this GPU.
- * TODO: Rather than assiging @gpu to first topology device withtout
- * gpu attached, it will better to have more stringent check.
*/
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
@@ -1081,12 +1079,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
struct kfd_topology_device *out_dev = NULL;

down_write(&topology_lock);
- list_for_each_entry(dev, &topology_device_list, list)
+ list_for_each_entry(dev, &topology_device_list, list) {
+ /* Discrete GPUs need their own topology device list
+ * entries. Don't assign them to CPU/APU nodes.
+ */
+ if (!gpu->device_info->needs_iommu_device &&
+ dev->node_props.cpu_cores_count)
+ continue;
+
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
}
+ }
up_write(&topology_lock);
return out_dev;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a851bb07443f0..c5ba9128b7361 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -624,12 +624,13 @@ static int dm_suspend(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;

+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
s3_handle_mst(adev->ddev, true);

amdgpu_dm_irq_suspend(adev);

- WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
|
|
index 580e7e82034fa..53ccacf99eca4 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
|
|
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
|
|
|
|
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
|
|
|
|
- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
|
|
+ if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
|
|
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
|
|
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
|
|
/* un-mute audio */
|
|
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
|
|
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
|
|
pipe_ctx->stream_res.stream_enc, true);
|
|
if (pipe_ctx->stream_res.audio) {
|
|
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
|
|
+
|
|
if (option != KEEP_ACQUIRED_RESOURCE ||
|
|
!dc->debug.az_endpoint_mute_only) {
|
|
/*only disalbe az_endpoint if power down or free*/
|
|
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
|
|
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
|
|
pipe_ctx->stream_res.audio = NULL;
|
|
}
|
|
+ if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
|
|
+ /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
|
|
+ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
|
|
|
|
/* TODO: notify audio driver for if audio modes list changed
|
|
* add audio mode list change flag */
|
|
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
|
|
index 2d6506c08bf72..6f91634880aa2 100644
|
|
--- a/drivers/gpu/drm/i915/intel_fbdev.c
|
|
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
|
|
@@ -334,8 +334,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
|
|
bool *enabled, int width, int height)
|
|
{
|
|
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
|
|
- unsigned long conn_configured, conn_seq, mask;
|
|
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
|
|
+ unsigned long conn_configured, conn_seq;
|
|
int i, j;
|
|
bool *save_enabled;
|
|
bool fallback = true, ret = true;
|
|
@@ -353,10 +353,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
|
|
drm_modeset_backoff(&ctx);
|
|
|
|
memcpy(save_enabled, enabled, count);
|
|
- mask = GENMASK(count - 1, 0);
|
|
+ conn_seq = GENMASK(count - 1, 0);
|
|
conn_configured = 0;
|
|
retry:
|
|
- conn_seq = conn_configured;
|
|
for (i = 0; i < count; i++) {
|
|
struct drm_fb_helper_connector *fb_conn;
|
|
struct drm_connector *connector;
|
|
@@ -369,7 +368,8 @@ retry:
|
|
if (conn_configured & BIT(i))
|
|
continue;
|
|
|
|
- if (conn_seq == 0 && !connector->has_tile)
|
|
+ /* First pass, only consider tiled connectors */
|
|
+ if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
|
|
continue;
|
|
|
|
if (connector->status == connector_status_connected)
|
|
@@ -473,8 +473,10 @@ retry:
|
|
conn_configured |= BIT(i);
|
|
}
|
|
|
|
- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
|
|
+ if (conn_configured != conn_seq) { /* repeat until no more are found */
|
|
+ conn_seq = conn_configured;
|
|
goto retry;
|
|
+ }
|
|
|
|
/*
|
|
* If the BIOS didn't enable everything it could, fall back to have the
|
|
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
|
|
index bf5f294f172fa..611ac340fb289 100644
|
|
--- a/drivers/gpu/drm/meson/meson_drv.c
|
|
+++ b/drivers/gpu/drm/meson/meson_drv.c
|
|
@@ -368,8 +368,10 @@ static int meson_probe_remote(struct platform_device *pdev,
|
|
remote_node = of_graph_get_remote_port_parent(ep);
|
|
if (!remote_node ||
|
|
remote_node == parent || /* Ignore parent endpoint */
|
|
- !of_device_is_available(remote_node))
|
|
+ !of_device_is_available(remote_node)) {
|
|
+ of_node_put(remote_node);
|
|
continue;
|
|
+ }
|
|
|
|
count += meson_probe_remote(pdev, match, remote, remote_node);
|
|
|
|
@@ -388,10 +390,13 @@ static int meson_drv_probe(struct platform_device *pdev)
|
|
|
|
for_each_endpoint_of_node(np, ep) {
|
|
remote = of_graph_get_remote_port_parent(ep);
|
|
- if (!remote || !of_device_is_available(remote))
|
|
+ if (!remote || !of_device_is_available(remote)) {
|
|
+ of_node_put(remote);
|
|
continue;
|
|
+ }
|
|
|
|
count += meson_probe_remote(pdev, &match, np, remote);
|
|
+ of_node_put(remote);
|
|
}
|
|
|
|
if (count && !match)
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
|
|
index dec1e081f5295..6a8fb6fd183c3 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_kms.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
|
|
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
|
|
}
|
|
|
|
if (radeon_is_px(dev)) {
|
|
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
|
|
pm_runtime_use_autosuspend(dev->dev);
|
|
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
|
|
pm_runtime_set_active(dev->dev);
|
|
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
|
|
index d7950b52a1fd9..e30b1f5b9d91a 100644
|
|
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
|
|
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
|
|
@@ -717,17 +717,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
|
|
remote = of_graph_get_remote_port_parent(ep);
|
|
if (!remote)
|
|
continue;
|
|
+ of_node_put(remote);
|
|
|
|
/* does this node match any registered engines? */
|
|
list_for_each_entry(frontend, &drv->frontend_list, list) {
|
|
if (remote == frontend->node) {
|
|
- of_node_put(remote);
|
|
of_node_put(port);
|
|
+ of_node_put(ep);
|
|
return frontend;
|
|
}
|
|
}
|
|
}
|
|
-
|
|
+ of_node_put(port);
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
|
|
index e36399213324d..ceb3db6f3fdda 100644
|
|
--- a/drivers/hwmon/tmp421.c
|
|
+++ b/drivers/hwmon/tmp421.c
|
|
@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
|
|
.data = (void *)2
|
|
},
|
|
{
|
|
- .compatible = "ti,tmp422",
|
|
+ .compatible = "ti,tmp442",
|
|
.data = (void *)3
|
|
},
|
|
{ },
|
|
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
|
|
index 0d3473b4596e1..21f4239022c7a 100644
|
|
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
|
|
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
|
|
@@ -533,7 +533,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
|
|
{
|
|
struct mthca_ucontext *context;
|
|
|
|
- qp = kmalloc(sizeof *qp, GFP_KERNEL);
|
|
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
|
|
if (!qp)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
@@ -599,7 +599,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
|
|
if (pd->uobject)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
|
|
+ qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
|
|
if (!qp)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
|
|
index 0b34e909505f5..2c1114ee0c6da 100644
|
|
--- a/drivers/infiniband/ulp/srp/ib_srp.c
|
|
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
|
|
@@ -2951,7 +2951,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
|
|
{
|
|
struct srp_target_port *target = host_to_target(scmnd->device->host);
|
|
struct srp_rdma_ch *ch;
|
|
- int i, j;
|
|
u8 status;
|
|
|
|
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
|
|
@@ -2963,15 +2962,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
|
|
if (status)
|
|
return FAILED;
|
|
|
|
- for (i = 0; i < target->ch_count; i++) {
|
|
- ch = &target->ch[i];
|
|
- for (j = 0; j < target->req_ring_size; ++j) {
|
|
- struct srp_request *req = &ch->req_ring[j];
|
|
-
|
|
- srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
|
|
- }
|
|
- }
|
|
-
|
|
return SUCCESS;
|
|
}
|
|
|
|
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
|
|
index 4ac378e489023..40ca1e8fa09fc 100644
|
|
--- a/drivers/isdn/hardware/avm/b1.c
|
|
+++ b/drivers/isdn/hardware/avm/b1.c
|
|
@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
|
|
int i, j;
|
|
|
|
for (j = 0; j < AVM_MAXVERSION; j++)
|
|
- cinfo->version[j] = "\0\0" + 1;
|
|
+ cinfo->version[j] = "";
|
|
for (i = 0, j = 0;
|
|
j < AVM_MAXVERSION && i < cinfo->versionlen;
|
|
j++, i += cinfo->versionbuf[i] + 1)
|
|
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
|
|
index b730037a0e2d3..9cff667b2d245 100644
|
|
--- a/drivers/isdn/i4l/isdn_tty.c
|
|
+++ b/drivers/isdn/i4l/isdn_tty.c
|
|
@@ -1456,15 +1456,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
|
|
{
|
|
modem_info *info = (modem_info *) tty->driver_data;
|
|
|
|
+ mutex_lock(&modem_info_mutex);
|
|
if (!old_termios)
|
|
isdn_tty_change_speed(info);
|
|
else {
|
|
if (tty->termios.c_cflag == old_termios->c_cflag &&
|
|
tty->termios.c_ispeed == old_termios->c_ispeed &&
|
|
- tty->termios.c_ospeed == old_termios->c_ospeed)
|
|
+ tty->termios.c_ospeed == old_termios->c_ospeed) {
|
|
+ mutex_unlock(&modem_info_mutex);
|
|
return;
|
|
+ }
|
|
isdn_tty_change_speed(info);
|
|
}
|
|
+ mutex_unlock(&modem_info_mutex);
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
|
|
index a2e74feee2b2f..fd64df5a57a5e 100644
|
|
--- a/drivers/leds/leds-lp5523.c
|
|
+++ b/drivers/leds/leds-lp5523.c
|
|
@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
|
|
|
|
/* Let the programs run for couple of ms and check the engine status */
|
|
usleep_range(3000, 6000);
|
|
- lp55xx_read(chip, LP5523_REG_STATUS, &status);
|
|
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
|
|
+ if (ret)
|
|
+ return ret;
|
|
status &= LP5523_ENG_STATUS_MASK;
|
|
|
|
if (status != LP5523_ENG_STATUS_MASK) {
|
|
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
|
|
index 30d09d1771717..11ab17f64c649 100644
|
|
--- a/drivers/mfd/ab8500-core.c
|
|
+++ b/drivers/mfd/ab8500-core.c
|
|
@@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
|
|
mutex_unlock(&ab8500->lock);
|
|
dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
|
|
|
|
- return ret;
|
|
+ return (ret < 0) ? ret : 0;
|
|
}
|
|
|
|
static int ab8500_get_register(struct device *dev, u8 bank,
|
|
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
|
|
index 503979c81dae1..fab3cdc27ed64 100644
|
|
--- a/drivers/mfd/bd9571mwv.c
|
|
+++ b/drivers/mfd/bd9571mwv.c
|
|
@@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
|
|
};
|
|
|
|
static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
|
|
+ regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
|
|
regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
|
|
regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
|
|
regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
|
|
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
|
|
index 6b22d54a540d1..bccde3eac92ca 100644
|
|
--- a/drivers/mfd/cros_ec_dev.c
|
|
+++ b/drivers/mfd/cros_ec_dev.c
|
|
@@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
|
|
|
|
cros_ec_debugfs_remove(ec);
|
|
|
|
+ mfd_remove_devices(ec->dev);
|
|
cdev_del(&ec->cdev);
|
|
device_unregister(&ec->class_dev);
|
|
return 0;
|
|
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
|
|
index 5970b8def5487..aec20e1c7d3d5 100644
|
|
--- a/drivers/mfd/db8500-prcmu.c
|
|
+++ b/drivers/mfd/db8500-prcmu.c
|
|
@@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
|
|
.irq_unmask = prcmu_irq_unmask,
|
|
};
|
|
|
|
-static __init char *fw_project_name(u32 project)
|
|
+static char *fw_project_name(u32 project)
|
|
{
|
|
switch (project) {
|
|
case PRCMU_FW_PROJECT_U8500:
|
|
@@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
|
|
INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
|
|
}
|
|
|
|
-static void __init init_prcm_registers(void)
|
|
+static void init_prcm_registers(void)
|
|
{
|
|
u32 val;
|
|
|
|
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
|
|
index c63e331738c17..234febfe6398b 100644
|
|
--- a/drivers/mfd/mc13xxx-core.c
|
|
+++ b/drivers/mfd/mc13xxx-core.c
|
|
@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
|
|
|
|
mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
|
|
|
|
- mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
|
|
+ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
|
|
+ if (ret)
|
|
+ goto out;
|
|
|
|
adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
|
|
adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
|
|
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
|
|
index 77b64bd64df36..ab24e176ef448 100644
|
|
--- a/drivers/mfd/mt6397-core.c
|
|
+++ b/drivers/mfd/mt6397-core.c
|
|
@@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
|
|
|
|
default:
|
|
dev_err(&pdev->dev, "unsupported chip: %d\n", id);
|
|
- ret = -ENODEV;
|
|
- break;
|
|
+ return -ENODEV;
|
|
}
|
|
|
|
if (ret) {
|
|
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
|
|
index 52fafea06067e..8d420c37b2a61 100644
|
|
--- a/drivers/mfd/qcom_rpm.c
|
|
+++ b/drivers/mfd/qcom_rpm.c
|
|
@@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
|
|
return -EFAULT;
|
|
}
|
|
|
|
+ writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
|
|
+ writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
|
|
+ writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
|
|
+
|
|
dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
|
|
fw_version[1],
|
|
fw_version[2]);
|
|
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
|
|
index 7a30546880a42..fe8d335a4d74d 100644
|
|
--- a/drivers/mfd/ti_am335x_tscadc.c
|
|
+++ b/drivers/mfd/ti_am335x_tscadc.c
|
|
@@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
|
|
cell->pdata_size = sizeof(tscadc);
|
|
}
|
|
|
|
- err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
|
|
- tscadc->used_cells, NULL, 0, NULL);
|
|
+ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
|
|
+ tscadc->cells, tscadc->used_cells, NULL,
|
|
+ 0, NULL);
|
|
if (err < 0)
|
|
goto err_disable_clk;
|
|
|
|
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
|
|
index 910f569ff77c1..8bcdecf494d05 100644
|
|
--- a/drivers/mfd/tps65218.c
|
|
+++ b/drivers/mfd/tps65218.c
|
|
@@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
|
|
|
|
mutex_init(&tps->tps_lock);
|
|
|
|
- ret = regmap_add_irq_chip(tps->regmap, tps->irq,
|
|
- IRQF_ONESHOT, 0, &tps65218_irq_chip,
|
|
- &tps->irq_data);
|
|
+ ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
|
|
+ IRQF_ONESHOT, 0, &tps65218_irq_chip,
|
|
+ &tps->irq_data);
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
@@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
|
|
ARRAY_SIZE(tps65218_cells), NULL, 0,
|
|
regmap_irq_get_domain(tps->irq_data));
|
|
|
|
- if (ret < 0)
|
|
- goto err_irq;
|
|
-
|
|
- return 0;
|
|
-
|
|
-err_irq:
|
|
- regmap_del_irq_chip(tps->irq, tps->irq_data);
|
|
-
|
|
return ret;
|
|
}
|
|
|
|
-static int tps65218_remove(struct i2c_client *client)
|
|
-{
|
|
- struct tps65218 *tps = i2c_get_clientdata(client);
|
|
-
|
|
- regmap_del_irq_chip(tps->irq, tps->irq_data);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
static const struct i2c_device_id tps65218_id_table[] = {
|
|
{ "tps65218", TPS65218 },
|
|
{ },
|
|
@@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
|
|
.of_match_table = of_tps65218_match_table,
|
|
},
|
|
.probe = tps65218_probe,
|
|
- .remove = tps65218_remove,
|
|
.id_table = tps65218_id_table,
|
|
};
|
|
|
|
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
|
|
index 4be3d239da9ec..299016bc46d90 100644
|
|
--- a/drivers/mfd/twl-core.c
|
|
+++ b/drivers/mfd/twl-core.c
|
|
@@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
|
|
* letting it generate the right frequencies for USB, MADC, and
|
|
* other purposes.
|
|
*/
|
|
-static inline int __init protect_pm_master(void)
|
|
+static inline int protect_pm_master(void)
|
|
{
|
|
int e = 0;
|
|
|
|
@@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
|
|
return e;
|
|
}
|
|
|
|
-static inline int __init unprotect_pm_master(void)
|
|
+static inline int unprotect_pm_master(void)
|
|
{
|
|
int e = 0;
|
|
|
|
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
|
|
index 1ee68bd440fbc..16c6e2accfaa5 100644
|
|
--- a/drivers/mfd/wm5110-tables.c
|
|
+++ b/drivers/mfd/wm5110-tables.c
|
|
@@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
|
|
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
|
|
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
|
|
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
|
|
+ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
|
|
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
|
|
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
|
|
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
|
|
@@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
|
|
case ARIZONA_ASRC_ENABLE:
|
|
case ARIZONA_ASRC_STATUS:
|
|
case ARIZONA_ASRC_RATE1:
|
|
+ case ARIZONA_ASRC_RATE2:
|
|
case ARIZONA_ISRC_1_CTRL_1:
|
|
case ARIZONA_ISRC_1_CTRL_2:
|
|
case ARIZONA_ISRC_1_CTRL_3:
|
|
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
index 4b73131a0f206..1b5f591cf0a23 100644
|
|
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
@@ -2595,11 +2595,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
|
|
goto err_device_destroy;
|
|
}
|
|
|
|
- clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
|
|
- /* Make sure we don't have a race with AENQ Links state handler */
|
|
- if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
|
|
- netif_carrier_on(adapter->netdev);
|
|
-
|
|
rc = ena_enable_msix_and_set_admin_interrupts(adapter,
|
|
adapter->num_queues);
|
|
if (rc) {
|
|
@@ -2616,6 +2611,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
|
|
}
|
|
|
|
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
|
|
+
|
|
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
|
|
+ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
|
|
+ netif_carrier_on(adapter->netdev);
|
|
+
|
|
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
|
|
dev_err(&pdev->dev, "Device reset completed successfully\n");
|
|
|
|
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
|
|
index 65a22cd9aef26..029730bbe7db1 100644
|
|
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
|
|
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
|
|
@@ -2052,6 +2052,7 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
|
|
bool nonlinear = skb_is_nonlinear(skb);
|
|
struct rtnl_link_stats64 *percpu_stats;
|
|
struct dpaa_percpu_priv *percpu_priv;
|
|
+ struct netdev_queue *txq;
|
|
struct dpaa_priv *priv;
|
|
struct qm_fd fd;
|
|
int offset = 0;
|
|
@@ -2101,6 +2102,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
|
|
if (unlikely(err < 0))
|
|
goto skb_to_fd_failed;
|
|
|
|
+ txq = netdev_get_tx_queue(net_dev, queue_mapping);
|
|
+
|
|
+ /* LLTX requires to do our own update of trans_start */
|
|
+ txq->trans_start = jiffies;
|
|
+
|
|
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
|
|
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
|
|
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
|
|
index ad1779fc410e6..a78bfafd212c8 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
|
|
@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
|
|
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
|
|
int i;
|
|
|
|
- vf_cb->mac_cb = NULL;
|
|
-
|
|
- kfree(vf_cb);
|
|
-
|
|
for (i = 0; i < handle->q_num; i++)
|
|
hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
|
|
+
|
|
+ kfree(vf_cb);
|
|
}
|
|
|
|
static int hns_ae_wait_flow_down(struct hnae_handle *handle)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
|
|
index a1aeeb8094c37..f5cd9539980f8 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
|
|
@@ -620,6 +620,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
|
|
}
|
|
#endif
|
|
|
|
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
|
|
+
|
|
/* We reach this function only after checking that any of
|
|
* the (IPv4 | IPv6) bits are set in cqe->status.
|
|
*/
|
|
@@ -627,9 +629,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
|
|
netdev_features_t dev_features)
|
|
{
|
|
__wsum hw_checksum = 0;
|
|
+ void *hdr;
|
|
+
|
|
+ /* CQE csum doesn't cover padding octets in short ethernet
|
|
+ * frames. And the pad field is appended prior to calculating
|
|
+ * and appending the FCS field.
|
|
+ *
|
|
+ * Detecting these padded frames requires to verify and parse
|
|
+ * IP headers, so we simply force all those small frames to skip
|
|
+ * checksum complete.
|
|
+ */
|
|
+ if (short_frame(skb->len))
|
|
+ return -EINVAL;
|
|
|
|
- void *hdr = (u8 *)va + sizeof(struct ethhdr);
|
|
-
|
|
+ hdr = (u8 *)va + sizeof(struct ethhdr);
|
|
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
|
|
|
|
if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
|
|
@@ -822,6 +835,11 @@ xdp_drop_no_cnt:
|
|
skb_record_rx_queue(skb, cq_ring);
|
|
|
|
if (likely(dev->features & NETIF_F_RXCSUM)) {
|
|
+ /* TODO: For IP non TCP/UDP packets when csum complete is
|
|
+ * not an option (not supported or any other reason) we can
|
|
+ * actually check cqe IPOK status bit and report
|
|
+ * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
|
|
+ */
|
|
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
|
|
MLX4_CQE_STATUS_UDP)) &&
|
|
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
|
|
index 7262c6310650e..288fca826a55c 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
|
|
@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
|
|
int i;
|
|
|
|
if (chunk->nsg > 0)
|
|
- pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
|
|
+ pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
|
|
PCI_DMA_BIDIRECTIONAL);
|
|
|
|
for (i = 0; i < chunk->npages; ++i)
|
|
- __free_pages(sg_page(&chunk->mem[i]),
|
|
- get_order(chunk->mem[i].length));
|
|
+ __free_pages(sg_page(&chunk->sg[i]),
|
|
+ get_order(chunk->sg[i].length));
|
|
}
|
|
|
|
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
|
|
@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
|
|
|
|
for (i = 0; i < chunk->npages; ++i)
|
|
dma_free_coherent(&dev->persist->pdev->dev,
|
|
- chunk->mem[i].length,
|
|
- lowmem_page_address(sg_page(&chunk->mem[i])),
|
|
- sg_dma_address(&chunk->mem[i]));
|
|
+ chunk->buf[i].size,
|
|
+ chunk->buf[i].addr,
|
|
+ chunk->buf[i].dma_addr);
|
|
}
|
|
|
|
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
|
|
@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
|
|
return 0;
|
|
}
|
|
|
|
-static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
|
|
- int order, gfp_t gfp_mask)
|
|
+static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
|
|
+ int order, gfp_t gfp_mask)
|
|
{
|
|
- void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
|
|
- &sg_dma_address(mem), gfp_mask);
|
|
- if (!buf)
|
|
+ buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
|
|
+ &buf->dma_addr, gfp_mask);
|
|
+ if (!buf->addr)
|
|
return -ENOMEM;
|
|
|
|
- if (offset_in_page(buf)) {
|
|
- dma_free_coherent(dev, PAGE_SIZE << order,
|
|
- buf, sg_dma_address(mem));
|
|
+ if (offset_in_page(buf->addr)) {
|
|
+ dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
|
|
+ buf->dma_addr);
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- sg_set_buf(mem, buf, PAGE_SIZE << order);
|
|
- sg_dma_len(mem) = PAGE_SIZE << order;
|
|
+ buf->size = PAGE_SIZE << order;
|
|
return 0;
|
|
}
|
|
|
|
@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
|
|
|
while (npages > 0) {
|
|
if (!chunk) {
|
|
- chunk = kmalloc_node(sizeof(*chunk),
|
|
+ chunk = kzalloc_node(sizeof(*chunk),
|
|
gfp_mask & ~(__GFP_HIGHMEM |
|
|
__GFP_NOWARN),
|
|
dev->numa_node);
|
|
if (!chunk) {
|
|
- chunk = kmalloc(sizeof(*chunk),
|
|
+ chunk = kzalloc(sizeof(*chunk),
|
|
gfp_mask & ~(__GFP_HIGHMEM |
|
|
__GFP_NOWARN));
|
|
if (!chunk)
|
|
goto fail;
|
|
}
|
|
+ chunk->coherent = coherent;
|
|
|
|
- sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
|
|
- chunk->npages = 0;
|
|
- chunk->nsg = 0;
|
|
+ if (!coherent)
|
|
+ sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
|
|
list_add_tail(&chunk->list, &icm->chunk_list);
|
|
}
|
|
|
|
@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
|
|
|
if (coherent)
|
|
ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
|
|
- &chunk->mem[chunk->npages],
|
|
- cur_order, mask);
|
|
+ &chunk->buf[chunk->npages],
|
|
+ cur_order, mask);
|
|
else
|
|
- ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
|
|
+ ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
|
|
cur_order, mask,
|
|
dev->numa_node);
|
|
|
|
@@ -205,7 +204,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
|
if (coherent)
|
|
++chunk->nsg;
|
|
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
|
|
- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
|
|
+ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
|
|
chunk->npages,
|
|
PCI_DMA_BIDIRECTIONAL);
|
|
|
|
@@ -220,7 +219,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
|
}
|
|
|
|
if (!coherent && chunk) {
|
|
- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
|
|
+ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
|
|
chunk->npages,
|
|
PCI_DMA_BIDIRECTIONAL);
|
|
|
|
@@ -320,7 +319,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
|
|
u64 idx;
|
|
struct mlx4_icm_chunk *chunk;
|
|
struct mlx4_icm *icm;
|
|
- struct page *page = NULL;
|
|
+ void *addr = NULL;
|
|
|
|
if (!table->lowmem)
|
|
return NULL;
|
|
@@ -336,28 +335,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
|
|
|
|
list_for_each_entry(chunk, &icm->chunk_list, list) {
|
|
for (i = 0; i < chunk->npages; ++i) {
|
|
+ dma_addr_t dma_addr;
|
|
+ size_t len;
|
|
+
|
|
+ if (table->coherent) {
|
|
+ len = chunk->buf[i].size;
|
|
+ dma_addr = chunk->buf[i].dma_addr;
|
|
+ addr = chunk->buf[i].addr;
|
|
+ } else {
|
|
+ struct page *page;
|
|
+
|
|
+ len = sg_dma_len(&chunk->sg[i]);
|
|
+ dma_addr = sg_dma_address(&chunk->sg[i]);
|
|
+
|
|
+ /* XXX: we should never do this for highmem
|
|
+ * allocation. This function either needs
|
|
+ * to be split, or the kernel virtual address
|
|
+ * return needs to be made optional.
|
|
+ */
|
|
+ page = sg_page(&chunk->sg[i]);
|
|
+ addr = lowmem_page_address(page);
|
|
+ }
|
|
+
|
|
if (dma_handle && dma_offset >= 0) {
|
|
- if (sg_dma_len(&chunk->mem[i]) > dma_offset)
|
|
- *dma_handle = sg_dma_address(&chunk->mem[i]) +
|
|
- dma_offset;
|
|
- dma_offset -= sg_dma_len(&chunk->mem[i]);
|
|
+ if (len > dma_offset)
|
|
+ *dma_handle = dma_addr + dma_offset;
|
|
+ dma_offset -= len;
|
|
}
|
|
+
|
|
/*
|
|
* DMA mapping can merge pages but not split them,
|
|
* so if we found the page, dma_handle has already
|
|
* been assigned to.
|
|
*/
|
|
- if (chunk->mem[i].length > offset) {
|
|
- page = sg_page(&chunk->mem[i]);
|
|
+ if (len > offset)
|
|
goto out;
|
|
- }
|
|
- offset -= chunk->mem[i].length;
|
|
+ offset -= len;
|
|
}
|
|
}
|
|
|
|
+ addr = NULL;
|
|
out:
|
|
mutex_unlock(&table->mutex);
|
|
- return page ? lowmem_page_address(page) + offset : NULL;
|
|
+ return addr ? addr + offset : NULL;
|
|
}
|
|
|
|
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
|
|
index c9169a490557c..d199874b1c074 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
|
|
@@ -47,11 +47,21 @@ enum {
|
|
MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
|
|
};
|
|
|
|
+struct mlx4_icm_buf {
|
|
+ void *addr;
|
|
+ size_t size;
|
|
+ dma_addr_t dma_addr;
|
|
+};
|
|
+
|
|
struct mlx4_icm_chunk {
|
|
struct list_head list;
|
|
int npages;
|
|
int nsg;
|
|
- struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
|
|
+ bool coherent;
|
|
+ union {
|
|
+ struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
|
|
+ struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
|
|
+ };
|
|
};
|
|
|
|
struct mlx4_icm {
|
|
@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
|
|
|
|
static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
|
|
{
|
|
- return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
|
|
+ if (iter->chunk->coherent)
|
|
+ return iter->chunk->buf[iter->page_idx].dma_addr;
|
|
+ else
|
|
+ return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
|
|
}
|
|
|
|
static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
|
|
{
|
|
- return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
|
|
+ if (iter->chunk->coherent)
|
|
+ return iter->chunk->buf[iter->page_idx].size;
|
|
+ else
|
|
+ return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
|
|
}
|
|
|
|
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
|
|
index 16ceeb1b2c9d8..da52e60d4437c 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
|
|
@@ -633,6 +633,7 @@ enum {
|
|
MLX5E_STATE_ASYNC_EVENTS_ENABLED,
|
|
MLX5E_STATE_OPENED,
|
|
MLX5E_STATE_DESTROYING,
|
|
+ MLX5E_STATE_XDP_TX_ENABLED,
|
|
};
|
|
|
|
struct mlx5e_rqt {
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
|
|
index ad6d471d00dd4..4a33c9a7cac7e 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
|
|
@@ -262,7 +262,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
|
|
int sq_num;
|
|
int i;
|
|
|
|
- if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
|
|
+ /* this flag is sufficient, no need to test internal sq state */
|
|
+ if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
|
|
return -ENETDOWN;
|
|
|
|
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
|
|
@@ -275,9 +276,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
|
|
|
|
sq = &priv->channels.c[sq_num]->xdpsq;
|
|
|
|
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
|
|
- return -ENETDOWN;
|
|
-
|
|
for (i = 0; i < n; i++) {
|
|
struct xdp_frame *xdpf = frames[i];
|
|
struct mlx5e_xdp_info xdpi;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
|
|
index 6dfab045925f0..4d096623178b9 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
|
|
@@ -49,6 +49,23 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
|
|
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
|
|
u32 flags);
|
|
|
|
+static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
|
|
+{
|
|
+ set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
|
|
+}
|
|
+
|
|
+static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
|
|
+{
|
|
+ clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
|
|
+ /* let other device's napi(s) see our new state */
|
|
+ synchronize_rcu();
|
|
+}
|
|
+
|
|
+static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
|
|
+{
|
|
+ return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
|
|
+}
|
|
+
|
|
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
|
|
{
|
|
struct mlx5_wq_cyc *wq = &sq->wq;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
|
|
index 944f21f99d437..637d59c01fe5c 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
|
|
@@ -2890,6 +2890,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
|
|
|
|
mlx5e_build_tx2sq_maps(priv);
|
|
mlx5e_activate_channels(&priv->channels);
|
|
+ mlx5e_xdp_tx_enable(priv);
|
|
netif_tx_start_all_queues(priv->netdev);
|
|
|
|
if (MLX5_ESWITCH_MANAGER(priv->mdev))
|
|
@@ -2911,6 +2912,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
|
|
*/
|
|
netif_tx_stop_all_queues(priv->netdev);
|
|
netif_tx_disable(priv->netdev);
|
|
+ mlx5e_xdp_tx_disable(priv);
|
|
mlx5e_deactivate_channels(&priv->channels);
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
|
|
index c9cc9747d21d1..701624a63d2f4 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
|
|
@@ -144,6 +144,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
|
|
|
|
s->tx_packets += sq_stats->packets;
|
|
s->tx_bytes += sq_stats->bytes;
|
|
+ s->tx_queue_dropped += sq_stats->dropped;
|
|
}
|
|
}
|
|
}
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
index 3092c59c0dc71..9f7f8425f6767 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
@@ -96,6 +96,7 @@ struct mlx5e_tc_flow_parse_attr {
|
|
struct ip_tunnel_info tun_info;
|
|
struct mlx5_flow_spec spec;
|
|
int num_mod_hdr_actions;
|
|
+ int max_mod_hdr_actions;
|
|
void *mod_hdr_actions;
|
|
int mirred_ifindex;
|
|
};
|
|
@@ -1742,9 +1743,9 @@ static struct mlx5_fields fields[] = {
|
|
OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
|
|
};
|
|
|
|
-/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
|
|
- * max from the SW pedit action. On success, it says how many HW actions were
|
|
- * actually parsed.
|
|
+/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
|
|
+ * max from the SW pedit action. On success, attr->num_mod_hdr_actions
|
|
+ * says how many HW actions were actually parsed.
|
|
*/
|
|
static int offload_pedit_fields(struct pedit_headers *masks,
|
|
struct pedit_headers *vals,
|
|
@@ -1767,9 +1768,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
|
|
add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
|
|
|
|
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
|
|
- action = parse_attr->mod_hdr_actions;
|
|
- max_actions = parse_attr->num_mod_hdr_actions;
|
|
- nactions = 0;
|
|
+ action = parse_attr->mod_hdr_actions +
|
|
+ parse_attr->num_mod_hdr_actions * action_size;
|
|
+
|
|
+ max_actions = parse_attr->max_mod_hdr_actions;
|
|
+ nactions = parse_attr->num_mod_hdr_actions;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(fields); i++) {
|
|
f = &fields[i];
|
|
@@ -1874,7 +1877,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
|
|
if (!parse_attr->mod_hdr_actions)
|
|
return -ENOMEM;
|
|
|
|
- parse_attr->num_mod_hdr_actions = max_actions;
|
|
+ parse_attr->max_mod_hdr_actions = max_actions;
|
|
return 0;
|
|
}
|
|
|
|
@@ -1918,9 +1921,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
|
|
goto out_err;
|
|
}
|
|
|
|
- err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
|
|
- if (err)
|
|
- goto out_err;
|
|
+ if (!parse_attr->mod_hdr_actions) {
|
|
+ err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
|
|
+ if (err)
|
|
+ goto out_err;
|
|
+ }
|
|
|
|
err = offload_pedit_fields(masks, vals, parse_attr);
|
|
if (err < 0)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
|
|
index c7901a3f2a794..a903e97793f9a 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
|
|
@@ -1367,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
|
|
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
|
|
|
|
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
|
|
- break;
|
|
+ return 0;
|
|
cond_resched();
|
|
} while (time_before(jiffies, end));
|
|
- return 0;
|
|
+ return -EBUSY;
|
|
}
|
|
|
|
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
|
|
index e3c6fe8b1d406..1dcf152b28138 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
|
|
@@ -75,7 +75,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
|
|
act_set = mlxsw_afa_block_first_set(rulei->act_block);
|
|
mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
|
|
|
|
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
|
|
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
|
|
+ if (err)
|
|
+ goto err_ptce2_write;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_ptce2_write:
|
|
+ cregion->ops->entry_remove(cregion, centry);
|
|
+ return err;
|
|
}
|
|
|
|
static void
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
|
|
index cdec48bcc6ad5..af673abdb4823 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
|
|
@@ -1209,7 +1209,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
|
|
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
|
|
{
|
|
return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
|
|
- MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
|
|
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
|
|
}
|
|
|
|
static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
|
|
@@ -1221,7 +1221,7 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
|
|
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
|
|
const char *mac, u16 fid, bool adding,
|
|
enum mlxsw_reg_sfd_rec_action action,
|
|
- bool dynamic)
|
|
+ enum mlxsw_reg_sfd_rec_policy policy)
|
|
{
|
|
char *sfd_pl;
|
|
u8 num_rec;
|
|
@@ -1232,8 +1232,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
|
|
return -ENOMEM;
|
|
|
|
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
|
|
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
|
|
- mac, fid, action, local_port);
|
|
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
|
|
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
|
|
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
|
|
if (err)
|
|
@@ -1252,7 +1251,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
|
|
bool dynamic)
|
|
{
|
|
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
|
|
- MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
|
|
+ MLXSW_REG_SFD_REC_ACTION_NOP,
|
|
+ mlxsw_sp_sfd_rec_policy(dynamic));
|
|
}
|
|
|
|
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
|
|
@@ -1260,7 +1260,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
|
|
{
|
|
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
|
|
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
|
|
- false);
|
|
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
|
|
}
|
|
|
|
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
|
|
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
|
|
index 2fa1c050a14b4..92cd8abeb41d7 100644
|
|
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
|
|
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
|
|
@@ -1592,6 +1592,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
|
|
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
|
|
rx_prod.bd_prod = cpu_to_le16(bd_prod);
|
|
rx_prod.cqe_prod = cpu_to_le16(cq_prod);
|
|
+
|
|
+ /* Make sure chain element is updated before ringing the doorbell */
|
|
+ dma_wmb();
|
|
+
|
|
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
|
|
index 20909036e0028..1c39305274440 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
|
|
@@ -260,6 +260,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
|
|
struct stmmac_extra_stats *x, u32 chan)
|
|
{
|
|
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
|
|
+ u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
|
|
int ret = 0;
|
|
|
|
/* ABNORMAL interrupts */
|
|
@@ -279,8 +280,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
|
|
x->normal_irq_n++;
|
|
|
|
if (likely(intr_status & XGMAC_RI)) {
|
|
- u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
|
|
- if (likely(value & XGMAC_RIE)) {
|
|
+ if (likely(intr_en & XGMAC_RIE)) {
|
|
x->rx_normal_irq_n++;
|
|
ret |= handle_rx;
|
|
}
|
|
@@ -292,7 +292,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
|
|
}
|
|
|
|
/* Clear interrupts */
|
|
- writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
|
|
+ writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
index 2103b865726ac..123b74e25ed81 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
@@ -3522,27 +3522,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
|
|
struct stmmac_channel *ch =
|
|
container_of(napi, struct stmmac_channel, napi);
|
|
struct stmmac_priv *priv = ch->priv_data;
|
|
- int work_done = 0, work_rem = budget;
|
|
+ int work_done, rx_done = 0, tx_done = 0;
|
|
u32 chan = ch->index;
|
|
|
|
priv->xstats.napi_poll++;
|
|
|
|
- if (ch->has_tx) {
|
|
- int done = stmmac_tx_clean(priv, work_rem, chan);
|
|
+ if (ch->has_tx)
|
|
+ tx_done = stmmac_tx_clean(priv, budget, chan);
|
|
+ if (ch->has_rx)
|
|
+ rx_done = stmmac_rx(priv, budget, chan);
|
|
|
|
- work_done += done;
|
|
- work_rem -= done;
|
|
- }
|
|
-
|
|
- if (ch->has_rx) {
|
|
- int done = stmmac_rx(priv, work_rem, chan);
|
|
+ work_done = max(rx_done, tx_done);
|
|
+ work_done = min(work_done, budget);
|
|
|
|
- work_done += done;
|
|
- work_rem -= done;
|
|
- }
|
|
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
|
|
+ int stat;
|
|
|
|
- if (work_done < budget && napi_complete_done(napi, work_done))
|
|
stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
|
|
+ stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
|
|
+ &priv->xstats, chan);
|
|
+ if (stat && napi_reschedule(napi))
|
|
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
|
|
+ }
|
|
|
|
return work_done;
|
|
}
|
|
@@ -4191,6 +4192,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
|
|
return ret;
|
|
}
|
|
|
|
+ /* Rx Watchdog is available in the COREs newer than the 3.40.
|
|
+ * In some case, for example on bugged HW this feature
|
|
+ * has to be disable and this can be done by passing the
|
|
+ * riwt_off field from the platform.
|
|
+ */
|
|
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
|
|
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
|
|
+ priv->use_riwt = 1;
|
|
+ dev_info(priv->device,
|
|
+ "Enable RX Mitigation via HW Watchdog Timer\n");
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -4323,18 +4336,6 @@ int stmmac_dvr_probe(struct device *device,
|
|
if (flow_ctrl)
|
|
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
|
|
|
|
- /* Rx Watchdog is available in the COREs newer than the 3.40.
|
|
- * In some case, for example on bugged HW this feature
|
|
- * has to be disable and this can be done by passing the
|
|
- * riwt_off field from the platform.
|
|
- */
|
|
- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
|
|
- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
|
|
- priv->use_riwt = 1;
|
|
- dev_info(priv->device,
|
|
- "Enable RX Mitigation via HW Watchdog Timer\n");
|
|
- }
|
|
-
|
|
/* Setup channels NAPI */
|
|
maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
|
|
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
|
|
index c54a50dbd5ac2..d819e8eaba122 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
|
|
@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
|
|
*/
|
|
static void stmmac_pci_remove(struct pci_dev *pdev)
|
|
{
|
|
+ int i;
|
|
+
|
|
stmmac_dvr_remove(&pdev->dev);
|
|
+
|
|
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
|
|
+ if (pci_resource_len(pdev, i) == 0)
|
|
+ continue;
|
|
+ pcim_iounmap_regions(pdev, BIT(i));
|
|
+ break;
|
|
+ }
|
|
+
|
|
pci_disable_device(pdev);
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
|
|
index 531294f4978bc..58ea18af9813a 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
|
|
@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
|
|
/* Queue 0 is not AVB capable */
|
|
if (queue <= 0 || queue >= tx_queues_count)
|
|
return -EINVAL;
|
|
+ if (!priv->dma_cap.av)
|
|
+ return -EOPNOTSUPP;
|
|
if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
|
|
return -EOPNOTSUPP;
|
|
|
|
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
|
|
index 493cd382b8aa0..01711e6e9a394 100644
|
|
--- a/drivers/net/geneve.c
|
|
+++ b/drivers/net/geneve.c
|
|
@@ -1406,9 +1406,13 @@ static void geneve_link_config(struct net_device *dev,
|
|
}
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
case AF_INET6: {
|
|
- struct rt6_info *rt = rt6_lookup(geneve->net,
|
|
- &info->key.u.ipv6.dst, NULL, 0,
|
|
- NULL, 0);
|
|
+ struct rt6_info *rt;
|
|
+
|
|
+ if (!__in6_dev_get(dev))
|
|
+ break;
|
|
+
|
|
+ rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
|
|
+ NULL, 0);
|
|
|
|
if (rt && rt->dst.dev)
|
|
ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
|
|
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
|
|
index 70f3f90c2ed69..2787e8b1d668a 100644
|
|
--- a/drivers/net/phy/phylink.c
|
|
+++ b/drivers/net/phy/phylink.c
|
|
@@ -502,6 +502,17 @@ static void phylink_run_resolve(struct phylink *pl)
|
|
queue_work(system_power_efficient_wq, &pl->resolve);
|
|
}
|
|
|
|
+static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
|
|
+{
|
|
+ unsigned long state = pl->phylink_disable_state;
|
|
+
|
|
+ set_bit(bit, &pl->phylink_disable_state);
|
|
+ if (state == 0) {
|
|
+ queue_work(system_power_efficient_wq, &pl->resolve);
|
|
+ flush_work(&pl->resolve);
|
|
+ }
|
|
+}
|
|
+
|
|
static void phylink_fixed_poll(struct timer_list *t)
|
|
{
|
|
struct phylink *pl = container_of(t, struct phylink, link_poll);
|
|
@@ -955,9 +966,7 @@ void phylink_stop(struct phylink *pl)
|
|
if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
|
|
del_timer_sync(&pl->link_poll);
|
|
|
|
- set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
|
|
- queue_work(system_power_efficient_wq, &pl->resolve);
|
|
- flush_work(&pl->resolve);
|
|
+ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
|
|
}
|
|
EXPORT_SYMBOL_GPL(phylink_stop);
|
|
|
|
@@ -1664,9 +1673,7 @@ static void phylink_sfp_link_down(void *upstream)
|
|
|
|
ASSERT_RTNL();
|
|
|
|
- set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
|
|
- queue_work(system_power_efficient_wq, &pl->resolve);
|
|
- flush_work(&pl->resolve);
|
|
+ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
|
|
}
|
|
|
|
static void phylink_sfp_link_up(void *upstream)
|
|
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
|
|
index ad9db652874dc..fef701bfad62e 100644
|
|
--- a/drivers/net/phy/sfp-bus.c
|
|
+++ b/drivers/net/phy/sfp-bus.c
|
|
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
|
|
return ret;
|
|
}
|
|
}
|
|
+ bus->socket_ops->attach(bus->sfp);
|
|
if (bus->started)
|
|
bus->socket_ops->start(bus->sfp);
|
|
bus->netdev->sfp_bus = bus;
|
|
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
|
|
if (bus->registered) {
|
|
if (bus->started)
|
|
bus->socket_ops->stop(bus->sfp);
|
|
+ bus->socket_ops->detach(bus->sfp);
|
|
if (bus->phydev && ops && ops->disconnect_phy)
|
|
ops->disconnect_phy(bus->upstream);
|
|
}
|
|
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
|
|
index fd8bb998ae52d..68c8fbf099f87 100644
|
|
--- a/drivers/net/phy/sfp.c
|
|
+++ b/drivers/net/phy/sfp.c
|
|
@@ -184,6 +184,7 @@ struct sfp {
|
|
|
|
struct gpio_desc *gpio[GPIO_MAX];
|
|
|
|
+ bool attached;
|
|
unsigned int state;
|
|
struct delayed_work poll;
|
|
struct delayed_work timeout;
|
|
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
|
|
*/
|
|
switch (sfp->sm_mod_state) {
|
|
default:
|
|
- if (event == SFP_E_INSERT) {
|
|
+ if (event == SFP_E_INSERT && sfp->attached) {
|
|
sfp_module_tx_disable(sfp);
|
|
sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
|
|
}
|
|
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
|
|
mutex_unlock(&sfp->sm_mutex);
|
|
}
|
|
|
|
+static void sfp_attach(struct sfp *sfp)
|
|
+{
|
|
+ sfp->attached = true;
|
|
+ if (sfp->state & SFP_F_PRESENT)
|
|
+ sfp_sm_event(sfp, SFP_E_INSERT);
|
|
+}
|
|
+
|
|
+static void sfp_detach(struct sfp *sfp)
|
|
+{
|
|
+ sfp->attached = false;
|
|
+ sfp_sm_event(sfp, SFP_E_REMOVE);
|
|
+}
|
|
+
|
|
static void sfp_start(struct sfp *sfp)
|
|
{
|
|
sfp_sm_event(sfp, SFP_E_DEV_UP);
|
|
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
|
|
}
|
|
|
|
static const struct sfp_socket_ops sfp_module_ops = {
|
|
+ .attach = sfp_attach,
|
|
+ .detach = sfp_detach,
|
|
.start = sfp_start,
|
|
.stop = sfp_stop,
|
|
.module_info = sfp_module_info,
|
|
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
|
|
dev_info(sfp->dev, "Host maximum power %u.%uW\n",
|
|
sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
|
|
|
|
- sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
|
|
- if (!sfp->sfp_bus)
|
|
- return -ENOMEM;
|
|
-
|
|
/* Get the initial state, and always signal TX disable,
|
|
* since the network interface will not be up.
|
|
*/
|
|
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
|
|
sfp->state |= SFP_F_RATE_SELECT;
|
|
sfp_set_state(sfp, sfp->state);
|
|
sfp_module_tx_disable(sfp);
|
|
- rtnl_lock();
|
|
- if (sfp->state & SFP_F_PRESENT)
|
|
- sfp_sm_event(sfp, SFP_E_INSERT);
|
|
- rtnl_unlock();
|
|
|
|
for (i = 0; i < GPIO_MAX; i++) {
|
|
if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
|
|
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
|
|
dev_warn(sfp->dev,
|
|
"No tx_disable pin: SFP modules will always be emitting.\n");
|
|
|
|
+ sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
|
|
+ if (!sfp->sfp_bus)
|
|
+ return -ENOMEM;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
|
|
index 31b0acf337e27..64f54b0bbd8c4 100644
|
|
--- a/drivers/net/phy/sfp.h
|
|
+++ b/drivers/net/phy/sfp.h
|
|
@@ -7,6 +7,8 @@
|
|
struct sfp;
|
|
|
|
struct sfp_socket_ops {
|
|
+ void (*attach)(struct sfp *sfp);
|
|
+ void (*detach)(struct sfp *sfp);
|
|
void (*start)(struct sfp *sfp);
|
|
void (*stop)(struct sfp *sfp);
|
|
int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
|
|
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
|
|
index 4b6572f0188a7..723814d84b7d8 100644
|
|
--- a/drivers/net/team/team.c
|
|
+++ b/drivers/net/team/team.c
|
|
@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
|
|
}
|
|
}
|
|
|
|
-static bool __team_option_inst_tmp_find(const struct list_head *opts,
|
|
- const struct team_option_inst *needle)
|
|
-{
|
|
- struct team_option_inst *opt_inst;
|
|
-
|
|
- list_for_each_entry(opt_inst, opts, tmp_list)
|
|
- if (opt_inst == needle)
|
|
- return true;
|
|
- return false;
|
|
-}
|
|
-
|
|
static int __team_options_register(struct team *team,
|
|
const struct team_option *option,
|
|
size_t option_count)
|
|
@@ -2463,7 +2452,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
|
|
int err = 0;
|
|
int i;
|
|
struct nlattr *nl_option;
|
|
- LIST_HEAD(opt_inst_list);
|
|
|
|
rtnl_lock();
|
|
|
|
@@ -2483,6 +2471,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
|
|
struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
|
|
struct nlattr *attr;
|
|
struct nlattr *attr_data;
|
|
+ LIST_HEAD(opt_inst_list);
|
|
enum team_option_type opt_type;
|
|
int opt_port_ifindex = 0; /* != 0 for per-port options */
|
|
u32 opt_array_index = 0;
|
|
@@ -2587,23 +2576,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
|
|
if (err)
|
|
goto team_put;
|
|
opt_inst->changed = true;
|
|
-
|
|
- /* dumb/evil user-space can send us duplicate opt,
|
|
- * keep only the last one
|
|
- */
|
|
- if (__team_option_inst_tmp_find(&opt_inst_list,
|
|
- opt_inst))
|
|
- continue;
|
|
-
|
|
list_add(&opt_inst->tmp_list, &opt_inst_list);
|
|
}
|
|
if (!opt_found) {
|
|
err = -ENOENT;
|
|
goto team_put;
|
|
}
|
|
- }
|
|
|
|
- err = team_nl_send_event_options_get(team, &opt_inst_list);
|
|
+ err = team_nl_send_event_options_get(team, &opt_inst_list);
|
|
+ if (err)
|
|
+ break;
|
|
+ }
|
|
|
|
team_put:
|
|
team_nl_team_put(team);
|
|
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
|
|
index a7f37063518ec..3d05bc1937d40 100644
|
|
--- a/drivers/pinctrl/pinctrl-max77620.c
|
|
+++ b/drivers/pinctrl/pinctrl-max77620.c
|
|
@@ -34,14 +34,12 @@ enum max77620_pin_ppdrv {
|
|
MAX77620_PIN_PP_DRV,
|
|
};
|
|
|
|
-enum max77620_pinconf_param {
|
|
- MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
|
|
- MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
|
|
- MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
|
|
- MAX77620_SUSPEND_FPS_SOURCE,
|
|
- MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
|
|
- MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
|
|
-};
|
|
+#define MAX77620_ACTIVE_FPS_SOURCE (PIN_CONFIG_END + 1)
|
|
+#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 2)
|
|
+#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 3)
|
|
+#define MAX77620_SUSPEND_FPS_SOURCE (PIN_CONFIG_END + 4)
|
|
+#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 5)
|
|
+#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 6)
|
|
|
|
struct max77620_pin_function {
|
|
const char *name;
|
|
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
|
|
index bf07735275a49..0fc382cb977bf 100644
|
|
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
|
|
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
|
|
@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
|
|
}
|
|
|
|
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
|
|
- unsigned int tid, int pg_idx, bool reply)
|
|
+ unsigned int tid, int pg_idx)
|
|
{
|
|
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
|
|
GFP_KERNEL);
|
|
@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
|
|
req = (struct cpl_set_tcb_field *)skb->head;
|
|
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
|
|
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
|
|
- req->reply = V_NO_REPLY(reply ? 0 : 1);
|
|
+ req->reply = V_NO_REPLY(1);
|
|
req->cpu_idx = 0;
|
|
req->word = htons(31);
|
|
req->mask = cpu_to_be64(0xF0000000);
|
|
@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
|
|
* @tid: connection id
|
|
* @hcrc: header digest enabled
|
|
* @dcrc: data digest enabled
|
|
- * @reply: request reply from h/w
|
|
* set up the iscsi digest settings for a connection identified by tid
|
|
*/
|
|
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
|
|
- int hcrc, int dcrc, int reply)
|
|
+ int hcrc, int dcrc)
|
|
{
|
|
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
|
|
GFP_KERNEL);
|
|
@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
|
|
req = (struct cpl_set_tcb_field *)skb->head;
|
|
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
|
|
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
|
|
- req->reply = V_NO_REPLY(reply ? 0 : 1);
|
|
+ req->reply = V_NO_REPLY(1);
|
|
req->cpu_idx = 0;
|
|
req->word = htons(31);
|
|
req->mask = cpu_to_be64(0x0F000000);
|
|
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
|
|
index 211da1d5a8699..689d6c813a50d 100644
|
|
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
|
|
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
|
|
@@ -1517,16 +1517,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
|
|
struct cxgbi_sock *csk;
|
|
|
|
csk = lookup_tid(t, tid);
|
|
- if (!csk)
|
|
+ if (!csk) {
|
|
pr_err("can't find conn. for tid %u.\n", tid);
|
|
+ return;
|
|
+ }
|
|
|
|
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
|
|
"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
|
|
csk, csk->state, csk->flags, csk->tid, rpl->status);
|
|
|
|
- if (rpl->status != CPL_ERR_NONE)
|
|
+ if (rpl->status != CPL_ERR_NONE) {
|
|
pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
|
|
csk, tid, rpl->status);
|
|
+ csk->err = -EINVAL;
|
|
+ }
|
|
+
|
|
+ complete(&csk->cmpl);
|
|
|
|
__kfree_skb(skb);
|
|
}
|
|
@@ -1903,7 +1909,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
|
|
}
|
|
|
|
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
|
|
- int pg_idx, bool reply)
|
|
+ int pg_idx)
|
|
{
|
|
struct sk_buff *skb;
|
|
struct cpl_set_tcb_field *req;
|
|
@@ -1919,7 +1925,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
|
|
req = (struct cpl_set_tcb_field *)skb->head;
|
|
INIT_TP_WR(req, csk->tid);
|
|
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
|
|
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
|
|
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
|
|
req->word_cookie = htons(0);
|
|
req->mask = cpu_to_be64(0x3 << 8);
|
|
req->val = cpu_to_be64(pg_idx << 8);
|
|
@@ -1928,12 +1934,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
|
|
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
|
|
"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
|
|
|
|
+ reinit_completion(&csk->cmpl);
|
|
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
|
|
- return 0;
|
|
+ wait_for_completion(&csk->cmpl);
|
|
+
|
|
+ return csk->err;
|
|
}
|
|
|
|
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
|
|
- int hcrc, int dcrc, int reply)
|
|
+ int hcrc, int dcrc)
|
|
{
|
|
struct sk_buff *skb;
|
|
struct cpl_set_tcb_field *req;
|
|
@@ -1951,7 +1960,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
|
|
req = (struct cpl_set_tcb_field *)skb->head;
|
|
INIT_TP_WR(req, tid);
|
|
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
|
|
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
|
|
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
|
|
req->word_cookie = htons(0);
|
|
req->mask = cpu_to_be64(0x3 << 4);
|
|
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
|
|
@@ -1961,8 +1970,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
|
|
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
|
|
"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
|
|
|
|
+ reinit_completion(&csk->cmpl);
|
|
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
|
|
- return 0;
|
|
+ wait_for_completion(&csk->cmpl);
|
|
+
|
|
+ return csk->err;
|
|
}
|
|
|
|
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
|
|
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
|
|
index 3f3af5e74a07d..f2c561ca731a3 100644
|
|
--- a/drivers/scsi/cxgbi/libcxgbi.c
|
|
+++ b/drivers/scsi/cxgbi/libcxgbi.c
|
|
@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
|
|
skb_queue_head_init(&csk->receive_queue);
|
|
skb_queue_head_init(&csk->write_queue);
|
|
timer_setup(&csk->retry_timer, NULL, 0);
|
|
+ init_completion(&csk->cmpl);
|
|
rwlock_init(&csk->callback_lock);
|
|
csk->cdev = cdev;
|
|
csk->flags = 0;
|
|
@@ -2252,14 +2253,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
|
|
if (!err && conn->hdrdgst_en)
|
|
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
|
|
conn->hdrdgst_en,
|
|
- conn->datadgst_en, 0);
|
|
+ conn->datadgst_en);
|
|
break;
|
|
case ISCSI_PARAM_DATADGST_EN:
|
|
err = iscsi_set_param(cls_conn, param, buf, buflen);
|
|
if (!err && conn->datadgst_en)
|
|
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
|
|
conn->hdrdgst_en,
|
|
- conn->datadgst_en, 0);
|
|
+ conn->datadgst_en);
|
|
break;
|
|
case ISCSI_PARAM_MAX_R2T:
|
|
return iscsi_tcp_set_max_r2t(conn, buf);
|
|
@@ -2385,7 +2386,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
|
|
|
|
ppm = csk->cdev->cdev2ppm(csk->cdev);
|
|
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
|
|
- ppm->tformat.pgsz_idx_dflt, 0);
|
|
+ ppm->tformat.pgsz_idx_dflt);
|
|
if (err < 0)
|
|
return err;
|
|
|
|
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
|
|
index dcb190e753434..3bf7414a75e5e 100644
|
|
--- a/drivers/scsi/cxgbi/libcxgbi.h
|
|
+++ b/drivers/scsi/cxgbi/libcxgbi.h
|
|
@@ -146,6 +146,7 @@ struct cxgbi_sock {
|
|
struct sk_buff_head receive_queue;
|
|
struct sk_buff_head write_queue;
|
|
struct timer_list retry_timer;
|
|
+ struct completion cmpl;
|
|
int err;
|
|
rwlock_t callback_lock;
|
|
void *user_data;
|
|
@@ -487,9 +488,9 @@ struct cxgbi_device {
|
|
struct cxgbi_ppm *,
|
|
struct cxgbi_task_tag_info *);
|
|
int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
|
|
- unsigned int, int, int, int);
|
|
+ unsigned int, int, int);
|
|
int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
|
|
- unsigned int, int, bool);
|
|
+ unsigned int, int);
|
|
|
|
void (*csk_release_offload_resources)(struct cxgbi_sock *);
|
|
int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
|
|
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
|
|
index 08c7b1e25fe48..dde84f7443136 100644
|
|
--- a/drivers/scsi/isci/init.c
|
|
+++ b/drivers/scsi/isci/init.c
|
|
@@ -588,6 +588,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
|
|
shost->max_lun = ~0;
|
|
shost->max_cmd_len = MAX_COMMAND_SIZE;
|
|
|
|
+ /* turn on DIF support */
|
|
+ scsi_host_set_prot(shost,
|
|
+ SHOST_DIF_TYPE1_PROTECTION |
|
|
+ SHOST_DIF_TYPE2_PROTECTION |
|
|
+ SHOST_DIF_TYPE3_PROTECTION);
|
|
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
|
|
+
|
|
err = scsi_add_host(shost, &pdev->dev);
|
|
if (err)
|
|
goto err_shost;
|
|
@@ -675,13 +682,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
goto err_host_alloc;
|
|
}
|
|
pci_info->hosts[i] = h;
|
|
-
|
|
- /* turn on DIF support */
|
|
- scsi_host_set_prot(to_shost(h),
|
|
- SHOST_DIF_TYPE1_PROTECTION |
|
|
- SHOST_DIF_TYPE2_PROTECTION |
|
|
- SHOST_DIF_TYPE3_PROTECTION);
|
|
- scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
|
|
}
|
|
|
|
err = isci_setup_interrupts(pdev);
|
|
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
|
|
index 2f0a4f2c5ff80..d4821b9dea45d 100644
|
|
--- a/drivers/scsi/qedi/qedi_iscsi.c
|
|
+++ b/drivers/scsi/qedi/qedi_iscsi.c
|
|
@@ -954,6 +954,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
|
|
|
|
qedi_ep = ep->dd_data;
|
|
if (qedi_ep->state == EP_STATE_IDLE ||
|
|
+ qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
|
|
qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
|
|
return -1;
|
|
|
|
@@ -1036,6 +1037,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
|
|
|
|
switch (qedi_ep->state) {
|
|
case EP_STATE_OFLDCONN_START:
|
|
+ case EP_STATE_OFLDCONN_NONE:
|
|
goto ep_release_conn;
|
|
case EP_STATE_OFLDCONN_FAILED:
|
|
break;
|
|
@@ -1226,6 +1228,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
|
|
|
|
if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
|
|
QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
|
|
+ qedi_ep->state = EP_STATE_OFLDCONN_NONE;
|
|
ret = -EIO;
|
|
goto set_path_exit;
|
|
}
|
|
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
|
|
index 11260776212fa..892d70d545537 100644
|
|
--- a/drivers/scsi/qedi/qedi_iscsi.h
|
|
+++ b/drivers/scsi/qedi/qedi_iscsi.h
|
|
@@ -59,6 +59,7 @@ enum {
|
|
EP_STATE_OFLDCONN_FAILED = 0x2000,
|
|
EP_STATE_CONNECT_FAILED = 0x4000,
|
|
EP_STATE_DISCONN_TIMEDOUT = 0x8000,
|
|
+ EP_STATE_OFLDCONN_NONE = 0x10000,
|
|
};
|
|
|
|
struct qedi_conn;
|
|
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
|
|
index 0e13349dce570..575445c761b48 100644
|
|
--- a/drivers/scsi/qla4xxx/ql4_os.c
|
|
+++ b/drivers/scsi/qla4xxx/ql4_os.c
|
|
@@ -7237,6 +7237,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
|
|
|
|
rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
|
|
fw_ddb_entry);
|
|
+ if (rc)
|
|
+ goto free_sess;
|
|
|
|
ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
|
|
__func__, fnode_sess->dev.kobj.name);
|
|
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
|
|
index 14e5bf7af0bb1..c3bcaaec0fc5c 100644
|
|
--- a/drivers/scsi/ufs/ufs.h
|
|
+++ b/drivers/scsi/ufs/ufs.h
|
|
@@ -195,7 +195,7 @@ enum ufs_desc_def_size {
|
|
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
|
|
QUERY_DESC_UNIT_DEF_SIZE = 0x23,
|
|
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
|
|
- QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
|
|
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
|
|
QUERY_DESC_POWER_DEF_SIZE = 0x62,
|
|
QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
|
|
};
|
|
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
|
|
index 12ddb5928a738..6e80dfe4fa979 100644
|
|
--- a/drivers/scsi/ufs/ufshcd.c
|
|
+++ b/drivers/scsi/ufs/ufshcd.c
|
|
@@ -7768,6 +7768,8 @@ out:
|
|
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
|
|
ktime_to_us(ktime_sub(ktime_get(), start)),
|
|
hba->curr_dev_pwr_mode, hba->uic_link_state);
|
|
+ if (!ret)
|
|
+ hba->is_sys_suspended = false;
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(ufshcd_system_resume);
|
|
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
|
|
index ac263a180253e..894e60ecebe20 100644
|
|
--- a/drivers/staging/erofs/data.c
|
|
+++ b/drivers/staging/erofs/data.c
|
|
@@ -25,7 +25,7 @@ static inline void read_endio(struct bio *bio)
|
|
struct page *page = bvec->bv_page;
|
|
|
|
/* page is already locked */
|
|
- BUG_ON(PageUptodate(page));
|
|
+ DBG_BUGON(PageUptodate(page));
|
|
|
|
if (unlikely(err))
|
|
SetPageError(page);
|
|
@@ -91,12 +91,12 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
|
|
struct erofs_map_blocks *map,
|
|
int flags)
|
|
{
|
|
+ int err = 0;
|
|
erofs_blk_t nblocks, lastblk;
|
|
u64 offset = map->m_la;
|
|
struct erofs_vnode *vi = EROFS_V(inode);
|
|
|
|
trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
|
|
- BUG_ON(is_inode_layout_compression(inode));
|
|
|
|
nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
|
|
lastblk = nblocks - is_inode_layout_inline(inode);
|
|
@@ -123,18 +123,27 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
|
|
map->m_plen = inode->i_size - offset;
|
|
|
|
/* inline data should locate in one meta block */
|
|
- BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
|
|
+ if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
|
|
+ DBG_BUGON(1);
|
|
+ err = -EIO;
|
|
+ goto err_out;
|
|
+ }
|
|
+
|
|
map->m_flags |= EROFS_MAP_META;
|
|
} else {
|
|
errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
|
|
vi->nid, inode->i_size, map->m_la);
|
|
- BUG();
|
|
+ DBG_BUGON(1);
|
|
+ err = -EIO;
|
|
+ goto err_out;
|
|
}
|
|
|
|
out:
|
|
map->m_llen = map->m_plen;
|
|
+
|
|
+err_out:
|
|
trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
|
|
- return 0;
|
|
+ return err;
|
|
}
|
|
|
|
#ifdef CONFIG_EROFS_FS_ZIP
|
|
@@ -190,7 +199,7 @@ static inline struct bio *erofs_read_raw_page(
|
|
erofs_off_t current_block = (erofs_off_t)page->index;
|
|
int err;
|
|
|
|
- BUG_ON(!nblocks);
|
|
+ DBG_BUGON(!nblocks);
|
|
|
|
if (PageUptodate(page)) {
|
|
err = 0;
|
|
@@ -233,7 +242,7 @@ submit_bio_retry:
|
|
}
|
|
|
|
/* for RAW access mode, m_plen must be equal to m_llen */
|
|
- BUG_ON(map.m_plen != map.m_llen);
|
|
+ DBG_BUGON(map.m_plen != map.m_llen);
|
|
|
|
blknr = erofs_blknr(map.m_pa);
|
|
blkoff = erofs_blkoff(map.m_pa);
|
|
@@ -243,7 +252,7 @@ submit_bio_retry:
|
|
void *vsrc, *vto;
|
|
struct page *ipage;
|
|
|
|
- BUG_ON(map.m_plen > PAGE_SIZE);
|
|
+ DBG_BUGON(map.m_plen > PAGE_SIZE);
|
|
|
|
ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
|
|
|
|
@@ -270,7 +279,7 @@ submit_bio_retry:
|
|
}
|
|
|
|
/* pa must be block-aligned for raw reading */
|
|
- BUG_ON(erofs_blkoff(map.m_pa) != 0);
|
|
+ DBG_BUGON(erofs_blkoff(map.m_pa));
|
|
|
|
/* max # of continuous pages */
|
|
if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
|
|
@@ -331,7 +340,7 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page)
|
|
if (IS_ERR(bio))
|
|
return PTR_ERR(bio);
|
|
|
|
- BUG_ON(bio != NULL); /* since we have only one bio -- must be NULL */
|
|
+ DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
|
|
return 0;
|
|
}
|
|
|
|
@@ -369,7 +378,7 @@ static int erofs_raw_access_readpages(struct file *filp,
|
|
/* pages could still be locked */
|
|
put_page(page);
|
|
}
|
|
- BUG_ON(!list_empty(pages));
|
|
+ DBG_BUGON(!list_empty(pages));
|
|
|
|
/* the rare case (end in gaps) */
|
|
if (unlikely(bio != NULL))
|
|
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
|
|
index be6ae3b1bdbe1..04b84ff31d036 100644
|
|
--- a/drivers/staging/erofs/dir.c
|
|
+++ b/drivers/staging/erofs/dir.c
|
|
@@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct dir_context *ctx,
|
|
strnlen(de_name, maxsize - nameoff) :
|
|
le16_to_cpu(de[1].nameoff) - nameoff;
|
|
|
|
- /* the corrupted directory found */
|
|
- BUG_ON(de_namelen < 0);
|
|
+ /* a corrupted entry is found */
|
|
+ if (unlikely(de_namelen < 0)) {
|
|
+ DBG_BUGON(1);
|
|
+ return -EIO;
|
|
+ }
|
|
|
|
#ifdef CONFIG_EROFS_FS_DEBUG
|
|
dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
|
|
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
|
|
index fbf6ff25cd1bd..9e7815f55a17c 100644
|
|
--- a/drivers/staging/erofs/inode.c
|
|
+++ b/drivers/staging/erofs/inode.c
|
|
@@ -132,7 +132,13 @@ static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs)
|
|
return -ENOMEM;
|
|
|
|
m_pofs += vi->inode_isize + vi->xattr_isize;
|
|
- BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
|
|
+
|
|
+ /* inline symlink data shouldn't across page boundary as well */
|
|
+ if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
|
|
+ DBG_BUGON(1);
|
|
+ kfree(lnk);
|
|
+ return -EIO;
|
|
+ }
|
|
|
|
/* get in-page inline data */
|
|
memcpy(lnk, data + m_pofs, inode->i_size);
|
|
@@ -170,7 +176,7 @@ static int fill_inode(struct inode *inode, int isdir)
|
|
return PTR_ERR(page);
|
|
}
|
|
|
|
- BUG_ON(!PageUptodate(page));
|
|
+ DBG_BUGON(!PageUptodate(page));
|
|
data = page_address(page);
|
|
|
|
err = read_inode(inode, data + ofs);
|
|
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
|
|
index e6313c54e3ad6..9f44ed8f00239 100644
|
|
--- a/drivers/staging/erofs/internal.h
|
|
+++ b/drivers/staging/erofs/internal.h
|
|
@@ -184,50 +184,70 @@ struct erofs_workgroup {
|
|
|
|
#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
|
|
|
|
-static inline bool erofs_workgroup_try_to_freeze(
|
|
- struct erofs_workgroup *grp, int v)
|
|
+#if defined(CONFIG_SMP)
|
|
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
|
|
+ int val)
|
|
{
|
|
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
|
|
- if (v != atomic_cmpxchg(&grp->refcount,
|
|
- v, EROFS_LOCKED_MAGIC))
|
|
- return false;
|
|
preempt_disable();
|
|
+ if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
|
|
+ preempt_enable();
|
|
+ return false;
|
|
+ }
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
|
|
+ int orig_val)
|
|
+{
|
|
+ /*
|
|
+ * other observers should notice all modifications
|
|
+ * in the freezing period.
|
|
+ */
|
|
+ smp_mb();
|
|
+ atomic_set(&grp->refcount, orig_val);
|
|
+ preempt_enable();
|
|
+}
|
|
+
|
|
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
|
|
+{
|
|
+ return atomic_cond_read_relaxed(&grp->refcount,
|
|
+ VAL != EROFS_LOCKED_MAGIC);
|
|
+}
|
|
#else
|
|
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
|
|
+ int val)
|
|
+{
|
|
preempt_disable();
|
|
- if (atomic_read(&grp->refcount) != v) {
|
|
+ /* no need to spin on UP platforms, let's just disable preemption. */
|
|
+ if (val != atomic_read(&grp->refcount)) {
|
|
preempt_enable();
|
|
return false;
|
|
}
|
|
-#endif
|
|
return true;
|
|
}
|
|
|
|
-static inline void erofs_workgroup_unfreeze(
|
|
- struct erofs_workgroup *grp, int v)
|
|
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
|
|
+ int orig_val)
|
|
{
|
|
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
|
|
- atomic_set(&grp->refcount, v);
|
|
-#endif
|
|
preempt_enable();
|
|
}
|
|
|
|
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
|
|
+{
|
|
+ int v = atomic_read(&grp->refcount);
|
|
+
|
|
+ /* workgroup is never freezed on uniprocessor systems */
|
|
+ DBG_BUGON(v == EROFS_LOCKED_MAGIC);
|
|
+ return v;
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
|
|
{
|
|
- const int locked = (int)EROFS_LOCKED_MAGIC;
|
|
int o;
|
|
|
|
repeat:
|
|
- o = atomic_read(&grp->refcount);
|
|
-
|
|
- /* spin if it is temporarily locked at the reclaim path */
|
|
- if (unlikely(o == locked)) {
|
|
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
|
|
- do
|
|
- cpu_relax();
|
|
- while (atomic_read(&grp->refcount) == locked);
|
|
-#endif
|
|
- goto repeat;
|
|
- }
|
|
+ o = erofs_wait_on_workgroup_freezed(grp);
|
|
|
|
if (unlikely(o <= 0))
|
|
return -1;
|
|
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
|
|
index 2df9768edac96..b0583cdb079ae 100644
|
|
--- a/drivers/staging/erofs/super.c
|
|
+++ b/drivers/staging/erofs/super.c
|
|
@@ -40,7 +40,6 @@ static int erofs_init_inode_cache(void)
|
|
|
|
static void erofs_exit_inode_cache(void)
|
|
{
|
|
- BUG_ON(erofs_inode_cachep == NULL);
|
|
kmem_cache_destroy(erofs_inode_cachep);
|
|
}
|
|
|
|
@@ -265,8 +264,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
|
|
int ret = 1; /* 0 - busy */
|
|
struct address_space *const mapping = page->mapping;
|
|
|
|
- BUG_ON(!PageLocked(page));
|
|
- BUG_ON(mapping->a_ops != &managed_cache_aops);
|
|
+ DBG_BUGON(!PageLocked(page));
|
|
+ DBG_BUGON(mapping->a_ops != &managed_cache_aops);
|
|
|
|
if (PagePrivate(page))
|
|
ret = erofs_try_to_free_cached_page(mapping, page);
|
|
@@ -279,10 +278,10 @@ static void managed_cache_invalidatepage(struct page *page,
|
|
{
|
|
const unsigned int stop = length + offset;
|
|
|
|
- BUG_ON(!PageLocked(page));
|
|
+ DBG_BUGON(!PageLocked(page));
|
|
|
|
- /* Check for overflow */
|
|
- BUG_ON(stop > PAGE_SIZE || stop < length);
|
|
+ /* Check for potential overflow in debug mode */
|
|
+ DBG_BUGON(stop > PAGE_SIZE || stop < length);
|
|
|
|
if (offset == 0 && stop == PAGE_SIZE)
|
|
while (!managed_cache_releasepage(page, GFP_NOFS))
|
|
@@ -404,12 +403,6 @@ static int erofs_read_super(struct super_block *sb,
|
|
|
|
erofs_register_super(sb);
|
|
|
|
- /*
|
|
- * We already have a positive dentry, which was instantiated
|
|
- * by d_make_root. Just need to d_rehash it.
|
|
- */
|
|
- d_rehash(sb->s_root);
|
|
-
|
|
if (!silent)
|
|
infoln("mounted on %s with opts: %s.", dev_name,
|
|
(char *)data);
|
|
@@ -625,7 +618,7 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
|
|
|
|
static int erofs_remount(struct super_block *sb, int *flags, char *data)
|
|
{
|
|
- BUG_ON(!sb_rdonly(sb));
|
|
+ DBG_BUGON(!sb_rdonly(sb));
|
|
|
|
*flags |= SB_RDONLY;
|
|
return 0;
|
|
diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
|
|
index 0956615b86f72..23856ba2742d8 100644
|
|
--- a/drivers/staging/erofs/unzip_pagevec.h
|
|
+++ b/drivers/staging/erofs/unzip_pagevec.h
|
|
@@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
|
|
erofs_vtptr_t t;
|
|
|
|
if (unlikely(ctor->index >= ctor->nr)) {
|
|
- BUG_ON(ctor->next == NULL);
|
|
+ DBG_BUGON(!ctor->next);
|
|
z_erofs_pagevec_ctor_pagedown(ctor, true);
|
|
}
|
|
|
|
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
|
|
index 0346630b67c8c..1279241449f4b 100644
|
|
--- a/drivers/staging/erofs/unzip_vle.c
|
|
+++ b/drivers/staging/erofs/unzip_vle.c
|
|
@@ -18,9 +18,6 @@ static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
|
|
|
|
void z_erofs_exit_zip_subsystem(void)
|
|
{
|
|
- BUG_ON(z_erofs_workqueue == NULL);
|
|
- BUG_ON(z_erofs_workgroup_cachep == NULL);
|
|
-
|
|
destroy_workqueue(z_erofs_workqueue);
|
|
kmem_cache_destroy(z_erofs_workgroup_cachep);
|
|
}
|
|
@@ -293,12 +290,9 @@ z_erofs_vle_work_lookup(struct super_block *sb,
|
|
*grp_ret = grp = container_of(egrp,
|
|
struct z_erofs_vle_workgroup, obj);
|
|
|
|
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
work = z_erofs_vle_grab_work(grp, pageofs);
|
|
+ /* if multiref is disabled, `primary' is always true */
|
|
primary = true;
|
|
-#else
|
|
- BUG();
|
|
-#endif
|
|
|
|
DBG_BUGON(work->pageofs != pageofs);
|
|
|
|
@@ -365,12 +359,12 @@ z_erofs_vle_work_register(struct super_block *sb,
|
|
struct z_erofs_vle_workgroup *grp = *grp_ret;
|
|
struct z_erofs_vle_work *work;
|
|
|
|
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
- BUG_ON(grp != NULL);
|
|
-#else
|
|
- if (grp != NULL)
|
|
- goto skip;
|
|
-#endif
|
|
+ /* if multiref is disabled, grp should never be nullptr */
|
|
+ if (unlikely(grp)) {
|
|
+ DBG_BUGON(1);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
+
|
|
/* no available workgroup, let's allocate one */
|
|
grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
|
|
if (unlikely(grp == NULL))
|
|
@@ -393,13 +387,7 @@ z_erofs_vle_work_register(struct super_block *sb,
|
|
*hosted = true;
|
|
|
|
newgrp = true;
|
|
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
-skip:
|
|
- /* currently unimplemented */
|
|
- BUG();
|
|
-#else
|
|
work = z_erofs_vle_grab_primary_work(grp);
|
|
-#endif
|
|
work->pageofs = pageofs;
|
|
|
|
mutex_init(&work->lock);
|
|
@@ -606,7 +594,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
|
|
|
|
enum z_erofs_page_type page_type;
|
|
unsigned cur, end, spiltted, index;
|
|
- int err;
|
|
+ int err = 0;
|
|
|
|
/* register locked file pages as online pages in pack */
|
|
z_erofs_onlinepage_init(page);
|
|
@@ -624,7 +612,7 @@ repeat:
|
|
/* go ahead the next map_blocks */
|
|
debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
|
|
|
|
- if (!z_erofs_vle_work_iter_end(builder))
|
|
+ if (z_erofs_vle_work_iter_end(builder))
|
|
fe->initial = false;
|
|
|
|
map->m_la = offset + cur;
|
|
@@ -633,12 +621,11 @@ repeat:
|
|
if (unlikely(err))
|
|
goto err_out;
|
|
|
|
- /* deal with hole (FIXME! broken now) */
|
|
if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
|
|
goto hitted;
|
|
|
|
DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
|
|
- BUG_ON(erofs_blkoff(map->m_pa));
|
|
+ DBG_BUGON(erofs_blkoff(map->m_pa));
|
|
|
|
err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
|
|
if (unlikely(err))
|
|
@@ -683,7 +670,7 @@ retry:
|
|
|
|
err = z_erofs_vle_work_add_page(builder,
|
|
newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
|
|
- if (!err)
|
|
+ if (likely(!err))
|
|
goto retry;
|
|
}
|
|
|
|
@@ -694,9 +681,10 @@ retry:
|
|
|
|
/* FIXME! avoid the last relundant fixup & endio */
|
|
z_erofs_onlinepage_fixup(page, index, true);
|
|
- ++spiltted;
|
|
|
|
- /* also update nr_pages and increase queued_pages */
|
|
+ /* bump up the number of spiltted parts of a page */
|
|
+ ++spiltted;
|
|
+ /* also update nr_pages */
|
|
work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
|
|
next_part:
|
|
/* can be used for verification */
|
|
@@ -706,16 +694,18 @@ next_part:
|
|
if (end > 0)
|
|
goto repeat;
|
|
|
|
+out:
|
|
/* FIXME! avoid the last relundant fixup & endio */
|
|
z_erofs_onlinepage_endio(page);
|
|
|
|
debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
|
|
__func__, page, spiltted, map->m_llen);
|
|
- return 0;
|
|
+ return err;
|
|
|
|
+ /* if some error occurred while processing this page */
|
|
err_out:
|
|
- /* TODO: the missing error handing cases */
|
|
- return err;
|
|
+ SetPageError(page);
|
|
+ goto out;
|
|
}
|
|
|
|
static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
|
|
@@ -752,7 +742,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
|
|
bool cachemngd = false;
|
|
|
|
DBG_BUGON(PageUptodate(page));
|
|
- BUG_ON(page->mapping == NULL);
|
|
+ DBG_BUGON(!page->mapping);
|
|
|
|
#ifdef EROFS_FS_HAS_MANAGED_CACHE
|
|
if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
|
|
@@ -796,10 +786,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
|
|
const unsigned clusterpages = erofs_clusterpages(sbi);
|
|
|
|
struct z_erofs_pagevec_ctor ctor;
|
|
- unsigned nr_pages;
|
|
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
- unsigned sparsemem_pages = 0;
|
|
-#endif
|
|
+ unsigned int nr_pages;
|
|
+ unsigned int sparsemem_pages = 0;
|
|
struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
|
|
struct page **pages, **compressed_pages, *page;
|
|
unsigned i, llen;
|
|
@@ -811,12 +799,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
|
|
int err;
|
|
|
|
might_sleep();
|
|
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
work = z_erofs_vle_grab_primary_work(grp);
|
|
-#else
|
|
- BUG();
|
|
-#endif
|
|
- BUG_ON(!READ_ONCE(work->nr_pages));
|
|
+ DBG_BUGON(!READ_ONCE(work->nr_pages));
|
|
|
|
mutex_lock(&work->lock);
|
|
nr_pages = work->nr_pages;
|
|
@@ -865,14 +849,12 @@ repeat:
|
|
else
|
|
pagenr = z_erofs_onlinepage_index(page);
|
|
|
|
- BUG_ON(pagenr >= nr_pages);
|
|
+ DBG_BUGON(pagenr >= nr_pages);
|
|
+ DBG_BUGON(pages[pagenr]);
|
|
|
|
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
- BUG_ON(pages[pagenr] != NULL);
|
|
- ++sparsemem_pages;
|
|
-#endif
|
|
pages[pagenr] = page;
|
|
}
|
|
+ sparsemem_pages = i;
|
|
|
|
z_erofs_pagevec_ctor_exit(&ctor, true);
|
|
|
|
@@ -891,9 +873,8 @@ repeat:
|
|
if (z_erofs_is_stagingpage(page))
|
|
continue;
|
|
#ifdef EROFS_FS_HAS_MANAGED_CACHE
|
|
- else if (page->mapping == mngda) {
|
|
- BUG_ON(PageLocked(page));
|
|
- BUG_ON(!PageUptodate(page));
|
|
+ if (page->mapping == mngda) {
|
|
+ DBG_BUGON(!PageUptodate(page));
|
|
continue;
|
|
}
|
|
#endif
|
|
@@ -901,11 +882,9 @@ repeat:
|
|
/* only non-head page could be reused as a compressed page */
|
|
pagenr = z_erofs_onlinepage_index(page);
|
|
|
|
- BUG_ON(pagenr >= nr_pages);
|
|
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
- BUG_ON(pages[pagenr] != NULL);
|
|
+ DBG_BUGON(pagenr >= nr_pages);
|
|
+ DBG_BUGON(pages[pagenr]);
|
|
++sparsemem_pages;
|
|
-#endif
|
|
pages[pagenr] = page;
|
|
|
|
overlapped = true;
|
|
@@ -914,9 +893,6 @@ repeat:
|
|
llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
|
|
|
|
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
|
|
- /* FIXME! this should be fixed in the future */
|
|
- BUG_ON(grp->llen != llen);
|
|
-
|
|
err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
|
|
pages, nr_pages, work->pageofs);
|
|
goto out;
|
|
@@ -931,12 +907,8 @@ repeat:
|
|
if (err != -ENOTSUPP)
|
|
goto out_percpu;
|
|
|
|
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
- if (sparsemem_pages >= nr_pages) {
|
|
- BUG_ON(sparsemem_pages > nr_pages);
|
|
+ if (sparsemem_pages >= nr_pages)
|
|
goto skip_allocpage;
|
|
- }
|
|
-#endif
|
|
|
|
for (i = 0; i < nr_pages; ++i) {
|
|
if (pages[i] != NULL)
|
|
@@ -945,9 +917,7 @@ repeat:
|
|
pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
|
|
}
|
|
|
|
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
skip_allocpage:
|
|
-#endif
|
|
vout = erofs_vmap(pages, nr_pages);
|
|
|
|
err = z_erofs_vle_unzip_vmap(compressed_pages,
|
|
@@ -1031,7 +1001,7 @@ static void z_erofs_vle_unzip_wq(struct work_struct *work)
|
|
struct z_erofs_vle_unzip_io_sb, io.u.work);
|
|
LIST_HEAD(page_pool);
|
|
|
|
- BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
|
|
+ DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
|
|
z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
|
|
|
|
put_pages_list(&page_pool);
|
|
@@ -1360,7 +1330,6 @@ static inline int __z_erofs_vle_normalaccess_readpages(
|
|
continue;
|
|
}
|
|
|
|
- BUG_ON(PagePrivate(page));
|
|
set_page_private(page, (unsigned long)head);
|
|
head = page;
|
|
}
|
|
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
|
|
index 3939985008652..3316bc36965d4 100644
|
|
--- a/drivers/staging/erofs/unzip_vle.h
|
|
+++ b/drivers/staging/erofs/unzip_vle.h
|
|
@@ -47,13 +47,6 @@ static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool,
|
|
#define Z_EROFS_VLE_INLINE_PAGEVECS 3
|
|
|
|
struct z_erofs_vle_work {
|
|
- /* struct z_erofs_vle_work *left, *right; */
|
|
-
|
|
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
- struct list_head list;
|
|
-
|
|
- atomic_t refcount;
|
|
-#endif
|
|
struct mutex lock;
|
|
|
|
/* I: decompression offset in page */
|
|
@@ -107,10 +100,8 @@ static inline void z_erofs_vle_set_workgrp_fmt(
|
|
grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
|
|
}
|
|
|
|
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
|
|
-#error multiref decompression is unimplemented yet
|
|
-#else
|
|
|
|
+/* definitions if multiref is disabled */
|
|
#define z_erofs_vle_grab_primary_work(grp) (&(grp)->work)
|
|
#define z_erofs_vle_grab_work(grp, pageofs) (&(grp)->work)
|
|
#define z_erofs_vle_work_workgroup(wrk, primary) \
|
|
@@ -118,7 +109,6 @@ static inline void z_erofs_vle_set_workgrp_fmt(
|
|
struct z_erofs_vle_workgroup, work) : \
|
|
({ BUG(); (void *)NULL; }))
|
|
|
|
-#endif
|
|
|
|
#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_vle_workgroup)
|
|
|
|
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
|
|
index f5b665f15be52..9cb35cd33365a 100644
|
|
--- a/drivers/staging/erofs/unzip_vle_lz4.c
|
|
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
|
|
@@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
|
|
if (compressed_pages[j] != page)
|
|
continue;
|
|
|
|
- BUG_ON(mirrored[j]);
|
|
+ DBG_BUGON(mirrored[j]);
|
|
memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
|
|
mirrored[j] = true;
|
|
break;
|
|
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
|
|
index 595cf90af9bb2..dd2ac9dbc4b47 100644
|
|
--- a/drivers/staging/erofs/utils.c
|
|
+++ b/drivers/staging/erofs/utils.c
|
|
@@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
|
|
list_del(&page->lru);
|
|
} else {
|
|
page = alloc_pages(gfp | __GFP_NOFAIL, 0);
|
|
-
|
|
- BUG_ON(page == NULL);
|
|
- BUG_ON(page->mapping != NULL);
|
|
}
|
|
return page;
|
|
}
|
|
@@ -60,7 +57,7 @@ repeat:
|
|
/* decrease refcount added by erofs_workgroup_put */
|
|
if (unlikely(oldcount == 1))
|
|
atomic_long_dec(&erofs_global_shrink_cnt);
|
|
- BUG_ON(index != grp->index);
|
|
+ DBG_BUGON(index != grp->index);
|
|
}
|
|
rcu_read_unlock();
|
|
return grp;
|
|
@@ -73,8 +70,11 @@ int erofs_register_workgroup(struct super_block *sb,
|
|
struct erofs_sb_info *sbi;
|
|
int err;
|
|
|
|
- /* grp->refcount should not < 1 */
|
|
- BUG_ON(!atomic_read(&grp->refcount));
|
|
+ /* grp shouldn't be broken or used before */
|
|
+ if (unlikely(atomic_read(&grp->refcount) != 1)) {
|
|
+ DBG_BUGON(1);
|
|
+ return -EINVAL;
|
|
+ }
|
|
|
|
err = radix_tree_preload(GFP_NOFS);
|
|
if (err)
|
|
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
|
|
index 9cd404acdb82b..ac7620120491b 100644
|
|
--- a/drivers/target/target_core_user.c
|
|
+++ b/drivers/target/target_core_user.c
|
|
@@ -148,7 +148,7 @@ struct tcmu_dev {
|
|
size_t ring_size;
|
|
|
|
struct mutex cmdr_lock;
|
|
- struct list_head cmdr_queue;
|
|
+ struct list_head qfull_queue;
|
|
|
|
uint32_t dbi_max;
|
|
uint32_t dbi_thresh;
|
|
@@ -159,6 +159,7 @@ struct tcmu_dev {
|
|
|
|
struct timer_list cmd_timer;
|
|
unsigned int cmd_time_out;
|
|
+ struct list_head inflight_queue;
|
|
|
|
struct timer_list qfull_timer;
|
|
int qfull_time_out;
|
|
@@ -179,7 +180,7 @@ struct tcmu_dev {
|
|
struct tcmu_cmd {
|
|
struct se_cmd *se_cmd;
|
|
struct tcmu_dev *tcmu_dev;
|
|
- struct list_head cmdr_queue_entry;
|
|
+ struct list_head queue_entry;
|
|
|
|
uint16_t cmd_id;
|
|
|
|
@@ -192,6 +193,7 @@ struct tcmu_cmd {
|
|
unsigned long deadline;
|
|
|
|
#define TCMU_CMD_BIT_EXPIRED 0
|
|
+#define TCMU_CMD_BIT_INFLIGHT 1
|
|
unsigned long flags;
|
|
};
|
|
/*
|
|
@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
|
|
if (!tcmu_cmd)
|
|
return NULL;
|
|
|
|
- INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
|
|
+ INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
|
|
tcmu_cmd->se_cmd = se_cmd;
|
|
tcmu_cmd->tcmu_dev = udev;
|
|
|
|
@@ -915,11 +917,13 @@ setup_timer:
|
|
return 0;
|
|
|
|
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
|
|
- mod_timer(timer, tcmu_cmd->deadline);
|
|
+ if (!timer_pending(timer))
|
|
+ mod_timer(timer, tcmu_cmd->deadline);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
-static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
|
|
+static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
|
|
{
|
|
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
|
|
unsigned int tmo;
|
|
@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
|
|
if (ret)
|
|
return ret;
|
|
|
|
- list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
|
|
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
|
|
pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
|
|
tcmu_cmd->cmd_id, udev->name);
|
|
return 0;
|
|
@@ -999,7 +1003,7 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
|
|
base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
|
|
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
|
|
|
|
- if (!list_empty(&udev->cmdr_queue))
|
|
+ if (!list_empty(&udev->qfull_queue))
|
|
goto queue;
|
|
|
|
mb = udev->mb_addr;
|
|
@@ -1096,13 +1100,16 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
|
|
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
|
|
tcmu_flush_dcache_range(mb, sizeof(*mb));
|
|
|
|
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
|
|
+ set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
|
|
+
|
|
/* TODO: only if FLUSH and FUA? */
|
|
uio_event_notify(&udev->uio_info);
|
|
|
|
return 0;
|
|
|
|
queue:
|
|
- if (add_to_cmdr_queue(tcmu_cmd)) {
|
|
+ if (add_to_qfull_queue(tcmu_cmd)) {
|
|
*scsi_err = TCM_OUT_OF_RESOURCES;
|
|
return -1;
|
|
}
|
|
@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
|
|
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
|
|
goto out;
|
|
|
|
+ list_del_init(&cmd->queue_entry);
|
|
+
|
|
tcmu_cmd_reset_dbi_cur(cmd);
|
|
|
|
if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
|
|
@@ -1194,9 +1203,29 @@ out:
|
|
tcmu_free_cmd(cmd);
}
+static void tcmu_set_next_deadline(struct list_head *queue,
+ struct timer_list *timer)
+{
+ struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
+ unsigned long deadline = 0;
+
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
+ if (!time_after(jiffies, tcmu_cmd->deadline)) {
+ deadline = tcmu_cmd->deadline;
+ break;
+ }
+ }
+
+ if (deadline)
+ mod_timer(timer, deadline);
+ else
+ del_timer(timer);
+}
+
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
+ struct tcmu_cmd *cmd;
int handled = 0;
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
- struct tcmu_cmd *cmd;
tcmu_flush_dcache_range(entry, sizeof(*entry));
@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
/* no more pending commands */
del_timer(&udev->cmd_timer);
- if (list_empty(&udev->cmdr_queue)) {
+ if (list_empty(&udev->qfull_queue)) {
/*
* no more pending or waiting commands so try to
* reclaim blocks if needed.
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
tcmu_global_max_blocks)
schedule_delayed_work(&tcmu_unmap_work, 0);
}
+ } else if (udev->cmd_time_out) {
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
}
return handled;
@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
if (!time_after(jiffies, cmd->deadline))
return 0;
- is_running = list_empty(&cmd->cmdr_queue_entry);
+ is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
se_cmd = cmd->se_cmd;
if (is_running) {
@@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
*/
scsi_status = SAM_STAT_CHECK_CONDITION;
} else {
- list_del_init(&cmd->cmdr_queue_entry);
-
idr_remove(&udev->commands, id);
tcmu_free_cmd(cmd);
scsi_status = SAM_STAT_TASK_SET_FULL;
}
+ list_del_init(&cmd->queue_entry);
pr_debug("Timing out cmd %u on dev %s that is %s.\n",
id, udev->name, is_running ? "inflight" : "queued");
@@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
- INIT_LIST_HEAD(&udev->cmdr_queue);
+ INIT_LIST_HEAD(&udev->qfull_queue);
+ INIT_LIST_HEAD(&udev->inflight_queue);
idr_init(&udev->commands);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
@@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
-static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
+static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
@@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
sense_reason_t scsi_ret;
int ret;
- if (list_empty(&udev->cmdr_queue))
+ if (list_empty(&udev->qfull_queue))
return true;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
- list_splice_init(&udev->cmdr_queue, &cmds);
+ list_splice_init(&udev->qfull_queue, &cmds);
- list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
- list_del_init(&tcmu_cmd->cmdr_queue_entry);
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
+ list_del_init(&tcmu_cmd->queue_entry);
pr_debug("removing cmd %u on dev %s from queue\n",
tcmu_cmd->cmd_id, udev->name);
@@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
* cmd was requeued, so just put all cmds back in
* the queue
*/
- list_splice_tail(&cmds, &udev->cmdr_queue);
+ list_splice_tail(&cmds, &udev->qfull_queue);
drained = false;
- goto done;
+ break;
}
}
- if (list_empty(&udev->cmdr_queue))
- del_timer(&udev->qfull_timer);
-done:
+
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
return drained;
}
@@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
mutex_lock(&udev->cmdr_lock);
tcmu_handle_completions(udev);
- run_cmdr_queue(udev, false);
+ run_qfull_queue(udev, false);
mutex_unlock(&udev->cmdr_lock);
return 0;
@@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
/* complete IO that has executed successfully */
tcmu_handle_completions(udev);
/* fail IO waiting to be queued */
- run_cmdr_queue(udev, true);
+ run_qfull_queue(udev, true);
unlock:
mutex_unlock(&udev->cmdr_lock);
@@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
- if (!list_empty(&cmd->cmdr_queue_entry))
+ if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
continue;
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
@@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
idr_remove(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ list_del_init(&cmd->queue_entry);
if (err_level == 1) {
/*
* Userspace was not able to start the
@@ -2666,6 +2696,10 @@ static void check_timedout_devices(void)
mutex_lock(&udev->cmdr_lock);
idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
+
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+
mutex_unlock(&udev->cmdr_lock);
spin_lock_bh(&timed_out_udevs_lock);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5eaeca805c95c..b214a72d5caad 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1035,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
int type, ret;
ret = copy_from_iter(&type, sizeof(type), from);
- if (ret != sizeof(type))
+ if (ret != sizeof(type)) {
+ ret = -EINVAL;
goto done;
+ }
switch (type) {
case VHOST_IOTLB_MSG:
@@ -1055,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
iov_iter_advance(from, offset);
ret = copy_from_iter(&msg, sizeof(msg), from);
- if (ret != sizeof(msg))
+ if (ret != sizeof(msg)) {
+ ret = -EINVAL;
goto done;
+ }
if (vhost_process_iotlb_msg(dev, &msg)) {
ret = -EFAULT;
goto done;
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
|
|
index bdfcc0a71db14..6bde543452f25 100644
|
|
--- a/drivers/video/backlight/pwm_bl.c
|
|
+++ b/drivers/video/backlight/pwm_bl.c
|
|
@@ -262,6 +262,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
|
|
|
|
memset(data, 0, sizeof(*data));
|
|
|
|
+ /*
|
|
+ * These values are optional and set as 0 by default, the out values
|
|
+ * are modified only if a valid u32 value can be decoded.
|
|
+ */
|
|
+ of_property_read_u32(node, "post-pwm-on-delay-ms",
|
|
+ &data->post_pwm_on_delay);
|
|
+ of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
|
|
+
|
|
+ data->enable_gpio = -EINVAL;
|
|
+
|
|
/*
|
|
* Determine the number of brightness levels, if this property is not
|
|
* set a default table of brightness levels will be used.
|
|
@@ -374,15 +384,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
|
|
data->max_brightness--;
|
|
}
|
|
|
|
- /*
|
|
- * These values are optional and set as 0 by default, the out values
|
|
- * are modified only if a valid u32 value can be decoded.
|
|
- */
|
|
- of_property_read_u32(node, "post-pwm-on-delay-ms",
|
|
- &data->post_pwm_on_delay);
|
|
- of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
|
|
-
|
|
- data->enable_gpio = -EINVAL;
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
|
|
index afbd6101c78eb..070026a7e55a5 100644
|
|
--- a/drivers/video/fbdev/udlfb.c
|
|
+++ b/drivers/video/fbdev/udlfb.c
|
|
@@ -916,8 +916,6 @@ static int dlfb_ops_open(struct fb_info *info, int user)
|
|
|
|
dlfb->fb_count++;
|
|
|
|
- kref_get(&dlfb->kref);
|
|
-
|
|
if (fb_defio && (info->fbdefio == NULL)) {
|
|
/* enable defio at last moment if not disabled by client */
|
|
|
|
@@ -940,14 +938,17 @@ static int dlfb_ops_open(struct fb_info *info, int user)
|
|
return 0;
|
|
}
|
|
|
|
-/*
|
|
- * Called when all client interfaces to start transactions have been disabled,
|
|
- * and all references to our device instance (dlfb_data) are released.
|
|
- * Every transaction must have a reference, so we know are fully spun down
|
|
- */
|
|
-static void dlfb_free(struct kref *kref)
|
|
+static void dlfb_ops_destroy(struct fb_info *info)
|
|
{
|
|
- struct dlfb_data *dlfb = container_of(kref, struct dlfb_data, kref);
|
|
+ struct dlfb_data *dlfb = info->par;
|
|
+
|
|
+ if (info->cmap.len != 0)
|
|
+ fb_dealloc_cmap(&info->cmap);
|
|
+ if (info->monspecs.modedb)
|
|
+ fb_destroy_modedb(info->monspecs.modedb);
|
|
+ vfree(info->screen_base);
|
|
+
|
|
+ fb_destroy_modelist(&info->modelist);
|
|
|
|
while (!list_empty(&dlfb->deferred_free)) {
|
|
struct dlfb_deferred_free *d = list_entry(dlfb->deferred_free.next, struct dlfb_deferred_free, list);
|
|
@@ -957,40 +958,13 @@ static void dlfb_free(struct kref *kref)
|
|
}
|
|
vfree(dlfb->backing_buffer);
|
|
kfree(dlfb->edid);
|
|
+ usb_put_dev(dlfb->udev);
|
|
kfree(dlfb);
|
|
-}
|
|
-
|
|
-static void dlfb_free_framebuffer(struct dlfb_data *dlfb)
|
|
-{
|
|
- struct fb_info *info = dlfb->info;
|
|
-
|
|
- if (info) {
|
|
- unregister_framebuffer(info);
|
|
-
|
|
- if (info->cmap.len != 0)
|
|
- fb_dealloc_cmap(&info->cmap);
|
|
- if (info->monspecs.modedb)
|
|
- fb_destroy_modedb(info->monspecs.modedb);
|
|
- vfree(info->screen_base);
|
|
-
|
|
- fb_destroy_modelist(&info->modelist);
|
|
-
|
|
- dlfb->info = NULL;
|
|
-
|
|
- /* Assume info structure is freed after this point */
|
|
- framebuffer_release(info);
|
|
- }
|
|
|
|
- /* ref taken in probe() as part of registering framebfufer */
|
|
- kref_put(&dlfb->kref, dlfb_free);
|
|
+ /* Assume info structure is freed after this point */
|
|
+ framebuffer_release(info);
|
|
}
|
|
|
|
-static void dlfb_free_framebuffer_work(struct work_struct *work)
|
|
-{
|
|
- struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
|
|
- free_framebuffer_work.work);
|
|
- dlfb_free_framebuffer(dlfb);
|
|
-}
|
|
/*
|
|
* Assumes caller is holding info->lock mutex (for open and release at least)
|
|
*/
|
|
@@ -1000,10 +974,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
|
|
|
|
dlfb->fb_count--;
|
|
|
|
- /* We can't free fb_info here - fbmem will touch it when we return */
|
|
- if (dlfb->virtualized && (dlfb->fb_count == 0))
|
|
- schedule_delayed_work(&dlfb->free_framebuffer_work, HZ);
|
|
-
|
|
if ((dlfb->fb_count == 0) && (info->fbdefio)) {
|
|
fb_deferred_io_cleanup(info);
|
|
kfree(info->fbdefio);
|
|
@@ -1013,8 +983,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
|
|
|
|
dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
|
|
|
|
- kref_put(&dlfb->kref, dlfb_free);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
@@ -1172,6 +1140,7 @@ static struct fb_ops dlfb_ops = {
|
|
.fb_blank = dlfb_ops_blank,
|
|
.fb_check_var = dlfb_ops_check_var,
|
|
.fb_set_par = dlfb_ops_set_par,
|
|
+ .fb_destroy = dlfb_ops_destroy,
|
|
};
|
|
|
|
|
|
@@ -1615,12 +1584,13 @@ success:
|
|
return true;
|
|
}
|
|
|
|
-static void dlfb_init_framebuffer_work(struct work_struct *work);
|
|
-
|
|
static int dlfb_usb_probe(struct usb_interface *intf,
|
|
const struct usb_device_id *id)
|
|
{
|
|
+ int i;
|
|
+ const struct device_attribute *attr;
|
|
struct dlfb_data *dlfb;
|
|
+ struct fb_info *info;
|
|
int retval = -ENOMEM;
|
|
struct usb_device *usbdev = interface_to_usbdev(intf);
|
|
|
|
@@ -1631,10 +1601,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
|
|
goto error;
|
|
}
|
|
|
|
- kref_init(&dlfb->kref); /* matching kref_put in usb .disconnect fn */
|
|
INIT_LIST_HEAD(&dlfb->deferred_free);
|
|
|
|
- dlfb->udev = usbdev;
|
|
+ dlfb->udev = usb_get_dev(usbdev);
|
|
usb_set_intfdata(intf, dlfb);
|
|
|
|
dev_dbg(&intf->dev, "console enable=%d\n", console);
|
|
@@ -1657,42 +1626,6 @@ static int dlfb_usb_probe(struct usb_interface *intf,
|
|
}
|
|
|
|
|
|
- if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
|
|
- retval = -ENOMEM;
|
|
- dev_err(&intf->dev, "unable to allocate urb list\n");
|
|
- goto error;
|
|
- }
|
|
-
|
|
- kref_get(&dlfb->kref); /* matching kref_put in free_framebuffer_work */
|
|
-
|
|
- /* We don't register a new USB class. Our client interface is dlfbev */
|
|
-
|
|
- /* Workitem keep things fast & simple during USB enumeration */
|
|
- INIT_DELAYED_WORK(&dlfb->init_framebuffer_work,
|
|
- dlfb_init_framebuffer_work);
|
|
- schedule_delayed_work(&dlfb->init_framebuffer_work, 0);
|
|
-
|
|
- return 0;
|
|
-
|
|
-error:
|
|
- if (dlfb) {
|
|
-
|
|
- kref_put(&dlfb->kref, dlfb_free); /* last ref from kref_init */
|
|
-
|
|
- /* dev has been deallocated. Do not dereference */
|
|
- }
|
|
-
|
|
- return retval;
|
|
-}
|
|
-
|
|
-static void dlfb_init_framebuffer_work(struct work_struct *work)
|
|
-{
|
|
- int i, retval;
|
|
- struct fb_info *info;
|
|
- const struct device_attribute *attr;
|
|
- struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
|
|
- init_framebuffer_work.work);
|
|
-
|
|
/* allocates framebuffer driver structure, not framebuffer memory */
|
|
info = framebuffer_alloc(0, &dlfb->udev->dev);
|
|
if (!info) {
|
|
@@ -1706,17 +1639,22 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
|
|
dlfb->ops = dlfb_ops;
|
|
info->fbops = &dlfb->ops;
|
|
|
|
+ INIT_LIST_HEAD(&info->modelist);
|
|
+
|
|
+ if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
|
|
+ retval = -ENOMEM;
|
|
+ dev_err(&intf->dev, "unable to allocate urb list\n");
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ /* We don't register a new USB class. Our client interface is dlfbev */
|
|
+
|
|
retval = fb_alloc_cmap(&info->cmap, 256, 0);
|
|
if (retval < 0) {
|
|
dev_err(info->device, "cmap allocation failed: %d\n", retval);
|
|
goto error;
|
|
}
|
|
|
|
- INIT_DELAYED_WORK(&dlfb->free_framebuffer_work,
|
|
- dlfb_free_framebuffer_work);
|
|
-
|
|
- INIT_LIST_HEAD(&info->modelist);
|
|
-
|
|
retval = dlfb_setup_modes(dlfb, info, NULL, 0);
|
|
if (retval != 0) {
|
|
dev_err(info->device,
|
|
@@ -1760,10 +1698,16 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
|
|
dev_name(info->dev), info->var.xres, info->var.yres,
|
|
((dlfb->backing_buffer) ?
|
|
info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
|
|
- return;
|
|
+ return 0;
|
|
|
|
error:
|
|
- dlfb_free_framebuffer(dlfb);
|
|
+ if (dlfb->info) {
|
|
+ dlfb_ops_destroy(dlfb->info);
|
|
+ } else if (dlfb) {
|
|
+ usb_put_dev(dlfb->udev);
|
|
+ kfree(dlfb);
|
|
+ }
|
|
+ return retval;
|
|
}
|
|
|
|
static void dlfb_usb_disconnect(struct usb_interface *intf)
|
|
@@ -1791,20 +1735,9 @@ static void dlfb_usb_disconnect(struct usb_interface *intf)
|
|
for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
|
|
device_remove_file(info->dev, &fb_device_attrs[i]);
|
|
device_remove_bin_file(info->dev, &edid_attr);
|
|
- unlink_framebuffer(info);
|
|
}
|
|
|
|
- usb_set_intfdata(intf, NULL);
|
|
- dlfb->udev = NULL;
|
|
-
|
|
- /* if clients still have us open, will be freed on last close */
|
|
- if (dlfb->fb_count == 0)
|
|
- schedule_delayed_work(&dlfb->free_framebuffer_work, 0);
|
|
-
|
|
- /* release reference taken by kref_init in probe() */
|
|
- kref_put(&dlfb->kref, dlfb_free);
|
|
-
|
|
- /* consider dlfb_data freed */
|
|
+ unregister_framebuffer(info);
|
|
}
|
|
|
|
static struct usb_driver dlfb_driver = {
|
|
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
|
|
index 5c4a764717c4d..81208cd3f4ecb 100644
|
|
--- a/drivers/watchdog/mt7621_wdt.c
|
|
+++ b/drivers/watchdog/mt7621_wdt.c
|
|
@@ -17,6 +17,7 @@
|
|
#include <linux/watchdog.h>
|
|
#include <linux/moduleparam.h>
|
|
#include <linux/platform_device.h>
|
|
+#include <linux/mod_devicetable.h>
|
|
|
|
#include <asm/mach-ralink/ralink_regs.h>
|
|
|
|
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
|
|
index 98967f0a7d10e..db7c57d82cfdc 100644
|
|
--- a/drivers/watchdog/rt2880_wdt.c
|
|
+++ b/drivers/watchdog/rt2880_wdt.c
|
|
@@ -18,6 +18,7 @@
|
|
#include <linux/watchdog.h>
|
|
#include <linux/moduleparam.h>
|
|
#include <linux/platform_device.h>
|
|
+#include <linux/mod_devicetable.h>
|
|
|
|
#include <asm/mach-ralink/ralink_regs.h>
|
|
|
|
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
|
|
index b1092fbefa630..d4ea33581ac26 100644
|
|
--- a/drivers/xen/pvcalls-back.c
|
|
+++ b/drivers/xen/pvcalls-back.c
|
|
@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
|
|
|
|
/* write the data, then modify the indexes */
|
|
virt_wmb();
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ atomic_set(&map->read, 0);
|
|
intf->in_error = ret;
|
|
- else
|
|
+ } else
|
|
intf->in_prod = prod + ret;
|
|
/* update the indexes, then notify the other end */
|
|
virt_wmb();
|
|
@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
|
|
static void pvcalls_sk_state_change(struct sock *sock)
|
|
{
|
|
struct sock_mapping *map = sock->sk_user_data;
|
|
- struct pvcalls_data_intf *intf;
|
|
|
|
if (map == NULL)
|
|
return;
|
|
|
|
- intf = map->ring;
|
|
- intf->in_error = -ENOTCONN;
|
|
+ atomic_inc(&map->read);
|
|
notify_remote_via_irq(map->irq);
|
|
}
|
|
|
|
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
|
|
index 77224d8f3e6fe..91da7e44d5d4f 100644
|
|
--- a/drivers/xen/pvcalls-front.c
|
|
+++ b/drivers/xen/pvcalls-front.c
|
|
@@ -31,6 +31,12 @@
|
|
#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
|
|
#define PVCALLS_FRONT_MAX_SPIN 5000
|
|
|
|
+static struct proto pvcalls_proto = {
|
|
+ .name = "PVCalls",
|
|
+ .owner = THIS_MODULE,
|
|
+ .obj_size = sizeof(struct sock),
|
|
+};
|
|
+
|
|
struct pvcalls_bedata {
|
|
struct xen_pvcalls_front_ring ring;
|
|
grant_ref_t ref;
|
|
@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
|
|
return ret;
|
|
}
|
|
|
|
+static void free_active_ring(struct sock_mapping *map)
|
|
+{
|
|
+ if (!map->active.ring)
|
|
+ return;
|
|
+
|
|
+ free_pages((unsigned long)map->active.data.in,
|
|
+ map->active.ring->ring_order);
|
|
+ free_page((unsigned long)map->active.ring);
|
|
+}
|
|
+
|
|
+static int alloc_active_ring(struct sock_mapping *map)
|
|
+{
|
|
+ void *bytes;
|
|
+
|
|
+ map->active.ring = (struct pvcalls_data_intf *)
|
|
+ get_zeroed_page(GFP_KERNEL);
|
|
+ if (!map->active.ring)
|
|
+ goto out;
|
|
+
|
|
+ map->active.ring->ring_order = PVCALLS_RING_ORDER;
|
|
+ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
|
|
+ PVCALLS_RING_ORDER);
|
|
+ if (!bytes)
|
|
+ goto out;
|
|
+
|
|
+ map->active.data.in = bytes;
|
|
+ map->active.data.out = bytes +
|
|
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+out:
|
|
+ free_active_ring(map);
|
|
+ return -ENOMEM;
|
|
+}
|
|
+
|
|
static int create_active(struct sock_mapping *map, int *evtchn)
|
|
{
|
|
void *bytes;
|
|
@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
|
|
*evtchn = -1;
|
|
init_waitqueue_head(&map->active.inflight_conn_req);
|
|
|
|
- map->active.ring = (struct pvcalls_data_intf *)
|
|
- __get_free_page(GFP_KERNEL | __GFP_ZERO);
|
|
- if (map->active.ring == NULL)
|
|
- goto out_error;
|
|
- map->active.ring->ring_order = PVCALLS_RING_ORDER;
|
|
- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
|
|
- PVCALLS_RING_ORDER);
|
|
- if (bytes == NULL)
|
|
- goto out_error;
|
|
+ bytes = map->active.data.in;
|
|
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
|
|
map->active.ring->ref[i] = gnttab_grant_foreign_access(
|
|
pvcalls_front_dev->otherend_id,
|
|
@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
|
|
pvcalls_front_dev->otherend_id,
|
|
pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
|
|
|
|
- map->active.data.in = bytes;
|
|
- map->active.data.out = bytes +
|
|
- XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
|
|
-
|
|
ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
|
|
if (ret)
|
|
goto out_error;
|
|
@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
|
|
out_error:
|
|
if (*evtchn >= 0)
|
|
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
|
|
- free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
|
|
- free_page((unsigned long)map->active.ring);
|
|
return ret;
|
|
}
|
|
|
|
@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
|
|
return PTR_ERR(map);
|
|
|
|
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
|
|
+ ret = alloc_active_ring(map);
|
|
+ if (ret < 0) {
|
|
+ pvcalls_exit_sock(sock);
|
|
+ return ret;
|
|
+ }
|
|
|
|
spin_lock(&bedata->socket_lock);
|
|
ret = get_request(bedata, &req_id);
|
|
if (ret < 0) {
|
|
spin_unlock(&bedata->socket_lock);
|
|
+ free_active_ring(map);
|
|
pvcalls_exit_sock(sock);
|
|
return ret;
|
|
}
|
|
ret = create_active(map, &evtchn);
|
|
if (ret < 0) {
|
|
spin_unlock(&bedata->socket_lock);
|
|
+ free_active_ring(map);
|
|
pvcalls_exit_sock(sock);
|
|
return ret;
|
|
}
|
|
@@ -560,15 +595,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
|
|
error = intf->in_error;
|
|
/* get pointers before reading from the ring */
|
|
virt_rmb();
|
|
- if (error < 0)
|
|
- return error;
|
|
|
|
size = pvcalls_queued(prod, cons, array_size);
|
|
masked_prod = pvcalls_mask(prod, array_size);
|
|
masked_cons = pvcalls_mask(cons, array_size);
|
|
|
|
if (size == 0)
|
|
- return 0;
|
|
+ return error ?: size;
|
|
|
|
if (len > size)
|
|
len = size;
|
|
@@ -780,25 +813,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
|
|
}
|
|
}
|
|
|
|
- spin_lock(&bedata->socket_lock);
|
|
- ret = get_request(bedata, &req_id);
|
|
- if (ret < 0) {
|
|
+ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
|
|
+ if (map2 == NULL) {
|
|
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
|
|
(void *)&map->passive.flags);
|
|
- spin_unlock(&bedata->socket_lock);
|
|
+ pvcalls_exit_sock(sock);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ ret = alloc_active_ring(map2);
|
|
+ if (ret < 0) {
|
|
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
|
|
+ (void *)&map->passive.flags);
|
|
+ kfree(map2);
|
|
pvcalls_exit_sock(sock);
|
|
return ret;
|
|
}
|
|
- map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
|
|
- if (map2 == NULL) {
|
|
+ spin_lock(&bedata->socket_lock);
|
|
+ ret = get_request(bedata, &req_id);
|
|
+ if (ret < 0) {
|
|
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
|
|
(void *)&map->passive.flags);
|
|
spin_unlock(&bedata->socket_lock);
|
|
+ free_active_ring(map2);
|
|
+ kfree(map2);
|
|
pvcalls_exit_sock(sock);
|
|
- return -ENOMEM;
|
|
+ return ret;
|
|
}
|
|
+
|
|
ret = create_active(map2, &evtchn);
|
|
if (ret < 0) {
|
|
+ free_active_ring(map2);
|
|
kfree(map2);
|
|
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
|
|
(void *)&map->passive.flags);
|
|
@@ -839,7 +883,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
|
|
|
|
received:
|
|
map2->sock = newsock;
|
|
- newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
|
|
+ newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
|
|
if (!newsock->sk) {
|
|
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
|
|
map->passive.inflight_req_id = PVCALLS_INVALID_ID;
|
|
@@ -1032,8 +1076,8 @@ int pvcalls_front_release(struct socket *sock)
|
|
spin_lock(&bedata->socket_lock);
|
|
list_del(&map->list);
|
|
spin_unlock(&bedata->socket_lock);
|
|
- if (READ_ONCE(map->passive.inflight_req_id) !=
|
|
- PVCALLS_INVALID_ID) {
|
|
+ if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
|
|
+ READ_ONCE(map->passive.inflight_req_id) != 0) {
|
|
pvcalls_front_free_map(bedata,
|
|
map->passive.accept_map);
|
|
}
|
|
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
|
|
index dc62d15a964b8..1bb300ef362b0 100644
|
|
--- a/fs/afs/flock.c
|
|
+++ b/fs/afs/flock.c
|
|
@@ -208,7 +208,7 @@ again:
|
|
/* The new front of the queue now owns the state variables. */
|
|
next = list_entry(vnode->pending_locks.next,
|
|
struct file_lock, fl_u.afs.link);
|
|
- vnode->lock_key = afs_file_key(next->fl_file);
|
|
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
|
|
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
|
|
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
|
|
goto again;
|
|
@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
|
|
/* The new front of the queue now owns the state variables. */
|
|
next = list_entry(vnode->pending_locks.next,
|
|
struct file_lock, fl_u.afs.link);
|
|
- vnode->lock_key = afs_file_key(next->fl_file);
|
|
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
|
|
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
|
|
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
|
|
afs_lock_may_be_available(vnode);
|
|
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
|
|
index 071075d775a95..0726e40db0f8b 100644
|
|
--- a/fs/afs/inode.c
|
|
+++ b/fs/afs/inode.c
|
|
@@ -411,7 +411,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
|
|
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
|
|
valid = true;
|
|
} else {
|
|
- vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
|
|
vnode->cb_v_break = vnode->volume->cb_v_break;
|
|
valid = false;
|
|
}
|
|
@@ -543,6 +542,8 @@ void afs_evict_inode(struct inode *inode)
|
|
#endif
|
|
|
|
afs_put_permits(rcu_access_pointer(vnode->permit_cache));
|
|
+ key_put(vnode->lock_key);
|
|
+ vnode->lock_key = NULL;
|
|
_leave("");
|
|
}
|
|
|
|
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
|
|
index 041c27ea8de15..f74193da0e092 100644
|
|
--- a/fs/ceph/snap.c
|
|
+++ b/fs/ceph/snap.c
|
|
@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
|
|
capsnap->size);
|
|
|
|
spin_lock(&mdsc->snap_flush_lock);
|
|
- list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
|
|
+ if (list_empty(&ci->i_snap_flush_item))
|
|
+ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
|
|
spin_unlock(&mdsc->snap_flush_lock);
|
|
return 1; /* caller may want to ceph_flush_snaps */
|
|
}
|
|
diff --git a/fs/proc/base.c b/fs/proc/base.c
|
|
index 7e9f07bf260d2..81d77b15b3479 100644
|
|
--- a/fs/proc/base.c
|
|
+++ b/fs/proc/base.c
|
|
@@ -1084,10 +1084,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
|
|
|
|
task_lock(p);
|
|
if (!p->vfork_done && process_shares_mm(p, mm)) {
|
|
- pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
|
|
- task_pid_nr(p), p->comm,
|
|
- p->signal->oom_score_adj, oom_adj,
|
|
- task_pid_nr(task), task->comm);
|
|
p->signal->oom_score_adj = oom_adj;
|
|
if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
|
|
p->signal->oom_score_adj_min = (short)oom_adj;
|
|
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
|
|
index e098cbe27db54..12babe9915944 100644
|
|
--- a/include/keys/user-type.h
|
|
+++ b/include/keys/user-type.h
|
|
@@ -31,7 +31,7 @@
|
|
struct user_key_payload {
|
|
struct rcu_head rcu; /* RCU destructor */
|
|
unsigned short datalen; /* length of this data */
|
|
- char data[0]; /* actual data */
|
|
+ char data[0] __aligned(__alignof__(u64)); /* actual data */
|
|
};
|
|
|
|
extern struct key_type key_type_user;
|
|
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
|
|
index b1ce500fe8b3d..d756f2318efe0 100644
|
|
--- a/include/linux/compiler-clang.h
|
|
+++ b/include/linux/compiler-clang.h
|
|
@@ -3,9 +3,8 @@
|
|
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
|
|
#endif
|
|
|
|
-/* Some compiler specific definitions are overwritten here
|
|
- * for Clang compiler
|
|
- */
|
|
+/* Compiler specific definitions for Clang compiler */
|
|
+
|
|
#define uninitialized_var(x) x = *(&(x))
|
|
|
|
/* same as gcc, this was present in clang-2.6 so we can assume it works
|
|
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
|
|
index 0242f6eec4eaf..a8ff0ca0c3213 100644
|
|
--- a/include/linux/compiler-gcc.h
|
|
+++ b/include/linux/compiler-gcc.h
|
|
@@ -58,10 +58,6 @@
|
|
(typeof(ptr)) (__ptr + (off)); \
|
|
})
|
|
|
|
-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
|
|
-#define OPTIMIZER_HIDE_VAR(var) \
|
|
- __asm__ ("" : "=r" (var) : "0" (var))
|
|
-
|
|
/*
|
|
* A trick to suppress uninitialized variable warning without generating any
|
|
* code
|
|
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
|
|
index 4c7f9befa9f6c..f1fc60f103176 100644
|
|
--- a/include/linux/compiler-intel.h
|
|
+++ b/include/linux/compiler-intel.h
|
|
@@ -5,9 +5,7 @@
|
|
|
|
#ifdef __ECC
|
|
|
|
-/* Some compiler specific definitions are overwritten here
|
|
- * for Intel ECC compiler
|
|
- */
|
|
+/* Compiler specific definitions for Intel ECC compiler */
|
|
|
|
#include <asm/intrinsics.h>
|
|
|
|
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
|
|
index 681d866efb1eb..269d376f5a119 100644
|
|
--- a/include/linux/compiler.h
|
|
+++ b/include/linux/compiler.h
|
|
@@ -158,7 +158,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
|
|
#endif
|
|
|
|
#ifndef OPTIMIZER_HIDE_VAR
|
|
-#define OPTIMIZER_HIDE_VAR(var) barrier()
|
|
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
|
|
+#define OPTIMIZER_HIDE_VAR(var) \
|
|
+ __asm__ ("" : "=r" (var) : "0" (var))
|
|
#endif
|
|
|
|
/* Not-quite-unique ID. */
|
|
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
|
|
index 59ddf9af909e4..2dd0a9ed5b361 100644
|
|
--- a/include/linux/qed/qed_chain.h
|
|
+++ b/include/linux/qed/qed_chain.h
|
|
@@ -663,6 +663,37 @@ out:
|
|
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
|
|
u32 prod_idx, void *p_prod_elem)
|
|
{
|
|
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
|
|
+ u32 cur_prod, page_mask, page_cnt, page_diff;
|
|
+
|
|
+ cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
|
|
+ p_chain->u.chain32.prod_idx;
|
|
+
|
|
+ /* Assume that number of elements in a page is power of 2 */
|
|
+ page_mask = ~p_chain->elem_per_page_mask;
|
|
+
|
|
+ /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
|
|
+ * reaches the first element of next page before the page index
|
|
+ * is incremented. See qed_chain_produce().
|
|
+ * Index wrap around is not a problem because the difference
|
|
+ * between current and given producer indices is always
|
|
+ * positive and lower than the chain's capacity.
|
|
+ */
|
|
+ page_diff = (((cur_prod - 1) & page_mask) -
|
|
+ ((prod_idx - 1) & page_mask)) /
|
|
+ p_chain->elem_per_page;
|
|
+
|
|
+ page_cnt = qed_chain_get_page_cnt(p_chain);
|
|
+ if (is_chain_u16(p_chain))
|
|
+ p_chain->pbl.c.u16.prod_page_idx =
|
|
+ (p_chain->pbl.c.u16.prod_page_idx -
|
|
+ page_diff + page_cnt) % page_cnt;
|
|
+ else
|
|
+ p_chain->pbl.c.u32.prod_page_idx =
|
|
+ (p_chain->pbl.c.u32.prod_page_idx -
|
|
+ page_diff + page_cnt) % page_cnt;
|
|
+ }
|
|
+
|
|
if (is_chain_u16(p_chain))
|
|
p_chain->u.chain16.prod_idx = (u16) prod_idx;
|
|
else
|
|
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
|
|
index 5d69e208e8d91..a404d475acee3 100644
|
|
--- a/include/linux/skbuff.h
|
|
+++ b/include/linux/skbuff.h
|
|
@@ -2392,7 +2392,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
|
|
|
|
if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
|
|
skb_set_transport_header(skb, keys.control.thoff);
|
|
- else
|
|
+ else if (offset_hint >= 0)
|
|
skb_set_transport_header(skb, offset_hint);
|
|
}
|
|
|
|
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
|
|
index cb462f9ab7dd5..e0348cb0a1dd7 100644
|
|
--- a/include/linux/virtio_net.h
|
|
+++ b/include/linux/virtio_net.h
|
|
@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
|
|
|
|
if (!skb_partial_csum_set(skb, start, off))
|
|
return -EINVAL;
|
|
+ } else {
|
|
+ /* gso packets without NEEDS_CSUM do not set transport_offset.
|
|
+ * probe and drop if does not match one of the above types.
|
|
+ */
|
|
+ if (gso_type && skb->network_header) {
|
|
+ if (!skb->protocol)
|
|
+ virtio_net_hdr_set_proto(skb, hdr);
|
|
+retry:
|
|
+ skb_probe_transport_header(skb, -1);
|
|
+ if (!skb_transport_header_was_set(skb)) {
|
|
+ /* UFO does not specify ipv4 or 6: try both */
|
|
+ if (gso_type & SKB_GSO_UDP &&
|
|
+ skb->protocol == htons(ETH_P_IP)) {
|
|
+ skb->protocol = htons(ETH_P_IPV6);
|
|
+ goto retry;
|
|
+ }
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
|
|
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
|
|
index 0e355f4a3d763..0a3de10c6dece 100644
|
|
--- a/include/net/netfilter/nf_flow_table.h
|
|
+++ b/include/net/netfilter/nf_flow_table.h
|
|
@@ -84,7 +84,6 @@ struct flow_offload {
|
|
struct nf_flow_route {
|
|
struct {
|
|
struct dst_entry *dst;
|
|
- int ifindex;
|
|
} tuple[FLOW_OFFLOAD_DIR_MAX];
|
|
};
|
|
|
|
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
|
|
index 14565d703291b..e8baca85bac6a 100644
|
|
--- a/include/uapi/linux/inet_diag.h
|
|
+++ b/include/uapi/linux/inet_diag.h
|
|
@@ -137,15 +137,21 @@ enum {
|
|
INET_DIAG_TCLASS,
|
|
INET_DIAG_SKMEMINFO,
|
|
INET_DIAG_SHUTDOWN,
|
|
- INET_DIAG_DCTCPINFO,
|
|
- INET_DIAG_PROTOCOL, /* response attribute only */
|
|
+
|
|
+ /*
|
|
+ * Next extenstions cannot be requested in struct inet_diag_req_v2:
|
|
+ * its field idiag_ext has only 8 bits.
|
|
+ */
|
|
+
|
|
+ INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */
|
|
+ INET_DIAG_PROTOCOL, /* response attribute only */
|
|
INET_DIAG_SKV6ONLY,
|
|
INET_DIAG_LOCALS,
|
|
INET_DIAG_PEERS,
|
|
INET_DIAG_PAD,
|
|
- INET_DIAG_MARK,
|
|
- INET_DIAG_BBRINFO,
|
|
- INET_DIAG_CLASS_ID,
|
|
+ INET_DIAG_MARK, /* only with CAP_NET_ADMIN */
|
|
+ INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */
|
|
+ INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
|
|
INET_DIAG_MD5SIG,
|
|
__INET_DIAG_MAX,
|
|
};
|
|
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
|
|
index 3abd327bada64..7d09e54ae54e0 100644
|
|
--- a/include/video/udlfb.h
|
|
+++ b/include/video/udlfb.h
|
|
@@ -36,12 +36,9 @@ struct dlfb_data {
|
|
struct usb_device *udev;
|
|
struct fb_info *info;
|
|
struct urb_list urbs;
|
|
- struct kref kref;
|
|
char *backing_buffer;
|
|
int fb_count;
|
|
bool virtualized; /* true when physical usb device not present */
|
|
- struct delayed_work init_framebuffer_work;
|
|
- struct delayed_work free_framebuffer_work;
|
|
atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
|
|
atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
|
|
char *edid; /* null until we read edid from hw or get from sysfs */
|
|
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
|
|
index 8061a439ef18c..6a32933cae4ff 100644
|
|
--- a/kernel/bpf/stackmap.c
|
|
+++ b/kernel/bpf/stackmap.c
|
|
@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
|
|
|
|
if (nhdr->n_type == BPF_BUILD_ID &&
|
|
nhdr->n_namesz == sizeof("GNU") &&
|
|
- nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
|
|
+ nhdr->n_descsz > 0 &&
|
|
+ nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
|
|
memcpy(build_id,
|
|
note_start + note_offs +
|
|
ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
|
|
- BPF_BUILD_ID_SIZE);
|
|
+ nhdr->n_descsz);
|
|
+ memset(build_id + nhdr->n_descsz, 0,
|
|
+ BPF_BUILD_ID_SIZE - nhdr->n_descsz);
|
|
return 0;
|
|
}
|
|
new_offs = note_offs + sizeof(Elf32_Nhdr) +
|
|
@@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
|
|
return -EFAULT; /* page not mapped */
|
|
|
|
ret = -EINVAL;
|
|
- page_addr = page_address(page);
|
|
+ page_addr = kmap_atomic(page);
|
|
ehdr = (Elf32_Ehdr *)page_addr;
|
|
|
|
/* compare magic x7f "ELF" */
|
|
@@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
|
|
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
|
|
ret = stack_map_get_build_id_64(page_addr, build_id);
|
|
out:
|
|
+ kunmap_atomic(page_addr);
|
|
put_page(page);
|
|
return ret;
|
|
}
|
|
@@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
|
|
for (i = 0; i < trace_nr; i++) {
|
|
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
|
|
id_offs[i].ip = ips[i];
|
|
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
|
|
}
|
|
return;
|
|
}
|
|
@@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
|
|
/* per entry fall back to ips */
|
|
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
|
|
id_offs[i].ip = ips[i];
|
|
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
|
|
continue;
|
|
}
|
|
id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
|
|
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
|
|
index bf6f1d70484dc..17bd0c0dfa98a 100644
|
|
--- a/kernel/trace/trace.c
|
|
+++ b/kernel/trace/trace.c
|
|
@@ -3383,6 +3383,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
|
|
const char tgid_space[] = " ";
|
|
const char space[] = " ";
|
|
|
|
+ print_event_info(buf, m);
|
|
+
|
|
seq_printf(m, "# %s _-----=> irqs-off\n",
|
|
tgid ? tgid_space : space);
|
|
seq_printf(m, "# %s / _----=> need-resched\n",
|
|
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
|
|
index 149b6f4cf0233..89d4439516f6c 100644
|
|
--- a/mm/mempolicy.c
|
|
+++ b/mm/mempolicy.c
|
|
@@ -1300,7 +1300,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
|
|
nodemask_t *nodes)
|
|
{
|
|
unsigned long copy = ALIGN(maxnode-1, 64) / 8;
|
|
- const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
|
|
+ unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
|
|
|
|
if (copy > nbytes) {
|
|
if (copy > PAGE_SIZE)
|
|
@@ -1477,7 +1477,7 @@ static int kernel_get_mempolicy(int __user *policy,
|
|
int uninitialized_var(pval);
|
|
nodemask_t nodes;
|
|
|
|
- if (nmask != NULL && maxnode < MAX_NUMNODES)
|
|
+ if (nmask != NULL && maxnode < nr_node_ids)
|
|
return -EINVAL;
|
|
|
|
err = do_get_mempolicy(&pval, &nodes, addr, flags);
|
|
@@ -1513,7 +1513,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
|
|
unsigned long nr_bits, alloc_size;
|
|
DECLARE_BITMAP(bm, MAX_NUMNODES);
|
|
|
|
- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
|
|
+ nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
|
|
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
|
|
|
|
if (nmask)
|
|
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
|
|
index 3899fa6e201dd..a2976adeeedce 100644
|
|
--- a/net/batman-adv/soft-interface.c
|
|
+++ b/net/batman-adv/soft-interface.c
|
|
@@ -227,6 +227,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
|
|
|
|
switch (ntohs(ethhdr->h_proto)) {
|
|
case ETH_P_8021Q:
|
|
+ if (!pskb_may_pull(skb, sizeof(*vhdr)))
|
|
+ goto dropped;
|
|
vhdr = vlan_eth_hdr(skb);
|
|
|
|
/* drop batman-in-batman packets to prevent loops */
|
|
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
|
|
index 502f663495308..4d4b9b5ea1c17 100644
|
|
--- a/net/bridge/br_fdb.c
|
|
+++ b/net/bridge/br_fdb.c
|
|
@@ -1088,6 +1088,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
|
|
err = -ENOMEM;
|
|
goto err_unlock;
|
|
}
|
|
+ if (swdev_notify)
|
|
+ fdb->added_by_user = 1;
|
|
fdb->added_by_external_learn = 1;
|
|
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
|
|
} else {
|
|
@@ -1107,6 +1109,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
|
|
modified = true;
|
|
}
|
|
|
|
+ if (swdev_notify)
|
|
+ fdb->added_by_user = 1;
|
|
+
|
|
if (modified)
|
|
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
|
|
}
|
|
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
|
|
index 6dec8e9b34511..20ed7adcf1cc4 100644
|
|
--- a/net/bridge/br_multicast.c
|
|
+++ b/net/bridge/br_multicast.c
|
|
@@ -1420,14 +1420,7 @@ static void br_multicast_query_received(struct net_bridge *br,
|
|
return;
|
|
|
|
br_multicast_update_query_timer(br, query, max_delay);
|
|
-
|
|
- /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
|
|
- * the arrival port for IGMP Queries where the source address
|
|
- * is 0.0.0.0 should not be added to router port list.
|
|
- */
|
|
- if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
|
|
- saddr->proto == htons(ETH_P_IPV6))
|
|
- br_multicast_mark_router(br, port);
|
|
+ br_multicast_mark_router(br, port);
|
|
}
|
|
|
|
static void br_ip4_multicast_query(struct net_bridge *br,
|
|
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
|
|
index a127d14421164..f7d7f32ac673c 100644
|
|
--- a/net/ceph/messenger.c
|
|
+++ b/net/ceph/messenger.c
|
|
@@ -2091,6 +2091,8 @@ static int process_connect(struct ceph_connection *con)
|
|
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
|
|
|
|
if (con->auth) {
|
|
+ int len = le32_to_cpu(con->in_reply.authorizer_len);
|
|
+
|
|
/*
|
|
* Any connection that defines ->get_authorizer()
|
|
* should also define ->add_authorizer_challenge() and
|
|
@@ -2100,8 +2102,7 @@ static int process_connect(struct ceph_connection *con)
|
|
*/
|
|
if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
|
|
ret = con->ops->add_authorizer_challenge(
|
|
- con, con->auth->authorizer_reply_buf,
|
|
- le32_to_cpu(con->in_reply.authorizer_len));
|
|
+ con, con->auth->authorizer_reply_buf, len);
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
@@ -2111,10 +2112,12 @@ static int process_connect(struct ceph_connection *con)
|
|
return 0;
|
|
}
|
|
|
|
- ret = con->ops->verify_authorizer_reply(con);
|
|
- if (ret < 0) {
|
|
- con->error_msg = "bad authorize reply";
|
|
- return ret;
|
|
+ if (len) {
|
|
+ ret = con->ops->verify_authorizer_reply(con);
|
|
+ if (ret < 0) {
|
|
+ con->error_msg = "bad authorize reply";
|
|
+ return ret;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
diff --git a/net/core/filter.c b/net/core/filter.c
|
|
index 8c2411fb25090..fb0080e84bd43 100644
|
|
--- a/net/core/filter.c
|
|
+++ b/net/core/filter.c
|
|
@@ -3930,7 +3930,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
|
|
sk->sk_rcvlowat = val ? : 1;
|
|
break;
|
|
case SO_MARK:
|
|
- sk->sk_mark = val;
|
|
+ if (sk->sk_mark != val) {
|
|
+ sk->sk_mark = val;
|
|
+ sk_dst_reset(sk);
|
|
+ }
|
|
break;
|
|
default:
|
|
ret = -EINVAL;
|
|
@@ -4001,7 +4004,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
|
|
/* Only some options are supported */
|
|
switch (optname) {
|
|
case TCP_BPF_IW:
|
|
- if (val <= 0 || tp->data_segs_out > 0)
|
|
+ if (val <= 0 || tp->data_segs_out > tp->syn_data)
|
|
ret = -EINVAL;
|
|
else
|
|
tp->snd_cwnd = val;
|
|
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
|
|
index 1a4e9ff02762e..5731670c560b0 100644
|
|
--- a/net/ipv4/inet_diag.c
|
|
+++ b/net/ipv4/inet_diag.c
|
|
@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
|
|
+ nla_total_size(1) /* INET_DIAG_TOS */
|
|
+ nla_total_size(1) /* INET_DIAG_TCLASS */
|
|
+ nla_total_size(4) /* INET_DIAG_MARK */
|
|
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
|
|
+ nla_total_size(sizeof(struct inet_diag_meminfo))
|
|
+ nla_total_size(sizeof(struct inet_diag_msg))
|
|
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
|
|
@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
|
|
goto errout;
|
|
}
|
|
|
|
- if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
|
|
+ if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
|
|
+ ext & (1 << (INET_DIAG_TCLASS - 1))) {
|
|
u32 classid = 0;
|
|
|
|
#ifdef CONFIG_SOCK_CGROUP_DATA
|
|
classid = sock_cgroup_classid(&sk->sk_cgrp_data);
|
|
#endif
|
|
+ /* Fallback to socket priority if class id isn't set.
|
|
+ * Classful qdiscs use it as direct reference to class.
|
|
+ * For cgroup2 classid is always zero.
|
|
+ */
|
|
+ if (!classid)
|
|
+ classid = sk->sk_priority;
|
|
|
|
if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
|
|
goto errout;
|
|
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
|
|
index fb1e7f237f531..3cd237b42f446 100644
|
|
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
|
|
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
|
|
@@ -56,7 +56,7 @@ struct clusterip_config {
|
|
#endif
|
|
enum clusterip_hashmode hash_mode; /* which hashing mode */
|
|
u_int32_t hash_initval; /* hash initialization */
|
|
- struct rcu_head rcu;
|
|
+ struct rcu_head rcu; /* for call_rcu_bh */
|
|
struct net *net; /* netns for pernet list */
|
|
char ifname[IFNAMSIZ]; /* device ifname */
|
|
};
|
|
@@ -72,6 +72,8 @@ struct clusterip_net {
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
struct proc_dir_entry *procdir;
|
|
+ /* mutex protects the config->pde*/
|
|
+ struct mutex mutex;
|
|
#endif
|
|
};
|
|
|
|
@@ -118,17 +120,18 @@ clusterip_config_entry_put(struct clusterip_config *c)
|
|
|
|
local_bh_disable();
|
|
if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
|
|
+ list_del_rcu(&c->list);
|
|
+ spin_unlock(&cn->lock);
|
|
+ local_bh_enable();
|
|
/* In case anyone still accesses the file, the open/close
|
|
* functions are also incrementing the refcount on their own,
|
|
* so it's safe to remove the entry even if it's in use. */
|
|
#ifdef CONFIG_PROC_FS
|
|
+ mutex_lock(&cn->mutex);
|
|
if (cn->procdir)
|
|
proc_remove(c->pde);
|
|
+ mutex_unlock(&cn->mutex);
|
|
#endif
|
|
- list_del_rcu(&c->list);
|
|
- spin_unlock(&cn->lock);
|
|
- local_bh_enable();
|
|
-
|
|
return;
|
|
}
|
|
local_bh_enable();
|
|
@@ -278,9 +281,11 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
|
|
|
|
/* create proc dir entry */
|
|
sprintf(buffer, "%pI4", &ip);
|
|
+ mutex_lock(&cn->mutex);
|
|
c->pde = proc_create_data(buffer, 0600,
|
|
cn->procdir,
|
|
&clusterip_proc_fops, c);
|
|
+ mutex_unlock(&cn->mutex);
|
|
if (!c->pde) {
|
|
err = -ENOMEM;
|
|
goto err;
|
|
@@ -833,6 +838,7 @@ static int clusterip_net_init(struct net *net)
|
|
pr_err("Unable to proc dir entry\n");
|
|
return -ENOMEM;
|
|
}
|
|
+ mutex_init(&cn->mutex);
|
|
#endif /* CONFIG_PROC_FS */
|
|
|
|
return 0;
|
|
@@ -841,9 +847,12 @@ static int clusterip_net_init(struct net *net)
|
|
static void clusterip_net_exit(struct net *net)
|
|
{
|
|
struct clusterip_net *cn = clusterip_pernet(net);
|
|
+
|
|
#ifdef CONFIG_PROC_FS
|
|
+ mutex_lock(&cn->mutex);
|
|
proc_remove(cn->procdir);
|
|
cn->procdir = NULL;
|
|
+ mutex_unlock(&cn->mutex);
|
|
#endif
|
|
nf_unregister_net_hook(net, &cip_arp_ops);
|
|
}
|
|
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
|
|
index 8b075f0bc3516..6d0b1f3e927bd 100644
|
|
--- a/net/ipv6/netfilter.c
|
|
+++ b/net/ipv6/netfilter.c
|
|
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
|
|
struct sock *sk = sk_to_full_sk(skb->sk);
|
|
unsigned int hh_len;
|
|
struct dst_entry *dst;
|
|
+ int strict = (ipv6_addr_type(&iph->daddr) &
|
|
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
|
|
struct flowi6 fl6 = {
|
|
.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
|
|
- rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
|
|
+ strict ? skb_dst(skb)->dev->ifindex : 0,
|
|
.flowi6_mark = skb->mark,
|
|
.flowi6_uid = sock_net_uid(net, sk),
|
|
.daddr = iph->daddr,
|
|
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
|
|
index 8d0ba757a46ce..9b2f272ca1649 100644
|
|
--- a/net/ipv6/seg6.c
|
|
+++ b/net/ipv6/seg6.c
|
|
@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
|
|
rcu_read_unlock();
|
|
|
|
genlmsg_end(msg, hdr);
|
|
- genlmsg_reply(msg, info);
|
|
-
|
|
- return 0;
|
|
+ return genlmsg_reply(msg, info);
|
|
|
|
nla_put_failure:
|
|
rcu_read_unlock();
|
|
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
|
|
index eb162bd0e0419..da6d5a3f53995 100644
|
|
--- a/net/ipv6/sit.c
|
|
+++ b/net/ipv6/sit.c
|
|
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
|
|
}
|
|
|
|
err = 0;
|
|
- if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
|
|
+ if (__in6_dev_get(skb->dev) &&
|
|
+ !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
|
|
goto out;
|
|
|
|
if (t->parms.iph.daddr == 0)
|
|
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
|
|
index 76ba2f34ef6b1..cab8b2b647f96 100644
|
|
--- a/net/ipv6/udp.c
|
|
+++ b/net/ipv6/udp.c
|
|
@@ -1322,10 +1322,7 @@ do_udp_sendmsg:
|
|
ipc6.opt = opt;
|
|
|
|
fl6.flowi6_proto = sk->sk_protocol;
|
|
- if (!ipv6_addr_any(daddr))
|
|
- fl6.daddr = *daddr;
|
|
- else
|
|
- fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
|
|
+ fl6.daddr = *daddr;
|
|
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
|
|
fl6.saddr = np->saddr;
|
|
fl6.fl6_sport = inet->inet_sport;
|
|
@@ -1353,6 +1350,9 @@ do_udp_sendmsg:
|
|
}
|
|
}
|
|
|
|
+ if (ipv6_addr_any(&fl6.daddr))
|
|
+ fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
|
|
+
|
|
final_p = fl6_update_dst(&fl6, opt, &final);
|
|
if (final_p)
|
|
connected = false;
|
|
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
|
|
index 5d22eda8a6b1e..c2abe9db1ea24 100644
|
|
--- a/net/mac80211/cfg.c
|
|
+++ b/net/mac80211/cfg.c
|
|
@@ -887,6 +887,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
|
|
BSS_CHANGED_P2P_PS |
|
|
BSS_CHANGED_TXPOWER;
|
|
int err;
|
|
+ int prev_beacon_int;
|
|
|
|
old = sdata_dereference(sdata->u.ap.beacon, sdata);
|
|
if (old)
|
|
@@ -909,6 +910,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
|
|
|
|
sdata->needed_rx_chains = sdata->local->rx_chains;
|
|
|
|
+ prev_beacon_int = sdata->vif.bss_conf.beacon_int;
|
|
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
|
|
|
|
mutex_lock(&local->mtx);
|
|
@@ -917,8 +919,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
|
|
if (!err)
|
|
ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
|
|
mutex_unlock(&local->mtx);
|
|
- if (err)
|
|
+ if (err) {
|
|
+ sdata->vif.bss_conf.beacon_int = prev_beacon_int;
|
|
return err;
|
|
+ }
|
|
|
|
/*
|
|
* Apply control port protocol, this allows us to
|
|
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
|
|
index 21526630bf655..e84103b405341 100644
|
|
--- a/net/mac80211/mesh.h
|
|
+++ b/net/mac80211/mesh.h
|
|
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
|
|
* @dst: mesh path destination mac address
|
|
* @mpp: mesh proxy mac address
|
|
* @rhash: rhashtable list pointer
|
|
+ * @walk_list: linked list containing all mesh_path objects.
|
|
* @gate_list: list pointer for known gates list
|
|
* @sdata: mesh subif
|
|
* @next_hop: mesh neighbor to which frames for this destination will be
|
|
@@ -105,6 +106,7 @@ struct mesh_path {
|
|
u8 dst[ETH_ALEN];
|
|
u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
|
|
struct rhash_head rhash;
|
|
+ struct hlist_node walk_list;
|
|
struct hlist_node gate_list;
|
|
struct ieee80211_sub_if_data *sdata;
|
|
struct sta_info __rcu *next_hop;
|
|
@@ -133,12 +135,16 @@ struct mesh_path {
|
|
* gate's mpath may or may not be resolved and active.
|
|
* @gates_lock: protects updates to known_gates
|
|
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
|
|
+ * @walk_head: linked list containging all mesh_path objects
|
|
+ * @walk_lock: lock protecting walk_head
|
|
* @entries: number of entries in the table
|
|
*/
|
|
struct mesh_table {
|
|
struct hlist_head known_gates;
|
|
spinlock_t gates_lock;
|
|
struct rhashtable rhead;
|
|
+ struct hlist_head walk_head;
|
|
+ spinlock_t walk_lock;
|
|
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
|
|
};
|
|
|
|
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
|
|
index a5125624a76dc..c3a7396fb9556 100644
|
|
--- a/net/mac80211/mesh_pathtbl.c
|
|
+++ b/net/mac80211/mesh_pathtbl.c
|
|
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
|
|
return NULL;
|
|
|
|
INIT_HLIST_HEAD(&newtbl->known_gates);
|
|
+ INIT_HLIST_HEAD(&newtbl->walk_head);
|
|
atomic_set(&newtbl->entries, 0);
|
|
spin_lock_init(&newtbl->gates_lock);
|
|
+ spin_lock_init(&newtbl->walk_lock);
|
|
|
|
return newtbl;
|
|
}
|
|
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
|
|
static struct mesh_path *
|
|
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
- int i = 0, ret;
- struct mesh_path *mpath = NULL;
- struct rhashtable_iter iter;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return NULL;
-
- rhashtable_walk_start(&iter);
+ int i = 0;
+ struct mesh_path *mpath;

- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
if (i++ == idx)
break;
}
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);

- if (IS_ERR(mpath) || !mpath)
+ if (!mpath)
return NULL;

if (mpath_expired(mpath)) {
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
return ERR_PTR(-ENOMEM);

tbl = sdata->u.mesh.mesh_paths;
+ spin_lock_bh(&tbl->walk_lock);
do {
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
&new_mpath->rhash,
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
mpath = rhashtable_lookup_fast(&tbl->rhead,
dst,
mesh_rht_params);
-
+ else if (!ret)
+ hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
} while (unlikely(ret == -EEXIST && !mpath));
+ spin_unlock_bh(&tbl->walk_lock);

- if (ret && ret != -EEXIST)
- return ERR_PTR(ret);
-
- /* At this point either new_mpath was added, or we found a
- * matching entry already in the table; in the latter case
- * free the unnecessary new entry.
- */
- if (ret == -EEXIST) {
+ if (ret) {
kfree(new_mpath);
+
+ if (ret != -EEXIST)
+ return ERR_PTR(ret);
+
new_mpath = mpath;
}
+
sdata->u.mesh.mesh_paths_generation++;
return new_mpath;
}
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,

memcpy(new_mpath->mpp, mpp, ETH_ALEN);
tbl = sdata->u.mesh.mpp_paths;
+
+ spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
&new_mpath->rhash,
mesh_rht_params);
+ if (!ret)
+ hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
+ spin_unlock_bh(&tbl->walk_lock);
+
+ if (ret)
+ kfree(new_mpath);

sdata->u.mesh.mpp_paths_generation++;
return ret;
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;

- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
if (rcu_access_pointer(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
}
}
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
+ hlist_del_rcu(&mpath->walk_list);
rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
mesh_path_free_rcu(tbl, mpath);
}
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ struct hlist_node *n;

+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if (rcu_access_pointer(mpath->next_hop) == sta)
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
{
struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ struct hlist_node *n;

+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if (ether_addr_equal(mpath->mpp, proxy))
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
+ struct hlist_node *n;

- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}

/**
@@ -675,7 +624,7 @@ static int table_path_del(struct mesh_table *tbl,
{
struct mesh_path *mpath;

- rcu_read_lock();
+ spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
if (!mpath) {
rcu_read_unlock();
@@ -683,7 +632,7 @@ static int table_path_del(struct mesh_table *tbl,
}

__mesh_path_del(tbl, mpath);
- rcu_read_unlock();
+ spin_unlock_bh(&tbl->walk_lock);
return 0;
}

@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
struct mesh_table *tbl)
{
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
+ struct hlist_node *n;

- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 51ad330bf8e83..828348b2a504d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2598,6 +2598,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u16 ac, q, hdrlen;
+ int tailroom = 0;

hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2684,8 +2685,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto out;

+ if (sdata->crypto_tx_tailroom_needed_cnt)
+ tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
fwd_skb = skb_copy_expand(skb, local->tx_headroom +
- sdata->encrypt_headroom, 0, GFP_ATOMIC);
+ sdata->encrypt_headroom,
+ tailroom, GFP_ATOMIC);
if (!fwd_skb)
goto out;

diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index d8125616edc79..e1537ace2b90c 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
{
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
+ struct dst_entry *other_dst = route->tuple[!dir].dst;
struct dst_entry *dst = route->tuple[dir].dst;

ft->dir = dir;
@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
ft->src_port = ctt->src.u.tcp.port;
ft->dst_port = ctt->dst.u.tcp.port;

- ft->iifidx = route->tuple[dir].ifindex;
- ft->oifidx = route->tuple[!dir].ifindex;
+ ft->iifidx = other_dst->dev->ifindex;
+ ft->oifidx = dst->dev->ifindex;
ft->dst_cache = dst;
}

diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ed9af46720e14..7d424fd270255 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -291,6 +291,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
int err;

list_for_each_entry(rule, &ctx->chain->rules, list) {
+ if (!nft_is_active_next(ctx->net, rule))
+ continue;
+
err = nft_delrule(ctx, rule);
if (err < 0)
return err;
@@ -4439,6 +4442,8 @@ err6:
err5:
kfree(trans);
err4:
+ if (obj)
+ obj->use--;
kfree(elem.priv);
err3:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 00db27dfd2ff7..b0bc130947c94 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -71,6 +71,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
int ttl_check,
struct nf_osf_hdr_ctx *ctx)
{
+ const __u8 *optpinit = ctx->optp;
unsigned int check_WSS = 0;
int fmatch = FMATCH_WRONG;
int foptsize, optnum;
@@ -160,6 +161,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
}
}

+ if (fmatch != FMATCH_OK)
+ ctx->optp = optpinit;
+
return fmatch == FMATCH_OK;
}

diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 29d6fc73caf99..38da1f5436b48 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -282,6 +282,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
struct xt_target *target = expr->ops->data;
void *info = nft_expr_priv(expr);
+ struct module *me = target->me;
struct xt_tgdtor_param par;

par.net = ctx->net;
@@ -292,7 +293,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
par.target->destroy(&par);

if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
- module_put(target->me);
+ module_put(me);
}

static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 5fd4c57c79cc9..436cc14cfc59b 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -12,6 +12,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack_helper.h>

struct nft_flow_offload {
struct nft_flowtable *flowtable;
@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
memset(&fl, 0, sizeof(fl));
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
- fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+ fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
break;
case NFPROTO_IPV6:
- fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+ fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
break;
}

@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
return -ENOENT;

route->tuple[dir].dst = this_dst;
- route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
route->tuple[!dir].dst = other_dst;
- route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;

return 0;
}
@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
struct nf_flowtable *flowtable = &priv->flowtable->data;
+ const struct nf_conn_help *help;
enum ip_conntrack_info ctinfo;
struct nf_flow_route route;
struct flow_offload *flow;
@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
goto out;
}

- if (test_bit(IPS_HELPER_BIT, &ct->status))
+ help = nfct_help(ct);
+ if (help)
goto out;

if (ctinfo == IP_CT_NEW ||
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c76c21604ffd9..fd16fb836df28 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4275,7 +4275,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
- if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 9ccc93f257db0..38bb882bb9587 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -48,7 +48,7 @@ struct tcindex_data {
u32 hash; /* hash table size; 0 if undefined */
u32 alloc_hash; /* allocated size */
u32 fall_through; /* 0: only classify if explicit match */
- struct rcu_head rcu;
+ struct rcu_work rwork;
};

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
@@ -221,17 +221,11 @@ found:
return 0;
}

-static int tcindex_destroy_element(struct tcf_proto *tp,
- void *arg, struct tcf_walker *walker)
-{
- bool last;
-
- return tcindex_delete(tp, arg, &last, NULL);
-}
-
-static void __tcindex_destroy(struct rcu_head *head)
+static void tcindex_destroy_work(struct work_struct *work)
{
- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+ struct tcindex_data *p = container_of(to_rcu_work(work),
+ struct tcindex_data,
+ rwork);

kfree(p->perfect);
kfree(p->h);
@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}

-static void __tcindex_partial_destroy(struct rcu_head *head)
+static void tcindex_partial_destroy_work(struct work_struct *work)
{
- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+ struct tcindex_data *p = container_of(to_rcu_work(work),
+ struct tcindex_data,
+ rwork);

kfree(p->perfect);
kfree(p);
@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
kfree(cp->perfect);
}

-static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
+static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
int i, err = 0;

@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
if (err < 0)
goto errout;
+#ifdef CONFIG_NET_CLS_ACT
+ cp->perfect[i].exts.net = net;
+#endif
}

return 0;
@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
struct tcindex_filter_result new_filter_result, *old_r = r;
- struct tcindex_filter_result cr;
struct tcindex_data *cp = NULL, *oldp;
struct tcindex_filter *f = NULL; /* make gcc behave */
+ struct tcf_result cr = {};
int err, balloc = 0;
struct tcf_exts e;

@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (p->perfect) {
int i;

- if (tcindex_alloc_perfect_hash(cp) < 0)
+ if (tcindex_alloc_perfect_hash(net, cp) < 0)
goto errout;
for (i = 0; i < cp->hash; i++)
cp->perfect[i].res = p->perfect[i].res;
@@ -346,13 +345,10 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
cp->h = p->h;

err = tcindex_filter_result_init(&new_filter_result);
- if (err < 0)
- goto errout1;
- err = tcindex_filter_result_init(&cr);
if (err < 0)
goto errout1;
if (old_r)
- cr.res = r->res;
+ cr = r->res;

if (tb[TCA_TCINDEX_HASH])
cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
err = -ENOMEM;
if (!cp->perfect && !cp->h) {
if (valid_perfect_hash(cp)) {
- if (tcindex_alloc_perfect_hash(cp) < 0)
+ if (tcindex_alloc_perfect_hash(net, cp) < 0)
goto errout_alloc;
balloc = 1;
} else {
@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
}

if (tb[TCA_TCINDEX_CLASSID]) {
- cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
- tcf_bind_filter(tp, &cr.res, base);
+ cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
+ tcf_bind_filter(tp, &cr, base);
}

if (old_r && old_r != r) {
@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
}

oldp = p;
- r->res = cr.res;
+ r->res = cr;
tcf_exts_change(&r->exts, &e);

rcu_assign_pointer(tp->root, cp);
@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
; /* nothing */

rcu_assign_pointer(*fp, f);
+ } else {
+ tcf_exts_destroy(&new_filter_result.exts);
}

if (oldp)
- call_rcu(&oldp->rcu, __tcindex_partial_destroy);
+ tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
return 0;

errout_alloc:
@@ -487,7 +485,6 @@ errout_alloc:
else if (balloc == 2)
kfree(cp->h);
errout1:
- tcf_exts_destroy(&cr.exts);
tcf_exts_destroy(&new_filter_result.exts);
errout:
kfree(cp);
@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcf_walker walker;
+ int i;

pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
- walker.count = 0;
- walker.skip = 0;
- walker.fn = tcindex_destroy_element;
- tcindex_walk(tp, &walker);

- call_rcu(&p->rcu, __tcindex_destroy);
+ if (p->perfect) {
+ for (i = 0; i < p->hash; i++) {
+ struct tcindex_filter_result *r = p->perfect + i;
+
+ tcf_unbind_filter(tp, &r->res);
+ if (tcf_exts_get_net(&r->exts))
+ tcf_queue_work(&r->rwork,
+ tcindex_destroy_rexts_work);
+ else
+ __tcindex_destroy_rexts(r);
+ }
+ }
+
+ for (i = 0; p->h && i < p->hash; i++) {
+ struct tcindex_filter *f, *next;
+ bool last;
+
+ for (f = rtnl_dereference(p->h[i]); f; f = next) {
+ next = rtnl_dereference(f->next);
+ tcindex_delete(tp, &f->result, &last, NULL);
+ }
+ }
+
+ tcf_queue_work(&p->rwork, tcindex_destroy_work);
}


diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 078f01a8d582a..435847d98b51c 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+ nla_total_size(1) /* INET_DIAG_TOS */
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ nla_total_size(4) /* INET_DIAG_MARK */
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
+ nla_total_size(addrlen * asoc->peer.transport_count)
+ nla_total_size(addrlen * addrcnt)
+ nla_total_size(sizeof(struct inet_diag_meminfo))
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 123e9f2dc2265..edfcf16e704c4 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
skb->csum_not_inet = 0;
+ gso_reset_checksum(skb, ~0);
return sctp_compute_cksum(skb, skb_transport_offset(skb));
}

diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index f24633114dfdf..2936ed17bf9ef 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -144,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
}
}

- for (i = outcnt; i < stream->outcnt; i++)
+ for (i = outcnt; i < stream->outcnt; i++) {
kfree(SCTP_SO(stream, i)->ext);
+ SCTP_SO(stream, i)->ext = NULL;
+ }
}

static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
diff --git a/net/socket.c b/net/socket.c
|
|
index 390a8ecef4bf4..5c820212ba815 100644
|
|
--- a/net/socket.c
|
|
+++ b/net/socket.c
|
|
@@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
|
|
EXPORT_SYMBOL(dlci_ioctl_set);
|
|
|
|
static long sock_do_ioctl(struct net *net, struct socket *sock,
|
|
- unsigned int cmd, unsigned long arg,
|
|
- unsigned int ifreq_size)
|
|
+ unsigned int cmd, unsigned long arg)
|
|
{
|
|
int err;
|
|
void __user *argp = (void __user *)arg;
|
|
@@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
|
|
} else {
|
|
struct ifreq ifr;
|
|
bool need_copyout;
|
|
- if (copy_from_user(&ifr, argp, ifreq_size))
|
|
+ if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
|
|
return -EFAULT;
|
|
err = dev_ioctl(net, cmd, &ifr, &need_copyout);
|
|
if (!err && need_copyout)
|
|
- if (copy_to_user(argp, &ifr, ifreq_size))
|
|
+ if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
|
|
return -EFAULT;
|
|
}
|
|
return err;
|
|
@@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
|
|
err = open_related_ns(&net->ns, get_net_ns);
|
|
break;
|
|
default:
|
|
- err = sock_do_ioctl(net, sock, cmd, arg,
|
|
- sizeof(struct ifreq));
|
|
+ err = sock_do_ioctl(net, sock, cmd, arg);
|
|
break;
|
|
}
|
|
return err;
|
|
@@ -2752,8 +2750,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
|
|
int err;
|
|
|
|
set_fs(KERNEL_DS);
|
|
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
|
|
- sizeof(struct compat_ifreq));
|
|
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
|
|
set_fs(old_fs);
|
|
if (!err)
|
|
err = compat_put_timeval(&ktv, up);
|
|
@@ -2769,8 +2766,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
|
|
int err;
|
|
|
|
set_fs(KERNEL_DS);
|
|
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
|
|
- sizeof(struct compat_ifreq));
|
|
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
|
|
set_fs(old_fs);
|
|
if (!err)
|
|
err = compat_put_timespec(&kts, up);
|
|
@@ -2966,6 +2962,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
|
|
return dev_ioctl(net, cmd, &ifreq, NULL);
|
|
}
|
|
|
|
+static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
|
|
+ unsigned int cmd,
|
|
+ struct compat_ifreq __user *uifr32)
|
|
+{
|
|
+ struct ifreq __user *uifr;
|
|
+ int err;
|
|
+
|
|
+ /* Handle the fact that while struct ifreq has the same *layout* on
|
|
+ * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
|
|
+ * which are handled elsewhere, it still has different *size* due to
|
|
+ * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
|
|
+ * resulting in struct ifreq being 32 and 40 bytes respectively).
|
|
+ * As a result, if the struct happens to be at the end of a page and
|
|
+ * the next page isn't readable/writable, we get a fault. To prevent
|
|
+ * that, copy back and forth to the full size.
|
|
+ */
|
|
+
|
|
+ uifr = compat_alloc_user_space(sizeof(*uifr));
|
|
+ if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
|
|
+
|
|
+ if (!err) {
|
|
+ switch (cmd) {
|
|
+ case SIOCGIFFLAGS:
|
|
+ case SIOCGIFMETRIC:
|
|
+ case SIOCGIFMTU:
|
|
+ case SIOCGIFMEM:
|
|
+ case SIOCGIFHWADDR:
|
|
+ case SIOCGIFINDEX:
|
|
+ case SIOCGIFADDR:
|
|
+ case SIOCGIFBRDADDR:
|
|
+ case SIOCGIFDSTADDR:
|
|
+ case SIOCGIFNETMASK:
|
|
+ case SIOCGIFPFLAGS:
|
|
+ case SIOCGIFTXQLEN:
|
|
+ case SIOCGMIIPHY:
|
|
+ case SIOCGMIIREG:
|
|
+ case SIOCGIFNAME:
|
|
+ if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
|
|
+ err = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ return err;
|
|
+}
|
|
+
|
|
static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
|
|
struct compat_ifreq __user *uifr32)
|
|
{
|
|
@@ -3081,8 +3125,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
|
|
}
|
|
|
|
set_fs(KERNEL_DS);
|
|
- ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
|
|
- sizeof(struct compat_ifreq));
|
|
+ ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
|
|
set_fs(old_fs);
|
|
|
|
out:
|
|
@@ -3182,21 +3225,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
|
|
case SIOCSIFTXQLEN:
|
|
case SIOCBRADDIF:
|
|
case SIOCBRDELIF:
|
|
+ case SIOCGIFNAME:
|
|
case SIOCSIFNAME:
|
|
case SIOCGMIIPHY:
|
|
case SIOCGMIIREG:
|
|
case SIOCSMIIREG:
|
|
- case SIOCSARP:
|
|
- case SIOCGARP:
|
|
- case SIOCDARP:
|
|
- case SIOCATMARK:
|
|
case SIOCBONDENSLAVE:
|
|
case SIOCBONDRELEASE:
|
|
case SIOCBONDSETHWADDR:
|
|
case SIOCBONDCHANGEACTIVE:
|
|
- case SIOCGIFNAME:
|
|
- return sock_do_ioctl(net, sock, cmd, arg,
|
|
- sizeof(struct compat_ifreq));
|
|
+ return compat_ifreq_ioctl(net, sock, cmd, argp);
|
|
+
|
|
+ case SIOCSARP:
|
|
+ case SIOCGARP:
|
|
+ case SIOCDARP:
|
|
+ case SIOCATMARK:
|
|
+ return sock_do_ioctl(net, sock, cmd, arg);
|
|
}
|
|
|
|
return -ENOIOCTLCMD;
|
|
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
|
|
index 956a5ea47b58e..3d6bf790cf1fb 100644
|
|
--- a/net/sunrpc/xprtrdma/verbs.c
|
|
+++ b/net/sunrpc/xprtrdma/verbs.c
|
|
@@ -872,7 +872,7 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
|
|
for (i = 0; i <= buf->rb_sc_last; i++) {
|
|
sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
|
|
if (!sc)
|
|
- goto out_destroy;
|
|
+ return -ENOMEM;
|
|
|
|
sc->sc_xprt = r_xprt;
|
|
buf->rb_sc_ctxs[i] = sc;
|
|
@@ -880,10 +880,6 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
|
|
buf->rb_flags = 0;
|
|
|
|
return 0;
|
|
-
|
|
-out_destroy:
|
|
- rpcrdma_sendctxs_destroy(buf);
|
|
- return -ENOMEM;
|
|
}
|
|
|
|
/* The sendctx queue is not guaranteed to have a size that is a
|
|
diff --git a/security/keys/key.c b/security/keys/key.c
|
|
index d97c9394b5dd4..249a6da4d2770 100644
|
|
--- a/security/keys/key.c
|
|
+++ b/security/keys/key.c
|
|
@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
|
|
|
|
spin_lock(&user->lock);
|
|
if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
|
|
- if (user->qnkeys + 1 >= maxkeys ||
|
|
- user->qnbytes + quotalen >= maxbytes ||
|
|
+ if (user->qnkeys + 1 > maxkeys ||
|
|
+ user->qnbytes + quotalen > maxbytes ||
|
|
user->qnbytes + quotalen < user->qnbytes)
|
|
goto no_quota;
|
|
}
|
|
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
|
|
index 41bcf57e96f21..99a55145ddcd2 100644
|
|
--- a/security/keys/keyring.c
|
|
+++ b/security/keys/keyring.c
|
|
@@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
|
|
BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
|
|
(ctx->flags & STATE_CHECKS) == STATE_CHECKS);
|
|
|
|
- if (ctx->index_key.description)
|
|
- ctx->index_key.desc_len = strlen(ctx->index_key.description);
|
|
-
|
|
/* Check to see if this top-level keyring is what we are looking for
|
|
* and whether it is valid or not.
|
|
*/
|
|
@@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
|
|
struct keyring_search_context ctx = {
|
|
.index_key.type = type,
|
|
.index_key.description = description,
|
|
+ .index_key.desc_len = strlen(description),
|
|
.cred = current_cred(),
|
|
.match_data.cmp = key_default_cmp,
|
|
.match_data.raw_data = description,
|
|
diff --git a/security/keys/proc.c b/security/keys/proc.c
|
|
index 5af2934965d80..d38be9db2cc07 100644
|
|
--- a/security/keys/proc.c
|
|
+++ b/security/keys/proc.c
|
|
@@ -166,8 +166,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
|
|
int rc;
|
|
|
|
struct keyring_search_context ctx = {
|
|
- .index_key.type = key->type,
|
|
- .index_key.description = key->description,
|
|
+ .index_key = key->index_key,
|
|
.cred = m->file->f_cred,
|
|
.match_data.cmp = lookup_user_key_possessed,
|
|
.match_data.raw_data = key,
|
|
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
|
|
index 114f7408feee6..7385536986497 100644
|
|
--- a/security/keys/request_key.c
|
|
+++ b/security/keys/request_key.c
|
|
@@ -545,6 +545,7 @@ struct key *request_key_and_link(struct key_type *type,
|
|
struct keyring_search_context ctx = {
|
|
.index_key.type = type,
|
|
.index_key.description = description,
|
|
+ .index_key.desc_len = strlen(description),
|
|
.cred = current_cred(),
|
|
.match_data.cmp = key_default_cmp,
|
|
.match_data.raw_data = description,
|
|
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
|
|
index 424e1d90412ea..6797843154f03 100644
|
|
--- a/security/keys/request_key_auth.c
|
|
+++ b/security/keys/request_key_auth.c
|
|
@@ -246,7 +246,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
|
|
struct key *authkey;
|
|
key_ref_t authkey_ref;
|
|
|
|
- sprintf(description, "%x", target_id);
|
|
+ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
|
|
|
|
authkey_ref = search_process_keyrings(&ctx);
|
|
|
|
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
|
|
index 9199d91d0a594..bf1ffcaab23fe 100644
|
|
--- a/sound/pci/hda/patch_realtek.c
|
|
+++ b/sound/pci/hda/patch_realtek.c
|
|
@@ -1855,6 +1855,8 @@ enum {
|
|
ALC887_FIXUP_BASS_CHMAP,
|
|
ALC1220_FIXUP_GB_DUAL_CODECS,
|
|
ALC1220_FIXUP_CLEVO_P950,
|
|
+ ALC1220_FIXUP_SYSTEM76_ORYP5,
|
|
+ ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
|
|
};
|
|
|
|
static void alc889_fixup_coef(struct hda_codec *codec,
|
|
@@ -2056,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
|
|
snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
|
|
}
|
|
|
|
+static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
|
|
+ const struct hda_fixup *fix, int action);
|
|
+
|
|
+static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
|
|
+ const struct hda_fixup *fix,
|
|
+ int action)
|
|
+{
|
|
+ alc1220_fixup_clevo_p950(codec, fix, action);
|
|
+ alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
|
|
+}
|
|
+
|
|
static const struct hda_fixup alc882_fixups[] = {
|
|
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
|
|
.type = HDA_FIXUP_PINS,
|
|
@@ -2300,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
|
|
.type = HDA_FIXUP_FUNC,
|
|
.v.func = alc1220_fixup_clevo_p950,
|
|
},
|
|
+ [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
|
|
+ .type = HDA_FIXUP_FUNC,
|
|
+ .v.func = alc1220_fixup_system76_oryp5,
|
|
+ },
|
|
+ [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
|
|
+ .type = HDA_FIXUP_PINS,
|
|
+ .v.pins = (const struct hda_pintbl[]) {
|
|
+ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
|
|
+ {}
|
|
+ },
|
|
+ .chained = true,
|
|
+ .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
|
|
+ },
|
|
};
|
|
|
|
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
|
|
@@ -2376,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
|
|
SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
|
|
SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
|
|
+ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
|
|
+ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
|
|
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
|
|
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
|
|
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
|
|
@@ -5573,6 +5601,7 @@ enum {
|
|
ALC294_FIXUP_ASUS_HEADSET_MIC,
|
|
ALC294_FIXUP_ASUS_SPK,
|
|
ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
|
|
+ ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
|
|
};
|
|
|
|
static const struct hda_fixup alc269_fixups[] = {
|
|
@@ -6506,6 +6535,17 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
.chained = true,
|
|
.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
|
|
},
|
|
+ [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
|
|
+ .type = HDA_FIXUP_VERBS,
|
|
+ .v.verbs = (const struct hda_verb[]) {
|
|
+ /* Disable PCBEEP-IN passthrough */
|
|
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
|
|
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
|
|
+ { }
|
|
+ },
|
|
+ .chained = true,
|
|
+ .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
|
|
+ },
|
|
};
|
|
|
|
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
@@ -7187,7 +7227,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
|
|
{0x12, 0x90a60130},
|
|
{0x19, 0x03a11020},
|
|
{0x21, 0x0321101f}),
|
|
- SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
|
|
+ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
|
|
{0x12, 0x90a60130},
|
|
{0x14, 0x90170110},
|
|
{0x19, 0x04a11040},
|
|
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
|
|
index d029cad08cbd8..89f8b0dae7ef0 100644
|
|
--- a/tools/testing/selftests/bpf/test_progs.c
|
|
+++ b/tools/testing/selftests/bpf/test_progs.c
|
|
@@ -1136,7 +1136,9 @@ static void test_stacktrace_build_id(void)
|
|
int i, j;
|
|
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
|
|
int build_id_matches = 0;
|
|
+ int retry = 1;
|
|
|
|
+retry:
|
|
err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
|
|
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
|
|
goto out;
|
|
@@ -1249,6 +1251,19 @@ static void test_stacktrace_build_id(void)
|
|
previous_key = key;
|
|
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
|
|
|
|
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
|
|
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
|
|
+ * try it one more time.
|
|
+ */
|
|
+ if (build_id_matches < 1 && retry--) {
|
|
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
|
|
+ close(pmu_fd);
|
|
+ bpf_object__close(obj);
|
|
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
|
|
+ __func__);
|
|
+ goto retry;
|
|
+ }
|
|
+
|
|
if (CHECK(build_id_matches < 1, "build id match",
|
|
"Didn't find expected build ID from the map\n"))
|
|
goto disable_pmu;
|
|
@@ -1289,7 +1304,9 @@ static void test_stacktrace_build_id_nmi(void)
|
|
int i, j;
|
|
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
|
|
int build_id_matches = 0;
|
|
+ int retry = 1;
|
|
|
|
+retry:
|
|
err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
|
|
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
|
|
return;
|
|
@@ -1384,6 +1401,19 @@ static void test_stacktrace_build_id_nmi(void)
|
|
previous_key = key;
|
|
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
|
|
|
|
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
|
|
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
|
|
+ * try it one more time.
|
|
+ */
|
|
+ if (build_id_matches < 1 && retry--) {
|
|
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
|
|
+ close(pmu_fd);
|
|
+ bpf_object__close(obj);
|
|
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
|
|
+ __func__);
|
|
+ goto retry;
|
|
+ }
|
|
+
|
|
if (CHECK(build_id_matches < 1, "build id match",
|
|
"Didn't find expected build ID from the map\n"))
|
|
goto disable_pmu;
|
|
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
|
|
index aeeb76a54d633..e38f1cb7089d3 100644
|
|
--- a/tools/testing/selftests/bpf/test_sock_addr.c
|
|
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
|
|
@@ -44,6 +44,7 @@
|
|
#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
|
|
#define SRC6_IP "::1"
|
|
#define SRC6_REWRITE_IP "::6"
|
|
+#define WILDCARD6_IP "::"
|
|
#define SERV6_PORT 6060
|
|
#define SERV6_REWRITE_PORT 6666
|
|
|
|
@@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
|
|
static int bind6_prog_load(const struct sock_addr_test *test);
|
|
static int connect4_prog_load(const struct sock_addr_test *test);
|
|
static int connect6_prog_load(const struct sock_addr_test *test);
|
|
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
|
|
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
|
|
|
|
static struct sock_addr_test tests[] = {
|
|
/* bind */
|
|
@@ -462,6 +465,34 @@ static struct sock_addr_test tests[] = {
|
|
SRC6_REWRITE_IP,
|
|
SYSCALL_ENOTSUPP,
|
|
},
|
|
+ {
|
|
+ "sendmsg6: set dst IP = [::] (BSD'ism)",
|
|
+ sendmsg6_rw_wildcard_prog_load,
|
|
+ BPF_CGROUP_UDP6_SENDMSG,
|
|
+ BPF_CGROUP_UDP6_SENDMSG,
|
|
+ AF_INET6,
|
|
+ SOCK_DGRAM,
|
|
+ SERV6_IP,
|
|
+ SERV6_PORT,
|
|
+ SERV6_REWRITE_IP,
|
|
+ SERV6_REWRITE_PORT,
|
|
+ SRC6_REWRITE_IP,
|
|
+ SUCCESS,
|
|
+ },
|
|
+ {
|
|
+ "sendmsg6: preserve dst IP = [::] (BSD'ism)",
|
|
+ sendmsg_allow_prog_load,
|
|
+ BPF_CGROUP_UDP6_SENDMSG,
|
|
+ BPF_CGROUP_UDP6_SENDMSG,
|
|
+ AF_INET6,
|
|
+ SOCK_DGRAM,
|
|
+ WILDCARD6_IP,
|
|
+ SERV6_PORT,
|
|
+ SERV6_REWRITE_IP,
|
|
+ SERV6_PORT,
|
|
+ SRC6_IP,
|
|
+ SUCCESS,
|
|
+ },
|
|
{
|
|
"sendmsg6: deny call",
|
|
sendmsg_deny_prog_load,
|
|
@@ -714,16 +745,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
|
|
return load_path(test, CONNECT6_PROG_PATH);
|
|
}
|
|
|
|
-static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
|
|
+static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
|
|
+ int32_t rc)
|
|
{
|
|
struct bpf_insn insns[] = {
|
|
- /* return 0 */
|
|
- BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
+ /* return rc */
|
|
+ BPF_MOV64_IMM(BPF_REG_0, rc),
|
|
BPF_EXIT_INSN(),
|
|
};
|
|
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
|
|
}
|
|
|
|
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
|
|
+{
|
|
+ return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
|
|
+}
|
|
+
|
|
+static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
|
|
+{
|
|
+ return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
|
|
+}
|
|
+
|
|
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
|
|
{
|
|
struct sockaddr_in dst4_rw_addr;
|
|
@@ -844,6 +886,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
|
|
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
|
|
}
|
|
|
|
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
|
|
+{
|
|
+ return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
|
|
+}
|
|
+
|
|
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
|
|
{
|
|
return load_path(test, SENDMSG6_PROG_PATH);
|
|
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
|
|
index d8313d0438b74..b90dff8d3a94b 100755
|
|
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
|
|
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
|
|
@@ -1,7 +1,7 @@
|
|
#!/bin/bash
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
|
|
-ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
|
|
+ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
|
|
NUM_NETIFS=4
|
|
CHECK_TC="yes"
|
|
source lib.sh
|
|
@@ -96,6 +96,51 @@ flooding()
|
|
flood_test $swp2 $h1 $h2
|
|
}
|
|
|
|
+vlan_deletion()
|
|
+{
|
|
+ # Test that the deletion of a VLAN on a bridge port does not affect
|
|
+ # the PVID VLAN
|
|
+ log_info "Add and delete a VLAN on bridge port $swp1"
|
|
+
|
|
+ bridge vlan add vid 10 dev $swp1
|
|
+ bridge vlan del vid 10 dev $swp1
|
|
+
|
|
+ ping_ipv4
|
|
+ ping_ipv6
|
|
+}
|
|
+
|
|
+extern_learn()
|
|
+{
|
|
+ local mac=de:ad:be:ef:13:37
|
|
+ local ageing_time
|
|
+
|
|
+ # Test that externally learned FDB entries can roam, but not age out
|
|
+ RET=0
|
|
+
|
|
+ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
|
|
+
|
|
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
|
|
+ check_err $? "Did not find FDB entry when should"
|
|
+
|
|
+ # Wait for 10 seconds after the ageing time to make sure the FDB entry
|
|
+ # was not aged out
|
|
+ ageing_time=$(bridge_ageing_time_get br0)
|
|
+ sleep $((ageing_time + 10))
|
|
+
|
|
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
|
|
+ check_err $? "FDB entry was aged out when should not"
|
|
+
|
|
+ $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
|
|
+
|
|
+ bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
|
|
+ check_err $? "FDB entry did not roam when should"
|
|
+
|
|
+ log_test "Externally learned FDB entry - ageing & roaming"
|
|
+
|
|
+ bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
|
|
+ bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
|
|
+}
|
|
+
|
|
trap cleanup EXIT
|
|
|
|
setup_prepare
|
|
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
|
|
index 637ea0219617f..0da3545cabdb6 100644
|
|
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
|
|
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
|
|
@@ -17,7 +17,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -41,7 +41,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -65,7 +65,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -89,7 +89,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -113,7 +113,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -137,7 +137,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -161,7 +161,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 90",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -185,7 +185,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
|
|
"expExitCode": "255",
|
|
"verifyCmd": "$TC actions get action ife index 90",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
|
|
"matchCount": "0",
|
|
"teardown": []
|
|
},
|
|
@@ -207,7 +207,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -231,7 +231,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -255,7 +255,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -279,7 +279,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -303,7 +303,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -327,7 +327,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -351,7 +351,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 99",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -375,7 +375,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
|
|
"expExitCode": "255",
|
|
"verifyCmd": "$TC actions get action ife index 99",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
|
|
"matchCount": "0",
|
|
"teardown": []
|
|
},
|
|
@@ -397,7 +397,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -421,7 +421,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -445,7 +445,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -469,7 +469,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -493,7 +493,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 77",
|
|
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -517,7 +517,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 77",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -541,7 +541,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 77",
|
|
- "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -565,7 +565,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -589,7 +589,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
|
|
"expExitCode": "255",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
|
|
"matchCount": "0",
|
|
"teardown": []
|
|
},
|
|
@@ -611,7 +611,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -635,7 +635,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -659,7 +659,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 11",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -683,7 +683,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -707,7 +707,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 21",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -731,7 +731,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 21",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -739,7 +739,7 @@
|
|
},
|
|
{
|
|
"id": "fac3",
|
|
- "name": "Create valid ife encode action with index at 32-bit maximnum",
|
|
+ "name": "Create valid ife encode action with index at 32-bit maximum",
|
|
"category": [
|
|
"actions",
|
|
"ife"
|
|
@@ -755,7 +755,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 4294967295",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -779,7 +779,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife decode pass index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
|
|
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -803,7 +803,7 @@
"cmdUnderTest": "$TC actions add action ife decode pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -827,7 +827,7 @@
"cmdUnderTest": "$TC actions add action ife decode continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -851,7 +851,7 @@
"cmdUnderTest": "$TC actions add action ife decode drop index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -875,7 +875,7 @@
"cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -899,7 +899,7 @@
"cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -923,7 +923,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4294967295999",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
"matchCount": "0",
"teardown": []
},
@@ -945,7 +945,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
"matchCount": "0",
"teardown": []
},
@@ -967,7 +967,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -991,7 +991,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
"matchCount": "0",
"teardown": []
},
@@ -1013,7 +1013,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
"matchCount": "0",
"teardown": []
},
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index 10b2d894e4362..e7e15a7336b6d 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -81,35 +81,6 @@
]
]
},
- {
- "id": "ba4e",
- "name": "Add tunnel_key set action with missing mandatory id parameter",
- "category": [
- "actions",
- "tunnel_key"
- ],
- "setup": [
- [
- "$TC actions flush action tunnel_key",
- 0,
- 1,
- 255
- ]
- ],
- "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
- "expExitCode": "255",
- "verifyCmd": "$TC actions list action tunnel_key",
- "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
- "matchCount": "0",
- "teardown": [
- [
- "$TC actions flush action tunnel_key",
- 0,
- 1,
- 255
- ]
- ]
- },
{
"id": "a5e0",
"name": "Add tunnel_key set action with invalid src_ip parameter",
@@ -634,7 +605,7 @@
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action tunnel_key index 4",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
"matchCount": "1",
"teardown": [
"$TC actions flush action tunnel_key"