mirror of
https://github.com/Fishwaldo/build.git
synced 2025-07-23 21:39:02 +00:00
7703 lines
255 KiB
Diff
7703 lines
255 KiB
Diff
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
|
|
index 5138a2f6232a..646cb3525373 100644
|
|
--- a/Documentation/devicetree/bindings/Makefile
|
|
+++ b/Documentation/devicetree/bindings/Makefile
|
|
@@ -12,7 +12,6 @@ $(obj)/%.example.dts: $(src)/%.yaml FORCE
|
|
$(call if_changed,chk_binding)
|
|
|
|
DT_TMP_SCHEMA := processed-schema.yaml
|
|
-extra-y += $(DT_TMP_SCHEMA)
|
|
|
|
quiet_cmd_mk_schema = SCHEMA $@
|
|
cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(real-prereqs)
|
|
@@ -26,8 +25,12 @@ DT_DOCS = $(shell \
|
|
|
|
DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
|
|
|
|
+ifeq ($(CHECK_DTBS),)
|
|
extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
|
|
extra-y += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
|
|
+endif
|
|
|
|
$(obj)/$(DT_TMP_SCHEMA): $(DT_SCHEMA_FILES) FORCE
|
|
$(call if_changed,mk_schema)
|
|
+
|
|
+extra-y += $(DT_TMP_SCHEMA)
|
|
diff --git a/Documentation/devicetree/writing-schema.rst b/Documentation/devicetree/writing-schema.rst
|
|
index f4a638072262..83e04e5c342d 100644
|
|
--- a/Documentation/devicetree/writing-schema.rst
|
|
+++ b/Documentation/devicetree/writing-schema.rst
|
|
@@ -130,11 +130,13 @@ binding schema. All of the DT binding documents can be validated using the
|
|
|
|
make dt_binding_check
|
|
|
|
-In order to perform validation of DT source files, use the `dtbs_check` target::
|
|
+In order to perform validation of DT source files, use the ``dtbs_check`` target::
|
|
|
|
make dtbs_check
|
|
|
|
-This will first run the `dt_binding_check` which generates the processed schema.
|
|
+Note that ``dtbs_check`` will skip any binding schema files with errors. It is
|
|
+necessary to use ``dt_binding_check`` to get all the validation errors in the
|
|
+binding schema files.
|
|
|
|
It is also possible to run checks with a single schema file by setting the
|
|
``DT_SCHEMA_FILES`` variable to a specific schema file.
|
|
diff --git a/Makefile b/Makefile
|
|
index 0e2e0a034064..1adee1b06f3d 100644
|
|
--- a/Makefile
|
|
+++ b/Makefile
|
|
@@ -1,7 +1,7 @@
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
VERSION = 5
|
|
PATCHLEVEL = 4
|
|
-SUBLEVEL = 7
|
|
+SUBLEVEL = 8
|
|
EXTRAVERSION =
|
|
NAME = Kleptomaniac Octopus
|
|
|
|
diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h
|
|
index b36c0289a308..6a0f1f524466 100644
|
|
--- a/arch/arm/boot/compressed/libfdt_env.h
|
|
+++ b/arch/arm/boot/compressed/libfdt_env.h
|
|
@@ -2,11 +2,13 @@
|
|
#ifndef _ARM_LIBFDT_ENV_H
|
|
#define _ARM_LIBFDT_ENV_H
|
|
|
|
+#include <linux/limits.h>
|
|
#include <linux/types.h>
|
|
#include <linux/string.h>
|
|
#include <asm/byteorder.h>
|
|
|
|
-#define INT_MAX ((int)(~0U>>1))
|
|
+#define INT32_MAX S32_MAX
|
|
+#define UINT32_MAX U32_MAX
|
|
|
|
typedef __be16 fdt16_t;
|
|
typedef __be32 fdt32_t;
|
|
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
|
|
index db9247898300..287ef898a55e 100644
|
|
--- a/arch/arm/mm/dma-mapping-nommu.c
|
|
+++ b/arch/arm/mm/dma-mapping-nommu.c
|
|
@@ -35,7 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
|
|
unsigned long attrs)
|
|
|
|
{
|
|
- void *ret = dma_alloc_from_global_coherent(size, dma_handle);
|
|
+ void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
|
|
|
|
/*
|
|
* dma_alloc_from_global_coherent() may fail because:
|
|
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
|
|
index 9a07916af8dd..a6554fdb56c5 100644
|
|
--- a/arch/arm/mm/proc-v7-bugs.c
|
|
+++ b/arch/arm/mm/proc-v7-bugs.c
|
|
@@ -65,6 +65,9 @@ static void cpu_v7_spectre_init(void)
|
|
break;
|
|
|
|
#ifdef CONFIG_ARM_PSCI
|
|
+ case ARM_CPU_PART_BRAHMA_B53:
|
|
+ /* Requires no workaround */
|
|
+ break;
|
|
default:
|
|
/* Other ARM CPUs require no workaround */
|
|
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
|
|
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
|
|
index fb842965d541..9228f7386220 100644
|
|
--- a/arch/mips/include/asm/barrier.h
|
|
+++ b/arch/mips/include/asm/barrier.h
|
|
@@ -218,14 +218,13 @@
|
|
* ordering will be done by smp_llsc_mb() and friends.
|
|
*/
|
|
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
|
|
-# define __WEAK_LLSC_MB sync
|
|
-# define smp_llsc_mb() \
|
|
- __asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
|
|
-# define __LLSC_CLOBBER
|
|
+#define __WEAK_LLSC_MB " sync \n"
|
|
+#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
|
|
+#define __LLSC_CLOBBER
|
|
#else
|
|
-# define __WEAK_LLSC_MB
|
|
-# define smp_llsc_mb() do { } while (0)
|
|
-# define __LLSC_CLOBBER "memory"
|
|
+#define __WEAK_LLSC_MB " \n"
|
|
+#define smp_llsc_mb() do { } while (0)
|
|
+#define __LLSC_CLOBBER "memory"
|
|
#endif
|
|
|
|
#ifdef CONFIG_CPU_CAVIUM_OCTEON
|
|
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
|
|
index 110220705e97..b83b0397462d 100644
|
|
--- a/arch/mips/include/asm/futex.h
|
|
+++ b/arch/mips/include/asm/futex.h
|
|
@@ -16,7 +16,6 @@
|
|
#include <asm/barrier.h>
|
|
#include <asm/compiler.h>
|
|
#include <asm/errno.h>
|
|
-#include <asm/sync.h>
|
|
#include <asm/war.h>
|
|
|
|
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
|
|
@@ -33,7 +32,7 @@
|
|
" .set arch=r4000 \n" \
|
|
"2: sc $1, %2 \n" \
|
|
" beqzl $1, 1b \n" \
|
|
- __stringify(__WEAK_LLSC_MB) " \n" \
|
|
+ __WEAK_LLSC_MB \
|
|
"3: \n" \
|
|
" .insn \n" \
|
|
" .set pop \n" \
|
|
@@ -51,19 +50,19 @@
|
|
"i" (-EFAULT) \
|
|
: "memory"); \
|
|
} else if (cpu_has_llsc) { \
|
|
+ loongson_llsc_mb(); \
|
|
__asm__ __volatile__( \
|
|
" .set push \n" \
|
|
" .set noat \n" \
|
|
" .set push \n" \
|
|
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
|
|
- " " __SYNC(full, loongson3_war) " \n" \
|
|
"1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
|
|
" .set pop \n" \
|
|
" " insn " \n" \
|
|
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
|
|
"2: "user_sc("$1", "%2")" \n" \
|
|
" beqz $1, 1b \n" \
|
|
- __stringify(__WEAK_LLSC_MB) " \n" \
|
|
+ __WEAK_LLSC_MB \
|
|
"3: \n" \
|
|
" .insn \n" \
|
|
" .set pop \n" \
|
|
@@ -148,7 +147,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|
" .set arch=r4000 \n"
|
|
"2: sc $1, %2 \n"
|
|
" beqzl $1, 1b \n"
|
|
- __stringify(__WEAK_LLSC_MB) " \n"
|
|
+ __WEAK_LLSC_MB
|
|
"3: \n"
|
|
" .insn \n"
|
|
" .set pop \n"
|
|
@@ -165,13 +164,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|
"i" (-EFAULT)
|
|
: "memory");
|
|
} else if (cpu_has_llsc) {
|
|
+ loongson_llsc_mb();
|
|
__asm__ __volatile__(
|
|
"# futex_atomic_cmpxchg_inatomic \n"
|
|
" .set push \n"
|
|
" .set noat \n"
|
|
" .set push \n"
|
|
" .set "MIPS_ISA_ARCH_LEVEL" \n"
|
|
- " " __SYNC(full, loongson3_war) " \n"
|
|
"1: "user_ll("%1", "%3")" \n"
|
|
" bne %1, %z4, 3f \n"
|
|
" .set pop \n"
|
|
@@ -179,7 +178,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|
" .set "MIPS_ISA_ARCH_LEVEL" \n"
|
|
"2: "user_sc("$1", "%2")" \n"
|
|
" beqz $1, 1b \n"
|
|
- "3: " __SYNC_ELSE(full, loongson3_war, __WEAK_LLSC_MB) "\n"
|
|
+ __WEAK_LLSC_MB
|
|
+ "3: \n"
|
|
" .insn \n"
|
|
" .set pop \n"
|
|
" .section .fixup,\"ax\" \n"
|
|
@@ -194,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|
: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
|
|
"i" (-EFAULT)
|
|
: "memory");
|
|
+ loongson_llsc_mb();
|
|
} else
|
|
return -ENOSYS;
|
|
|
|
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
|
|
index 83522c9fc7b6..37ac731a556b 100644
|
|
--- a/arch/powerpc/Makefile
|
|
+++ b/arch/powerpc/Makefile
|
|
@@ -91,11 +91,13 @@ MULTIPLEWORD := -mmultiple
|
|
endif
|
|
|
|
ifdef CONFIG_PPC64
|
|
+ifndef CONFIG_CC_IS_CLANG
|
|
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
|
|
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
|
|
aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
|
|
aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
|
|
endif
|
|
+endif
|
|
|
|
ifndef CONFIG_CC_IS_CLANG
|
|
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
|
|
@@ -141,6 +143,7 @@ endif
|
|
endif
|
|
|
|
CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
|
|
+ifndef CONFIG_CC_IS_CLANG
|
|
ifdef CONFIG_CPU_LITTLE_ENDIAN
|
|
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
|
|
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
|
|
@@ -149,6 +152,7 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
|
|
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
|
|
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
|
|
endif
|
|
+endif
|
|
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
|
|
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
|
|
|
|
diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
|
|
index 2abc8e83b95e..9757d4f6331e 100644
|
|
--- a/arch/powerpc/boot/libfdt_env.h
|
|
+++ b/arch/powerpc/boot/libfdt_env.h
|
|
@@ -6,6 +6,8 @@
|
|
#include <string.h>
|
|
|
|
#define INT_MAX ((int)(~0U>>1))
|
|
+#define UINT32_MAX ((u32)~0U)
|
|
+#define INT32_MAX ((s32)(UINT32_MAX >> 1))
|
|
|
|
#include "of.h"
|
|
|
|
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
|
|
index 0cfc365d814b..722289a1d000 100644
|
|
--- a/arch/powerpc/include/asm/fixmap.h
|
|
+++ b/arch/powerpc/include/asm/fixmap.h
|
|
@@ -77,7 +77,12 @@ enum fixed_addresses {
|
|
static inline void __set_fixmap(enum fixed_addresses idx,
|
|
phys_addr_t phys, pgprot_t flags)
|
|
{
|
|
- map_kernel_page(fix_to_virt(idx), phys, flags);
|
|
+ if (__builtin_constant_p(idx))
|
|
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
|
|
+ else if (WARN_ON(idx >= __end_of_fixed_addresses))
|
|
+ return;
|
|
+
|
|
+ map_kernel_page(__fix_to_virt(idx), phys, flags);
|
|
}
|
|
|
|
#endif /* !__ASSEMBLY__ */
|
|
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
|
|
index cac95a3f30c2..e9a960e28f3c 100644
|
|
--- a/arch/powerpc/include/asm/spinlock.h
|
|
+++ b/arch/powerpc/include/asm/spinlock.h
|
|
@@ -36,12 +36,10 @@
|
|
#endif
|
|
|
|
#ifdef CONFIG_PPC_PSERIES
|
|
-DECLARE_STATIC_KEY_FALSE(shared_processor);
|
|
-
|
|
#define vcpu_is_preempted vcpu_is_preempted
|
|
static inline bool vcpu_is_preempted(int cpu)
|
|
{
|
|
- if (!static_branch_unlikely(&shared_processor))
|
|
+ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
|
|
return false;
|
|
return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
|
|
}
|
|
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
|
|
index 15002b51ff18..c92fe7fe9692 100644
|
|
--- a/arch/powerpc/include/asm/uaccess.h
|
|
+++ b/arch/powerpc/include/asm/uaccess.h
|
|
@@ -401,7 +401,7 @@ copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
|
|
return n;
|
|
}
|
|
|
|
-extern unsigned long __clear_user(void __user *addr, unsigned long size);
|
|
+unsigned long __arch_clear_user(void __user *addr, unsigned long size);
|
|
|
|
static inline unsigned long clear_user(void __user *addr, unsigned long size)
|
|
{
|
|
@@ -409,12 +409,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
|
|
might_fault();
|
|
if (likely(access_ok(addr, size))) {
|
|
allow_write_to_user(addr, size);
|
|
- ret = __clear_user(addr, size);
|
|
+ ret = __arch_clear_user(addr, size);
|
|
prevent_write_to_user(addr, size);
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
|
|
+{
|
|
+ return clear_user(addr, size);
|
|
+}
|
|
+
|
|
extern long strncpy_from_user(char *dst, const char __user *src, long count);
|
|
extern __must_check long strnlen_user(const char __user *str, long n);
|
|
|
|
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
|
|
index d9279d0ee9f5..c031be8d41ff 100644
|
|
--- a/arch/powerpc/kernel/eeh_driver.c
|
|
+++ b/arch/powerpc/kernel/eeh_driver.c
|
|
@@ -897,12 +897,12 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
|
|
|
|
/* Log the event */
|
|
if (pe->type & EEH_PE_PHB) {
|
|
- pr_err("EEH: PHB#%x failure detected, location: %s\n",
|
|
+ pr_err("EEH: Recovering PHB#%x, location: %s\n",
|
|
pe->phb->global_number, eeh_pe_loc_get(pe));
|
|
} else {
|
|
struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);
|
|
|
|
- pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
|
|
+ pr_err("EEH: Recovering PHB#%x-PE#%x\n",
|
|
pe->phb->global_number, pe->addr);
|
|
pr_err("EEH: PE location: %s, PHB location: %s\n",
|
|
eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
|
|
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
|
|
index bd91dceb7010..d341b464f23c 100644
|
|
--- a/arch/powerpc/kernel/security.c
|
|
+++ b/arch/powerpc/kernel/security.c
|
|
@@ -142,32 +142,33 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
|
|
|
|
thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
|
|
|
|
- if (rfi_flush || thread_priv) {
|
|
+ if (rfi_flush) {
|
|
struct seq_buf s;
|
|
seq_buf_init(&s, buf, PAGE_SIZE - 1);
|
|
|
|
- seq_buf_printf(&s, "Mitigation: ");
|
|
-
|
|
- if (rfi_flush)
|
|
- seq_buf_printf(&s, "RFI Flush");
|
|
-
|
|
- if (rfi_flush && thread_priv)
|
|
- seq_buf_printf(&s, ", ");
|
|
-
|
|
+ seq_buf_printf(&s, "Mitigation: RFI Flush");
|
|
if (thread_priv)
|
|
- seq_buf_printf(&s, "L1D private per thread");
|
|
+ seq_buf_printf(&s, ", L1D private per thread");
|
|
|
|
seq_buf_printf(&s, "\n");
|
|
|
|
return s.len;
|
|
}
|
|
|
|
+ if (thread_priv)
|
|
+ return sprintf(buf, "Vulnerable: L1D private per thread\n");
|
|
+
|
|
if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
|
|
!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
|
|
return sprintf(buf, "Not affected\n");
|
|
|
|
return sprintf(buf, "Vulnerable\n");
|
|
}
|
|
+
|
|
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
|
|
+{
|
|
+ return cpu_show_meltdown(dev, attr, buf);
|
|
+}
|
|
#endif
|
|
|
|
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
|
|
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
|
|
index 619447b1b797..11301a1187f3 100644
|
|
--- a/arch/powerpc/kernel/time.c
|
|
+++ b/arch/powerpc/kernel/time.c
|
|
@@ -232,7 +232,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
|
|
* Accumulate stolen time by scanning the dispatch trace log.
|
|
* Called on entry from user mode.
|
|
*/
|
|
-void accumulate_stolen_time(void)
|
|
+void notrace accumulate_stolen_time(void)
|
|
{
|
|
u64 sst, ust;
|
|
unsigned long save_irq_soft_mask = irq_soft_mask_return();
|
|
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
|
|
index 82f43535e686..014ff0701f24 100644
|
|
--- a/arch/powerpc/kernel/traps.c
|
|
+++ b/arch/powerpc/kernel/traps.c
|
|
@@ -250,15 +250,22 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
|
|
}
|
|
NOKPROBE_SYMBOL(oops_end);
|
|
|
|
+static char *get_mmu_str(void)
|
|
+{
|
|
+ if (early_radix_enabled())
|
|
+ return " MMU=Radix";
|
|
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
|
|
+ return " MMU=Hash";
|
|
+ return "";
|
|
+}
|
|
+
|
|
static int __die(const char *str, struct pt_regs *regs, long err)
|
|
{
|
|
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
|
|
|
|
- printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s%s %s\n",
|
|
+ printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
|
|
IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
|
|
- PAGE_SIZE / 1024,
|
|
- early_radix_enabled() ? " MMU=Radix" : "",
|
|
- early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ? " MMU=Hash" : "",
|
|
+ PAGE_SIZE / 1024, get_mmu_str(),
|
|
IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
|
|
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
|
|
IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
|
|
diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S
|
|
index f69a6aab7bfb..1ddb26394e8a 100644
|
|
--- a/arch/powerpc/lib/string_32.S
|
|
+++ b/arch/powerpc/lib/string_32.S
|
|
@@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
|
|
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
|
|
CACHELINE_MASK = (L1_CACHE_BYTES-1)
|
|
|
|
-_GLOBAL(__clear_user)
|
|
+_GLOBAL(__arch_clear_user)
|
|
/*
|
|
* Use dcbz on the complete cache lines in the destination
|
|
* to set them to zero. This requires that the destination
|
|
@@ -87,4 +87,4 @@ _GLOBAL(__clear_user)
|
|
EX_TABLE(8b, 91b)
|
|
EX_TABLE(9b, 91b)
|
|
|
|
-EXPORT_SYMBOL(__clear_user)
|
|
+EXPORT_SYMBOL(__arch_clear_user)
|
|
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
|
|
index 507b18b1660e..169872bc0892 100644
|
|
--- a/arch/powerpc/lib/string_64.S
|
|
+++ b/arch/powerpc/lib/string_64.S
|
|
@@ -17,7 +17,7 @@ PPC64_CACHES:
|
|
.section ".text"
|
|
|
|
/**
|
|
- * __clear_user: - Zero a block of memory in user space, with less checking.
|
|
+ * __arch_clear_user: - Zero a block of memory in user space, with less checking.
|
|
* @to: Destination address, in user space.
|
|
* @n: Number of bytes to zero.
|
|
*
|
|
@@ -58,7 +58,7 @@ err3; stb r0,0(r3)
|
|
mr r3,r4
|
|
blr
|
|
|
|
-_GLOBAL_TOC(__clear_user)
|
|
+_GLOBAL_TOC(__arch_clear_user)
|
|
cmpdi r4,32
|
|
neg r6,r3
|
|
li r0,0
|
|
@@ -181,4 +181,4 @@ err1; dcbz 0,r3
|
|
cmpdi r4,32
|
|
blt .Lshort_clear
|
|
b .Lmedium_clear
|
|
-EXPORT_SYMBOL(__clear_user)
|
|
+EXPORT_SYMBOL(__arch_clear_user)
|
|
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
|
|
index 6c123760164e..83c51a7d7eee 100644
|
|
--- a/arch/powerpc/mm/book3s64/hash_utils.c
|
|
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
|
|
@@ -294,10 +294,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
|
|
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
|
|
HPTE_V_BOLTED, psize, psize,
|
|
ssize);
|
|
-
|
|
+ if (ret == -1) {
|
|
+ /* Try to remove a non bolted entry */
|
|
+ ret = mmu_hash_ops.hpte_remove(hpteg);
|
|
+ if (ret != -1)
|
|
+ ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
|
|
+ HPTE_V_BOLTED, psize, psize,
|
|
+ ssize);
|
|
+ }
|
|
if (ret < 0)
|
|
break;
|
|
|
|
+ cond_resched();
|
|
#ifdef CONFIG_DEBUG_PAGEALLOC
|
|
if (debug_pagealloc_enabled() &&
|
|
(paddr >> PAGE_SHIFT) < linear_map_hash_count)
|
|
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
|
|
index b33251d75927..572651a5c87b 100644
|
|
--- a/arch/powerpc/platforms/pseries/cmm.c
|
|
+++ b/arch/powerpc/platforms/pseries/cmm.c
|
|
@@ -411,6 +411,10 @@ static struct bus_type cmm_subsys = {
|
|
.dev_name = "cmm",
|
|
};
|
|
|
|
+static void cmm_release_device(struct device *dev)
|
|
+{
|
|
+}
|
|
+
|
|
/**
|
|
* cmm_sysfs_register - Register with sysfs
|
|
*
|
|
@@ -426,6 +430,7 @@ static int cmm_sysfs_register(struct device *dev)
|
|
|
|
dev->id = 0;
|
|
dev->bus = &cmm_subsys;
|
|
+ dev->release = cmm_release_device;
|
|
|
|
if ((rc = device_register(dev)))
|
|
goto subsys_unregister;
|
|
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
|
|
index 61883291defc..ee07d0718bf1 100644
|
|
--- a/arch/powerpc/platforms/pseries/papr_scm.c
|
|
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
|
|
@@ -152,7 +152,7 @@ static int papr_scm_meta_get(struct papr_scm_priv *p,
|
|
int len, read;
|
|
int64_t ret;
|
|
|
|
- if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
|
|
+ if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
|
|
return -EINVAL;
|
|
|
|
for (len = hdr->in_length; len; len -= read) {
|
|
@@ -206,7 +206,7 @@ static int papr_scm_meta_set(struct papr_scm_priv *p,
|
|
__be64 data_be;
|
|
int64_t ret;
|
|
|
|
- if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
|
|
+ if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
|
|
return -EINVAL;
|
|
|
|
for (len = hdr->in_length; len; len -= wrote) {
|
|
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
|
|
index 0c8421dd01ab..0a40201f315f 100644
|
|
--- a/arch/powerpc/platforms/pseries/setup.c
|
|
+++ b/arch/powerpc/platforms/pseries/setup.c
|
|
@@ -74,9 +74,6 @@
|
|
#include "pseries.h"
|
|
#include "../../../../drivers/pci/pci.h"
|
|
|
|
-DEFINE_STATIC_KEY_FALSE(shared_processor);
|
|
-EXPORT_SYMBOL_GPL(shared_processor);
|
|
-
|
|
int CMO_PrPSP = -1;
|
|
int CMO_SecPSP = -1;
|
|
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
|
|
@@ -761,10 +758,6 @@ static void __init pSeries_setup_arch(void)
|
|
|
|
if (firmware_has_feature(FW_FEATURE_LPAR)) {
|
|
vpa_init(boot_cpuid);
|
|
-
|
|
- if (lppaca_shared_proc(get_lppaca()))
|
|
- static_branch_enable(&shared_processor);
|
|
-
|
|
ppc_md.power_save = pseries_lpar_idle;
|
|
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
|
|
#ifdef CONFIG_PCI_IOV
|
|
diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
|
|
index 2b4e959caa36..7b9fe0a567cf 100755
|
|
--- a/arch/powerpc/tools/relocs_check.sh
|
|
+++ b/arch/powerpc/tools/relocs_check.sh
|
|
@@ -20,7 +20,7 @@ objdump="$1"
|
|
vmlinux="$2"
|
|
|
|
bad_relocs=$(
|
|
-"$objdump" -R "$vmlinux" |
|
|
+$objdump -R "$vmlinux" |
|
|
# Only look at relocation lines.
|
|
grep -E '\<R_' |
|
|
# These relocations are okay
|
|
diff --git a/arch/powerpc/tools/unrel_branch_check.sh b/arch/powerpc/tools/unrel_branch_check.sh
|
|
index 1e972df3107e..77114755dc6f 100755
|
|
--- a/arch/powerpc/tools/unrel_branch_check.sh
|
|
+++ b/arch/powerpc/tools/unrel_branch_check.sh
|
|
@@ -18,14 +18,14 @@ vmlinux="$2"
|
|
#__end_interrupts should be located within the first 64K
|
|
|
|
end_intr=0x$(
|
|
-"$objdump" -R "$vmlinux" -d --start-address=0xc000000000000000 \
|
|
+$objdump -R "$vmlinux" -d --start-address=0xc000000000000000 \
|
|
--stop-address=0xc000000000010000 |
|
|
grep '\<__end_interrupts>:' |
|
|
awk '{print $1}'
|
|
)
|
|
|
|
BRANCHES=$(
|
|
-"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \
|
|
+$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \
|
|
--stop-address=${end_intr} |
|
|
grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" |
|
|
grep -v '\<__start_initialization_multiplatform>' |
|
|
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
|
|
index d402ced7f7c3..cb8b1cc285c9 100644
|
|
--- a/arch/s390/kernel/machine_kexec.c
|
|
+++ b/arch/s390/kernel/machine_kexec.c
|
|
@@ -164,7 +164,9 @@ static bool kdump_csum_valid(struct kimage *image)
|
|
#ifdef CONFIG_CRASH_DUMP
|
|
int rc;
|
|
|
|
+ preempt_disable();
|
|
rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
|
|
+ preempt_enable();
|
|
return rc == 0;
|
|
#else
|
|
return false;
|
|
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
|
|
index 3d8b12a9a6ff..7511b71d2931 100644
|
|
--- a/arch/s390/kernel/perf_cpum_sf.c
|
|
+++ b/arch/s390/kernel/perf_cpum_sf.c
|
|
@@ -193,7 +193,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
|
|
unsigned long num_sdb, gfp_t gfp_flags)
|
|
{
|
|
int i, rc;
|
|
- unsigned long *new, *tail;
|
|
+ unsigned long *new, *tail, *tail_prev = NULL;
|
|
|
|
if (!sfb->sdbt || !sfb->tail)
|
|
return -EINVAL;
|
|
@@ -232,6 +232,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
|
|
sfb->num_sdbt++;
|
|
/* Link current page to tail of chain */
|
|
*tail = (unsigned long)(void *) new + 1;
|
|
+ tail_prev = tail;
|
|
tail = new;
|
|
}
|
|
|
|
@@ -241,10 +242,22 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
|
|
* issue, a new realloc call (if required) might succeed.
|
|
*/
|
|
rc = alloc_sample_data_block(tail, gfp_flags);
|
|
- if (rc)
|
|
+ if (rc) {
|
|
+ /* Undo last SDBT. An SDBT with no SDB at its first
|
|
+ * entry but with an SDBT entry instead can not be
|
|
+ * handled by the interrupt handler code.
|
|
+ * Avoid this situation.
|
|
+ */
|
|
+ if (tail_prev) {
|
|
+ sfb->num_sdbt--;
|
|
+ free_page((unsigned long) new);
|
|
+ tail = tail_prev;
|
|
+ }
|
|
break;
|
|
+ }
|
|
sfb->num_sdb++;
|
|
tail++;
|
|
+ tail_prev = new = NULL; /* Allocated at least one SBD */
|
|
}
|
|
|
|
/* Link sampling buffer to its origin */
|
|
diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
|
|
index a8204f952315..6e609b13c0ce 100644
|
|
--- a/arch/s390/kernel/unwind_bc.c
|
|
+++ b/arch/s390/kernel/unwind_bc.c
|
|
@@ -60,6 +60,11 @@ bool unwind_next_frame(struct unwind_state *state)
|
|
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
|
|
reliable = false;
|
|
regs = NULL;
|
|
+ if (!__kernel_text_address(ip)) {
|
|
+ /* skip bogus %r14 */
|
|
+ state->regs = NULL;
|
|
+ return unwind_next_frame(state);
|
|
+ }
|
|
} else {
|
|
sf = (struct stack_frame *) state->sp;
|
|
sp = READ_ONCE_NOCHECK(sf->back_chain);
|
|
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
|
|
index 59ad7997fed1..de7ca4b6718f 100644
|
|
--- a/arch/s390/mm/maccess.c
|
|
+++ b/arch/s390/mm/maccess.c
|
|
@@ -119,9 +119,15 @@ static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
|
|
*/
|
|
int memcpy_real(void *dest, void *src, size_t count)
|
|
{
|
|
- if (S390_lowcore.nodat_stack != 0)
|
|
- return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
|
|
- 3, dest, src, count);
|
|
+ int rc;
|
|
+
|
|
+ if (S390_lowcore.nodat_stack != 0) {
|
|
+ preempt_disable();
|
|
+ rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
|
|
+ dest, src, count);
|
|
+ preempt_enable();
|
|
+ return rc;
|
|
+ }
|
|
/*
|
|
* This is a really early memcpy_real call, the stacks are
|
|
* not set up yet. Just call _memcpy_real on the early boot
|
|
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
|
|
index fc8c52cff5aa..c5643a59a8c7 100644
|
|
--- a/arch/um/drivers/virtio_uml.c
|
|
+++ b/arch/um/drivers/virtio_uml.c
|
|
@@ -83,7 +83,7 @@ static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
|
|
return 0;
|
|
}
|
|
|
|
-static int full_read(int fd, void *buf, int len)
|
|
+static int full_read(int fd, void *buf, int len, bool abortable)
|
|
{
|
|
int rc;
|
|
|
|
@@ -93,7 +93,7 @@ static int full_read(int fd, void *buf, int len)
|
|
buf += rc;
|
|
len -= rc;
|
|
}
|
|
- } while (len && (rc > 0 || rc == -EINTR));
|
|
+ } while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
|
|
|
|
if (rc < 0)
|
|
return rc;
|
|
@@ -104,7 +104,7 @@ static int full_read(int fd, void *buf, int len)
|
|
|
|
static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
|
|
{
|
|
- return full_read(fd, msg, sizeof(msg->header));
|
|
+ return full_read(fd, msg, sizeof(msg->header), true);
|
|
}
|
|
|
|
static int vhost_user_recv(int fd, struct vhost_user_msg *msg,
|
|
@@ -118,7 +118,7 @@ static int vhost_user_recv(int fd, struct vhost_user_msg *msg,
|
|
size = msg->header.size;
|
|
if (size > max_payload_size)
|
|
return -EPROTO;
|
|
- return full_read(fd, &msg->payload, size);
|
|
+ return full_read(fd, &msg->payload, size, false);
|
|
}
|
|
|
|
static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
|
|
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
|
|
index ac42ae4651ce..eebdcbef0578 100644
|
|
--- a/drivers/cdrom/cdrom.c
|
|
+++ b/drivers/cdrom/cdrom.c
|
|
@@ -996,6 +996,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
|
|
tracks->xa = 0;
|
|
tracks->error = 0;
|
|
cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
|
|
+
|
|
+ if (!CDROM_CAN(CDC_PLAY_AUDIO)) {
|
|
+ tracks->error = CDS_NO_INFO;
|
|
+ return;
|
|
+ }
|
|
+
|
|
/* Grab the TOC header so we can see how many tracks there are */
|
|
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
|
|
if (ret) {
|
|
@@ -1162,7 +1168,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
|
|
ret = open_for_data(cdi);
|
|
if (ret)
|
|
goto err;
|
|
- cdrom_mmc3_profile(cdi);
|
|
+ if (CDROM_CAN(CDC_GENERIC_PACKET))
|
|
+ cdrom_mmc3_profile(cdi);
|
|
if (mode & FMODE_WRITE) {
|
|
ret = -EROFS;
|
|
if (cdrom_open_write(cdi))
|
|
@@ -2882,6 +2889,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
|
|
it doesn't give enough information or fails. then we return
|
|
the toc contents. */
|
|
use_toc:
|
|
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
|
|
+ return -ENOSYS;
|
|
+
|
|
toc.cdte_format = CDROM_MSF;
|
|
toc.cdte_track = CDROM_LEADOUT;
|
|
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
|
|
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
|
|
index 9d930edd6516..13304cf5f2a8 100644
|
|
--- a/drivers/clk/clk-gpio.c
|
|
+++ b/drivers/clk/clk-gpio.c
|
|
@@ -280,7 +280,7 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
|
|
else
|
|
clk = clk_register_gpio_gate(&pdev->dev, node->name,
|
|
parent_names ? parent_names[0] : NULL, gpiod,
|
|
- 0);
|
|
+ CLK_SET_RATE_PARENT);
|
|
if (IS_ERR(clk))
|
|
return PTR_ERR(clk);
|
|
|
|
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
|
|
index 287fdeae7c7c..7b123105b5de 100644
|
|
--- a/drivers/clk/pxa/clk-pxa27x.c
|
|
+++ b/drivers/clk/pxa/clk-pxa27x.c
|
|
@@ -459,6 +459,7 @@ struct dummy_clk {
|
|
};
|
|
static struct dummy_clk dummy_clks[] __initdata = {
|
|
DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"),
|
|
+ DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"),
|
|
DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
|
|
DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
|
|
};
|
|
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
|
|
index b98b81ef43a1..5a89ed88cc27 100644
|
|
--- a/drivers/clk/qcom/clk-rcg2.c
|
|
+++ b/drivers/clk/qcom/clk-rcg2.c
|
|
@@ -220,6 +220,8 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
|
|
if (clk_flags & CLK_SET_RATE_PARENT) {
|
|
rate = f->freq;
|
|
if (f->pre_div) {
|
|
+ if (!rate)
|
|
+ rate = req->rate;
|
|
rate /= 2;
|
|
rate *= f->pre_div + 1;
|
|
}
|
|
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
|
|
index fef5e8157061..930fa4a4c52a 100644
|
|
--- a/drivers/clk/qcom/clk-smd-rpm.c
|
|
+++ b/drivers/clk/qcom/clk-smd-rpm.c
|
|
@@ -648,6 +648,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
|
|
};
|
|
|
|
/* msm8998 */
|
|
+DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
|
|
DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
|
|
DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
|
|
DEFINE_CLK_SMD_RPM(msm8998, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
|
|
@@ -670,6 +671,8 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
|
|
DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
|
|
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
|
|
static struct clk_smd_rpm *msm8998_clks[] = {
|
|
+ [RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk,
|
|
+ [RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk,
|
|
[RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
|
|
[RPM_SMD_SNOC_A_CLK] = &msm8998_snoc_a_clk,
|
|
[RPM_SMD_CNOC_CLK] = &msm8998_cnoc_clk,
|
|
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
|
|
index 28ddc747d703..bdeacebbf0e4 100644
|
|
--- a/drivers/clk/qcom/common.c
|
|
+++ b/drivers/clk/qcom/common.c
|
|
@@ -29,6 +29,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
|
|
if (!f)
|
|
return NULL;
|
|
|
|
+ if (!f->freq)
|
|
+ return f;
|
|
+
|
|
for (; f->freq; f++)
|
|
if (rate <= f->freq)
|
|
return f;
|
|
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
|
|
index 9f09a59161e7..5b39d3701fa3 100644
|
|
--- a/drivers/clocksource/asm9260_timer.c
|
|
+++ b/drivers/clocksource/asm9260_timer.c
|
|
@@ -194,6 +194,10 @@ static int __init asm9260_timer_init(struct device_node *np)
|
|
}
|
|
|
|
clk = of_clk_get(np, 0);
|
|
+ if (IS_ERR(clk)) {
|
|
+ pr_err("Failed to get clk!\n");
|
|
+ return PTR_ERR(clk);
|
|
+ }
|
|
|
|
ret = clk_prepare_enable(clk);
|
|
if (ret) {
|
|
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
|
|
index 11ff701ff4bb..a3c73e972fce 100644
|
|
--- a/drivers/clocksource/timer-of.c
|
|
+++ b/drivers/clocksource/timer-of.c
|
|
@@ -192,7 +192,7 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
|
|
}
|
|
|
|
if (!to->clkevt.name)
|
|
- to->clkevt.name = np->name;
|
|
+ to->clkevt.name = np->full_name;
|
|
|
|
to->np = np;
|
|
|
|
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
|
|
index 06664fbd2d91..89792083d62c 100644
|
|
--- a/drivers/dma/fsl-qdma.c
|
|
+++ b/drivers/dma/fsl-qdma.c
|
|
@@ -1155,6 +1155,9 @@ static int fsl_qdma_probe(struct platform_device *pdev)
|
|
return ret;
|
|
|
|
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
|
|
+ if (fsl_qdma->irq_base < 0)
|
|
+ return fsl_qdma->irq_base;
|
|
+
|
|
fsl_qdma->feature = of_property_read_bool(np, "big-endian");
|
|
INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
|
|
|
|
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
|
|
index 5d56f1e4d332..43acba2a1c0e 100644
|
|
--- a/drivers/dma/xilinx/xilinx_dma.c
|
|
+++ b/drivers/dma/xilinx/xilinx_dma.c
|
|
@@ -1433,6 +1433,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
|
|
|
|
chan->err = false;
|
|
chan->idle = true;
|
|
+ chan->desc_pendingcount = 0;
|
|
chan->desc_submitcount = 0;
|
|
|
|
return err;
|
|
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
|
|
index e9e47c0d5be7..490ce7bae25e 100644
|
|
--- a/drivers/gpio/gpio-lynxpoint.c
|
|
+++ b/drivers/gpio/gpio-lynxpoint.c
|
|
@@ -164,6 +164,12 @@ static int lp_irq_type(struct irq_data *d, unsigned type)
|
|
value |= TRIG_SEL_BIT | INT_INV_BIT;
|
|
|
|
outl(value, reg);
|
|
+
|
|
+ if (type & IRQ_TYPE_EDGE_BOTH)
|
|
+ irq_set_handler_locked(d, handle_edge_irq);
|
|
+ else if (type & IRQ_TYPE_LEVEL_MASK)
|
|
+ irq_set_handler_locked(d, handle_level_irq);
|
|
+
|
|
spin_unlock_irqrestore(&lg->lock, flags);
|
|
|
|
return 0;
|
|
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
|
|
index 16a47de29c94..a031cbcdf6ef 100644
|
|
--- a/drivers/gpio/gpio-mpc8xxx.c
|
|
+++ b/drivers/gpio/gpio-mpc8xxx.c
|
|
@@ -377,7 +377,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
|
|
* It's assumed that only a single type of gpio controller is available
|
|
* on the current machine, so overwriting global data is fine.
|
|
*/
|
|
- mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
|
|
+ if (devtype->irq_set_type)
|
|
+ mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
|
|
|
|
if (devtype->gpio_dir_out)
|
|
gc->direction_output = devtype->gpio_dir_out;
|
|
@@ -386,6 +387,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
|
|
|
|
gc->to_irq = mpc8xxx_gpio_to_irq;
|
|
|
|
+ if (of_device_is_compatible(np, "fsl,qoriq-gpio"))
|
|
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
|
|
+
|
|
ret = gpiochip_add_data(gc, mpc8xxx_gc);
|
|
if (ret) {
|
|
pr_err("%pOF: GPIO chip registration failed with status %d\n",
|
|
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
|
|
index 7907a8755866..c77d474185f3 100644
|
|
--- a/drivers/gpio/gpio-mxc.c
|
|
+++ b/drivers/gpio/gpio-mxc.c
|
|
@@ -411,6 +411,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
|
|
{
|
|
struct device_node *np = pdev->dev.of_node;
|
|
struct mxc_gpio_port *port;
|
|
+ int irq_count;
|
|
int irq_base;
|
|
int err;
|
|
|
|
@@ -426,9 +427,15 @@ static int mxc_gpio_probe(struct platform_device *pdev)
|
|
if (IS_ERR(port->base))
|
|
return PTR_ERR(port->base);
|
|
|
|
- port->irq_high = platform_get_irq(pdev, 1);
|
|
- if (port->irq_high < 0)
|
|
- port->irq_high = 0;
|
|
+ irq_count = platform_irq_count(pdev);
|
|
+ if (irq_count < 0)
|
|
+ return irq_count;
|
|
+
|
|
+ if (irq_count > 1) {
|
|
+ port->irq_high = platform_get_irq(pdev, 1);
|
|
+ if (port->irq_high < 0)
|
|
+ port->irq_high = 0;
|
|
+ }
|
|
|
|
port->irq = platform_get_irq(pdev, 0);
|
|
if (port->irq < 0)
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
|
|
index dff41d0a85fe..c0e41f1f0c23 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
|
|
@@ -35,6 +35,7 @@
|
|
#include <linux/hmm.h>
|
|
#include <linux/pagemap.h>
|
|
#include <linux/sched/task.h>
|
|
+#include <linux/sched/mm.h>
|
|
#include <linux/seq_file.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/swap.h>
|
|
@@ -788,7 +789,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
|
|
struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
|
|
struct ttm_tt *ttm = bo->tbo.ttm;
|
|
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
|
- struct mm_struct *mm = gtt->usertask->mm;
|
|
+ struct mm_struct *mm;
|
|
unsigned long start = gtt->userptr;
|
|
struct vm_area_struct *vma;
|
|
struct hmm_range *range;
|
|
@@ -796,25 +797,14 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
|
|
uint64_t *pfns;
|
|
int r = 0;
|
|
|
|
- if (!mm) /* Happens during process shutdown */
|
|
- return -ESRCH;
|
|
-
|
|
if (unlikely(!mirror)) {
|
|
DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
|
|
- r = -EFAULT;
|
|
- goto out;
|
|
+ return -EFAULT;
|
|
}
|
|
|
|
- vma = find_vma(mm, start);
|
|
- if (unlikely(!vma || start < vma->vm_start)) {
|
|
- r = -EFAULT;
|
|
- goto out;
|
|
- }
|
|
- if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
|
|
- vma->vm_file)) {
|
|
- r = -EPERM;
|
|
- goto out;
|
|
- }
|
|
+ mm = mirror->hmm->mmu_notifier.mm;
|
|
+ if (!mmget_not_zero(mm)) /* Happens during process shutdown */
|
|
+ return -ESRCH;
|
|
|
|
range = kzalloc(sizeof(*range), GFP_KERNEL);
|
|
if (unlikely(!range)) {
|
|
@@ -847,6 +837,17 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
|
|
hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
|
|
|
|
down_read(&mm->mmap_sem);
|
|
+ vma = find_vma(mm, start);
|
|
+ if (unlikely(!vma || start < vma->vm_start)) {
|
|
+ r = -EFAULT;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+ if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
|
|
+ vma->vm_file)) {
|
|
+ r = -EPERM;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+
|
|
r = hmm_range_fault(range, 0);
|
|
up_read(&mm->mmap_sem);
|
|
|
|
@@ -865,15 +866,19 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
|
|
}
|
|
|
|
gtt->range = range;
|
|
+ mmput(mm);
|
|
|
|
return 0;
|
|
|
|
+out_unlock:
|
|
+ up_read(&mm->mmap_sem);
|
|
out_free_pfns:
|
|
hmm_range_unregister(range);
|
|
kvfree(pfns);
|
|
out_free_ranges:
|
|
kfree(range);
|
|
out:
|
|
+ mmput(mm);
|
|
return r;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
|
|
index 892ce636ef72..6ee04803c362 100644
|
|
--- a/drivers/gpu/drm/drm_property.c
|
|
+++ b/drivers/gpu/drm/drm_property.c
|
|
@@ -561,7 +561,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
|
|
struct drm_property_blob *blob;
|
|
int ret;
|
|
|
|
- if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
|
|
+ if (!length || length > INT_MAX - sizeof(struct drm_property_blob))
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
|
|
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
|
|
index 2fa3587d974f..e0b241bd3070 100644
|
|
--- a/drivers/hid/hid-core.c
|
|
+++ b/drivers/hid/hid-core.c
|
|
@@ -781,6 +781,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
|
|
if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
|
|
parser->global.report_size == 8)
|
|
parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
|
|
+
|
|
+ if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
|
|
+ parser->global.report_size == 8)
|
|
+ parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
|
|
}
|
|
|
|
static void hid_scan_collection(struct hid_parser *parser, unsigned type)
|
|
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
|
|
index 447e8db21174..6273e7178e78 100644
|
|
--- a/drivers/hid/hid-ids.h
|
|
+++ b/drivers/hid/hid-ids.h
|
|
@@ -573,6 +573,7 @@
|
|
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
|
|
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941 0x0941
|
|
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
|
|
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a 0x1f4a
|
|
|
|
#define USB_VENDOR_ID_HUION 0x256c
|
|
#define USB_DEVICE_ID_HUION_TABLET 0x006e
|
|
@@ -959,6 +960,7 @@
|
|
|
|
#define I2C_VENDOR_ID_RAYDIUM 0x2386
|
|
#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
|
|
+#define I2C_PRODUCT_ID_RAYDIUM_3118 0x3118
|
|
|
|
#define USB_VENDOR_ID_RAZER 0x1532
|
|
#define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
|
|
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
|
|
index 8e91e2f06cb4..cd9193078525 100644
|
|
--- a/drivers/hid/hid-logitech-hidpp.c
|
|
+++ b/drivers/hid/hid-logitech-hidpp.c
|
|
@@ -1102,6 +1102,9 @@ static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp,
|
|
ret = hidpp_send_fap_command_sync(hidpp, feature_index,
|
|
CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS,
|
|
NULL, 0, &response);
|
|
+ /* Ignore these intermittent errors */
|
|
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR)
|
|
+ return -EIO;
|
|
if (ret > 0) {
|
|
hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
|
|
__func__, ret);
|
|
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
|
|
index c50bcd967d99..9a35af1e2662 100644
|
|
--- a/drivers/hid/hid-quirks.c
|
|
+++ b/drivers/hid/hid-quirks.c
|
|
@@ -94,6 +94,7 @@ static const struct hid_device_id hid_quirks[] = {
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a), HID_QUIRK_ALWAYS_POLL },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
|
|
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
|
|
index 7c6abd7e0979..9ce22acdfaca 100644
|
|
--- a/drivers/hid/hid-rmi.c
|
|
+++ b/drivers/hid/hid-rmi.c
|
|
@@ -744,7 +744,8 @@ static void rmi_remove(struct hid_device *hdev)
|
|
{
|
|
struct rmi_data *hdata = hid_get_drvdata(hdev);
|
|
|
|
- if (hdata->device_flags & RMI_DEVICE) {
|
|
+ if ((hdata->device_flags & RMI_DEVICE)
|
|
+ && test_bit(RMI_STARTED, &hdata->flags)) {
|
|
clear_bit(RMI_STARTED, &hdata->flags);
|
|
cancel_work_sync(&hdata->reset_work);
|
|
rmi_unregister_transport_device(&hdata->xport);
|
|
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
|
|
index 04c088131e04..7608ee053114 100644
|
|
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
|
|
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
|
|
@@ -170,6 +170,8 @@ static const struct i2c_hid_quirks {
|
|
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
|
|
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
|
|
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
|
|
+ { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
|
|
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
|
|
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
|
|
I2C_HID_QUIRK_BOGUS_IRQ },
|
|
{ 0, 0 }
|
|
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
|
|
index 53a60c81e220..05ead1735c6e 100644
|
|
--- a/drivers/hv/vmbus_drv.c
|
|
+++ b/drivers/hv/vmbus_drv.c
|
|
@@ -2308,7 +2308,7 @@ static void hv_crash_handler(struct pt_regs *regs)
|
|
vmbus_connection.conn_state = DISCONNECTED;
|
|
cpu = smp_processor_id();
|
|
hv_stimer_cleanup(cpu);
|
|
- hv_synic_cleanup(cpu);
|
|
+ hv_synic_disable_regs(cpu);
|
|
hyperv_cleanup();
|
|
};
|
|
|
|
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
|
|
index b24e7b937f21..84cfed17ff4f 100644
|
|
--- a/drivers/i2c/busses/i2c-stm32f7.c
|
|
+++ b/drivers/i2c/busses/i2c-stm32f7.c
|
|
@@ -1985,6 +1985,11 @@ pm_disable:
|
|
pm_runtime_set_suspended(i2c_dev->dev);
|
|
pm_runtime_dont_use_autosuspend(i2c_dev->dev);
|
|
|
|
+ if (i2c_dev->dma) {
|
|
+ stm32_i2c_dma_free(i2c_dev->dma);
|
|
+ i2c_dev->dma = NULL;
|
|
+ }
|
|
+
|
|
clk_free:
|
|
clk_disable_unprepare(i2c_dev->clk);
|
|
|
|
@@ -1995,21 +2000,21 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
|
|
{
|
|
struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
|
|
|
|
- if (i2c_dev->dma) {
|
|
- stm32_i2c_dma_free(i2c_dev->dma);
|
|
- i2c_dev->dma = NULL;
|
|
- }
|
|
-
|
|
i2c_del_adapter(&i2c_dev->adap);
|
|
pm_runtime_get_sync(i2c_dev->dev);
|
|
|
|
- clk_disable_unprepare(i2c_dev->clk);
|
|
-
|
|
pm_runtime_put_noidle(i2c_dev->dev);
|
|
pm_runtime_disable(i2c_dev->dev);
|
|
pm_runtime_set_suspended(i2c_dev->dev);
|
|
pm_runtime_dont_use_autosuspend(i2c_dev->dev);
|
|
|
|
+ if (i2c_dev->dma) {
|
|
+ stm32_i2c_dma_free(i2c_dev->dma);
|
|
+ i2c_dev->dma = NULL;
|
|
+ }
|
|
+
|
|
+ clk_disable_unprepare(i2c_dev->clk);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
|
|
index 24c4b691b1c9..ae60442efda0 100644
|
|
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
|
|
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
|
|
@@ -3156,6 +3156,8 @@ static int __maybe_unused mxt_suspend(struct device *dev)
|
|
|
|
mutex_unlock(&input_dev->mutex);
|
|
|
|
+ disable_irq(data->irq);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -3168,6 +3170,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
|
|
if (!input_dev)
|
|
return 0;
|
|
|
|
+ enable_irq(data->irq);
|
|
+
|
|
mutex_lock(&input_dev->mutex);
|
|
|
|
if (input_dev->users)
|
|
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
|
|
index e9006407c9bc..f4ebdab06280 100644
|
|
--- a/drivers/input/touchscreen/ili210x.c
|
|
+++ b/drivers/input/touchscreen/ili210x.c
|
|
@@ -334,7 +334,12 @@ static int ili210x_i2c_probe(struct i2c_client *client,
|
|
input_set_abs_params(input, ABS_MT_POSITION_X, 0, 0xffff, 0, 0);
|
|
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 0xffff, 0, 0);
|
|
touchscreen_parse_properties(input, true, &priv->prop);
|
|
- input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT);
|
|
+
|
|
+ error = input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT);
|
|
+ if (error) {
|
|
+ dev_err(dev, "Unable to set up slots, err: %d\n", error);
|
|
+ return error;
|
|
+ }
|
|
|
|
error = devm_add_action(dev, ili210x_cancel_work, priv);
|
|
if (error)
|
|
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
|
|
index 1139714e72e2..1c5f8875cb79 100644
|
|
--- a/drivers/input/touchscreen/st1232.c
|
|
+++ b/drivers/input/touchscreen/st1232.c
|
|
@@ -149,6 +149,11 @@ static void st1232_ts_power(struct st1232_ts_data *ts, bool poweron)
|
|
gpiod_set_value_cansleep(ts->reset_gpio, !poweron);
|
|
}
|
|
|
|
+static void st1232_ts_power_off(void *data)
|
|
+{
|
|
+ st1232_ts_power(data, false);
|
|
+}
|
|
+
|
|
static const struct st_chip_info st1232_chip_info = {
|
|
.have_z = true,
|
|
.max_x = 0x31f, /* 800 - 1 */
|
|
@@ -229,6 +234,13 @@ static int st1232_ts_probe(struct i2c_client *client,
|
|
|
|
st1232_ts_power(ts, true);
|
|
|
|
+ error = devm_add_action_or_reset(&client->dev, st1232_ts_power_off, ts);
|
|
+ if (error) {
|
|
+ dev_err(&client->dev,
|
|
+ "Failed to install power off action: %d\n", error);
|
|
+ return error;
|
|
+ }
|
|
+
|
|
input_dev->name = "st1232-touchscreen";
|
|
input_dev->id.bustype = BUS_I2C;
|
|
input_dev->dev.parent = &client->dev;
|
|
@@ -271,15 +283,6 @@ static int st1232_ts_probe(struct i2c_client *client,
|
|
return 0;
|
|
}
|
|
|
|
-static int st1232_ts_remove(struct i2c_client *client)
|
|
-{
|
|
- struct st1232_ts_data *ts = i2c_get_clientdata(client);
|
|
-
|
|
- st1232_ts_power(ts, false);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
static int __maybe_unused st1232_ts_suspend(struct device *dev)
|
|
{
|
|
struct i2c_client *client = to_i2c_client(dev);
|
|
@@ -329,7 +332,6 @@ MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
|
|
|
|
static struct i2c_driver st1232_ts_driver = {
|
|
.probe = st1232_ts_probe,
|
|
- .remove = st1232_ts_remove,
|
|
.id_table = st1232_ts_id,
|
|
.driver = {
|
|
.name = ST1232_TS_NAME,
|
|
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
|
|
index 8da93e730d6f..ed90361b84dc 100644
|
|
--- a/drivers/iommu/arm-smmu-v3.c
|
|
+++ b/drivers/iommu/arm-smmu-v3.c
|
|
@@ -3611,19 +3611,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
|
|
|
|
/* Interrupt lines */
|
|
|
|
- irq = platform_get_irq_byname(pdev, "combined");
|
|
+ irq = platform_get_irq_byname_optional(pdev, "combined");
|
|
if (irq > 0)
|
|
smmu->combined_irq = irq;
|
|
else {
|
|
- irq = platform_get_irq_byname(pdev, "eventq");
|
|
+ irq = platform_get_irq_byname_optional(pdev, "eventq");
|
|
if (irq > 0)
|
|
smmu->evtq.q.irq = irq;
|
|
|
|
- irq = platform_get_irq_byname(pdev, "priq");
|
|
+ irq = platform_get_irq_byname_optional(pdev, "priq");
|
|
if (irq > 0)
|
|
smmu->priq.q.irq = irq;
|
|
|
|
- irq = platform_get_irq_byname(pdev, "gerror");
|
|
+ irq = platform_get_irq_byname_optional(pdev, "gerror");
|
|
if (irq > 0)
|
|
smmu->gerr_irq = irq;
|
|
}
|
|
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
|
|
index 4dcbf68dfda4..0df091934361 100644
|
|
--- a/drivers/iommu/rockchip-iommu.c
|
|
+++ b/drivers/iommu/rockchip-iommu.c
|
|
@@ -980,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
|
|
if (!dma_dev)
|
|
return NULL;
|
|
|
|
- rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
|
|
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
|
|
if (!rk_domain)
|
|
return NULL;
|
|
|
|
if (type == IOMMU_DOMAIN_DMA &&
|
|
iommu_get_dma_cookie(&rk_domain->domain))
|
|
- return NULL;
|
|
+ goto err_free_domain;
|
|
|
|
/*
|
|
* rk32xx iommus use a 2 level pagetable.
|
|
@@ -1021,6 +1021,8 @@ err_free_dt:
|
|
err_put_cookie:
|
|
if (type == IOMMU_DOMAIN_DMA)
|
|
iommu_put_dma_cookie(&rk_domain->domain);
|
|
+err_free_domain:
|
|
+ kfree(rk_domain);
|
|
|
|
return NULL;
|
|
}
|
|
@@ -1049,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
|
|
|
|
if (domain->type == IOMMU_DOMAIN_DMA)
|
|
iommu_put_dma_cookie(&rk_domain->domain);
|
|
+ kfree(rk_domain);
|
|
}
|
|
|
|
static int rk_iommu_add_device(struct device *dev)
|
|
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
|
|
index 7293fc3f796d..dd486233e282 100644
|
|
--- a/drivers/iommu/tegra-smmu.c
|
|
+++ b/drivers/iommu/tegra-smmu.c
|
|
@@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
|
|
return (addr & smmu->pfn_mask) == addr;
|
|
}
|
|
|
|
-static dma_addr_t smmu_pde_to_dma(u32 pde)
|
|
+static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
|
|
{
|
|
- return pde << 12;
|
|
+ return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
|
|
}
|
|
|
|
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
|
|
@@ -549,6 +549,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
|
|
dma_addr_t *dmap)
|
|
{
|
|
unsigned int pd_index = iova_pd_index(iova);
|
|
+ struct tegra_smmu *smmu = as->smmu;
|
|
struct page *pt_page;
|
|
u32 *pd;
|
|
|
|
@@ -557,7 +558,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
|
|
return NULL;
|
|
|
|
pd = page_address(as->pd);
|
|
- *dmap = smmu_pde_to_dma(pd[pd_index]);
|
|
+ *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
|
|
|
|
return tegra_smmu_pte_offset(pt_page, iova);
|
|
}
|
|
@@ -599,7 +600,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
|
|
} else {
|
|
u32 *pd = page_address(as->pd);
|
|
|
|
- *dmap = smmu_pde_to_dma(pd[pde]);
|
|
+ *dmap = smmu_pde_to_dma(smmu, pd[pde]);
|
|
}
|
|
|
|
return tegra_smmu_pte_offset(as->pts[pde], iova);
|
|
@@ -624,7 +625,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
|
|
if (--as->count[pde] == 0) {
|
|
struct tegra_smmu *smmu = as->smmu;
|
|
u32 *pd = page_address(as->pd);
|
|
- dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
|
|
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
|
|
|
|
tegra_smmu_set_pde(as, iova, 0);
|
|
|
|
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
|
|
index fc75c61233aa..58bec2126966 100644
|
|
--- a/drivers/irqchip/irq-bcm7038-l1.c
|
|
+++ b/drivers/irqchip/irq-bcm7038-l1.c
|
|
@@ -281,6 +281,10 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
|
|
pr_err("failed to map parent interrupt %d\n", parent_irq);
|
|
return -EINVAL;
|
|
}
|
|
+
|
|
+ if (of_property_read_bool(dn, "brcm,irq-can-wake"))
|
|
+ enable_irq_wake(parent_irq);
|
|
+
|
|
irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
|
|
intc);
|
|
|
|
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
|
|
index f126255b3260..dda512dfe2c1 100644
|
|
--- a/drivers/irqchip/irq-ingenic.c
|
|
+++ b/drivers/irqchip/irq-ingenic.c
|
|
@@ -108,6 +108,14 @@ static int __init ingenic_intc_of_init(struct device_node *node,
|
|
goto out_unmap_irq;
|
|
}
|
|
|
|
+ domain = irq_domain_add_legacy(node, num_chips * 32,
|
|
+ JZ4740_IRQ_BASE, 0,
|
|
+ &irq_domain_simple_ops, NULL);
|
|
+ if (!domain) {
|
|
+ err = -ENOMEM;
|
|
+ goto out_unmap_base;
|
|
+ }
|
|
+
|
|
for (i = 0; i < num_chips; i++) {
|
|
/* Mask all irqs */
|
|
writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
|
|
@@ -134,14 +142,11 @@ static int __init ingenic_intc_of_init(struct device_node *node,
|
|
IRQ_NOPROBE | IRQ_LEVEL);
|
|
}
|
|
|
|
- domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
|
|
- &irq_domain_simple_ops, NULL);
|
|
- if (!domain)
|
|
- pr_warn("unable to register IRQ domain\n");
|
|
-
|
|
setup_irq(parent_irq, &intc_cascade_action);
|
|
return 0;
|
|
|
|
+out_unmap_base:
|
|
+ iounmap(intc->base);
|
|
out_unmap_irq:
|
|
irq_dispose_mapping(parent_irq);
|
|
out_free:
|
|
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
|
|
index 250dc9d6f635..82350a28a564 100644
|
|
--- a/drivers/leds/leds-an30259a.c
|
|
+++ b/drivers/leds/leds-an30259a.c
|
|
@@ -305,6 +305,13 @@ static int an30259a_probe(struct i2c_client *client)
|
|
|
|
chip->regmap = devm_regmap_init_i2c(client, &an30259a_regmap_config);
|
|
|
|
+ if (IS_ERR(chip->regmap)) {
|
|
+ err = PTR_ERR(chip->regmap);
|
|
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
|
|
+ err);
|
|
+ goto exit;
|
|
+ }
|
|
+
|
|
for (i = 0; i < chip->num_leds; i++) {
|
|
struct led_init_data init_data = {};
|
|
|
|
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
|
|
index 3d381f2f73d0..1ac9a44570ee 100644
|
|
--- a/drivers/leds/leds-lm3692x.c
|
|
+++ b/drivers/leds/leds-lm3692x.c
|
|
@@ -334,9 +334,18 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
|
|
return ret;
|
|
}
|
|
|
|
- led->regulator = devm_regulator_get(&led->client->dev, "vled");
|
|
- if (IS_ERR(led->regulator))
|
|
+ led->regulator = devm_regulator_get_optional(&led->client->dev, "vled");
|
|
+ if (IS_ERR(led->regulator)) {
|
|
+ ret = PTR_ERR(led->regulator);
|
|
+ if (ret != -ENODEV) {
|
|
+ if (ret != -EPROBE_DEFER)
|
|
+ dev_err(&led->client->dev,
|
|
+ "Failed to get vled regulator: %d\n",
|
|
+ ret);
|
|
+ return ret;
|
|
+ }
|
|
led->regulator = NULL;
|
|
+ }
|
|
|
|
child = device_get_next_child_node(&led->client->dev, child);
|
|
if (!child) {
|
|
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 136f86a1627d..d5e774d83021 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -302,10 +302,12 @@ static int netdev_trig_notify(struct notifier_block *nb,
container_of(nb, struct led_netdev_data, notifier);

if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
- && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
+ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
+ && evt != NETDEV_CHANGENAME)
return NOTIFY_DONE;

if (!(dev == trigger_data->net_dev ||
+ (evt == NETDEV_CHANGENAME && !strcmp(dev->name, trigger_data->device_name)) ||
(evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
return NOTIFY_DONE;

@@ -315,6 +317,7 @@ static int netdev_trig_notify(struct notifier_block *nb,

clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
switch (evt) {
+ case NETDEV_CHANGENAME:
case NETDEV_REGISTER:
if (trigger_data->net_dev)
dev_put(trigger_data->net_dev);
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 9f74dee1a58c..afe625e88a5c 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -214,11 +214,24 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;

- if (cp->type == IMX_MU_TYPE_TXDB)
+ if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
+ return;
+ }

- imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx) |
- IMX_MU_xCR_RIEn(cp->idx) | IMX_MU_xCR_GIEn(cp->idx));
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RX:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RXDB:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx));
+ break;
+ default:
+ break;
+ }

free_irq(priv->irq, chan);
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ba434d9ac720..46a8b5a91c38 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -723,6 +723,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
* IO can always make forward progress:
*/
nr /= c->btree_pages;
+ if (nr == 0)
+ nr = 1;
nr = min_t(unsigned long, nr, mca_can_free(c));

i = 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 805b33e27496..4e7c9f398bc6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1159,6 +1159,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
/* not spare disk, or LEVEL_MULTIPATH */
if (sb->level == LEVEL_MULTIPATH ||
(rdev->desc_nr >= 0 &&
+ rdev->desc_nr < MD_SB_DISKS &&
sb->disks[rdev->desc_nr].state &
((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
spare_disk = false;
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 365fb0cb8dff..22566b75ca50 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -965,17 +965,19 @@ init_page_pack_err:
*
* @ctx : current context
* @vaddr : device virtual address to unmap
+ * @ctx_free : true if in context free flow, false otherwise.
*
* This function does the following:
* - Unmap the physical pages related to the given virtual address
* - return the device virtual block to the virtual block list
*/
-static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
+static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
+ struct hl_va_range *va_range;
enum vm_type_t *vm_type;
u64 next_vaddr, i;
u32 page_size;
@@ -1003,6 +1005,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)

if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
+ va_range = &ctx->host_va_range;
userptr = hnode->ptr;
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
&phys_pg_pack);
@@ -1014,6 +1017,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
}
} else if (*vm_type == VM_TYPE_PHYS_PACK) {
is_userptr = false;
+ va_range = &ctx->dram_va_range;
phys_pg_pack = hnode->ptr;
} else {
dev_warn(hdev->dev,
@@ -1052,12 +1056,18 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)

mutex_unlock(&ctx->mmu_lock);

- if (add_va_block(hdev,
- is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
- vaddr,
- vaddr + phys_pg_pack->total_size - 1))
- dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
- vaddr);
+ /*
+ * No point in maintaining the free VA block list if the context is
+ * closing as the list will be freed anyway
+ */
+ if (!ctx_free) {
+ rc = add_va_block(hdev, va_range, vaddr,
+ vaddr + phys_pg_pack->total_size - 1);
+ if (rc)
+ dev_warn(hdev->dev,
+ "add va block failed for vaddr: 0x%llx\n",
+ vaddr);
+ }

atomic_dec(&phys_pg_pack->mapping_cnt);
kfree(hnode);
@@ -1189,8 +1199,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
break;

case HL_MEM_OP_UNMAP:
- rc = unmap_device_va(ctx,
- args->in.unmap.device_virt_addr);
+ rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
+ false);
break;

default:
@@ -1620,7 +1630,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
dev_dbg(hdev->dev,
"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
hnode->vaddr, ctx->asid);
- unmap_device_va(ctx, hnode->vaddr);
+ unmap_device_va(ctx, hnode->vaddr, true);
}

spin_lock(&vm->idr_lock);
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 57b582bf73d9..9289bb4d633e 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -51,6 +51,11 @@
#define ESDHC_CLOCK_HCKEN 0x00000002
#define ESDHC_CLOCK_IPGEN 0x00000001

+/* System Control 2 Register */
+#define ESDHC_SYSTEM_CONTROL_2 0x3c
+#define ESDHC_SMPCLKSEL 0x00800000
+#define ESDHC_EXTN 0x00400000
+
/* Host Controller Capabilities Register 2 */
#define ESDHC_CAPABILITIES_1 0x114

@@ -59,7 +64,16 @@
#define ESDHC_HS400_WNDW_ADJUST 0x00000040
#define ESDHC_HS400_MODE 0x00000010
#define ESDHC_TB_EN 0x00000004
+#define ESDHC_TB_MODE_MASK 0x00000003
+#define ESDHC_TB_MODE_SW 0x00000003
+#define ESDHC_TB_MODE_3 0x00000002
+
+#define ESDHC_TBSTAT 0x124
+
#define ESDHC_TBPTR 0x128
+#define ESDHC_WNDW_STRT_PTR_SHIFT 8
+#define ESDHC_WNDW_STRT_PTR_MASK (0x7f << 8)
+#define ESDHC_WNDW_END_PTR_MASK 0x7f

/* SD Clock Control Register */
#define ESDHC_SDCLKCTL 0x144
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
|
|
index 889ed98ec0e7..fcfb50f84c8b 100644
|
|
--- a/drivers/mmc/host/sdhci-of-esdhc.c
|
|
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
|
|
@@ -77,8 +77,11 @@ struct sdhci_esdhc {
|
|
bool quirk_incorrect_hostver;
|
|
bool quirk_limited_clk_division;
|
|
bool quirk_unreliable_pulse_detection;
|
|
- bool quirk_fixup_tuning;
|
|
+ bool quirk_tuning_erratum_type1;
|
|
+ bool quirk_tuning_erratum_type2;
|
|
bool quirk_ignore_data_inhibit;
|
|
+ bool quirk_delay_before_data_reset;
|
|
+ bool in_sw_tuning;
|
|
unsigned int peripheral_clock;
|
|
const struct esdhc_clk_fixup *clk_fixup;
|
|
u32 div_ratio;
|
|
@@ -408,6 +411,8 @@ static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
|
|
|
|
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
|
|
{
|
|
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
|
|
int base = reg & ~0x3;
|
|
u32 value;
|
|
u32 ret;
|
|
@@ -416,10 +421,24 @@ static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
|
|
ret = esdhc_writew_fixup(host, reg, val, value);
|
|
if (reg != SDHCI_TRANSFER_MODE)
|
|
iowrite32be(ret, host->ioaddr + base);
|
|
+
|
|
+ /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
|
|
+ * 1us later after ESDHC_EXTN is set.
|
|
+ */
|
|
+ if (base == ESDHC_SYSTEM_CONTROL_2) {
|
|
+ if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
|
|
+ esdhc->in_sw_tuning) {
|
|
+ udelay(1);
|
|
+ ret |= ESDHC_SMPCLKSEL;
|
|
+ iowrite32be(ret, host->ioaddr + base);
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
|
|
{
|
|
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
|
|
int base = reg & ~0x3;
|
|
u32 value;
|
|
u32 ret;
|
|
@@ -428,6 +447,18 @@ static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
|
|
ret = esdhc_writew_fixup(host, reg, val, value);
|
|
if (reg != SDHCI_TRANSFER_MODE)
|
|
iowrite32(ret, host->ioaddr + base);
|
|
+
|
|
+ /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
|
|
+ * 1us later after ESDHC_EXTN is set.
|
|
+ */
|
|
+ if (base == ESDHC_SYSTEM_CONTROL_2) {
|
|
+ if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
|
|
+ esdhc->in_sw_tuning) {
|
|
+ udelay(1);
|
|
+ ret |= ESDHC_SMPCLKSEL;
|
|
+ iowrite32(ret, host->ioaddr + base);
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
|
|
@@ -705,6 +736,11 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
|
|
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
|
|
u32 val;
|
|
|
|
+ if (esdhc->quirk_delay_before_data_reset &&
|
|
+ (mask & SDHCI_RESET_DATA) &&
|
|
+ (host->flags & SDHCI_REQ_USE_DMA))
|
|
+ mdelay(5);
|
|
+
|
|
sdhci_reset(host, mask);
|
|
|
|
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
|
|
@@ -793,16 +829,21 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
|
|
}
|
|
}
|
|
|
|
-static struct soc_device_attribute soc_fixup_tuning[] = {
|
|
+static struct soc_device_attribute soc_tuning_erratum_type1[] = {
|
|
+ { .family = "QorIQ T1023", .revision = "1.0", },
|
|
{ .family = "QorIQ T1040", .revision = "1.0", },
|
|
{ .family = "QorIQ T2080", .revision = "1.0", },
|
|
- { .family = "QorIQ T1023", .revision = "1.0", },
|
|
{ .family = "QorIQ LS1021A", .revision = "1.0", },
|
|
- { .family = "QorIQ LS1080A", .revision = "1.0", },
|
|
- { .family = "QorIQ LS2080A", .revision = "1.0", },
|
|
+ { },
|
|
+};
|
|
+
|
|
+static struct soc_device_attribute soc_tuning_erratum_type2[] = {
|
|
{ .family = "QorIQ LS1012A", .revision = "1.0", },
|
|
{ .family = "QorIQ LS1043A", .revision = "1.*", },
|
|
{ .family = "QorIQ LS1046A", .revision = "1.0", },
|
|
+ { .family = "QorIQ LS1080A", .revision = "1.0", },
|
|
+ { .family = "QorIQ LS2080A", .revision = "1.0", },
|
|
+ { .family = "QorIQ LA1575A", .revision = "1.0", },
|
|
{ },
|
|
};
|
|
|
|
@@ -826,15 +867,97 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
|
|
esdhc_clock_enable(host, true);
|
|
}
|
|
|
|
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
|
|
+ u8 *window_end)
|
|
+{
|
|
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
|
|
+ u8 tbstat_15_8, tbstat_7_0;
|
|
+ u32 val;
|
|
+
|
|
+ if (esdhc->quirk_tuning_erratum_type1) {
|
|
+ *window_start = 5 * esdhc->div_ratio;
|
|
+ *window_end = 3 * esdhc->div_ratio;
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Write TBCTL[11:8]=4'h8 */
|
|
+ val = sdhci_readl(host, ESDHC_TBCTL);
|
|
+ val &= ~(0xf << 8);
|
|
+ val |= 8 << 8;
|
|
+ sdhci_writel(host, val, ESDHC_TBCTL);
|
|
+
|
|
+ mdelay(1);
|
|
+
|
|
+ /* Read TBCTL[31:0] register and rewrite again */
|
|
+ val = sdhci_readl(host, ESDHC_TBCTL);
|
|
+ sdhci_writel(host, val, ESDHC_TBCTL);
|
|
+
|
|
+ mdelay(1);
|
|
+
|
|
+ /* Read the TBSTAT[31:0] register twice */
|
|
+ val = sdhci_readl(host, ESDHC_TBSTAT);
|
|
+ val = sdhci_readl(host, ESDHC_TBSTAT);
|
|
+
|
|
+ /* Reset data lines by setting ESDHCCTL[RSTD] */
|
|
+ sdhci_reset(host, SDHCI_RESET_DATA);
|
|
+ /* Write 32'hFFFF_FFFF to IRQSTAT register */
|
|
+ sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
|
|
+
|
|
+ /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
|
|
+ * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
|
|
+ * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
|
|
+ * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
|
|
+ */
|
|
+ tbstat_7_0 = val & 0xff;
|
|
+ tbstat_15_8 = (val >> 8) & 0xff;
|
|
+
|
|
+ if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
|
|
+ *window_start = 8 * esdhc->div_ratio;
|
|
+ *window_end = 4 * esdhc->div_ratio;
|
|
+ } else {
|
|
+ *window_start = 5 * esdhc->div_ratio;
|
|
+ *window_end = 3 * esdhc->div_ratio;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
|
|
+ u8 window_start, u8 window_end)
|
|
+{
|
|
+ struct sdhci_host *host = mmc_priv(mmc);
|
|
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
|
|
+ u32 val;
|
|
+ int ret;
|
|
+
|
|
+ /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
|
|
+ val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
|
|
+ ESDHC_WNDW_STRT_PTR_MASK;
|
|
+ val |= window_end & ESDHC_WNDW_END_PTR_MASK;
|
|
+ sdhci_writel(host, val, ESDHC_TBPTR);
|
|
+
|
|
+ /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
|
|
+ val = sdhci_readl(host, ESDHC_TBCTL);
|
|
+ val &= ~ESDHC_TB_MODE_MASK;
|
|
+ val |= ESDHC_TB_MODE_SW;
|
|
+ sdhci_writel(host, val, ESDHC_TBCTL);
|
|
+
|
|
+ esdhc->in_sw_tuning = true;
|
|
+ ret = sdhci_execute_tuning(mmc, opcode);
|
|
+ esdhc->in_sw_tuning = false;
|
|
+ return ret;
|
|
+}
|
|
+
|
|
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
|
{
|
|
struct sdhci_host *host = mmc_priv(mmc);
|
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
|
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
|
|
+ u8 window_start, window_end;
|
|
+ int ret, retries = 1;
|
|
bool hs400_tuning;
|
|
unsigned int clk;
|
|
u32 val;
|
|
- int ret;
|
|
|
|
/* For tuning mode, the sd clock divisor value
|
|
* must be larger than 3 according to reference manual.
|
|
@@ -843,39 +966,73 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
|
if (host->clock > clk)
|
|
esdhc_of_set_clock(host, clk);
|
|
|
|
- if (esdhc->quirk_limited_clk_division &&
|
|
- host->flags & SDHCI_HS400_TUNING)
|
|
- esdhc_of_set_clock(host, host->clock);
|
|
-
|
|
esdhc_tuning_block_enable(host, true);
|
|
|
|
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
|
|
- ret = sdhci_execute_tuning(mmc, opcode);
|
|
|
|
- if (hs400_tuning) {
|
|
- val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
|
|
- val |= ESDHC_FLW_CTL_BG;
|
|
- sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
|
|
- }
|
|
+ do {
|
|
+ if (esdhc->quirk_limited_clk_division &&
|
|
+ hs400_tuning)
|
|
+ esdhc_of_set_clock(host, host->clock);
|
|
|
|
- if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {
|
|
+ /* Do HW tuning */
|
|
+ val = sdhci_readl(host, ESDHC_TBCTL);
|
|
+ val &= ~ESDHC_TB_MODE_MASK;
|
|
+ val |= ESDHC_TB_MODE_3;
|
|
+ sdhci_writel(host, val, ESDHC_TBCTL);
|
|
|
|
- /* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
|
|
- * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
|
|
- */
|
|
- val = sdhci_readl(host, ESDHC_TBPTR);
|
|
- val = (val & ~((0x7f << 8) | 0x7f)) |
|
|
- (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
|
|
- sdhci_writel(host, val, ESDHC_TBPTR);
|
|
+ ret = sdhci_execute_tuning(mmc, opcode);
|
|
+ if (ret)
|
|
+ break;
|
|
|
|
- /* program the software tuning mode by setting
|
|
- * TBCTL[TB_MODE]=2'h3
|
|
+ /* If HW tuning fails and triggers erratum,
|
|
+ * try workaround.
|
|
*/
|
|
- val = sdhci_readl(host, ESDHC_TBCTL);
|
|
- val |= 0x3;
|
|
- sdhci_writel(host, val, ESDHC_TBCTL);
|
|
- sdhci_execute_tuning(mmc, opcode);
|
|
+ ret = host->tuning_err;
|
|
+ if (ret == -EAGAIN &&
|
|
+ (esdhc->quirk_tuning_erratum_type1 ||
|
|
+ esdhc->quirk_tuning_erratum_type2)) {
|
|
+ /* Recover HS400 tuning flag */
|
|
+ if (hs400_tuning)
|
|
+ host->flags |= SDHCI_HS400_TUNING;
|
|
+ pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
|
|
+ mmc_hostname(mmc));
|
|
+ /* Do SW tuning */
|
|
+ esdhc_prepare_sw_tuning(host, &window_start,
|
|
+ &window_end);
|
|
+ ret = esdhc_execute_sw_tuning(mmc, opcode,
|
|
+ window_start,
|
|
+ window_end);
|
|
+ if (ret)
|
|
+ break;
|
|
+
|
|
+ /* Retry both HW/SW tuning with reduced clock. */
|
|
+ ret = host->tuning_err;
|
|
+ if (ret == -EAGAIN && retries) {
|
|
+ /* Recover HS400 tuning flag */
|
|
+ if (hs400_tuning)
|
|
+ host->flags |= SDHCI_HS400_TUNING;
|
|
+
|
|
+ clk = host->max_clk / (esdhc->div_ratio + 1);
|
|
+ esdhc_of_set_clock(host, clk);
|
|
+ pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
|
|
+ mmc_hostname(mmc));
|
|
+ } else {
|
|
+ break;
|
|
+ }
|
|
+ } else {
|
|
+ break;
|
|
+ }
|
|
+ } while (retries--);
|
|
+
|
|
+ if (ret) {
|
|
+ esdhc_tuning_block_enable(host, false);
|
|
+ } else if (hs400_tuning) {
|
|
+ val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
|
|
+ val |= ESDHC_FLW_CTL_BG;
|
|
+ sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
|
|
}
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -1046,6 +1203,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
|
|
if (match)
|
|
esdhc->clk_fixup = match->data;
|
|
np = pdev->dev.of_node;
|
|
+
|
|
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
|
|
+ esdhc->quirk_delay_before_data_reset = true;
|
|
+
|
|
clk = of_clk_get(np, 0);
|
|
if (!IS_ERR(clk)) {
|
|
/*
|
|
@@ -1111,10 +1272,15 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
|
|
|
|
pltfm_host = sdhci_priv(host);
|
|
esdhc = sdhci_pltfm_priv(pltfm_host);
|
|
- if (soc_device_match(soc_fixup_tuning))
|
|
- esdhc->quirk_fixup_tuning = true;
|
|
+ if (soc_device_match(soc_tuning_erratum_type1))
|
|
+ esdhc->quirk_tuning_erratum_type1 = true;
|
|
+ else
|
|
+ esdhc->quirk_tuning_erratum_type1 = false;
|
|
+
|
|
+ if (soc_device_match(soc_tuning_erratum_type2))
|
|
+ esdhc->quirk_tuning_erratum_type2 = true;
|
|
else
|
|
- esdhc->quirk_fixup_tuning = false;
|
|
+ esdhc->quirk_tuning_erratum_type2 = false;
|
|
|
|
if (esdhc->vendor_ver == VENDOR_V_22)
|
|
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
|
|
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
|
|
index face00c622ed..7dcd709f4ac3 100644
|
|
--- a/drivers/net/bonding/bond_main.c
|
|
+++ b/drivers/net/bonding/bond_main.c
|
|
@@ -2225,9 +2225,6 @@ static void bond_miimon_commit(struct bonding *bond)
|
|
} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
|
|
/* make it immediately active */
|
|
bond_set_active_slave(slave);
|
|
- } else if (slave != primary) {
|
|
- /* prevent it from being the active one */
|
|
- bond_set_backup_slave(slave);
|
|
}
|
|
|
|
slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
|
|
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
|
|
index d264776a95a3..471837cf0b21 100644
|
|
--- a/drivers/net/dsa/bcm_sf2_cfp.c
|
|
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
|
|
@@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- ip_frag = be32_to_cpu(fs->m_ext.data[0]);
|
|
+ ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
|
|
|
|
/* Locate the first rule available */
|
|
if (fs->location == RX_CLS_LOC_ANY)
|
|
@@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
|
|
|
|
if (rule->fs.flow_type != fs->flow_type ||
|
|
rule->fs.ring_cookie != fs->ring_cookie ||
|
|
- rule->fs.m_ext.data[0] != fs->m_ext.data[0])
|
|
+ rule->fs.h_ext.data[0] != fs->h_ext.data[0])
|
|
continue;
|
|
|
|
switch (fs->flow_type & ~FLOW_EXT) {
|
|
@@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- ip_frag = be32_to_cpu(fs->m_ext.data[0]);
|
|
+ ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
|
|
|
|
layout = &udf_tcpip6_layout;
|
|
slice_num = bcm_sf2_get_slice_number(layout, 0);
|
|
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
|
|
index 4e5a428ab1a4..7763221286d4 100644
|
|
--- a/drivers/net/dsa/sja1105/sja1105_main.c
|
|
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
|
|
@@ -1560,8 +1560,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
|
|
|
|
if (enabled) {
|
|
/* Enable VLAN filtering. */
|
|
- tpid = ETH_P_8021AD;
|
|
- tpid2 = ETH_P_8021Q;
|
|
+ tpid = ETH_P_8021Q;
|
|
+ tpid2 = ETH_P_8021AD;
|
|
} else {
|
|
/* Disable VLAN filtering. */
|
|
tpid = ETH_P_SJA1105;
|
|
@@ -1570,9 +1570,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
|
|
|
|
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
|
|
general_params = table->entries;
|
|
- /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
|
|
- general_params->tpid = tpid;
|
|
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
|
|
+ general_params->tpid = tpid;
|
|
+ /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
|
|
general_params->tpid2 = tpid2;
|
|
/* When VLAN filtering is on, we need to at least be able to
|
|
* decode management traffic through the "backup plan".
|
|
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
|
|
index 0d03e13e9909..63d2311817c4 100644
|
|
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
|
|
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
|
|
@@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
|
|
return size;
|
|
}
|
|
|
|
+/* TPID and TPID2 are intentionally reversed so that semantic
|
|
+ * compatibility with E/T is kept.
|
|
+ */
|
|
static size_t
|
|
sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
|
|
enum packing_op op)
|
|
@@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
|
|
sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
|
|
sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
|
|
sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
|
|
- sja1105_packing(buf, &entry->tpid, 74, 59, size, op);
|
|
+ sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
|
|
sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
|
|
- sja1105_packing(buf, &entry->tpid2, 57, 42, size, op);
|
|
+ sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
|
|
sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
|
|
sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
|
|
sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
|
|
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
index c487d2a7d6dd..b4a145220aba 100644
|
|
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
@@ -1238,8 +1238,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
|
|
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
|
|
struct ena_ring *tx_ring, *rx_ring;
|
|
|
|
- u32 tx_work_done;
|
|
- u32 rx_work_done;
|
|
+ int tx_work_done;
|
|
+ int rx_work_done = 0;
|
|
int tx_budget;
|
|
int napi_comp_call = 0;
|
|
int ret;
|
|
@@ -1256,7 +1256,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
|
|
}
|
|
|
|
tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
|
|
- rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
|
|
+ /* On netpoll the budget is zero and the handler should only clean the
|
|
+ * tx completions.
|
|
+ */
|
|
+ if (likely(budget))
|
|
+ rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
|
|
|
|
/* If the device is about to reset or down, avoid unmask
|
|
* the interrupt and return 0 so NAPI won't reschedule
|
|
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
|
|
index 527e1bf93116..5c75b061243f 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
|
|
@@ -1995,6 +1995,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
|
|
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
|
|
u32 data1 = le32_to_cpu(cmpl->event_data1);
|
|
|
|
+ if (!bp->fw_health)
|
|
+ goto async_event_process_exit;
|
|
+
|
|
bp->fw_reset_timestamp = jiffies;
|
|
bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
|
|
if (!bp->fw_reset_min_dsecs)
|
|
@@ -4438,8 +4441,9 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
|
|
FUNC_DRV_RGTR_REQ_ENABLES_VER);
|
|
|
|
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
|
|
- flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
|
|
- FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
|
|
+ flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
|
|
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
|
|
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
|
|
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
|
|
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
|
|
req.flags = cpu_to_le32(flags);
|
|
@@ -6174,7 +6178,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
|
|
tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
|
|
val = clamp_t(u16, tmr, 1,
|
|
coal_cap->cmpl_aggr_dma_tmr_during_int_max);
|
|
- req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
|
|
+ req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
|
|
req->enables |=
|
|
cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
|
|
}
|
|
@@ -7096,14 +7100,6 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
|
|
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
|
if (rc)
|
|
goto err_recovery_out;
|
|
- if (!fw_health) {
|
|
- fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
|
|
- bp->fw_health = fw_health;
|
|
- if (!fw_health) {
|
|
- rc = -ENOMEM;
|
|
- goto err_recovery_out;
|
|
- }
|
|
- }
|
|
fw_health->flags = le32_to_cpu(resp->flags);
|
|
if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
|
|
!(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
|
|
@@ -8766,6 +8762,9 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
|
|
}
|
|
if (resc_reinit || fw_reset) {
|
|
if (fw_reset) {
|
|
+ bnxt_free_ctx_mem(bp);
|
|
+ kfree(bp->ctx);
|
|
+ bp->ctx = NULL;
|
|
rc = bnxt_fw_init_one(bp);
|
|
if (rc) {
|
|
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
|
|
@@ -9954,8 +9953,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
|
|
struct bnxt_fw_health *fw_health = bp->fw_health;
|
|
u32 val;
|
|
|
|
- if (!fw_health || !fw_health->enabled ||
|
|
- test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
|
|
+ if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
|
|
return;
|
|
|
|
if (fw_health->tmr_counter) {
|
|
@@ -10416,6 +10414,23 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
|
|
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
|
|
}
|
|
|
|
+static void bnxt_alloc_fw_health(struct bnxt *bp)
|
|
+{
|
|
+ if (bp->fw_health)
|
|
+ return;
|
|
+
|
|
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
|
|
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
|
|
+ return;
|
|
+
|
|
+ bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
|
|
+ if (!bp->fw_health) {
|
|
+ netdev_warn(bp->dev, "Failed to allocate fw_health\n");
|
|
+ bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
|
|
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
|
|
+ }
|
|
+}
|
|
+
|
|
static int bnxt_fw_init_one_p1(struct bnxt *bp)
|
|
{
|
|
int rc;
|
|
@@ -10462,6 +10477,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
|
|
netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
|
|
rc);
|
|
|
|
+ bnxt_alloc_fw_health(bp);
|
|
rc = bnxt_hwrm_error_recovery_qcfg(bp);
|
|
if (rc)
|
|
netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
|
|
@@ -10547,6 +10563,12 @@ static int bnxt_fw_init_one(struct bnxt *bp)
|
|
rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
|
|
if (rc)
|
|
return rc;
|
|
+
|
|
+ /* In case fw capabilities have changed, destroy the unneeded
|
|
+ * reporters and create newly capable ones.
|
|
+ */
|
|
+ bnxt_dl_fw_reporters_destroy(bp, false);
|
|
+ bnxt_dl_fw_reporters_create(bp);
|
|
bnxt_fw_init_one_p3(bp);
|
|
return 0;
|
|
}
|
|
@@ -10680,8 +10702,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
|
|
bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
|
|
return;
|
|
case BNXT_FW_RESET_STATE_ENABLE_DEV:
|
|
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
|
|
- bp->fw_health) {
|
|
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
|
|
u32 val;
|
|
|
|
val = bnxt_fw_health_readl(bp,
|
|
@@ -11322,11 +11343,11 @@ static void bnxt_remove_one(struct pci_dev *pdev)
|
|
struct net_device *dev = pci_get_drvdata(pdev);
|
|
struct bnxt *bp = netdev_priv(dev);
|
|
|
|
- if (BNXT_PF(bp)) {
|
|
+ if (BNXT_PF(bp))
|
|
bnxt_sriov_disable(bp);
|
|
- bnxt_dl_unregister(bp);
|
|
- }
|
|
|
|
+ bnxt_dl_fw_reporters_destroy(bp, true);
|
|
+ bnxt_dl_unregister(bp);
|
|
pci_disable_pcie_error_reporting(pdev);
|
|
unregister_netdev(dev);
|
|
bnxt_shutdown_tc(bp);
|
|
@@ -11341,6 +11362,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
|
|
bnxt_dcb_free(bp);
|
|
kfree(bp->edev);
|
|
bp->edev = NULL;
|
|
+ kfree(bp->fw_health);
|
|
+ bp->fw_health = NULL;
|
|
bnxt_cleanup_pci(bp);
|
|
bnxt_free_ctx_mem(bp);
|
|
kfree(bp->ctx);
|
|
@@ -11820,8 +11843,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
if (rc)
|
|
goto init_err_cleanup_tc;
|
|
|
|
- if (BNXT_PF(bp))
|
|
- bnxt_dl_register(bp);
|
|
+ bnxt_dl_register(bp);
|
|
+ bnxt_dl_fw_reporters_create(bp);
|
|
|
|
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
|
|
board_info[ent->driver_data].name,
|
|
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
|
|
index 5163bb848618..dc26e3ace43f 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
|
|
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
|
|
@@ -1658,6 +1658,7 @@ struct bnxt {
|
|
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
|
|
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
|
|
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
|
|
+ #define BNXT_FW_CAP_HOT_RESET 0x00200000
|
|
|
|
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
|
|
u32 hwrm_spec_code;
|
|
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
|
|
index 7d2cfea05737..1e236e74ff2f 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
|
|
@@ -19,11 +19,10 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
|
|
struct devlink_fmsg *fmsg)
|
|
{
|
|
struct bnxt *bp = devlink_health_reporter_priv(reporter);
|
|
- struct bnxt_fw_health *health = bp->fw_health;
|
|
u32 val, health_status;
|
|
int rc;
|
|
|
|
- if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
|
|
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
|
|
return 0;
|
|
|
|
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
|
|
@@ -103,21 +102,15 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
|
|
.recover = bnxt_fw_fatal_recover,
|
|
};
|
|
|
|
-static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
|
|
+void bnxt_dl_fw_reporters_create(struct bnxt *bp)
|
|
{
|
|
struct bnxt_fw_health *health = bp->fw_health;
|
|
|
|
- if (!health)
|
|
+ if (!bp->dl || !health)
|
|
return;
|
|
|
|
- health->fw_reporter =
|
|
- devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
|
|
- 0, false, bp);
|
|
- if (IS_ERR(health->fw_reporter)) {
|
|
- netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
|
|
- PTR_ERR(health->fw_reporter));
|
|
- health->fw_reporter = NULL;
|
|
- }
|
|
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
|
|
+ goto err_recovery;
|
|
|
|
health->fw_reset_reporter =
|
|
devlink_health_reporter_create(bp->dl,
|
|
@@ -127,8 +120,30 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
|
|
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
|
|
PTR_ERR(health->fw_reset_reporter));
|
|
health->fw_reset_reporter = NULL;
|
|
+ bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
|
|
+ }
|
|
+
|
|
+err_recovery:
|
|
+ if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
|
|
+ return;
|
|
+
|
|
+ if (!health->fw_reporter) {
|
|
+ health->fw_reporter =
|
|
+ devlink_health_reporter_create(bp->dl,
|
|
+ &bnxt_dl_fw_reporter_ops,
|
|
+ 0, false, bp);
|
|
+ if (IS_ERR(health->fw_reporter)) {
|
|
+ netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
|
|
+ PTR_ERR(health->fw_reporter));
|
|
+ health->fw_reporter = NULL;
|
|
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
|
|
+ return;
|
|
+ }
|
|
}
|
|
|
|
+ if (health->fw_fatal_reporter)
|
|
+ return;
|
|
+
|
|
health->fw_fatal_reporter =
|
|
devlink_health_reporter_create(bp->dl,
|
|
&bnxt_dl_fw_fatal_reporter_ops,
|
|
@@ -137,24 +152,35 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
|
|
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
|
|
PTR_ERR(health->fw_fatal_reporter));
|
|
health->fw_fatal_reporter = NULL;
|
|
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
|
|
}
|
|
}
|
|
|
|
-static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
|
|
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
|
|
{
|
|
struct bnxt_fw_health *health = bp->fw_health;
|
|
|
|
- if (!health)
|
|
+ if (!bp->dl || !health)
|
|
return;
|
|
|
|
- if (health->fw_reporter)
|
|
- devlink_health_reporter_destroy(health->fw_reporter);
|
|
-
|
|
- if (health->fw_reset_reporter)
|
|
+ if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
|
|
+ health->fw_reset_reporter) {
|
|
devlink_health_reporter_destroy(health->fw_reset_reporter);
|
|
+ health->fw_reset_reporter = NULL;
|
|
+ }
|
|
|
|
- if (health->fw_fatal_reporter)
|
|
+ if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
|
|
+ return;
|
|
+
|
|
+ if (health->fw_reporter) {
|
|
+ devlink_health_reporter_destroy(health->fw_reporter);
|
|
+ health->fw_reporter = NULL;
|
|
+ }
|
|
+
|
|
+ if (health->fw_fatal_reporter) {
|
|
devlink_health_reporter_destroy(health->fw_fatal_reporter);
|
|
+ health->fw_fatal_reporter = NULL;
|
|
+ }
|
|
}
|
|
|
|
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
|
|
@@ -162,9 +188,6 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
|
|
struct bnxt_fw_health *fw_health = bp->fw_health;
|
|
struct bnxt_fw_reporter_ctx fw_reporter_ctx;
|
|
|
|
- if (!fw_health)
|
|
- return;
|
|
-
|
|
fw_reporter_ctx.sp_event = event;
|
|
switch (event) {
|
|
case BNXT_FW_RESET_NOTIFY_SP_EVENT:
|
|
@@ -203,6 +226,8 @@ static const struct devlink_ops bnxt_dl_ops = {
|
|
#endif /* CONFIG_BNXT_SRIOV */
|
|
};
|
|
|
|
+static const struct devlink_ops bnxt_vf_dl_ops;
|
|
+
|
|
enum bnxt_dl_param_id {
|
|
BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
|
|
BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
|
|
@@ -416,7 +441,10 @@ int bnxt_dl_register(struct bnxt *bp)
|
|
return -ENOTSUPP;
|
|
}
|
|
|
|
- dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
|
|
+ if (BNXT_PF(bp))
|
|
+ dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
|
|
+ else
|
|
+ dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
|
|
if (!dl) {
|
|
netdev_warn(bp->dev, "devlink_alloc failed");
|
|
return -ENOMEM;
|
|
@@ -435,6 +463,9 @@ int bnxt_dl_register(struct bnxt *bp)
|
|
goto err_dl_free;
|
|
}
|
|
|
|
+ if (!BNXT_PF(bp))
|
|
+ return 0;
|
|
+
|
|
rc = devlink_params_register(dl, bnxt_dl_params,
|
|
ARRAY_SIZE(bnxt_dl_params));
|
|
if (rc) {
|
|
@@ -462,8 +493,6 @@ int bnxt_dl_register(struct bnxt *bp)
|
|
|
|
devlink_params_publish(dl);
|
|
|
|
- bnxt_dl_fw_reporters_create(bp);
|
|
-
|
|
return 0;
|
|
|
|
err_dl_port_unreg:
|
|
@@ -486,12 +515,14 @@ void bnxt_dl_unregister(struct bnxt *bp)
|
|
if (!dl)
|
|
return;
|
|
|
|
- bnxt_dl_fw_reporters_destroy(bp);
|
|
- devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
|
|
- ARRAY_SIZE(bnxt_dl_port_params));
|
|
- devlink_port_unregister(&bp->dl_port);
|
|
- devlink_params_unregister(dl, bnxt_dl_params,
|
|
- ARRAY_SIZE(bnxt_dl_params));
|
|
+ if (BNXT_PF(bp)) {
|
|
+ devlink_port_params_unregister(&bp->dl_port,
|
|
+ bnxt_dl_port_params,
|
|
+ ARRAY_SIZE(bnxt_dl_port_params));
|
|
+ devlink_port_unregister(&bp->dl_port);
|
|
+ devlink_params_unregister(dl, bnxt_dl_params,
|
|
+ ARRAY_SIZE(bnxt_dl_params));
|
|
+ }
|
|
devlink_unregister(dl);
|
|
devlink_free(dl);
|
|
}
|
|
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
|
|
index 2f4fd0a7d04b..689c47ab2155 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
|
|
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
|
|
@@ -57,6 +57,8 @@ struct bnxt_dl_nvm_param {
|
|
};
|
|
|
|
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
|
|
+void bnxt_dl_fw_reporters_create(struct bnxt *bp);
|
|
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
|
|
int bnxt_dl_register(struct bnxt *bp);
|
|
void bnxt_dl_unregister(struct bnxt *bp);
|
|
|
|
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
|
|
index 89f95428556e..ece70f61c89a 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
|
|
@@ -3064,8 +3064,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
|
|
}
|
|
}
|
|
|
|
- if (info->dest_buf)
|
|
- memcpy(info->dest_buf + off, dma_buf, len);
|
|
+ if (info->dest_buf) {
|
|
+ if ((info->seg_start + off + len) <=
|
|
+ BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
|
|
+ memcpy(info->dest_buf + off, dma_buf, len);
|
|
+ } else {
|
|
+ rc = -ENOBUFS;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
|
|
if (cmn_req->req_type ==
|
|
cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
|
|
@@ -3119,7 +3126,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
|
|
|
|
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
|
|
u16 segment_id, u32 *seg_len,
|
|
- void *buf, u32 offset)
|
|
+ void *buf, u32 buf_len, u32 offset)
|
|
{
|
|
struct hwrm_dbg_coredump_retrieve_input req = {0};
|
|
struct bnxt_hwrm_dbg_dma_info info = {NULL};
|
|
@@ -3134,8 +3141,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
|
|
seq_no);
|
|
info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
|
|
data_len);
|
|
- if (buf)
|
|
+ if (buf) {
|
|
info.dest_buf = buf + offset;
|
|
+ info.buf_len = buf_len;
|
|
+ info.seg_start = offset;
|
|
+ }
|
|
|
|
rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
|
|
if (!rc)
|
|
@@ -3225,14 +3235,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
|
|
static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
|
|
{
|
|
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
|
|
+ u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
|
|
struct coredump_segment_record *seg_record = NULL;
|
|
- u32 offset = 0, seg_hdr_len, seg_record_len;
|
|
struct bnxt_coredump_segment_hdr seg_hdr;
|
|
struct bnxt_coredump coredump = {NULL};
|
|
time64_t start_time;
|
|
u16 start_utc;
|
|
int rc = 0, i;
|
|
|
|
+ if (buf)
|
|
+ buf_len = *dump_len;
|
|
+
|
|
start_time = ktime_get_real_seconds();
|
|
start_utc = sys_tz.tz_minuteswest * 60;
|
|
seg_hdr_len = sizeof(seg_hdr);
|
|
@@ -3265,6 +3278,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
|
|
u32 duration = 0, seg_len = 0;
|
|
unsigned long start, end;
|
|
|
|
+ if (buf && ((offset + seg_hdr_len) >
|
|
+ BNXT_COREDUMP_BUF_LEN(buf_len))) {
|
|
+ rc = -ENOBUFS;
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
start = jiffies;
|
|
|
|
rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
|
|
@@ -3277,9 +3296,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
|
|
|
|
/* Write segment data into the buffer */
|
|
rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
|
|
- &seg_len, buf,
|
|
+ &seg_len, buf, buf_len,
|
|
offset + seg_hdr_len);
|
|
- if (rc)
|
|
+ if (rc && rc == -ENOBUFS)
|
|
+ goto err;
|
|
+ else if (rc)
|
|
netdev_err(bp->dev,
|
|
"Failed to retrieve coredump for seg = %d\n",
|
|
seg_record->segment_id);
|
|
@@ -3309,7 +3330,8 @@ err:
|
|
rc);
|
|
kfree(coredump.data);
|
|
*dump_len += sizeof(struct bnxt_coredump_record);
|
|
-
|
|
+ if (rc == -ENOBUFS)
|
|
+ netdev_err(bp->dev, "Firmware returned large coredump buffer");
|
|
return rc;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
|
|
index b5b65b3f8534..3998f6e809a9 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
|
|
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
|
|
@@ -31,6 +31,8 @@ struct bnxt_coredump {
|
|
u16 total_segs;
|
|
};
|
|
|
|
+#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
|
|
+
|
|
struct bnxt_hwrm_dbg_dma_info {
|
|
void *dest_buf;
|
|
int dest_buf_size;
|
|
@@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info {
|
|
u16 seq_off;
|
|
u16 data_len_off;
|
|
u16 segs;
|
|
+ u32 seg_start;
|
|
+ u32 buf_len;
|
|
};
|
|
|
|
struct hwrm_dbg_cmn_input {
|
|
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
|
|
index b2c160947fc8..30816ec4fa91 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
|
|
@@ -113,8 +113,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
|
|
{
|
|
struct net_device *dev = edev->net;
|
|
struct bnxt *bp = netdev_priv(dev);
|
|
+ struct bnxt_hw_resc *hw_resc;
|
|
int max_idx, max_cp_rings;
|
|
int avail_msix, idx;
|
|
+ int total_vecs;
|
|
int rc = 0;
|
|
|
|
ASSERT_RTNL();
|
|
@@ -142,7 +144,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
|
|
}
|
|
edev->ulp_tbl[ulp_id].msix_base = idx;
|
|
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
|
|
- if (bp->total_irqs < (idx + avail_msix)) {
|
|
+ hw_resc = &bp->hw_resc;
|
|
+ total_vecs = idx + avail_msix;
|
|
+ if (bp->total_irqs < total_vecs ||
|
|
+ (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
|
|
if (netif_running(dev)) {
|
|
bnxt_close_nic(bp, true, false);
|
|
rc = bnxt_open_nic(bp, true, false);
|
|
@@ -156,7 +161,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
|
|
}
|
|
|
|
if (BNXT_NEW_RM(bp)) {
|
|
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
|
|
int resv_msix;
|
|
|
|
resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
|
|
index 1fbb640e896a..4025a683fa7d 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
|
|
@@ -503,6 +503,7 @@ struct link_config {
|
|
|
|
enum cc_pause requested_fc; /* flow control user has requested */
|
|
enum cc_pause fc; /* actual link flow control */
|
|
+ enum cc_pause advertised_fc; /* actual advertised flow control */
|
|
|
|
enum cc_fec requested_fec; /* Forward Error Correction: */
|
|
enum cc_fec fec; /* requested and actual in use */
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
|
|
index 76538f4cd595..f537be9cb315 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
|
|
@@ -793,8 +793,8 @@ static void get_pauseparam(struct net_device *dev,
|
|
struct port_info *p = netdev_priv(dev);
|
|
|
|
epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
|
|
- epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
|
|
- epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
|
|
+ epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
|
|
+ epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
|
|
}
|
|
|
|
static int set_pauseparam(struct net_device *dev,
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
|
|
index f2a7824da42b..3f6813daf3c1 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
|
|
@@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
|
|
if (cc_pause & PAUSE_TX)
|
|
fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
|
|
else
|
|
- fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
|
|
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
|
|
+ FW_PORT_CAP32_802_3_PAUSE;
|
|
} else if (cc_pause & PAUSE_TX) {
|
|
fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
|
|
}
|
|
@@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
|
|
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
|
|
{
|
|
const struct fw_port_cmd *cmd = (const void *)rpl;
|
|
- int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
|
|
- struct adapter *adapter = pi->adapter;
|
|
+ fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
|
|
struct link_config *lc = &pi->link_cfg;
|
|
- int link_ok, linkdnrc;
|
|
- enum fw_port_type port_type;
|
|
+ struct adapter *adapter = pi->adapter;
|
|
+ unsigned int speed, fc, fec, adv_fc;
|
|
enum fw_port_module_type mod_type;
|
|
- unsigned int speed, fc, fec;
|
|
- fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
|
|
+ int action, link_ok, linkdnrc;
|
|
+ enum fw_port_type port_type;
|
|
|
|
/* Extract the various fields from the Port Information message.
|
|
*/
|
|
+ action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
|
|
switch (action) {
|
|
case FW_PORT_ACTION_GET_PORT_INFO: {
|
|
u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
|
|
@@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
|
|
}
|
|
|
|
fec = fwcap_to_cc_fec(acaps);
|
|
+ adv_fc = fwcap_to_cc_pause(acaps);
|
|
fc = fwcap_to_cc_pause(linkattr);
|
|
speed = fwcap_to_speed(linkattr);
|
|
|
|
@@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
|
|
}
|
|
|
|
if (link_ok != lc->link_ok || speed != lc->speed ||
|
|
- fc != lc->fc || fec != lc->fec) { /* something changed */
|
|
+ fc != lc->fc || adv_fc != lc->advertised_fc ||
|
|
+ fec != lc->fec) {
|
|
+ /* something changed */
|
|
if (!link_ok && lc->link_ok) {
|
|
lc->link_down_rc = linkdnrc;
|
|
dev_warn_ratelimited(adapter->pdev_dev,
|
|
@@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
|
|
}
|
|
lc->link_ok = link_ok;
|
|
lc->speed = speed;
|
|
+ lc->advertised_fc = adv_fc;
|
|
lc->fc = fc;
|
|
lc->fec = fec;
|
|
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
|
|
index f6fc0875d5b0..f4d41f968afa 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
|
|
@@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
|
|
- pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
|
|
- pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
|
|
+ pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
|
|
+ pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
|
|
index ccca67cf4487..57cfd10a99ec 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
|
|
@@ -135,6 +135,7 @@ struct link_config {
|
|
|
|
enum cc_pause requested_fc; /* flow control user has requested */
|
|
enum cc_pause fc; /* actual link flow control */
|
|
+ enum cc_pause advertised_fc; /* actual advertised flow control */
|
|
|
|
enum cc_fec auto_fec; /* Forward Error Correction: */
|
|
enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
|
|
index 8a389d617a23..9d49ff211cc1 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
|
|
@@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
|
|
static void t4vf_handle_get_port_info(struct port_info *pi,
|
|
const struct fw_port_cmd *cmd)
|
|
{
|
|
- int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
|
|
- struct adapter *adapter = pi->adapter;
|
|
+ fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
|
|
struct link_config *lc = &pi->link_cfg;
|
|
- int link_ok, linkdnrc;
|
|
- enum fw_port_type port_type;
|
|
+ struct adapter *adapter = pi->adapter;
|
|
+ unsigned int speed, fc, fec, adv_fc;
|
|
enum fw_port_module_type mod_type;
|
|
- unsigned int speed, fc, fec;
|
|
- fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
|
|
+ int action, link_ok, linkdnrc;
|
|
+ enum fw_port_type port_type;
|
|
|
|
/* Extract the various fields from the Port Information message. */
|
|
+ action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
|
|
switch (action) {
|
|
case FW_PORT_ACTION_GET_PORT_INFO: {
|
|
u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
|
|
@@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
|
|
}
|
|
|
|
fec = fwcap_to_cc_fec(acaps);
|
|
+ adv_fc = fwcap_to_cc_pause(acaps);
|
|
fc = fwcap_to_cc_pause(linkattr);
|
|
speed = fwcap_to_speed(linkattr);
|
|
|
|
@@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
|
|
}
|
|
|
|
if (link_ok != lc->link_ok || speed != lc->speed ||
|
|
- fc != lc->fc || fec != lc->fec) { /* something changed */
|
|
+ fc != lc->fc || adv_fc != lc->advertised_fc ||
|
|
+ fec != lc->fec) {
|
|
+ /* something changed */
|
|
if (!link_ok && lc->link_ok) {
|
|
lc->link_down_rc = linkdnrc;
|
|
dev_warn_ratelimited(adapter->pdev_dev,
|
|
@@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
|
|
}
|
|
lc->link_ok = link_ok;
|
|
lc->speed = speed;
|
|
+ lc->advertised_fc = adv_fc;
|
|
lc->fc = fc;
|
|
lc->fec = fec;
|
|
|
|
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
|
|
index 111b3b8239e1..ef44c6979a31 100644
|
|
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
|
|
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
|
|
@@ -3674,7 +3674,7 @@ static int mvpp2_open(struct net_device *dev)
|
|
valid = true;
|
|
}
|
|
|
|
- if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
|
|
+ if (priv->hw_version == MVPP22 && port->link_irq) {
|
|
err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
|
|
dev->name, port);
|
|
if (err) {
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
|
|
index 544344ac4894..79057af4fe99 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
|
|
@@ -6,6 +6,7 @@
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
#include <linux/netlink.h>
|
|
+#include <linux/vmalloc.h>
|
|
#include <linux/xz.h>
|
|
#include "mlxfw_mfa2.h"
|
|
#include "mlxfw_mfa2_file.h"
|
|
@@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
|
|
comp_size = be32_to_cpu(comp->size);
|
|
comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
|
|
|
|
- comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
|
|
+ comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
|
|
if (!comp_data)
|
|
return ERR_PTR(-ENOMEM);
|
|
comp_data->comp.data_size = comp_size;
|
|
@@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
|
|
comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
|
|
return &comp_data->comp;
|
|
err_out:
|
|
- kfree(comp_data);
|
|
+ vfree(comp_data);
|
|
return ERR_PTR(err);
|
|
}
|
|
|
|
@@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
|
|
const struct mlxfw_mfa2_comp_data *comp_data;
|
|
|
|
comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
|
|
- kfree(comp_data);
|
|
+ vfree(comp_data);
|
|
}
|
|
|
|
void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
|
|
index 5494cf93f34c..8e42ebdbd487 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
|
|
@@ -5421,6 +5421,7 @@ enum mlxsw_reg_htgt_trap_group {
|
|
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
|
|
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
|
|
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
|
|
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
|
|
|
|
__MLXSW_REG_HTGT_TRAP_GROUP_MAX,
|
|
MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
|
|
index dcf9562bce8a..3ec18fb0d479 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
|
|
@@ -4398,8 +4398,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
|
|
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
|
|
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
|
|
MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
|
|
- MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
|
|
- MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
|
|
+ MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
|
|
+ MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
|
|
/* PKT Sample trap */
|
|
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
|
|
false, SP_IP2ME, DISCARD),
|
|
@@ -4483,6 +4483,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
|
|
rate = 19 * 1024;
|
|
burst_size = 12;
|
|
break;
|
|
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
|
|
+ rate = 360;
|
|
+ burst_size = 7;
|
|
+ break;
|
|
default:
|
|
continue;
|
|
}
|
|
@@ -4522,6 +4526,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
|
|
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
|
|
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
|
|
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
|
|
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
|
|
priority = 5;
|
|
tc = 5;
|
|
break;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
|
|
index 210ebc91d3d6..efdf8cb5114c 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
|
|
@@ -6985,6 +6985,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
|
|
|
|
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
|
|
rif = mlxsw_sp->router->rifs[i];
|
|
+ if (rif && rif->ops &&
|
|
+ rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
|
|
+ continue;
|
|
if (rif && rif->dev && rif->dev != dev &&
|
|
!ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
|
|
mlxsw_sp->mac_mask)) {
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
|
|
index 306da8f6b7d5..33ce139f090f 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
|
|
@@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
|
|
struct device *dev = dwmac->dev;
|
|
const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
|
|
struct meson8b_dwmac_clk_configs *clk_configs;
|
|
+ static const struct clk_div_table div_table[] = {
|
|
+ { .div = 2, .val = 2, },
|
|
+ { .div = 3, .val = 3, },
|
|
+ { .div = 4, .val = 4, },
|
|
+ { .div = 5, .val = 5, },
|
|
+ { .div = 6, .val = 6, },
|
|
+ { .div = 7, .val = 7, },
|
|
+ };
|
|
|
|
clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
|
|
if (!clk_configs)
|
|
@@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
|
|
clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
|
|
clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
|
|
clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
|
|
- clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED |
|
|
- CLK_DIVIDER_ALLOW_ZERO |
|
|
- CLK_DIVIDER_ROUND_CLOSEST;
|
|
+ clk_configs->m250_div.table = div_table;
|
|
+ clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
|
|
+ CLK_DIVIDER_ROUND_CLOSEST;
|
|
clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
|
|
&clk_divider_ops,
|
|
&clk_configs->m250_div.hw);
|
|
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
|
|
index ecfe26215935..fca471e27f39 100644
|
|
--- a/drivers/net/gtp.c
|
|
+++ b/drivers/net/gtp.c
|
|
@@ -38,7 +38,6 @@ struct pdp_ctx {
|
|
struct hlist_node hlist_addr;
|
|
|
|
union {
|
|
- u64 tid;
|
|
struct {
|
|
u64 tid;
|
|
u16 flow;
|
|
@@ -541,7 +540,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
|
|
mtu = dst_mtu(&rt->dst);
|
|
}
|
|
|
|
- rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
|
|
+ rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
|
|
|
|
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
|
|
mtu < ntohs(iph->tot_len)) {
|
|
@@ -641,9 +640,16 @@ static void gtp_link_setup(struct net_device *dev)
|
|
}
|
|
|
|
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
|
|
-static void gtp_hashtable_free(struct gtp_dev *gtp);
|
|
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
|
|
|
|
+static void gtp_destructor(struct net_device *dev)
|
|
+{
|
|
+ struct gtp_dev *gtp = netdev_priv(dev);
|
|
+
|
|
+ kfree(gtp->addr_hash);
|
|
+ kfree(gtp->tid_hash);
|
|
+}
|
|
+
|
|
static int gtp_newlink(struct net *src_net, struct net_device *dev,
|
|
struct nlattr *tb[], struct nlattr *data[],
|
|
struct netlink_ext_ack *extack)
|
|
@@ -661,10 +667,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
|
|
if (err < 0)
|
|
return err;
|
|
|
|
- if (!data[IFLA_GTP_PDP_HASHSIZE])
|
|
+ if (!data[IFLA_GTP_PDP_HASHSIZE]) {
|
|
hashsize = 1024;
|
|
- else
|
|
+ } else {
|
|
hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
|
|
+ if (!hashsize)
|
|
+ hashsize = 1024;
|
|
+ }
|
|
|
|
err = gtp_hashtable_new(gtp, hashsize);
|
|
if (err < 0)
|
|
@@ -678,13 +687,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
|
|
|
|
gn = net_generic(dev_net(dev), gtp_net_id);
|
|
 	list_add_rcu(&gtp->list, &gn->gtp_dev_list);
|
|
+ dev->priv_destructor = gtp_destructor;
|
|
|
|
netdev_dbg(dev, "registered new GTP interface\n");
|
|
|
|
return 0;
|
|
|
|
out_hashtable:
|
|
- gtp_hashtable_free(gtp);
|
|
+ kfree(gtp->addr_hash);
|
|
+ kfree(gtp->tid_hash);
|
|
out_encap:
|
|
gtp_encap_disable(gtp);
|
|
return err;
|
|
@@ -693,8 +704,13 @@ out_encap:
|
|
static void gtp_dellink(struct net_device *dev, struct list_head *head)
|
|
{
|
|
struct gtp_dev *gtp = netdev_priv(dev);
|
|
+ struct pdp_ctx *pctx;
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < gtp->hash_size; i++)
|
|
+		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
|
|
+ pdp_context_delete(pctx);
|
|
|
|
- gtp_hashtable_free(gtp);
|
|
 	list_del_rcu(&gtp->list);
|
|
unregister_netdevice_queue(dev, head);
|
|
}
|
|
@@ -772,20 +788,6 @@ err1:
|
|
return -ENOMEM;
|
|
}
|
|
|
|
-static void gtp_hashtable_free(struct gtp_dev *gtp)
|
|
-{
|
|
- struct pdp_ctx *pctx;
|
|
- int i;
|
|
-
|
|
- for (i = 0; i < gtp->hash_size; i++)
|
|
-		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
|
|
- pdp_context_delete(pctx);
|
|
-
|
|
- synchronize_rcu();
|
|
- kfree(gtp->addr_hash);
|
|
- kfree(gtp->tid_hash);
|
|
-}
|
|
-
|
|
static struct sock *gtp_encap_enable_socket(int fd, int type,
|
|
struct gtp_dev *gtp)
|
|
{
|
|
@@ -926,24 +928,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
|
|
}
|
|
}
|
|
|
|
-static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
|
|
- struct genl_info *info)
|
|
+static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
|
|
+ struct genl_info *info)
|
|
{
|
|
+ struct pdp_ctx *pctx, *pctx_tid = NULL;
|
|
struct net_device *dev = gtp->dev;
|
|
u32 hash_ms, hash_tid = 0;
|
|
- struct pdp_ctx *pctx;
|
|
+ unsigned int version;
|
|
bool found = false;
|
|
__be32 ms_addr;
|
|
|
|
ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
|
|
hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
|
|
+ version = nla_get_u32(info->attrs[GTPA_VERSION]);
|
|
|
|
-	hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
|
|
- if (pctx->ms_addr_ip4.s_addr == ms_addr) {
|
|
- found = true;
|
|
- break;
|
|
- }
|
|
- }
|
|
+ pctx = ipv4_pdp_find(gtp, ms_addr);
|
|
+ if (pctx)
|
|
+ found = true;
|
|
+ if (version == GTP_V0)
|
|
+ pctx_tid = gtp0_pdp_find(gtp,
|
|
+ nla_get_u64(info->attrs[GTPA_TID]));
|
|
+ else if (version == GTP_V1)
|
|
+ pctx_tid = gtp1_pdp_find(gtp,
|
|
+ nla_get_u32(info->attrs[GTPA_I_TEI]));
|
|
+ if (pctx_tid)
|
|
+ found = true;
|
|
|
|
if (found) {
|
|
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
|
|
@@ -951,6 +960,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
|
|
if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
|
|
return -EOPNOTSUPP;
|
|
|
|
+ if (pctx && pctx_tid)
|
|
+ return -EEXIST;
|
|
+ if (!pctx)
|
|
+ pctx = pctx_tid;
|
|
+
|
|
ipv4_pdp_fill(pctx, info);
|
|
|
|
if (pctx->gtp_version == GTP_V0)
|
|
@@ -1074,7 +1088,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
|
|
goto out_unlock;
|
|
}
|
|
|
|
- err = ipv4_pdp_add(gtp, sk, info);
|
|
+ err = gtp_pdp_add(gtp, sk, info);
|
|
|
|
out_unlock:
|
|
rcu_read_unlock();
|
|
@@ -1232,43 +1246,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
|
|
struct netlink_callback *cb)
|
|
{
|
|
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
|
|
+ int i, j, bucket = cb->args[0], skip = cb->args[1];
|
|
struct net *net = sock_net(skb->sk);
|
|
- struct gtp_net *gn = net_generic(net, gtp_net_id);
|
|
- unsigned long tid = cb->args[1];
|
|
- int i, k = cb->args[0], ret;
|
|
struct pdp_ctx *pctx;
|
|
+ struct gtp_net *gn;
|
|
+
|
|
+ gn = net_generic(net, gtp_net_id);
|
|
|
|
if (cb->args[4])
|
|
return 0;
|
|
|
|
+ rcu_read_lock();
|
|
list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
|
|
if (last_gtp && last_gtp != gtp)
|
|
continue;
|
|
else
|
|
last_gtp = NULL;
|
|
|
|
- for (i = k; i < gtp->hash_size; i++) {
|
|
-		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
|
|
- if (tid && tid != pctx->u.tid)
|
|
- continue;
|
|
- else
|
|
- tid = 0;
|
|
-
|
|
- ret = gtp_genl_fill_info(skb,
|
|
- NETLINK_CB(cb->skb).portid,
|
|
- cb->nlh->nlmsg_seq,
|
|
- cb->nlh->nlmsg_type, pctx);
|
|
- if (ret < 0) {
|
|
+ for (i = bucket; i < gtp->hash_size; i++) {
|
|
+ j = 0;
|
|
+			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
|
|
+ hlist_tid) {
|
|
+ if (j >= skip &&
|
|
+ gtp_genl_fill_info(skb,
|
|
+ NETLINK_CB(cb->skb).portid,
|
|
+ cb->nlh->nlmsg_seq,
|
|
+ cb->nlh->nlmsg_type, pctx)) {
|
|
cb->args[0] = i;
|
|
- cb->args[1] = pctx->u.tid;
|
|
+ cb->args[1] = j;
|
|
cb->args[2] = (unsigned long)gtp;
|
|
goto out;
|
|
}
|
|
+ j++;
|
|
}
|
|
+ skip = 0;
|
|
}
|
|
+ bucket = 0;
|
|
}
|
|
cb->args[4] = 1;
|
|
out:
|
|
+ rcu_read_unlock();
|
|
return skb->len;
|
|
}
|
|
|
|
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
|
|
index 23281aeeb222..71d6629e65c9 100644
|
|
--- a/drivers/net/hamradio/6pack.c
|
|
+++ b/drivers/net/hamradio/6pack.c
|
|
@@ -654,10 +654,10 @@ static void sixpack_close(struct tty_struct *tty)
|
|
{
|
|
struct sixpack *sp;
|
|
|
|
- write_lock_bh(&disc_data_lock);
|
|
+ write_lock_irq(&disc_data_lock);
|
|
sp = tty->disc_data;
|
|
tty->disc_data = NULL;
|
|
- write_unlock_bh(&disc_data_lock);
|
|
+ write_unlock_irq(&disc_data_lock);
|
|
if (!sp)
|
|
return;
|
|
|
|
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
|
|
index c5bfa19ddb93..deef14215110 100644
|
|
--- a/drivers/net/hamradio/mkiss.c
|
|
+++ b/drivers/net/hamradio/mkiss.c
|
|
@@ -773,10 +773,10 @@ static void mkiss_close(struct tty_struct *tty)
|
|
{
|
|
struct mkiss *ax;
|
|
|
|
- write_lock_bh(&disc_data_lock);
|
|
+ write_lock_irq(&disc_data_lock);
|
|
ax = tty->disc_data;
|
|
tty->disc_data = NULL;
|
|
- write_unlock_bh(&disc_data_lock);
|
|
+ write_unlock_irq(&disc_data_lock);
|
|
|
|
if (!ax)
|
|
return;
|
|
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
|
|
index abaf8156d19d..e3d3c9097ff1 100644
|
|
--- a/drivers/net/hyperv/rndis_filter.c
|
|
+++ b/drivers/net/hyperv/rndis_filter.c
|
|
@@ -1165,6 +1165,9 @@ int rndis_set_subchannel(struct net_device *ndev,
|
|
wait_event(nvdev->subchan_open,
|
|
atomic_read(&nvdev->open_chn) == nvdev->num_chn);
|
|
|
|
+ for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
|
|
+ ndev_ctx->tx_table[i] = i % nvdev->num_chn;
|
|
+
|
|
/* ignore failures from setting rss parameters, still have channels */
|
|
if (dev_info)
|
|
rndis_filter_set_rss_param(rdev, dev_info->rss_key);
|
|
@@ -1174,9 +1177,6 @@ int rndis_set_subchannel(struct net_device *ndev,
|
|
netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
|
|
netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
|
|
|
|
- for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
|
|
- ndev_ctx->tx_table[i] = i % nvdev->num_chn;
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
|
|
index 3b29d381116f..975789d9349d 100644
|
|
--- a/drivers/net/phy/aquantia_main.c
|
|
+++ b/drivers/net/phy/aquantia_main.c
|
|
@@ -627,6 +627,8 @@ static struct phy_driver aqr_driver[] = {
|
|
.config_intr = aqr_config_intr,
|
|
.ack_interrupt = aqr_ack_interrupt,
|
|
.read_status = aqr_read_status,
|
|
+ .suspend = aqr107_suspend,
|
|
+ .resume = aqr107_resume,
|
|
},
|
|
{
|
|
PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
|
|
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
|
|
index 536236fdb232..bf5bbb565cf5 100644
|
|
--- a/drivers/net/phy/phylink.c
|
|
+++ b/drivers/net/phy/phylink.c
|
|
@@ -444,8 +444,7 @@ static void phylink_mac_link_up(struct phylink *pl,
|
|
|
|
pl->cur_interface = link_state.interface;
|
|
pl->ops->mac_link_up(pl->config, pl->link_an_mode,
|
|
- pl->phy_state.interface,
|
|
- pl->phydev);
|
|
+ pl->cur_interface, pl->phydev);
|
|
|
|
if (ndev)
|
|
netif_carrier_on(ndev);
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
|
|
index 040cec17d3ad..b0b7eca1754e 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
|
|
@@ -1111,18 +1111,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
|
|
/* same thing for QuZ... */
|
|
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
|
|
- if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
|
|
- iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
|
|
- else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
|
|
- iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
|
|
- else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
|
|
- iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
|
|
- else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
|
|
- iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
|
|
- else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
|
|
- iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
|
|
- else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
|
|
- iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
|
|
+ if (cfg == &iwl_ax101_cfg_qu_hr)
|
|
+ cfg = &iwl_ax101_cfg_quz_hr;
|
|
+ else if (cfg == &iwl_ax201_cfg_qu_hr)
|
|
+ cfg = &iwl_ax201_cfg_quz_hr;
|
|
+ else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
|
|
+ cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
|
|
+ else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
|
|
+ cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
|
|
+ else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
|
|
+ cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
|
|
+ else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
|
|
+ cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
|
|
}
|
|
|
|
#endif
|
|
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
|
|
index 3e9f45aec8d1..5129543a0473 100644
|
|
--- a/drivers/nvdimm/btt.c
|
|
+++ b/drivers/nvdimm/btt.c
|
|
@@ -1261,11 +1261,11 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
|
|
|
|
ret = btt_data_read(arena, page, off, postmap, cur_len);
|
|
if (ret) {
|
|
- int rc;
|
|
-
|
|
/* Media error - set the e_flag */
|
|
- rc = btt_map_write(arena, premap, postmap, 0, 1,
|
|
- NVDIMM_IO_ATOMIC);
|
|
+ if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
|
|
+ dev_warn_ratelimited(to_dev(arena),
|
|
+ "Error persistently tracking bad blocks at %#x\n",
|
|
+ premap);
|
|
goto out_rtt;
|
|
}
|
|
|
|
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
|
|
index 92e895d86458..ca7823eef2b4 100644
|
|
--- a/drivers/of/unittest.c
|
|
+++ b/drivers/of/unittest.c
|
|
@@ -1146,8 +1146,10 @@ static void attach_node_and_children(struct device_node *np)
|
|
full_name = kasprintf(GFP_KERNEL, "%pOF", np);
|
|
|
|
if (!strcmp(full_name, "/__local_fixups__") ||
|
|
- !strcmp(full_name, "/__fixups__"))
|
|
+ !strcmp(full_name, "/__fixups__")) {
|
|
+ kfree(full_name);
|
|
return;
|
|
+ }
|
|
|
|
dup = of_find_node_by_path(full_name);
|
|
kfree(full_name);
|
|
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
|
|
index 18627bb21e9e..32eab1776cfe 100644
|
|
--- a/drivers/pci/hotplug/rpaphp_core.c
|
|
+++ b/drivers/pci/hotplug/rpaphp_core.c
|
|
@@ -154,11 +154,11 @@ static enum pci_bus_speed get_max_bus_speed(struct slot *slot)
|
|
return speed;
|
|
}
|
|
|
|
-static int get_children_props(struct device_node *dn, const int **drc_indexes,
|
|
- const int **drc_names, const int **drc_types,
|
|
- const int **drc_power_domains)
|
|
+static int get_children_props(struct device_node *dn, const __be32 **drc_indexes,
|
|
+ const __be32 **drc_names, const __be32 **drc_types,
|
|
+ const __be32 **drc_power_domains)
|
|
{
|
|
- const int *indexes, *names, *types, *domains;
|
|
+ const __be32 *indexes, *names, *types, *domains;
|
|
|
|
indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
|
|
names = of_get_property(dn, "ibm,drc-names", NULL);
|
|
@@ -194,8 +194,8 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,
|
|
char *drc_type, unsigned int my_index)
|
|
{
|
|
char *name_tmp, *type_tmp;
|
|
- const int *indexes, *names;
|
|
- const int *types, *domains;
|
|
+ const __be32 *indexes, *names;
|
|
+ const __be32 *types, *domains;
|
|
int i, rc;
|
|
|
|
rc = get_children_props(dn->parent, &indexes, &names, &types, &domains);
|
|
@@ -208,7 +208,7 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,
|
|
|
|
/* Iterate through parent properties, looking for my-drc-index */
|
|
for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
|
|
- if ((unsigned int) indexes[i + 1] == my_index)
|
|
+ if (be32_to_cpu(indexes[i + 1]) == my_index)
|
|
break;
|
|
|
|
name_tmp += (strlen(name_tmp) + 1);
|
|
@@ -239,6 +239,8 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
|
|
value = of_prop_next_u32(info, NULL, &entries);
|
|
if (!value)
|
|
return -EINVAL;
|
|
+ else
|
|
+ value++;
|
|
|
|
for (j = 0; j < entries; j++) {
|
|
of_read_drc_info_cell(&info, &value, &drc);
|
|
@@ -246,9 +248,10 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
|
|
/* Should now know end of current entry */
|
|
|
|
/* Found it */
|
|
- if (my_index <= drc.last_drc_index) {
|
|
+ if (my_index >= drc.drc_index_start && my_index <= drc.last_drc_index) {
|
|
+ int index = my_index - drc.drc_index_start;
|
|
sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
|
|
- my_index);
|
|
+ drc.drc_name_suffix_start + index);
|
|
break;
|
|
}
|
|
}
|
|
@@ -265,7 +268,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
|
|
int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
|
|
char *drc_type)
|
|
{
|
|
- const unsigned int *my_index;
|
|
+ const __be32 *my_index;
|
|
|
|
my_index = of_get_property(dn, "ibm,my-drc-index", NULL);
|
|
if (!my_index) {
|
|
@@ -273,12 +276,12 @@ int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (firmware_has_feature(FW_FEATURE_DRC_INFO))
|
|
+ if (of_find_property(dn->parent, "ibm,drc-info", NULL))
|
|
return rpaphp_check_drc_props_v2(dn, drc_name, drc_type,
|
|
- *my_index);
|
|
+ be32_to_cpu(*my_index));
|
|
else
|
|
return rpaphp_check_drc_props_v1(dn, drc_name, drc_type,
|
|
- *my_index);
|
|
+ be32_to_cpu(*my_index));
|
|
}
|
|
EXPORT_SYMBOL_GPL(rpaphp_check_drc_props);
|
|
|
|
@@ -309,10 +312,11 @@ static int is_php_type(char *drc_type)
|
|
* for built-in pci slots (even when the built-in slots are
|
|
* dlparable.)
|
|
*/
|
|
-static int is_php_dn(struct device_node *dn, const int **indexes,
|
|
- const int **names, const int **types, const int **power_domains)
|
|
+static int is_php_dn(struct device_node *dn, const __be32 **indexes,
|
|
+ const __be32 **names, const __be32 **types,
|
|
+ const __be32 **power_domains)
|
|
{
|
|
- const int *drc_types;
|
|
+ const __be32 *drc_types;
|
|
int rc;
|
|
|
|
rc = get_children_props(dn, indexes, names, &drc_types, power_domains);
|
|
@@ -347,7 +351,7 @@ int rpaphp_add_slot(struct device_node *dn)
|
|
struct slot *slot;
|
|
int retval = 0;
|
|
int i;
|
|
- const int *indexes, *names, *types, *power_domains;
|
|
+ const __be32 *indexes, *names, *types, *power_domains;
|
|
char *name, *type;
|
|
|
|
if (!dn->name || strcmp(dn->name, "pci"))
|
|
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
|
|
index ae21d08c65e8..1cab99320514 100644
|
|
--- a/drivers/platform/x86/Kconfig
|
|
+++ b/drivers/platform/x86/Kconfig
|
|
@@ -806,7 +806,6 @@ config PEAQ_WMI
|
|
tristate "PEAQ 2-in-1 WMI hotkey driver"
|
|
depends on ACPI_WMI
|
|
depends on INPUT
|
|
- select INPUT_POLLDEV
|
|
help
|
|
Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
|
|
|
|
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
|
|
index 94a008efb09b..571b4754477c 100644
|
|
--- a/drivers/platform/x86/intel_pmc_core.c
|
|
+++ b/drivers/platform/x86/intel_pmc_core.c
|
|
@@ -158,8 +158,9 @@ static const struct pmc_reg_map spt_reg_map = {
|
|
.pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
|
|
};
|
|
|
|
-/* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */
|
|
+/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
|
|
static const struct pmc_bit_map cnp_pfear_map[] = {
|
|
+ /* Reserved for Cannon Lake but valid for Comet Lake */
|
|
{"PMC", BIT(0)},
|
|
{"OPI-DMI", BIT(1)},
|
|
{"SPI/eSPI", BIT(2)},
|
|
@@ -185,7 +186,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
|
|
{"SDX", BIT(4)},
|
|
{"SPE", BIT(5)},
|
|
{"Fuse", BIT(6)},
|
|
- /* Reserved for Cannonlake but valid for Icelake */
|
|
+ /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
|
|
{"SBR8", BIT(7)},
|
|
|
|
{"CSME_FSC", BIT(0)},
|
|
@@ -229,12 +230,12 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
|
|
{"HDA_PGD4", BIT(2)},
|
|
{"HDA_PGD5", BIT(3)},
|
|
{"HDA_PGD6", BIT(4)},
|
|
- /* Reserved for Cannonlake but valid for Icelake */
|
|
+ /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
|
|
{"PSF6", BIT(5)},
|
|
{"PSF7", BIT(6)},
|
|
{"PSF8", BIT(7)},
|
|
|
|
- /* Icelake generation onwards only */
|
|
+ /* Ice Lake generation onwards only */
|
|
{"RES_65", BIT(0)},
|
|
{"RES_66", BIT(1)},
|
|
{"RES_67", BIT(2)},
|
|
@@ -324,7 +325,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
|
|
{"ISH", CNP_PMC_LTR_ISH},
|
|
{"UFSX2", CNP_PMC_LTR_UFSX2},
|
|
{"EMMC", CNP_PMC_LTR_EMMC},
|
|
- /* Reserved for Cannonlake but valid for Icelake */
|
|
+ /* Reserved for Cannon Lake but valid for Ice Lake */
|
|
{"WIGIG", ICL_PMC_LTR_WIGIG},
|
|
/* Below two cannot be used for LTR_IGNORE */
|
|
{"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
|
|
@@ -813,6 +814,8 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
|
|
INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map),
|
|
INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map),
|
|
INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
|
|
+ INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
|
|
+ INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
|
|
{}
|
|
};
|
|
|
|
@@ -871,8 +874,8 @@ static int pmc_core_probe(struct platform_device *pdev)
|
|
pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;
|
|
|
|
/*
|
|
- * Coffeelake has CPU ID of Kabylake and Cannonlake PCH. So here
|
|
- * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap
|
|
+ * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
|
|
+ * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
|
|
* in this case.
|
|
*/
|
|
if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
|
|
diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c
|
|
index fdeb3624c529..cf9c44c20a82 100644
|
|
--- a/drivers/platform/x86/peaq-wmi.c
|
|
+++ b/drivers/platform/x86/peaq-wmi.c
|
|
@@ -6,7 +6,7 @@
|
|
|
|
#include <linux/acpi.h>
|
|
#include <linux/dmi.h>
|
|
-#include <linux/input-polldev.h>
|
|
+#include <linux/input.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
|
|
@@ -18,8 +18,7 @@
|
|
|
|
MODULE_ALIAS("wmi:"PEAQ_DOLBY_BUTTON_GUID);
|
|
|
|
-static unsigned int peaq_ignore_events_counter;
|
|
-static struct input_polled_dev *peaq_poll_dev;
|
|
+static struct input_dev *peaq_poll_dev;
|
|
|
|
/*
|
|
* The Dolby button (yes really a Dolby button) causes an ACPI variable to get
|
|
@@ -28,8 +27,10 @@ static struct input_polled_dev *peaq_poll_dev;
|
|
* (if polling after the release) or twice (polling between press and release).
|
|
* We ignore events for 0.5s after the first event to avoid reporting 2 presses.
|
|
*/
|
|
-static void peaq_wmi_poll(struct input_polled_dev *dev)
|
|
+static void peaq_wmi_poll(struct input_dev *input_dev)
|
|
{
|
|
+ static unsigned long last_event_time;
|
|
+ static bool had_events;
|
|
union acpi_object obj;
|
|
acpi_status status;
|
|
u32 dummy = 0;
|
|
@@ -44,22 +45,25 @@ static void peaq_wmi_poll(struct input_polled_dev *dev)
|
|
return;
|
|
|
|
if (obj.type != ACPI_TYPE_INTEGER) {
|
|
- dev_err(&peaq_poll_dev->input->dev,
|
|
+ dev_err(&input_dev->dev,
|
|
"Error WMBC did not return an integer\n");
|
|
return;
|
|
}
|
|
|
|
- if (peaq_ignore_events_counter && peaq_ignore_events_counter--)
|
|
+ if (!obj.integer.value)
|
|
return;
|
|
|
|
- if (obj.integer.value) {
|
|
- input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 1);
|
|
- input_sync(peaq_poll_dev->input);
|
|
- input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 0);
|
|
- input_sync(peaq_poll_dev->input);
|
|
- peaq_ignore_events_counter = max(1u,
|
|
- PEAQ_POLL_IGNORE_MS / peaq_poll_dev->poll_interval);
|
|
- }
|
|
+ if (had_events && time_before(jiffies, last_event_time +
|
|
+ msecs_to_jiffies(PEAQ_POLL_IGNORE_MS)))
|
|
+ return;
|
|
+
|
|
+ input_event(input_dev, EV_KEY, KEY_SOUND, 1);
|
|
+ input_sync(input_dev);
|
|
+ input_event(input_dev, EV_KEY, KEY_SOUND, 0);
|
|
+ input_sync(input_dev);
|
|
+
|
|
+ last_event_time = jiffies;
|
|
+ had_events = true;
|
|
}
|
|
|
|
/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */
|
|
@@ -75,6 +79,8 @@ static const struct dmi_system_id peaq_dmi_table[] __initconst = {
|
|
|
|
static int __init peaq_wmi_init(void)
|
|
{
|
|
+ int err;
|
|
+
|
|
/* WMI GUID is not unique, also check for a DMI match */
|
|
if (!dmi_check_system(peaq_dmi_table))
|
|
return -ENODEV;
|
|
@@ -82,24 +88,36 @@ static int __init peaq_wmi_init(void)
|
|
if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
|
|
return -ENODEV;
|
|
|
|
- peaq_poll_dev = input_allocate_polled_device();
|
|
+ peaq_poll_dev = input_allocate_device();
|
|
if (!peaq_poll_dev)
|
|
return -ENOMEM;
|
|
|
|
- peaq_poll_dev->poll = peaq_wmi_poll;
|
|
- peaq_poll_dev->poll_interval = PEAQ_POLL_INTERVAL_MS;
|
|
- peaq_poll_dev->poll_interval_max = PEAQ_POLL_MAX_MS;
|
|
- peaq_poll_dev->input->name = "PEAQ WMI hotkeys";
|
|
- peaq_poll_dev->input->phys = "wmi/input0";
|
|
- peaq_poll_dev->input->id.bustype = BUS_HOST;
|
|
- input_set_capability(peaq_poll_dev->input, EV_KEY, KEY_SOUND);
|
|
+ peaq_poll_dev->name = "PEAQ WMI hotkeys";
|
|
+ peaq_poll_dev->phys = "wmi/input0";
|
|
+ peaq_poll_dev->id.bustype = BUS_HOST;
|
|
+ input_set_capability(peaq_poll_dev, EV_KEY, KEY_SOUND);
|
|
+
|
|
+ err = input_setup_polling(peaq_poll_dev, peaq_wmi_poll);
|
|
+ if (err)
|
|
+ goto err_out;
|
|
+
|
|
+ input_set_poll_interval(peaq_poll_dev, PEAQ_POLL_INTERVAL_MS);
|
|
+ input_set_max_poll_interval(peaq_poll_dev, PEAQ_POLL_MAX_MS);
|
|
+
|
|
+ err = input_register_device(peaq_poll_dev);
|
|
+ if (err)
|
|
+ goto err_out;
|
|
+
|
|
+ return 0;
|
|
|
|
- return input_register_polled_device(peaq_poll_dev);
|
|
+err_out:
|
|
+ input_free_device(peaq_poll_dev);
|
|
+ return err;
|
|
}
|
|
|
|
static void __exit peaq_wmi_exit(void)
|
|
{
|
|
- input_unregister_polled_device(peaq_poll_dev);
|
|
+ input_unregister_device(peaq_poll_dev);
|
|
}
|
|
|
|
module_init(peaq_wmi_init);
|
|
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
|
|
index e60eab7f8a61..61fafe0374ce 100644
|
|
--- a/drivers/ptp/ptp_clock.c
|
|
+++ b/drivers/ptp/ptp_clock.c
|
|
@@ -166,9 +166,9 @@ static struct posix_clock_operations ptp_clock_ops = {
|
|
.read = ptp_read,
|
|
};
|
|
|
|
-static void delete_ptp_clock(struct posix_clock *pc)
|
|
+static void ptp_clock_release(struct device *dev)
|
|
{
|
|
- struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
|
|
+ struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
|
|
|
|
mutex_destroy(&ptp->tsevq_mux);
|
|
mutex_destroy(&ptp->pincfg_mux);
|
|
@@ -213,7 +213,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
|
|
}
|
|
|
|
ptp->clock.ops = ptp_clock_ops;
|
|
- ptp->clock.release = delete_ptp_clock;
|
|
ptp->info = info;
|
|
ptp->devid = MKDEV(major, index);
|
|
ptp->index = index;
|
|
@@ -236,15 +235,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
|
|
if (err)
|
|
goto no_pin_groups;
|
|
|
|
- /* Create a new device in our class. */
|
|
- ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
|
|
- ptp, ptp->pin_attr_groups,
|
|
- "ptp%d", ptp->index);
|
|
- if (IS_ERR(ptp->dev)) {
|
|
- err = PTR_ERR(ptp->dev);
|
|
- goto no_device;
|
|
- }
|
|
-
|
|
/* Register a new PPS source. */
|
|
if (info->pps) {
|
|
struct pps_source_info pps;
|
|
@@ -260,8 +250,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
|
|
}
|
|
}
|
|
|
|
- /* Create a posix clock. */
|
|
- err = posix_clock_register(&ptp->clock, ptp->devid);
|
|
+ /* Initialize a new device of our class in our clock structure. */
|
|
+ device_initialize(&ptp->dev);
|
|
+ ptp->dev.devt = ptp->devid;
|
|
+ ptp->dev.class = ptp_class;
|
|
+ ptp->dev.parent = parent;
|
|
+ ptp->dev.groups = ptp->pin_attr_groups;
|
|
+ ptp->dev.release = ptp_clock_release;
|
|
+ dev_set_drvdata(&ptp->dev, ptp);
|
|
+ dev_set_name(&ptp->dev, "ptp%d", ptp->index);
|
|
+
|
|
+ /* Create a posix clock and link it to the device. */
|
|
+ err = posix_clock_register(&ptp->clock, &ptp->dev);
|
|
if (err) {
|
|
pr_err("failed to create posix clock\n");
|
|
goto no_clock;
|
|
@@ -273,8 +273,6 @@ no_clock:
|
|
if (ptp->pps_source)
|
|
pps_unregister_source(ptp->pps_source);
|
|
no_pps:
|
|
- device_destroy(ptp_class, ptp->devid);
|
|
-no_device:
|
|
ptp_cleanup_pin_groups(ptp);
|
|
no_pin_groups:
|
|
if (ptp->kworker)
|
|
@@ -304,7 +302,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
|
|
if (ptp->pps_source)
|
|
pps_unregister_source(ptp->pps_source);
|
|
|
|
- device_destroy(ptp_class, ptp->devid);
|
|
ptp_cleanup_pin_groups(ptp);
|
|
|
|
posix_clock_unregister(&ptp->clock);
|
|
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
|
|
index 9171d42468fd..6b97155148f1 100644
|
|
--- a/drivers/ptp/ptp_private.h
|
|
+++ b/drivers/ptp/ptp_private.h
|
|
@@ -28,7 +28,7 @@ struct timestamp_event_queue {
|
|
|
|
struct ptp_clock {
|
|
struct posix_clock clock;
|
|
- struct device *dev;
|
|
+ struct device dev;
|
|
struct ptp_clock_info *info;
|
|
dev_t devid;
|
|
int index; /* index into clocks.map */
|
|
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
|
|
index f34ee41cbed8..4f4dd9d727c9 100644
|
|
--- a/drivers/s390/crypto/zcrypt_error.h
|
|
+++ b/drivers/s390/crypto/zcrypt_error.h
|
|
@@ -61,6 +61,7 @@ struct error_hdr {
|
|
#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
|
|
#define REP82_ERROR_RESERVED_FIELD 0x88
|
|
#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
|
|
+#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
|
|
#define REP82_ERROR_TRANSPORT_FAIL 0x90
|
|
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
|
|
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
|
|
@@ -91,6 +92,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
|
|
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
|
|
case REP82_ERROR_INVALID_DOMAIN_PENDING:
|
|
case REP82_ERROR_INVALID_SPECIAL_CMD:
|
|
+ case REP82_ERROR_FILTERED_BY_HYPERVISOR:
|
|
// REP88_ERROR_INVALID_KEY // '82' CEX2A
|
|
// REP88_ERROR_OPERAND // '84' CEX2A
|
|
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
|
|
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
|
|
index 536426f25e86..d4401c768a0c 100644
|
|
--- a/drivers/scsi/NCR5380.c
|
|
+++ b/drivers/scsi/NCR5380.c
|
|
@@ -129,6 +129,9 @@
|
|
#define NCR5380_release_dma_irq(x)
|
|
#endif
|
|
|
|
+static unsigned int disconnect_mask = ~0;
|
|
+module_param(disconnect_mask, int, 0444);
|
|
+
|
|
static int do_abort(struct Scsi_Host *);
|
|
static void do_reset(struct Scsi_Host *);
|
|
static void bus_reset_cleanup(struct Scsi_Host *);
|
|
@@ -954,7 +957,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
|
|
int err;
|
|
bool ret = true;
|
|
bool can_disconnect = instance->irq != NO_IRQ &&
|
|
- cmd->cmnd[0] != REQUEST_SENSE;
|
|
+ cmd->cmnd[0] != REQUEST_SENSE &&
|
|
+ (disconnect_mask & BIT(scmd_id(cmd)));
|
|
|
|
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
|
|
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
|
|
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
|
|
index e809493d0d06..a82b63a66635 100644
|
|
--- a/drivers/scsi/atari_scsi.c
|
|
+++ b/drivers/scsi/atari_scsi.c
|
|
@@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
|
|
atari_scsi_template.sg_tablesize = SG_ALL;
|
|
} else {
|
|
atari_scsi_template.can_queue = 1;
|
|
- atari_scsi_template.sg_tablesize = SG_NONE;
|
|
+ atari_scsi_template.sg_tablesize = 1;
|
|
}
|
|
|
|
if (setup_can_queue > 0)
|
|
@@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
|
|
if (setup_cmd_per_lun > 0)
|
|
atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
|
|
|
|
- /* Leave sg_tablesize at 0 on a Falcon! */
|
|
- if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
|
|
+ /* Don't increase sg_tablesize on Falcon! */
|
|
+ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
|
|
atari_scsi_template.sg_tablesize = setup_sg_tablesize;
|
|
|
|
if (setup_hostid >= 0) {
|
|
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
|
|
index 66e58f0a75dc..23cbe4cda760 100644
|
|
--- a/drivers/scsi/csiostor/csio_lnode.c
|
|
+++ b/drivers/scsi/csiostor/csio_lnode.c
|
|
@@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
|
|
struct fc_fdmi_port_name *port_name;
|
|
uint8_t buf[64];
|
|
uint8_t *fc4_type;
|
|
+ unsigned long flags;
|
|
|
|
if (fdmi_req->wr_status != FW_SUCCESS) {
|
|
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
|
|
@@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
|
|
len = (uint32_t)(pld - (uint8_t *)cmd);
|
|
|
|
/* Submit FDMI RPA request */
|
|
- spin_lock_irq(&hw->lock);
|
|
+ spin_lock_irqsave(&hw->lock, flags);
|
|
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
|
|
FCOE_CT, &fdmi_req->dma_buf, len)) {
|
|
CSIO_INC_STATS(ln, n_fdmi_err);
|
|
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
|
|
}
|
|
- spin_unlock_irq(&hw->lock);
|
|
+ spin_unlock_irqrestore(&hw->lock, flags);
|
|
}
|
|
|
|
/*
|
|
@@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
|
|
struct fc_fdmi_rpl *reg_pl;
|
|
struct fs_fdmi_attrs *attrib_blk;
|
|
uint8_t buf[64];
|
|
+ unsigned long flags;
|
|
|
|
if (fdmi_req->wr_status != FW_SUCCESS) {
|
|
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
|
|
@@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
|
|
attrib_blk->numattrs = htonl(numattrs);
|
|
|
|
/* Submit FDMI RHBA request */
|
|
- spin_lock_irq(&hw->lock);
|
|
+ spin_lock_irqsave(&hw->lock, flags);
|
|
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
|
|
FCOE_CT, &fdmi_req->dma_buf, len)) {
|
|
CSIO_INC_STATS(ln, n_fdmi_err);
|
|
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
|
|
}
|
|
- spin_unlock_irq(&hw->lock);
|
|
+ spin_unlock_irqrestore(&hw->lock, flags);
|
|
}
|
|
|
|
/*
|
|
@@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
|
|
void *cmd;
|
|
struct fc_fdmi_port_name *port_name;
|
|
uint32_t len;
|
|
+ unsigned long flags;
|
|
|
|
if (fdmi_req->wr_status != FW_SUCCESS) {
|
|
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
|
|
@@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
|
|
len += sizeof(*port_name);
|
|
|
|
/* Submit FDMI request */
|
|
- spin_lock_irq(&hw->lock);
|
|
+ spin_lock_irqsave(&hw->lock, flags);
|
|
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
|
|
FCOE_CT, &fdmi_req->dma_buf, len)) {
|
|
CSIO_INC_STATS(ln, n_fdmi_err);
|
|
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
|
|
}
|
|
- spin_unlock_irq(&hw->lock);
|
|
+ spin_unlock_irqrestore(&hw->lock, flags);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
|
|
index 0847e682797b..633effb09c9c 100644
|
|
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
|
|
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
|
|
@@ -587,7 +587,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
|
|
dev = hisi_hba->dev;
|
|
|
|
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
|
|
- if (in_softirq())
|
|
+ /*
|
|
+ * For IOs from upper layer, it may already disable preempt
|
|
+ * in the IO path, if disable preempt again in down(),
|
|
+ * function schedule() will report schedule_bug(), so check
|
|
+ * preemptible() before goto down().
|
|
+ */
|
|
+ if (!preemptible())
|
|
return -EINVAL;
|
|
|
|
down(&hisi_hba->sem);
|
|
@@ -2676,6 +2682,7 @@ int hisi_sas_probe(struct platform_device *pdev,
|
|
err_out_register_ha:
|
|
scsi_remove_host(shost);
|
|
err_out_ha:
|
|
+ hisi_sas_debugfs_exit(hisi_hba);
|
|
hisi_sas_free(hisi_hba);
|
|
scsi_host_put(shost);
|
|
return rc;
|
|
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
|
|
index cb8d087762db..ef32ee12f606 100644
|
|
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
|
|
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
|
|
@@ -3259,6 +3259,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
err_out_register_ha:
|
|
scsi_remove_host(shost);
|
|
err_out_ha:
|
|
+ hisi_sas_debugfs_exit(hisi_hba);
|
|
scsi_host_put(shost);
|
|
err_out_regions:
|
|
pci_release_regions(pdev);
|
|
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
|
|
index 7bedbe877704..0bc63a7ab41c 100644
|
|
--- a/drivers/scsi/iscsi_tcp.c
|
|
+++ b/drivers/scsi/iscsi_tcp.c
|
|
@@ -369,8 +369,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
|
|
{
|
|
struct iscsi_conn *conn = task->conn;
|
|
unsigned int noreclaim_flag;
|
|
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
|
|
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
|
|
int rc = 0;
|
|
|
|
+ if (!tcp_sw_conn->sock) {
|
|
+ iscsi_conn_printk(KERN_ERR, conn,
|
|
+ "Transport not bound to socket!\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
noreclaim_flag = memalloc_noreclaim_save();
|
|
|
|
while (iscsi_sw_tcp_xmit_qlen(conn)) {
|
|
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
|
|
index 25e86706e207..f883fac2d2b1 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_ct.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_ct.c
|
|
@@ -1868,6 +1868,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
|
|
switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
|
|
case IOERR_SLI_ABORTED:
|
|
+ case IOERR_SLI_DOWN:
|
|
+ /* Driver aborted this IO. No retry as error
|
|
+ * is likely Offline->Online or some adapter
|
|
+ * error. Recovery will try again.
|
|
+ */
|
|
+ break;
|
|
case IOERR_ABORT_IN_PROGRESS:
|
|
case IOERR_SEQUENCE_TIMEOUT:
|
|
case IOERR_ILLEGAL_FRAME:
|
|
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
|
|
index d5303994bfd6..66f8867dd837 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_els.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_els.c
|
|
@@ -2236,6 +2236,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
|
IOCB_t *irsp;
|
|
struct lpfc_nodelist *ndlp;
|
|
+ char *mode;
|
|
|
|
/* we pass cmdiocb to state machine which needs rspiocb as well */
|
|
cmdiocb->context_un.rsp_iocb = rspiocb;
|
|
@@ -2273,8 +2274,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
goto out;
|
|
}
|
|
|
|
+ /* If we don't send GFT_ID to Fabric, a PRLI error
|
|
+ * could be expected.
|
|
+ */
|
|
+ if ((vport->fc_flag & FC_FABRIC) ||
|
|
+ (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
|
|
+ mode = KERN_ERR;
|
|
+ else
|
|
+ mode = KERN_INFO;
|
|
+
|
|
/* PRLI failed */
|
|
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
|
|
+ lpfc_printf_vlog(vport, mode, LOG_ELS,
|
|
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
|
|
"data: x%x\n",
|
|
ndlp->nlp_DID, irsp->ulpStatus,
|
|
@@ -4430,7 +4440,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
mempool_free(mbox, phba->mbox_mem_pool);
|
|
}
|
|
out:
|
|
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
|
|
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
|
|
spin_lock_irq(shost->host_lock);
|
|
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
|
|
spin_unlock_irq(shost->host_lock);
|
|
@@ -6455,7 +6465,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
|
uint32_t payload_len, length, nportid, *cmd;
|
|
int rscn_cnt;
|
|
int rscn_id = 0, hba_id = 0;
|
|
- int i;
|
|
+ int i, tmo;
|
|
|
|
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
|
|
lp = (uint32_t *) pcmd->virt;
|
|
@@ -6561,6 +6571,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
|
|
|
|
spin_lock_irq(shost->host_lock);
|
|
vport->fc_flag |= FC_RSCN_DEFERRED;
|
|
+
|
|
+ /* Restart disctmo if its already running */
|
|
+ if (vport->fc_flag & FC_DISC_TMO) {
|
|
+ tmo = ((phba->fc_ratov * 3) + 3);
|
|
+ mod_timer(&vport->fc_disctmo,
|
|
+ jiffies + msecs_to_jiffies(1000 * tmo));
|
|
+ }
|
|
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
|
|
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
|
|
vport->fc_flag |= FC_RSCN_MODE;
|
|
@@ -7986,20 +8003,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
|
|
struct lpfc_sli_ring *pring;
|
|
struct lpfc_iocbq *tmp_iocb, *piocb;
|
|
IOCB_t *cmd = NULL;
|
|
+ unsigned long iflags = 0;
|
|
|
|
lpfc_fabric_abort_vport(vport);
|
|
+
|
|
/*
|
|
* For SLI3, only the hbalock is required. But SLI4 needs to coordinate
|
|
* with the ring insert operation. Because lpfc_sli_issue_abort_iotag
|
|
* ultimately grabs the ring_lock, the driver must splice the list into
|
|
* a working list and release the locks before calling the abort.
|
|
*/
|
|
- spin_lock_irq(&phba->hbalock);
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
pring = lpfc_phba_elsring(phba);
|
|
|
|
/* Bail out if we've no ELS wq, like in PCI error recovery case. */
|
|
if (unlikely(!pring)) {
|
|
- spin_unlock_irq(&phba->hbalock);
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
return;
|
|
}
|
|
|
|
@@ -8014,6 +8033,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
|
|
if (piocb->vport != vport)
|
|
continue;
|
|
|
|
+ if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
|
|
+ continue;
|
|
+
|
|
/* On the ELS ring we can have ELS_REQUESTs or
|
|
* GEN_REQUESTs waiting for a response.
|
|
*/
|
|
@@ -8037,21 +8059,21 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
|
|
|
|
if (phba->sli_rev == LPFC_SLI_REV4)
|
|
spin_unlock(&pring->ring_lock);
|
|
- spin_unlock_irq(&phba->hbalock);
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
|
/* Abort each txcmpl iocb on aborted list and remove the dlist links. */
|
|
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
|
|
- spin_lock_irq(&phba->hbalock);
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
list_del_init(&piocb->dlist);
|
|
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
|
|
- spin_unlock_irq(&phba->hbalock);
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
}
|
|
if (!list_empty(&abort_list))
|
|
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
|
|
"3387 abort list for txq not empty\n");
|
|
INIT_LIST_HEAD(&abort_list);
|
|
|
|
- spin_lock_irq(&phba->hbalock);
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
if (phba->sli_rev == LPFC_SLI_REV4)
|
|
spin_lock(&pring->ring_lock);
|
|
|
|
@@ -8091,7 +8113,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
|
|
|
|
if (phba->sli_rev == LPFC_SLI_REV4)
|
|
spin_unlock(&pring->ring_lock);
|
|
- spin_unlock_irq(&phba->hbalock);
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
|
|
/* Cancel all the IOCBs from the completions list */
|
|
lpfc_sli_cancel_iocbs(phba, &abort_list,
|
|
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
index 749286acdc17..1286c658ba34 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
|
|
@@ -700,7 +700,10 @@ lpfc_work_done(struct lpfc_hba *phba)
|
|
if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
|
|
set_bit(LPFC_DATA_READY, &phba->data_flags);
|
|
} else {
|
|
- if (phba->link_state >= LPFC_LINK_UP ||
|
|
+ /* Driver could have abort request completed in queue
|
|
+ * when link goes down. Allow for this transition.
|
|
+ */
|
|
+ if (phba->link_state >= LPFC_LINK_DOWN ||
|
|
phba->link_flag & LS_MDS_LOOPBACK) {
|
|
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
|
|
lpfc_sli_handle_slow_ring_event(phba, pring,
|
|
@@ -5405,9 +5408,14 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
|
|
/* If we've already received a PLOGI from this NPort
|
|
* we don't need to try to discover it again.
|
|
*/
|
|
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
|
|
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
|
|
+ !(ndlp->nlp_type &
|
|
+ (NLP_FCP_TARGET | NLP_NVME_TARGET)))
|
|
return NULL;
|
|
|
|
+ ndlp->nlp_prev_state = ndlp->nlp_state;
|
|
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
|
|
+
|
|
spin_lock_irq(shost->host_lock);
|
|
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
|
|
spin_unlock_irq(shost->host_lock);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
index fc6e4546d738..696171382558 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
@@ -484,8 +484,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
* single discovery thread, this will cause a huge delay in
|
|
* discovery. Also this will cause multiple state machines
|
|
* running in parallel for this node.
|
|
+ * This only applies to a fabric environment.
|
|
*/
|
|
- if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
|
|
+ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
|
|
+ (vport->fc_flag & FC_FABRIC)) {
|
|
/* software abort outstanding PLOGI */
|
|
lpfc_els_abort(phba, ndlp);
|
|
}
|
|
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
|
|
index ad8ef67a1db3..aa82d538a18a 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_scsi.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
|
|
@@ -4846,20 +4846,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
|
ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
|
|
abtsiocb, 0);
|
|
}
|
|
- /* no longer need the lock after this point */
|
|
- spin_unlock_irqrestore(&phba->hbalock, flags);
|
|
|
|
if (ret_val == IOCB_ERROR) {
|
|
/* Indicate the IO is not being aborted by the driver. */
|
|
iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
|
|
lpfc_cmd->waitq = NULL;
|
|
spin_unlock(&lpfc_cmd->buf_lock);
|
|
+ spin_unlock_irqrestore(&phba->hbalock, flags);
|
|
lpfc_sli_release_iocbq(phba, abtsiocb);
|
|
ret = FAILED;
|
|
goto out;
|
|
}
|
|
|
|
+ /* no longer need the lock after this point */
|
|
spin_unlock(&lpfc_cmd->buf_lock);
|
|
+ spin_unlock_irqrestore(&phba->hbalock, flags);
|
|
|
|
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
|
|
lpfc_sli_handle_fast_ring_event(phba,
|
|
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
|
|
index 5ed4219675eb..2b0e7b32c2df 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_sli.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_sli.c
|
|
@@ -11050,9 +11050,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
irsp->ulpStatus, irsp->un.ulpWord[4]);
|
|
|
|
spin_unlock_irq(&phba->hbalock);
|
|
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
|
|
- irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
|
|
- lpfc_sli_release_iocbq(phba, abort_iocb);
|
|
}
|
|
release_iocb:
|
|
lpfc_sli_release_iocbq(phba, cmdiocb);
|
|
@@ -13161,13 +13158,19 @@ send_current_mbox:
|
|
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
|
|
/* Setting active mailbox pointer need to be in sync to flag clear */
|
|
phba->sli.mbox_active = NULL;
|
|
+ if (bf_get(lpfc_trailer_consumed, mcqe))
|
|
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
|
|
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
/* Wake up worker thread to post the next pending mailbox command */
|
|
lpfc_worker_wake_up(phba);
|
|
+ return workposted;
|
|
+
|
|
out_no_mqe_complete:
|
|
+ spin_lock_irqsave(&phba->hbalock, iflags);
|
|
if (bf_get(lpfc_trailer_consumed, mcqe))
|
|
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
|
|
- return workposted;
|
|
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
|
|
+ return false;
|
|
}
|
|
|
|
/**
|
|
@@ -18184,6 +18187,13 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
|
|
static void
|
|
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
|
|
{
|
|
+ /*
|
|
+ * if the rpi value indicates a prior unreg has already
|
|
+ * been done, skip the unreg.
|
|
+ */
|
|
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
|
|
+ return;
|
|
+
|
|
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
|
|
phba->sli4_hba.rpi_count--;
|
|
phba->sli4_hba.max_cfg_param.rpi_used--;
|
|
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
|
|
index 9c5566217ef6..b5dde9d0d054 100644
|
|
--- a/drivers/scsi/mac_scsi.c
|
|
+++ b/drivers/scsi/mac_scsi.c
|
|
@@ -464,7 +464,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
|
|
mac_scsi_template.can_queue = setup_can_queue;
|
|
if (setup_cmd_per_lun > 0)
|
|
mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
|
|
- if (setup_sg_tablesize >= 0)
|
|
+ if (setup_sg_tablesize > 0)
|
|
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
|
|
if (setup_hostid >= 0)
|
|
mac_scsi_template.this_id = setup_hostid & 7;
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
|
|
index 7d696952b376..b95f7d062ea4 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
|
|
@@ -778,6 +778,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
|
|
case MPI2_FUNCTION_NVME_ENCAPSULATED:
|
|
{
|
|
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
|
|
+ if (!ioc->pcie_sg_lookup) {
|
|
+ dtmprintk(ioc, ioc_info(ioc,
|
|
+ "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
|
|
+ ));
|
|
+
|
|
+ if (ioc->logging_level & MPT_DEBUG_TM)
|
|
+ _debug_dump_mf(nvme_encap_request,
|
|
+ ioc->request_sz/4);
|
|
+ mpt3sas_base_free_smid(ioc, smid);
|
|
+ ret = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
/*
|
|
* Get the Physical Address of the sense buffer.
|
|
* Use Error Response buffer address field to hold the sense
|
|
@@ -1584,7 +1596,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
|
|
ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
|
|
__func__, request_data_sz);
|
|
mpt3sas_base_free_smid(ioc, smid);
|
|
- return -ENOMEM;
|
|
+ rc = -ENOMEM;
|
|
+ goto out;
|
|
}
|
|
ioc->diag_buffer[buffer_type] = request_data;
|
|
ioc->diag_buffer_sz[buffer_type] = request_data_sz;
|
|
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
|
|
index 73261902d75d..161bf4760eac 100644
|
|
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
|
|
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
|
|
@@ -2382,6 +2382,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
|
pm8001_printk("task 0x%p done with io_status 0x%x"
|
|
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
|
|
t, status, ts->resp, ts->stat));
|
|
+ if (t->slow_task)
|
|
+ complete(&t->slow_task->completion);
|
|
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
|
|
} else {
|
|
spin_unlock_irqrestore(&t->task_state_lock, flags);
|
|
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
|
|
index d323523f5f9d..32965ec76965 100644
|
|
--- a/drivers/scsi/scsi_debug.c
|
|
+++ b/drivers/scsi/scsi_debug.c
|
|
@@ -5263,6 +5263,11 @@ static int __init scsi_debug_init(void)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (sdebug_num_tgts < 0) {
|
|
+ pr_err("num_tgts must be >= 0\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
if (sdebug_guard > 1) {
|
|
pr_err("guard must be 0 or 1\n");
|
|
return -EINVAL;
|
|
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
|
|
index 0f17e7dac1b0..07a2425ffa2c 100644
|
|
--- a/drivers/scsi/scsi_trace.c
|
|
+++ b/drivers/scsi/scsi_trace.c
|
|
@@ -18,15 +18,18 @@ static const char *
|
|
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
|
|
{
|
|
const char *ret = trace_seq_buffer_ptr(p);
|
|
- sector_t lba = 0, txlen = 0;
|
|
+ u32 lba = 0, txlen;
|
|
|
|
lba |= ((cdb[1] & 0x1F) << 16);
|
|
lba |= (cdb[2] << 8);
|
|
lba |= cdb[3];
|
|
- txlen = cdb[4];
|
|
+ /*
|
|
+ * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
|
|
+ * logical blocks shall be read (READ(6)) or written (WRITE(6)).
|
|
+ */
|
|
+ txlen = cdb[4] ? cdb[4] : 256;
|
|
|
|
- trace_seq_printf(p, "lba=%llu txlen=%llu",
|
|
- (unsigned long long)lba, (unsigned long long)txlen);
|
|
+ trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
|
|
trace_seq_putc(p, 0);
|
|
|
|
return ret;
|
|
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
|
|
index 955e4c938d49..701b842296f0 100644
|
|
--- a/drivers/scsi/sun3_scsi.c
|
|
+++ b/drivers/scsi/sun3_scsi.c
|
|
@@ -501,7 +501,7 @@ static struct scsi_host_template sun3_scsi_template = {
|
|
.eh_host_reset_handler = sun3scsi_host_reset,
|
|
.can_queue = 16,
|
|
.this_id = 7,
|
|
- .sg_tablesize = SG_NONE,
|
|
+ .sg_tablesize = 1,
|
|
.cmd_per_lun = 2,
|
|
.dma_boundary = PAGE_SIZE - 1,
|
|
.cmd_size = NCR5380_CMD_SIZE,
|
|
@@ -523,7 +523,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
|
|
sun3_scsi_template.can_queue = setup_can_queue;
|
|
if (setup_cmd_per_lun > 0)
|
|
sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
|
|
- if (setup_sg_tablesize >= 0)
|
|
+ if (setup_sg_tablesize > 0)
|
|
sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
|
|
if (setup_hostid >= 0)
|
|
sun3_scsi_template.this_id = setup_hostid & 7;
|
|
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
|
|
index 969a36b15897..ad2abc96c0f1 100644
|
|
--- a/drivers/scsi/ufs/ufs-sysfs.c
|
|
+++ b/drivers/scsi/ufs/ufs-sysfs.c
|
|
@@ -126,13 +126,16 @@ static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
|
|
return;
|
|
|
|
spin_lock_irqsave(hba->host->host_lock, flags);
|
|
- if (hba->ahit == ahit)
|
|
- goto out_unlock;
|
|
- hba->ahit = ahit;
|
|
- if (!pm_runtime_suspended(hba->dev))
|
|
- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
|
|
-out_unlock:
|
|
+ if (hba->ahit != ahit)
|
|
+ hba->ahit = ahit;
|
|
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
|
+ if (!pm_runtime_suspended(hba->dev)) {
|
|
+ pm_runtime_get_sync(hba->dev);
|
|
+ ufshcd_hold(hba, false);
|
|
+ ufshcd_auto_hibern8_enable(hba);
|
|
+ ufshcd_release(hba);
|
|
+ pm_runtime_put(hba->dev);
|
|
+ }
|
|
}
|
|
|
|
/* Convert Auto-Hibernate Idle Timer register value to microseconds */
|
|
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
|
|
index 11a87f51c442..25a6a25b17a2 100644
|
|
--- a/drivers/scsi/ufs/ufshcd.c
|
|
+++ b/drivers/scsi/ufs/ufshcd.c
|
|
@@ -2986,10 +2986,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
|
|
goto out_unlock;
|
|
}
|
|
|
|
- hba->dev_cmd.query.descriptor = NULL;
|
|
*buf_len = be16_to_cpu(response->upiu_res.length);
|
|
|
|
out_unlock:
|
|
+ hba->dev_cmd.query.descriptor = NULL;
|
|
mutex_unlock(&hba->dev_cmd.lock);
|
|
out:
|
|
ufshcd_release(hba);
|
|
@@ -3885,15 +3885,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
|
|
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
|
|
|
|
if (ret) {
|
|
+ int err;
|
|
+
|
|
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
|
|
__func__, ret);
|
|
|
|
/*
|
|
- * If link recovery fails then return error so that caller
|
|
- * don't retry the hibern8 enter again.
|
|
+ * If link recovery fails then return error code returned from
|
|
+ * ufshcd_link_recovery().
|
|
+ * If link recovery succeeds then return -EAGAIN to attempt
|
|
+ * hibern8 enter retry again.
|
|
*/
|
|
- if (ufshcd_link_recovery(hba))
|
|
- ret = -ENOLINK;
|
|
+ err = ufshcd_link_recovery(hba);
|
|
+ if (err) {
|
|
+ dev_err(hba->dev, "%s: link recovery failed", __func__);
|
|
+ ret = err;
|
|
+ } else {
|
|
+ ret = -EAGAIN;
|
|
+ }
|
|
} else
|
|
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
|
|
POST_CHANGE);
|
|
@@ -3907,7 +3916,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
|
|
|
|
for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
|
|
ret = __ufshcd_uic_hibern8_enter(hba);
|
|
- if (!ret || ret == -ENOLINK)
|
|
+ if (!ret)
|
|
goto out;
|
|
}
|
|
out:
|
|
@@ -3941,7 +3950,7 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
|
|
return ret;
|
|
}
|
|
|
|
-static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
|
|
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
|
|
{
|
|
unsigned long flags;
|
|
|
|
@@ -6881,9 +6890,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
|
|
/* UniPro link is active now */
|
|
ufshcd_set_link_active(hba);
|
|
|
|
- /* Enable Auto-Hibernate if configured */
|
|
- ufshcd_auto_hibern8_enable(hba);
|
|
-
|
|
ret = ufshcd_verify_dev_init(hba);
|
|
if (ret)
|
|
goto out;
|
|
@@ -6934,6 +6940,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
|
|
/* set the state as operational after switching to desired gear */
|
|
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
|
|
|
|
+ /* Enable Auto-Hibernate if configured */
|
|
+ ufshcd_auto_hibern8_enable(hba);
|
|
+
|
|
/*
|
|
* If we are in error handling context or in power management callbacks
|
|
* context, no need to scan the host
|
|
@@ -7950,12 +7959,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
|
|
if (hba->clk_scaling.is_allowed)
|
|
ufshcd_resume_clkscaling(hba);
|
|
|
|
- /* Schedule clock gating in case of no access to UFS device yet */
|
|
- ufshcd_release(hba);
|
|
-
|
|
/* Enable Auto-Hibernate if configured */
|
|
ufshcd_auto_hibern8_enable(hba);
|
|
|
|
+ /* Schedule clock gating in case of no access to UFS device yet */
|
|
+ ufshcd_release(hba);
|
|
+
|
|
goto out;
|
|
|
|
set_old_link_state:
|
|
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
|
|
index c94cfda52829..52c9676a1242 100644
|
|
--- a/drivers/scsi/ufs/ufshcd.h
|
|
+++ b/drivers/scsi/ufs/ufshcd.h
|
|
@@ -916,6 +916,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
|
|
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
|
|
enum flag_idn idn, bool *flag_res);
|
|
|
|
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
|
|
+
|
|
#define SD_ASCII_STD true
|
|
#define SD_RAW false
|
|
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
|
|
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
|
|
index ca8e3abeb2c7..a23a8e5794f5 100644
|
|
--- a/drivers/scsi/zorro_esp.c
|
|
+++ b/drivers/scsi/zorro_esp.c
|
|
@@ -218,7 +218,14 @@ static int fastlane_esp_irq_pending(struct esp *esp)
|
|
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
|
|
u32 dma_len)
|
|
{
|
|
- return dma_len > 0xFFFF ? 0xFFFF : dma_len;
|
|
+ return dma_len > (1U << 16) ? (1U << 16) : dma_len;
|
|
+}
|
|
+
|
|
+static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
|
|
+ u32 dma_len)
|
|
+{
|
|
+ /* The old driver used 0xfffc as limit, so do that here too */
|
|
+ return dma_len > 0xfffc ? 0xfffc : dma_len;
|
|
}
|
|
|
|
static void zorro_esp_reset_dma(struct esp *esp)
|
|
@@ -604,7 +611,7 @@ static const struct esp_driver_ops fastlane_esp_ops = {
|
|
.esp_write8 = zorro_esp_write8,
|
|
.esp_read8 = zorro_esp_read8,
|
|
.irq_pending = fastlane_esp_irq_pending,
|
|
- .dma_length_limit = zorro_esp_dma_length_limit,
|
|
+ .dma_length_limit = fastlane_esp_dma_length_limit,
|
|
.reset_dma = zorro_esp_reset_dma,
|
|
.dma_drain = zorro_esp_dma_drain,
|
|
.dma_invalidate = fastlane_esp_dma_invalidate,
|
|
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
|
|
index d19e051f2bc2..f194ffc4699e 100644
|
|
--- a/drivers/target/iscsi/iscsi_target.c
|
|
+++ b/drivers/target/iscsi/iscsi_target.c
|
|
@@ -1165,7 +1165,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
|
|
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
|
|
conn->cid);
|
|
|
|
- target_get_sess_cmd(&cmd->se_cmd, true);
|
|
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
|
|
+ return iscsit_add_reject_cmd(cmd,
|
|
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
|
|
|
|
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
|
|
scsilun_to_int(&hdr->lun));
|
|
@@ -2002,7 +2004,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
|
|
conn->sess->se_sess, 0, DMA_NONE,
|
|
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
|
|
|
|
- target_get_sess_cmd(&cmd->se_cmd, true);
|
|
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
|
|
+ return iscsit_add_reject_cmd(cmd,
|
|
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
|
|
|
|
/*
|
|
* TASK_REASSIGN for ERL=2 / connection stays inside of
|
|
@@ -4232,6 +4236,8 @@ int iscsit_close_connection(
|
|
* must wait until they have completed.
|
|
*/
|
|
iscsit_check_conn_usage_count(conn);
|
|
+ target_sess_cmd_list_set_waiting(sess->se_sess);
|
|
+ target_wait_for_sess_cmds(sess->se_sess);
|
|
|
|
ahash_request_free(conn->conn_tx_hash);
|
|
if (conn->conn_rx_hash) {
|
|
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
|
|
index 51ddca2033e0..8fe9b12a07a4 100644
|
|
--- a/drivers/target/iscsi/iscsi_target_auth.c
|
|
+++ b/drivers/target/iscsi/iscsi_target_auth.c
|
|
@@ -70,7 +70,7 @@ static int chap_check_algorithm(const char *a_str)
|
|
if (!token)
|
|
goto out;
|
|
|
|
- if (!strncmp(token, "5", 1)) {
|
|
+ if (!strcmp(token, "5")) {
|
|
pr_debug("Selected MD5 Algorithm\n");
|
|
kfree(orig);
|
|
return CHAP_DIGEST_MD5;
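
A small stand-alone illustration of why the CHAP hunk above replaces strncmp() with strcmp(): the length-limited compare accepts any value that merely begins with the wanted token, while strcmp() demands an exact match. The value "55" is only an example of a malformed algorithm identifier.

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *token = "55";       /* not a valid algorithm identifier */

        printf("strncmp: %s\n", !strncmp(token, "5", 1) ? "match" : "no match");
        printf("strcmp:  %s\n", !strcmp(token, "5") ? "match" : "no match");
        return 0;                       /* prints "match", then "no match" */
}
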
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
|
|
index 7f06a62f8661..eda8b4736c15 100644
|
|
--- a/drivers/target/target_core_transport.c
|
|
+++ b/drivers/target/target_core_transport.c
|
|
@@ -584,6 +584,15 @@ void transport_free_session(struct se_session *se_sess)
|
|
}
|
|
EXPORT_SYMBOL(transport_free_session);
|
|
|
|
+static int target_release_res(struct se_device *dev, void *data)
|
|
+{
|
|
+ struct se_session *sess = data;
|
|
+
|
|
+ if (dev->reservation_holder == sess)
|
|
+ target_release_reservation(dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
void transport_deregister_session(struct se_session *se_sess)
|
|
{
|
|
struct se_portal_group *se_tpg = se_sess->se_tpg;
|
|
@@ -600,6 +609,12 @@ void transport_deregister_session(struct se_session *se_sess)
|
|
se_sess->fabric_sess_ptr = NULL;
|
|
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
|
|
|
|
+ /*
|
|
+ * Since the session is being removed, release SPC-2
|
|
+ * reservations held by the session that is disappearing.
|
|
+ */
|
|
+ target_for_each_device(target_release_res, se_sess);
|
|
+
|
|
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
|
|
se_tpg->se_tpg_tfo->fabric_name);
|
|
/*
|
|
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
|
|
index 9f57736fe15e..88a5aa6624b4 100644
|
|
--- a/drivers/vhost/vsock.c
|
|
+++ b/drivers/vhost/vsock.c
|
|
@@ -437,7 +437,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
|
|
virtio_transport_deliver_tap_pkt(pkt);
|
|
|
|
/* Only accept correctly addressed packets */
|
|
- if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
|
|
+ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
|
|
+ le64_to_cpu(pkt->hdr.dst_cid) ==
|
|
+ vhost_transport_get_local_cid())
|
|
virtio_transport_recv_pkt(pkt);
|
|
else
|
|
virtio_transport_free_pkt(pkt);
|
|
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
|
|
index 5ce51026989a..ba5d535a6db2 100644
|
|
--- a/drivers/watchdog/imx7ulp_wdt.c
|
|
+++ b/drivers/watchdog/imx7ulp_wdt.c
|
|
@@ -106,12 +106,28 @@ static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
|
|
return 0;
|
|
}
|
|
|
|
+static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
|
|
+ unsigned long action, void *data)
|
|
+{
|
|
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
|
|
+
|
|
+ imx7ulp_wdt_enable(wdt->base, true);
|
|
+ imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
|
|
+
|
|
+ /* wait for wdog to fire */
|
|
+ while (true)
|
|
+ ;
|
|
+
|
|
+ return NOTIFY_DONE;
|
|
+}
|
|
+
|
|
static const struct watchdog_ops imx7ulp_wdt_ops = {
|
|
.owner = THIS_MODULE,
|
|
.start = imx7ulp_wdt_start,
|
|
.stop = imx7ulp_wdt_stop,
|
|
.ping = imx7ulp_wdt_ping,
|
|
.set_timeout = imx7ulp_wdt_set_timeout,
|
|
+ .restart = imx7ulp_wdt_restart,
|
|
};
|
|
|
|
static const struct watchdog_info imx7ulp_wdt_info = {
|
|
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
|
|
index dbd2ad4c9294..62483a99105c 100644
|
|
--- a/drivers/watchdog/watchdog_dev.c
|
|
+++ b/drivers/watchdog/watchdog_dev.c
|
|
@@ -34,7 +34,6 @@
|
|
#include <linux/init.h> /* For __init/__exit/... */
|
|
#include <linux/hrtimer.h> /* For hrtimers */
|
|
#include <linux/kernel.h> /* For printk/panic/... */
|
|
-#include <linux/kref.h> /* For data references */
|
|
#include <linux/kthread.h> /* For kthread_work */
|
|
#include <linux/miscdevice.h> /* For handling misc devices */
|
|
#include <linux/module.h> /* For module stuff/... */
|
|
@@ -52,14 +51,14 @@
|
|
|
|
/*
|
|
* struct watchdog_core_data - watchdog core internal data
|
|
- * @kref: Reference count.
|
|
+ * @dev: The watchdog's internal device
|
|
* @cdev: The watchdog's Character device.
|
|
* @wdd: Pointer to watchdog device.
|
|
* @lock: Lock for watchdog core.
|
|
* @status: Watchdog core internal status bits.
|
|
*/
|
|
struct watchdog_core_data {
|
|
- struct kref kref;
|
|
+ struct device dev;
|
|
struct cdev cdev;
|
|
struct watchdog_device *wdd;
|
|
struct mutex lock;
|
|
@@ -158,7 +157,8 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd)
|
|
ktime_t t = watchdog_next_keepalive(wdd);
|
|
|
|
if (t > 0)
|
|
- hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
|
|
+ hrtimer_start(&wd_data->timer, t,
|
|
+ HRTIMER_MODE_REL_HARD);
|
|
} else {
|
|
hrtimer_cancel(&wd_data->timer);
|
|
}
|
|
@@ -177,7 +177,7 @@ static int __watchdog_ping(struct watchdog_device *wdd)
|
|
if (ktime_after(earliest_keepalive, now)) {
|
|
hrtimer_start(&wd_data->timer,
|
|
ktime_sub(earliest_keepalive, now),
|
|
- HRTIMER_MODE_REL);
|
|
+ HRTIMER_MODE_REL_HARD);
|
|
return 0;
|
|
}
|
|
|
|
@@ -839,7 +839,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
|
|
file->private_data = wd_data;
|
|
|
|
if (!hw_running)
|
|
- kref_get(&wd_data->kref);
|
|
+ get_device(&wd_data->dev);
|
|
|
|
/*
|
|
* open_timeout only applies for the first open from
|
|
@@ -860,11 +860,11 @@ out_clear:
|
|
return err;
|
|
}
|
|
|
|
-static void watchdog_core_data_release(struct kref *kref)
|
|
+static void watchdog_core_data_release(struct device *dev)
|
|
{
|
|
struct watchdog_core_data *wd_data;
|
|
|
|
- wd_data = container_of(kref, struct watchdog_core_data, kref);
|
|
+ wd_data = container_of(dev, struct watchdog_core_data, dev);
|
|
|
|
kfree(wd_data);
|
|
}
|
|
@@ -924,7 +924,7 @@ done:
|
|
*/
|
|
if (!running) {
|
|
module_put(wd_data->cdev.owner);
|
|
- kref_put(&wd_data->kref, watchdog_core_data_release);
|
|
+ put_device(&wd_data->dev);
|
|
}
|
|
return 0;
|
|
}
|
|
@@ -943,17 +943,22 @@ static struct miscdevice watchdog_miscdev = {
|
|
.fops = &watchdog_fops,
|
|
};
|
|
|
|
+static struct class watchdog_class = {
|
|
+ .name = "watchdog",
|
|
+ .owner = THIS_MODULE,
|
|
+ .dev_groups = wdt_groups,
|
|
+};
|
|
+
|
|
/*
|
|
* watchdog_cdev_register: register watchdog character device
|
|
* @wdd: watchdog device
|
|
- * @devno: character device number
|
|
*
|
|
* Register a watchdog character device including handling the legacy
|
|
* /dev/watchdog node. /dev/watchdog is actually a miscdevice and
|
|
* thus we set it up like that.
|
|
*/
|
|
|
|
-static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
|
|
+static int watchdog_cdev_register(struct watchdog_device *wdd)
|
|
{
|
|
struct watchdog_core_data *wd_data;
|
|
int err;
|
|
@@ -961,7 +966,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
|
|
wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
|
|
if (!wd_data)
|
|
return -ENOMEM;
|
|
- kref_init(&wd_data->kref);
|
|
mutex_init(&wd_data->lock);
|
|
|
|
wd_data->wdd = wdd;
|
|
@@ -971,7 +975,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
|
|
return -ENODEV;
|
|
|
|
kthread_init_work(&wd_data->work, watchdog_ping_work);
|
|
- hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
|
+ hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
|
|
wd_data->timer.function = watchdog_timer_expired;
|
|
|
|
if (wdd->id == 0) {
|
|
@@ -990,23 +994,33 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
|
|
}
|
|
}
|
|
|
|
+ device_initialize(&wd_data->dev);
|
|
+ wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
|
|
+ wd_data->dev.class = &watchdog_class;
|
|
+ wd_data->dev.parent = wdd->parent;
|
|
+ wd_data->dev.groups = wdd->groups;
|
|
+ wd_data->dev.release = watchdog_core_data_release;
|
|
+ dev_set_drvdata(&wd_data->dev, wdd);
|
|
+ dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
|
|
+
|
|
/* Fill in the data structures */
|
|
cdev_init(&wd_data->cdev, &watchdog_fops);
|
|
- wd_data->cdev.owner = wdd->ops->owner;
|
|
|
|
/* Add the device */
|
|
- err = cdev_add(&wd_data->cdev, devno, 1);
|
|
+ err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
|
|
if (err) {
|
|
pr_err("watchdog%d unable to add device %d:%d\n",
|
|
wdd->id, MAJOR(watchdog_devt), wdd->id);
|
|
if (wdd->id == 0) {
|
|
misc_deregister(&watchdog_miscdev);
|
|
old_wd_data = NULL;
|
|
- kref_put(&wd_data->kref, watchdog_core_data_release);
|
|
+ put_device(&wd_data->dev);
|
|
}
|
|
return err;
|
|
}
|
|
|
|
+ wd_data->cdev.owner = wdd->ops->owner;
|
|
+
|
|
/* Record time of most recent heartbeat as 'just before now'. */
|
|
wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
|
|
watchdog_set_open_deadline(wd_data);
|
|
@@ -1017,9 +1031,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
|
|
*/
|
|
if (watchdog_hw_running(wdd)) {
|
|
__module_get(wdd->ops->owner);
|
|
- kref_get(&wd_data->kref);
|
|
+ get_device(&wd_data->dev);
|
|
if (handle_boot_enabled)
|
|
- hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
|
|
+ hrtimer_start(&wd_data->timer, 0,
|
|
+ HRTIMER_MODE_REL_HARD);
|
|
else
|
|
pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
|
|
wdd->id);
|
|
@@ -1040,7 +1055,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
|
|
{
|
|
struct watchdog_core_data *wd_data = wdd->wd_data;
|
|
|
|
- cdev_del(&wd_data->cdev);
|
|
+ cdev_device_del(&wd_data->cdev, &wd_data->dev);
|
|
if (wdd->id == 0) {
|
|
misc_deregister(&watchdog_miscdev);
|
|
old_wd_data = NULL;
|
|
@@ -1059,15 +1074,9 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
|
|
hrtimer_cancel(&wd_data->timer);
|
|
kthread_cancel_work_sync(&wd_data->work);
|
|
|
|
- kref_put(&wd_data->kref, watchdog_core_data_release);
|
|
+ put_device(&wd_data->dev);
|
|
}
|
|
|
|
-static struct class watchdog_class = {
|
|
- .name = "watchdog",
|
|
- .owner = THIS_MODULE,
|
|
- .dev_groups = wdt_groups,
|
|
-};
|
|
-
|
|
static int watchdog_reboot_notifier(struct notifier_block *nb,
|
|
unsigned long code, void *data)
|
|
{
|
|
@@ -1098,27 +1107,14 @@ static int watchdog_reboot_notifier(struct notifier_block *nb,
|
|
|
|
int watchdog_dev_register(struct watchdog_device *wdd)
|
|
{
|
|
- struct device *dev;
|
|
- dev_t devno;
|
|
int ret;
|
|
|
|
- devno = MKDEV(MAJOR(watchdog_devt), wdd->id);
|
|
-
|
|
- ret = watchdog_cdev_register(wdd, devno);
|
|
+ ret = watchdog_cdev_register(wdd);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- dev = device_create_with_groups(&watchdog_class, wdd->parent,
|
|
- devno, wdd, wdd->groups,
|
|
- "watchdog%d", wdd->id);
|
|
- if (IS_ERR(dev)) {
|
|
- watchdog_cdev_unregister(wdd);
|
|
- return PTR_ERR(dev);
|
|
- }
|
|
-
|
|
ret = watchdog_register_pretimeout(wdd);
|
|
if (ret) {
|
|
- device_destroy(&watchdog_class, devno);
|
|
watchdog_cdev_unregister(wdd);
|
|
return ret;
|
|
}
|
|
@@ -1126,7 +1122,8 @@ int watchdog_dev_register(struct watchdog_device *wdd)
|
|
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
|
|
wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
|
|
|
|
- ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
|
|
+ ret = devm_register_reboot_notifier(&wdd->wd_data->dev,
|
|
+ &wdd->reboot_nb);
|
|
if (ret) {
|
|
pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
|
|
wdd->id, ret);
|
|
@@ -1148,7 +1145,6 @@ int watchdog_dev_register(struct watchdog_device *wdd)
|
|
void watchdog_dev_unregister(struct watchdog_device *wdd)
|
|
{
|
|
watchdog_unregister_pretimeout(wdd);
|
|
- device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
|
|
watchdog_cdev_unregister(wdd);
|
|
}
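
A minimal sketch, with hypothetical names, of the lifetime pattern the watchdog hunks above move to: the private data embeds a struct device, supplies a release callback, and relies on get_device()/put_device() instead of an open-coded kref, so the final put invokes the release function, which frees the object.

#include <linux/device.h>
#include <linux/slab.h>

struct demo_data {
        struct device dev;
        int payload;
};

static void demo_release(struct device *dev)
{
        struct demo_data *d = container_of(dev, struct demo_data, dev);

        kfree(d);                       /* runs on the final put_device() */
}

static struct demo_data *demo_create(void)
{
        struct demo_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
                return NULL;
        device_initialize(&d->dev);
        d->dev.release = demo_release;
        return d;                       /* drop the reference with put_device(&d->dev) */
}
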
|
|
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
|
|
index 1a135d1b85bd..07d8ace61f77 100644
|
|
--- a/fs/cifs/cifsfs.c
|
|
+++ b/fs/cifs/cifsfs.c
|
|
@@ -119,6 +119,7 @@ extern mempool_t *cifs_mid_poolp;
|
|
|
|
struct workqueue_struct *cifsiod_wq;
|
|
struct workqueue_struct *decrypt_wq;
|
|
+struct workqueue_struct *fileinfo_put_wq;
|
|
struct workqueue_struct *cifsoplockd_wq;
|
|
__u32 cifs_lock_secret;
|
|
|
|
@@ -1554,11 +1555,18 @@ init_cifs(void)
|
|
goto out_destroy_cifsiod_wq;
|
|
}
|
|
|
|
+ fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
|
|
+ WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
|
|
+ if (!fileinfo_put_wq) {
|
|
+ rc = -ENOMEM;
|
|
+ goto out_destroy_decrypt_wq;
|
|
+ }
|
|
+
|
|
cifsoplockd_wq = alloc_workqueue("cifsoplockd",
|
|
WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
|
|
if (!cifsoplockd_wq) {
|
|
rc = -ENOMEM;
|
|
- goto out_destroy_decrypt_wq;
|
|
+ goto out_destroy_fileinfo_put_wq;
|
|
}
|
|
|
|
rc = cifs_fscache_register();
|
|
@@ -1624,6 +1632,8 @@ out_unreg_fscache:
|
|
cifs_fscache_unregister();
|
|
out_destroy_cifsoplockd_wq:
|
|
destroy_workqueue(cifsoplockd_wq);
|
|
+out_destroy_fileinfo_put_wq:
|
|
+ destroy_workqueue(fileinfo_put_wq);
|
|
out_destroy_decrypt_wq:
|
|
destroy_workqueue(decrypt_wq);
|
|
out_destroy_cifsiod_wq:
|
|
@@ -1653,6 +1663,7 @@ exit_cifs(void)
|
|
cifs_fscache_unregister();
|
|
destroy_workqueue(cifsoplockd_wq);
|
|
destroy_workqueue(decrypt_wq);
|
|
+ destroy_workqueue(fileinfo_put_wq);
|
|
destroy_workqueue(cifsiod_wq);
|
|
cifs_proc_clean();
|
|
}
|
|
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
|
|
index 5d2dd04b55a6..f55e53486e74 100644
|
|
--- a/fs/cifs/cifsglob.h
|
|
+++ b/fs/cifs/cifsglob.h
|
|
@@ -1265,6 +1265,7 @@ struct cifsFileInfo {
|
|
struct mutex fh_mutex; /* prevents reopen race after dead ses*/
|
|
struct cifs_search_info srch_inf;
|
|
struct work_struct oplock_break; /* work for oplock breaks */
|
|
+ struct work_struct put; /* work for the final part of _put */
|
|
};
|
|
|
|
struct cifs_io_parms {
|
|
@@ -1370,7 +1371,8 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
|
|
}
|
|
|
|
struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
|
|
-void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
|
|
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr,
|
|
+ bool offload);
|
|
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
|
|
|
|
#define CIFS_CACHE_READ_FLG 1
|
|
@@ -1908,6 +1910,7 @@ void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
|
|
extern const struct slow_work_ops cifs_oplock_break_ops;
|
|
extern struct workqueue_struct *cifsiod_wq;
|
|
extern struct workqueue_struct *decrypt_wq;
|
|
+extern struct workqueue_struct *fileinfo_put_wq;
|
|
extern struct workqueue_struct *cifsoplockd_wq;
|
|
extern __u32 cifs_lock_secret;
|
|
|
|
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
|
|
index 20c70cbab1ad..02451d085ddd 100644
|
|
--- a/fs/cifs/connect.c
|
|
+++ b/fs/cifs/connect.c
|
|
@@ -387,7 +387,7 @@ static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
|
|
#ifdef CONFIG_CIFS_DFS_UPCALL
|
|
struct super_cb_data {
|
|
struct TCP_Server_Info *server;
|
|
- struct cifs_sb_info *cifs_sb;
|
|
+ struct super_block *sb;
|
|
};
|
|
|
|
/* These functions must be called with server->srv_mutex held */
|
|
@@ -398,25 +398,39 @@ static void super_cb(struct super_block *sb, void *arg)
|
|
struct cifs_sb_info *cifs_sb;
|
|
struct cifs_tcon *tcon;
|
|
|
|
- if (d->cifs_sb)
|
|
+ if (d->sb)
|
|
return;
|
|
|
|
cifs_sb = CIFS_SB(sb);
|
|
tcon = cifs_sb_master_tcon(cifs_sb);
|
|
if (tcon->ses->server == d->server)
|
|
- d->cifs_sb = cifs_sb;
|
|
+ d->sb = sb;
|
|
}
|
|
|
|
-static inline struct cifs_sb_info *
|
|
-find_super_by_tcp(struct TCP_Server_Info *server)
|
|
+static struct super_block *get_tcp_super(struct TCP_Server_Info *server)
|
|
{
|
|
struct super_cb_data d = {
|
|
.server = server,
|
|
- .cifs_sb = NULL,
|
|
+ .sb = NULL,
|
|
};
|
|
|
|
iterate_supers_type(&cifs_fs_type, super_cb, &d);
|
|
- return d.cifs_sb ? d.cifs_sb : ERR_PTR(-ENOENT);
|
|
+
|
|
+ if (unlikely(!d.sb))
|
|
+ return ERR_PTR(-ENOENT);
|
|
+ /*
|
|
+ * Grab an active reference in order to prevent automounts (DFS links)
|
|
+ * of expiring and then freeing up our cifs superblock pointer while
|
|
+ * we're doing failover.
|
|
+ */
|
|
+ cifs_sb_active(d.sb);
|
|
+ return d.sb;
|
|
+}
|
|
+
|
|
+static inline void put_tcp_super(struct super_block *sb)
|
|
+{
|
|
+ if (!IS_ERR_OR_NULL(sb))
|
|
+ cifs_sb_deactive(sb);
|
|
}
|
|
|
|
static void reconn_inval_dfs_target(struct TCP_Server_Info *server,
|
|
@@ -480,6 +494,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
|
|
struct mid_q_entry *mid_entry;
|
|
struct list_head retry_list;
|
|
#ifdef CONFIG_CIFS_DFS_UPCALL
|
|
+ struct super_block *sb = NULL;
|
|
struct cifs_sb_info *cifs_sb = NULL;
|
|
struct dfs_cache_tgt_list tgt_list = {0};
|
|
struct dfs_cache_tgt_iterator *tgt_it = NULL;
|
|
@@ -489,13 +504,15 @@ cifs_reconnect(struct TCP_Server_Info *server)
|
|
server->nr_targets = 1;
|
|
#ifdef CONFIG_CIFS_DFS_UPCALL
|
|
spin_unlock(&GlobalMid_Lock);
|
|
- cifs_sb = find_super_by_tcp(server);
|
|
- if (IS_ERR(cifs_sb)) {
|
|
- rc = PTR_ERR(cifs_sb);
|
|
+ sb = get_tcp_super(server);
|
|
+ if (IS_ERR(sb)) {
|
|
+ rc = PTR_ERR(sb);
|
|
cifs_dbg(FYI, "%s: will not do DFS failover: rc = %d\n",
|
|
__func__, rc);
|
|
- cifs_sb = NULL;
|
|
+ sb = NULL;
|
|
} else {
|
|
+ cifs_sb = CIFS_SB(sb);
|
|
+
|
|
rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list, &tgt_it);
|
|
if (rc && (rc != -EOPNOTSUPP)) {
|
|
cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n",
|
|
@@ -512,6 +529,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
|
|
/* the demux thread will exit normally
|
|
next time through the loop */
|
|
spin_unlock(&GlobalMid_Lock);
|
|
+#ifdef CONFIG_CIFS_DFS_UPCALL
|
|
+ dfs_cache_free_tgts(&tgt_list);
|
|
+ put_tcp_super(sb);
|
|
+#endif
|
|
return rc;
|
|
} else
|
|
server->tcpStatus = CifsNeedReconnect;
|
|
@@ -638,7 +659,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
|
|
__func__, rc);
|
|
}
|
|
dfs_cache_free_tgts(&tgt_list);
|
|
+
|
|
}
|
|
+
|
|
+ put_tcp_super(sb);
|
|
#endif
|
|
if (server->tcpStatus == CifsNeedNegotiate)
|
|
mod_delayed_work(cifsiod_wq, &server->echo, 0);
|
|
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
|
|
index c32650f14c9b..969543034b4d 100644
|
|
--- a/fs/cifs/file.c
|
|
+++ b/fs/cifs/file.c
|
|
@@ -288,6 +288,8 @@ cifs_down_write(struct rw_semaphore *sem)
|
|
msleep(10);
|
|
}
|
|
|
|
+static void cifsFileInfo_put_work(struct work_struct *work);
|
|
+
|
|
struct cifsFileInfo *
|
|
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
|
|
struct tcon_link *tlink, __u32 oplock)
|
|
@@ -322,6 +324,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
|
|
cfile->invalidHandle = false;
|
|
cfile->tlink = cifs_get_tlink(tlink);
|
|
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
|
|
+ INIT_WORK(&cfile->put, cifsFileInfo_put_work);
|
|
mutex_init(&cfile->fh_mutex);
|
|
spin_lock_init(&cfile->file_info_lock);
|
|
|
|
@@ -376,6 +379,41 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
|
|
return cifs_file;
|
|
}
|
|
|
|
+static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
|
|
+{
|
|
+ struct inode *inode = d_inode(cifs_file->dentry);
|
|
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
|
|
+ struct cifsLockInfo *li, *tmp;
|
|
+ struct super_block *sb = inode->i_sb;
|
|
+
|
|
+ /*
|
|
+ * Delete any outstanding lock records. We'll lose them when the file
|
|
+ * is closed anyway.
|
|
+ */
|
|
+ cifs_down_write(&cifsi->lock_sem);
|
|
+ list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
|
|
+ list_del(&li->llist);
|
|
+ cifs_del_lock_waiters(li);
|
|
+ kfree(li);
|
|
+ }
|
|
+ list_del(&cifs_file->llist->llist);
|
|
+ kfree(cifs_file->llist);
|
|
+ up_write(&cifsi->lock_sem);
|
|
+
|
|
+ cifs_put_tlink(cifs_file->tlink);
|
|
+ dput(cifs_file->dentry);
|
|
+ cifs_sb_deactive(sb);
|
|
+ kfree(cifs_file);
|
|
+}
|
|
+
|
|
+static void cifsFileInfo_put_work(struct work_struct *work)
|
|
+{
|
|
+ struct cifsFileInfo *cifs_file = container_of(work,
|
|
+ struct cifsFileInfo, put);
|
|
+
|
|
+ cifsFileInfo_put_final(cifs_file);
|
|
+}
|
|
+
|
|
/**
|
|
* cifsFileInfo_put - release a reference of file priv data
|
|
*
|
|
@@ -383,15 +421,15 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
|
|
*/
|
|
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
|
|
{
|
|
- _cifsFileInfo_put(cifs_file, true);
|
|
+ _cifsFileInfo_put(cifs_file, true, true);
|
|
}
|
|
|
|
/**
|
|
* _cifsFileInfo_put - release a reference of file priv data
|
|
*
|
|
* This may involve closing the filehandle @cifs_file out on the
|
|
- * server. Must be called without holding tcon->open_file_lock and
|
|
- * cifs_file->file_info_lock.
|
|
+ * server. Must be called without holding tcon->open_file_lock,
|
|
+ * cinode->open_file_lock and cifs_file->file_info_lock.
|
|
*
|
|
* If @wait_for_oplock_handler is true and we are releasing the last
|
|
* reference, wait for any running oplock break handler of the file
|
|
@@ -399,7 +437,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
|
|
* oplock break handler, you need to pass false.
|
|
*
|
|
*/
|
|
-void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
|
|
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
|
|
+ bool wait_oplock_handler, bool offload)
|
|
{
|
|
struct inode *inode = d_inode(cifs_file->dentry);
|
|
struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
|
|
@@ -407,7 +446,6 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
|
|
struct cifsInodeInfo *cifsi = CIFS_I(inode);
|
|
struct super_block *sb = inode->i_sb;
|
|
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
|
|
- struct cifsLockInfo *li, *tmp;
|
|
struct cifs_fid fid;
|
|
struct cifs_pending_open open;
|
|
bool oplock_break_cancelled;
|
|
@@ -468,24 +506,10 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
|
|
|
|
cifs_del_pending_open(&open);
|
|
|
|
- /*
|
|
- * Delete any outstanding lock records. We'll lose them when the file
|
|
- * is closed anyway.
|
|
- */
|
|
- cifs_down_write(&cifsi->lock_sem);
|
|
- list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
|
|
- list_del(&li->llist);
|
|
- cifs_del_lock_waiters(li);
|
|
- kfree(li);
|
|
- }
|
|
- list_del(&cifs_file->llist->llist);
|
|
- kfree(cifs_file->llist);
|
|
- up_write(&cifsi->lock_sem);
|
|
-
|
|
- cifs_put_tlink(cifs_file->tlink);
|
|
- dput(cifs_file->dentry);
|
|
- cifs_sb_deactive(sb);
|
|
- kfree(cifs_file);
|
|
+ if (offload)
|
|
+ queue_work(fileinfo_put_wq, &cifs_file->put);
|
|
+ else
|
|
+ cifsFileInfo_put_final(cifs_file);
|
|
}
|
|
|
|
int cifs_open(struct inode *inode, struct file *file)
|
|
@@ -816,7 +840,7 @@ reopen_error_exit:
|
|
int cifs_close(struct inode *inode, struct file *file)
|
|
{
|
|
if (file->private_data != NULL) {
|
|
- cifsFileInfo_put(file->private_data);
|
|
+ _cifsFileInfo_put(file->private_data, true, false);
|
|
file->private_data = NULL;
|
|
}
|
|
|
|
@@ -4688,7 +4712,7 @@ void cifs_oplock_break(struct work_struct *work)
|
|
cinode);
|
|
cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
|
|
}
|
|
- _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
|
|
+ _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
|
|
cifs_done_oplock_break(cinode);
|
|
}
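
A sketch of the deferral pattern used above, with made-up names and the generic system workqueue standing in for the dedicated cifsfileinfoput queue: the final, potentially slow part of a release is handed to a work item so the caller does not block on it, and container_of() recovers the object inside the worker.

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_obj {
        struct work_struct put_work;
        /* ... payload ... */
};

static void demo_put_worker(struct work_struct *work)
{
        struct demo_obj *obj = container_of(work, struct demo_obj, put_work);

        /* slow teardown runs here, outside the caller's context */
        kfree(obj);
}

static struct demo_obj *demo_alloc(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                INIT_WORK(&obj->put_work, demo_put_worker);
        return obj;
}

static void demo_put(struct demo_obj *obj, bool offload)
{
        if (offload)
                queue_work(system_wq, &obj->put_work);
        else
                demo_put_worker(&obj->put_work);
}
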
|
|
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
|
|
index 53134e4509b8..8bba6cd5e870 100644
|
|
--- a/fs/ext4/inode.c
|
|
+++ b/fs/ext4/inode.c
|
|
@@ -3532,8 +3532,14 @@ retry:
|
|
return ret;
|
|
}
|
|
|
|
+ /*
|
|
+ * Writes that span EOF might trigger an I/O size update on completion,
|
|
+ * so consider them to be dirty for the purposes of O_DSYNC, even if
|
|
+ * there is no other metadata changes being made or are pending here.
|
|
+ */
|
|
iomap->flags = 0;
|
|
- if (ext4_inode_datasync_dirty(inode))
|
|
+ if (ext4_inode_datasync_dirty(inode) ||
|
|
+ offset + length > i_size_read(inode))
|
|
iomap->flags |= IOMAP_F_DIRTY;
|
|
iomap->bdev = inode->i_sb->s_bdev;
|
|
iomap->dax_dev = sbi->s_daxdev;
|
|
@@ -3836,7 +3842,13 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
|
|
* writes & truncates and since we take care of writing back page cache,
|
|
* we are protected against page writeback as well.
|
|
*/
|
|
- inode_lock_shared(inode);
|
|
+ if (iocb->ki_flags & IOCB_NOWAIT) {
|
|
+ if (!inode_trylock_shared(inode))
|
|
+ return -EAGAIN;
|
|
+ } else {
|
|
+ inode_lock_shared(inode);
|
|
+ }
|
|
+
|
|
ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
|
|
iocb->ki_pos + count - 1);
|
|
if (ret)
|
|
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
|
|
index 4024790028aa..9046432b87c2 100644
|
|
--- a/fs/f2fs/f2fs.h
|
|
+++ b/fs/f2fs/f2fs.h
|
|
@@ -1289,6 +1289,7 @@ struct f2fs_sb_info {
|
|
unsigned int gc_mode; /* current GC state */
|
|
unsigned int next_victim_seg[2]; /* next segment in victim section */
|
|
/* for skip statistic */
|
|
+ unsigned int atomic_files; /* # of opened atomic file */
|
|
unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
|
|
unsigned long long skipped_gc_rwsem; /* FG_GC only */
|
|
|
|
@@ -2704,6 +2705,20 @@ static inline void clear_file(struct inode *inode, int type)
|
|
f2fs_mark_inode_dirty_sync(inode, true);
|
|
}
|
|
|
|
+static inline bool f2fs_is_time_consistent(struct inode *inode)
|
|
+{
|
|
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
|
|
+ return false;
|
|
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
|
|
+ return false;
|
|
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
|
|
+ return false;
|
|
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
|
|
+ &F2FS_I(inode)->i_crtime))
|
|
+ return false;
|
|
+ return true;
|
|
+}
|
|
+
|
|
static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
|
|
{
|
|
bool ret;
|
|
@@ -2721,14 +2736,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
|
|
i_size_read(inode) & ~PAGE_MASK)
|
|
return false;
|
|
|
|
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
|
|
- return false;
|
|
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
|
|
- return false;
|
|
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
|
|
- return false;
|
|
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
|
|
- &F2FS_I(inode)->i_crtime))
|
|
+ if (!f2fs_is_time_consistent(inode))
|
|
return false;
|
|
|
|
down_read(&F2FS_I(inode)->i_sem);
|
|
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
|
|
index 29bc0a542759..8ed8e4328bd1 100644
|
|
--- a/fs/f2fs/file.c
|
|
+++ b/fs/f2fs/file.c
|
|
@@ -1890,6 +1890,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
|
|
spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
|
|
if (list_empty(&fi->inmem_ilist))
|
|
list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
|
|
+ sbi->atomic_files++;
|
|
spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
|
|
|
|
/* add inode in inmem_list first and set atomic_file */
|
|
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
|
|
index db4fec30c30d..386ad54c13c3 100644
|
|
--- a/fs/f2fs/inode.c
|
|
+++ b/fs/f2fs/inode.c
|
|
@@ -615,7 +615,11 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
|
|
inode->i_ino == F2FS_META_INO(sbi))
|
|
return 0;
|
|
|
|
- if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
|
|
+ /*
|
|
+ * atime could be updated without dirtying f2fs inode in lazytime mode
|
|
+ */
|
|
+ if (f2fs_is_time_consistent(inode) &&
|
|
+ !is_inode_flag_set(inode, FI_DIRTY_INODE))
|
|
return 0;
|
|
|
|
if (!f2fs_is_checkpoint_ready(sbi))
|
|
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
|
|
index 4faf06e8bf89..a1c507b0b4ac 100644
|
|
--- a/fs/f2fs/namei.c
|
|
+++ b/fs/f2fs/namei.c
|
|
@@ -981,7 +981,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
if (!old_dir_entry || whiteout)
|
|
file_lost_pino(old_inode);
|
|
else
|
|
- F2FS_I(old_inode)->i_pino = new_dir->i_ino;
|
|
+ /* adjust dir's i_pino to pass fsck check */
|
|
+ f2fs_i_pino_write(old_inode, new_dir->i_ino);
|
|
up_write(&F2FS_I(old_inode)->i_sem);
|
|
|
|
old_inode->i_ctime = current_time(old_inode);
|
|
@@ -1141,7 +1142,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
f2fs_set_link(old_dir, old_entry, old_page, new_inode);
|
|
|
|
down_write(&F2FS_I(old_inode)->i_sem);
|
|
- file_lost_pino(old_inode);
|
|
+ if (!old_dir_entry)
|
|
+ file_lost_pino(old_inode);
|
|
+ else
|
|
+ /* adjust dir's i_pino to pass fsck check */
|
|
+ f2fs_i_pino_write(old_inode, new_dir->i_ino);
|
|
up_write(&F2FS_I(old_inode)->i_sem);
|
|
|
|
old_dir->i_ctime = current_time(old_dir);
|
|
@@ -1156,7 +1161,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
|
|
|
|
down_write(&F2FS_I(new_inode)->i_sem);
|
|
- file_lost_pino(new_inode);
|
|
+ if (!new_dir_entry)
|
|
+ file_lost_pino(new_inode);
|
|
+ else
|
|
+ /* adjust dir's i_pino to pass fsck check */
|
|
+ f2fs_i_pino_write(new_inode, old_dir->i_ino);
|
|
up_write(&F2FS_I(new_inode)->i_sem);
|
|
|
|
new_dir->i_ctime = current_time(new_dir);
|
|
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
|
|
index 808709581481..7d8578401267 100644
|
|
--- a/fs/f2fs/segment.c
|
|
+++ b/fs/f2fs/segment.c
|
|
@@ -288,6 +288,8 @@ void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
|
|
struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
|
|
struct inode *inode;
|
|
struct f2fs_inode_info *fi;
|
|
+ unsigned int count = sbi->atomic_files;
|
|
+ unsigned int looped = 0;
|
|
next:
|
|
spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
|
|
if (list_empty(head)) {
|
|
@@ -296,22 +298,26 @@ next:
|
|
}
|
|
fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
|
|
inode = igrab(&fi->vfs_inode);
|
|
+ if (inode)
|
|
+ list_move_tail(&fi->inmem_ilist, head);
|
|
spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
|
|
|
|
if (inode) {
|
|
if (gc_failure) {
|
|
- if (fi->i_gc_failures[GC_FAILURE_ATOMIC])
|
|
- goto drop;
|
|
- goto skip;
|
|
+ if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
|
|
+ goto skip;
|
|
}
|
|
-drop:
|
|
set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
|
|
f2fs_drop_inmem_pages(inode);
|
|
+skip:
|
|
iput(inode);
|
|
}
|
|
-skip:
|
|
congestion_wait(BLK_RW_ASYNC, HZ/50);
|
|
cond_resched();
|
|
+ if (gc_failure) {
|
|
+ if (++looped >= count)
|
|
+ return;
|
|
+ }
|
|
goto next;
|
|
}
|
|
|
|
@@ -327,13 +333,16 @@ void f2fs_drop_inmem_pages(struct inode *inode)
|
|
mutex_unlock(&fi->inmem_lock);
|
|
}
|
|
|
|
- clear_inode_flag(inode, FI_ATOMIC_FILE);
|
|
fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
|
|
stat_dec_atomic_write(inode);
|
|
|
|
spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
|
|
if (!list_empty(&fi->inmem_ilist))
|
|
list_del_init(&fi->inmem_ilist);
|
|
+ if (f2fs_is_atomic_file(inode)) {
|
|
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
|
|
+ sbi->atomic_files--;
|
|
+ }
|
|
spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
|
|
}
|
|
|
|
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
|
|
index a478df035651..40306c1eab07 100644
|
|
--- a/fs/hugetlbfs/inode.c
|
|
+++ b/fs/hugetlbfs/inode.c
|
|
@@ -1461,28 +1461,43 @@ static int __init init_hugetlbfs_fs(void)
|
|
sizeof(struct hugetlbfs_inode_info),
|
|
0, SLAB_ACCOUNT, init_once);
|
|
if (hugetlbfs_inode_cachep == NULL)
|
|
- goto out2;
|
|
+ goto out;
|
|
|
|
error = register_filesystem(&hugetlbfs_fs_type);
|
|
if (error)
|
|
- goto out;
|
|
+ goto out_free;
|
|
|
|
+ /* default hstate mount is required */
|
|
+ mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
|
|
+ if (IS_ERR(mnt)) {
|
|
+ error = PTR_ERR(mnt);
|
|
+ goto out_unreg;
|
|
+ }
|
|
+ hugetlbfs_vfsmount[default_hstate_idx] = mnt;
|
|
+
|
|
+ /* other hstates are optional */
|
|
i = 0;
|
|
for_each_hstate(h) {
|
|
- mnt = mount_one_hugetlbfs(h);
|
|
- if (IS_ERR(mnt) && i == 0) {
|
|
- error = PTR_ERR(mnt);
|
|
- goto out;
|
|
+ if (i == default_hstate_idx) {
|
|
+ i++;
|
|
+ continue;
|
|
}
|
|
- hugetlbfs_vfsmount[i] = mnt;
|
|
+
|
|
+ mnt = mount_one_hugetlbfs(h);
|
|
+ if (IS_ERR(mnt))
|
|
+ hugetlbfs_vfsmount[i] = NULL;
|
|
+ else
|
|
+ hugetlbfs_vfsmount[i] = mnt;
|
|
i++;
|
|
}
|
|
|
|
return 0;
|
|
|
|
- out:
|
|
+ out_unreg:
|
|
+ (void)unregister_filesystem(&hugetlbfs_fs_type);
|
|
+ out_free:
|
|
kmem_cache_destroy(hugetlbfs_inode_cachep);
|
|
- out2:
|
|
+ out:
|
|
return error;
|
|
}
|
|
fs_initcall(init_hugetlbfs_fs)
|
|
diff --git a/fs/io_uring.c b/fs/io_uring.c
|
|
index a340147387ec..74e786578c77 100644
|
|
--- a/fs/io_uring.c
|
|
+++ b/fs/io_uring.c
|
|
@@ -3773,12 +3773,18 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
|
|
ctx->cq_entries = rings->cq_ring_entries;
|
|
|
|
size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
|
|
- if (size == SIZE_MAX)
|
|
+ if (size == SIZE_MAX) {
|
|
+ io_mem_free(ctx->rings);
|
|
+ ctx->rings = NULL;
|
|
return -EOVERFLOW;
|
|
+ }
|
|
|
|
ctx->sq_sqes = io_mem_alloc(size);
|
|
- if (!ctx->sq_sqes)
|
|
+ if (!ctx->sq_sqes) {
|
|
+ io_mem_free(ctx->rings);
|
|
+ ctx->rings = NULL;
|
|
return -ENOMEM;
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
|
|
index fd46ec83cb04..7b5f76efef02 100644
|
|
--- a/fs/iomap/direct-io.c
|
|
+++ b/fs/iomap/direct-io.c
|
|
@@ -318,7 +318,9 @@ zero_tail:
|
|
if (pad)
|
|
iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
|
|
}
|
|
- return copied ? copied : ret;
|
|
+ if (copied)
|
|
+ return copied;
|
|
+ return ret;
|
|
}
|
|
|
|
static loff_t
|
|
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
|
|
index 132fb92098c7..c43591cd70f1 100644
|
|
--- a/fs/jbd2/commit.c
|
|
+++ b/fs/jbd2/commit.c
|
|
@@ -727,7 +727,6 @@ start_journal_io:
|
|
submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
|
|
}
|
|
cond_resched();
|
|
- stats.run.rs_blocks_logged += bufs;
|
|
|
|
/* Force a new descriptor to be generated next
|
|
time round the loop. */
|
|
@@ -814,6 +813,7 @@ start_journal_io:
|
|
if (unlikely(!buffer_uptodate(bh)))
|
|
err = -EIO;
|
|
jbd2_unfile_log_bh(bh);
|
|
+ stats.run.rs_blocks_logged++;
|
|
|
|
/*
|
|
* The list contains temporary buffer heads created by
|
|
@@ -859,6 +859,7 @@ start_journal_io:
|
|
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
|
|
clear_buffer_jwrite(bh);
|
|
jbd2_unfile_log_bh(bh);
|
|
+ stats.run.rs_blocks_logged++;
|
|
__brelse(bh); /* One for getblk */
|
|
/* AKPM: bforget here */
|
|
}
|
|
@@ -880,6 +881,7 @@ start_journal_io:
|
|
}
|
|
if (cbh)
|
|
err = journal_wait_on_commit_record(journal, cbh);
|
|
+ stats.run.rs_blocks_logged++;
|
|
if (jbd2_has_feature_async_commit(journal) &&
|
|
journal->j_flags & JBD2_BARRIER) {
|
|
blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
|
|
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
|
|
index 3e7da392aa6f..bb981ec76456 100644
|
|
--- a/fs/ocfs2/acl.c
|
|
+++ b/fs/ocfs2/acl.c
|
|
@@ -327,8 +327,8 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
|
|
down_read(&OCFS2_I(inode)->ip_xattr_sem);
|
|
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
|
|
up_read(&OCFS2_I(inode)->ip_xattr_sem);
|
|
- if (IS_ERR(acl) || !acl)
|
|
- return PTR_ERR(acl);
|
|
+ if (IS_ERR_OR_NULL(acl))
|
|
+ return PTR_ERR_OR_ZERO(acl);
|
|
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
|
|
if (ret)
|
|
return ret;
|
|
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
|
|
index 7f0b39da5022..9b96243de081 100644
|
|
--- a/fs/quota/dquot.c
|
|
+++ b/fs/quota/dquot.c
|
|
@@ -2861,68 +2861,73 @@ EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
|
|
static int do_proc_dqstats(struct ctl_table *table, int write,
|
|
void __user *buffer, size_t *lenp, loff_t *ppos)
|
|
{
|
|
- unsigned int type = (int *)table->data - dqstats.stat;
|
|
+ unsigned int type = (unsigned long *)table->data - dqstats.stat;
|
|
+ s64 value = percpu_counter_sum(&dqstats.counter[type]);
|
|
+
|
|
+ /* Filter negative values for non-monotonic counters */
|
|
+ if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
|
|
+ type == DQST_FREE_DQUOTS))
|
|
+ value = 0;
|
|
|
|
/* Update global table */
|
|
- dqstats.stat[type] =
|
|
- percpu_counter_sum_positive(&dqstats.counter[type]);
|
|
- return proc_dointvec(table, write, buffer, lenp, ppos);
|
|
+ dqstats.stat[type] = value;
|
|
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
|
|
}
|
|
|
|
static struct ctl_table fs_dqstats_table[] = {
|
|
{
|
|
.procname = "lookups",
|
|
.data = &dqstats.stat[DQST_LOOKUPS],
|
|
- .maxlen = sizeof(int),
|
|
+ .maxlen = sizeof(unsigned long),
|
|
.mode = 0444,
|
|
.proc_handler = do_proc_dqstats,
|
|
},
|
|
{
|
|
.procname = "drops",
|
|
.data = &dqstats.stat[DQST_DROPS],
|
|
- .maxlen = sizeof(int),
|
|
+ .maxlen = sizeof(unsigned long),
|
|
.mode = 0444,
|
|
.proc_handler = do_proc_dqstats,
|
|
},
|
|
{
|
|
.procname = "reads",
|
|
.data = &dqstats.stat[DQST_READS],
|
|
- .maxlen = sizeof(int),
|
|
+ .maxlen = sizeof(unsigned long),
|
|
.mode = 0444,
|
|
.proc_handler = do_proc_dqstats,
|
|
},
|
|
{
|
|
.procname = "writes",
|
|
.data = &dqstats.stat[DQST_WRITES],
|
|
- .maxlen = sizeof(int),
|
|
+ .maxlen = sizeof(unsigned long),
|
|
.mode = 0444,
|
|
.proc_handler = do_proc_dqstats,
|
|
},
|
|
{
|
|
.procname = "cache_hits",
|
|
.data = &dqstats.stat[DQST_CACHE_HITS],
|
|
- .maxlen = sizeof(int),
|
|
+ .maxlen = sizeof(unsigned long),
|
|
.mode = 0444,
|
|
.proc_handler = do_proc_dqstats,
|
|
},
|
|
{
|
|
.procname = "allocated_dquots",
|
|
.data = &dqstats.stat[DQST_ALLOC_DQUOTS],
|
|
- .maxlen = sizeof(int),
|
|
+ .maxlen = sizeof(unsigned long),
|
|
.mode = 0444,
|
|
.proc_handler = do_proc_dqstats,
|
|
},
|
|
{
|
|
.procname = "free_dquots",
|
|
.data = &dqstats.stat[DQST_FREE_DQUOTS],
|
|
- .maxlen = sizeof(int),
|
|
+ .maxlen = sizeof(unsigned long),
|
|
.mode = 0444,
|
|
.proc_handler = do_proc_dqstats,
|
|
},
|
|
{
|
|
.procname = "syncs",
|
|
.data = &dqstats.stat[DQST_SYNCS],
|
|
- .maxlen = sizeof(int),
|
|
+ .maxlen = sizeof(unsigned long),
|
|
.mode = 0444,
|
|
.proc_handler = do_proc_dqstats,
|
|
},
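
A small sketch, with a hypothetical helper name, of the reporting rule the dquot hunk above applies: a percpu_counter that is both incremented and decremented can sum slightly negative while other CPUs race with the summation, so such counters are clamped to zero before being exported as unsigned values.

#include <linux/percpu_counter.h>
#include <linux/types.h>

static unsigned long demo_read_stat(struct percpu_counter *c, bool can_decrease)
{
        s64 value = percpu_counter_sum(c);

        /* a counter that is also decremented may sum negative under races */
        if (value < 0 && can_decrease)
                value = 0;

        return (unsigned long)value;
}
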
|
|
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
|
|
index f9fd18670e22..d99d166fd892 100644
|
|
--- a/fs/userfaultfd.c
|
|
+++ b/fs/userfaultfd.c
|
|
@@ -1834,13 +1834,12 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
|
|
if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
|
|
goto out;
|
|
features = uffdio_api.features;
|
|
- if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
|
|
- memset(&uffdio_api, 0, sizeof(uffdio_api));
|
|
- if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
|
|
- goto out;
|
|
- ret = -EINVAL;
|
|
- goto out;
|
|
- }
|
|
+ ret = -EINVAL;
|
|
+ if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
|
|
+ goto err_out;
|
|
+ ret = -EPERM;
|
|
+ if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
|
|
+ goto err_out;
|
|
/* report all available features and ioctls to userland */
|
|
uffdio_api.features = UFFD_API_FEATURES;
|
|
uffdio_api.ioctls = UFFD_API_IOCTLS;
|
|
@@ -1853,6 +1852,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
|
|
ret = 0;
|
|
out:
|
|
return ret;
|
|
+err_out:
|
|
+ memset(&uffdio_api, 0, sizeof(uffdio_api));
|
|
+ if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
|
|
+ ret = -EFAULT;
|
|
+ goto out;
|
|
}
|
|
|
|
static long userfaultfd_ioctl(struct file *file, unsigned cmd,
|
|
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
|
|
index 641d07f30a27..7b0d9ad8cb1a 100644
|
|
--- a/fs/xfs/xfs_log.c
|
|
+++ b/fs/xfs/xfs_log.c
|
|
@@ -1495,6 +1495,8 @@ out_free_iclog:
|
|
prev_iclog = iclog->ic_next;
|
|
kmem_free(iclog->ic_data);
|
|
kmem_free(iclog);
|
|
+ if (prev_iclog == log->l_iclog)
|
|
+ break;
|
|
}
|
|
out_free_log:
|
|
kmem_free(log);
|
|
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
|
|
index adf993a3bd58..6a18a97b76a8 100644
|
|
--- a/include/linux/dma-direct.h
|
|
+++ b/include/linux/dma-direct.h
|
|
@@ -3,8 +3,11 @@
|
|
#define _LINUX_DMA_DIRECT_H 1
|
|
|
|
#include <linux/dma-mapping.h>
|
|
+#include <linux/memblock.h> /* for min_low_pfn */
|
|
#include <linux/mem_encrypt.h>
|
|
|
|
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
|
|
+
|
|
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
|
|
#include <asm/dma-direct.h>
|
|
#else
|
|
@@ -24,11 +27,16 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
|
|
|
|
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
|
{
|
|
+ dma_addr_t end = addr + size - 1;
|
|
+
|
|
if (!dev->dma_mask)
|
|
return false;
|
|
|
|
- return addr + size - 1 <=
|
|
- min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
|
|
+ if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
|
|
+ min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
|
|
+ return false;
|
|
+
|
|
+ return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
|
|
}
|
|
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
|
|
|
|
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
|
|
index 4a1c4fca475a..4d450672b7d6 100644
|
|
--- a/include/linux/dma-mapping.h
|
|
+++ b/include/linux/dma-mapping.h
|
|
@@ -162,7 +162,7 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
|
|
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
|
|
void *cpu_addr, size_t size, int *ret);
|
|
|
|
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
|
|
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
|
|
int dma_release_from_global_coherent(int order, void *vaddr);
|
|
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
|
|
size_t size, int *ret);
|
|
@@ -172,7 +172,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
|
|
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
|
|
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
|
|
|
|
-static inline void *dma_alloc_from_global_coherent(ssize_t size,
|
|
+static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
|
|
dma_addr_t *dma_handle)
|
|
{
|
|
return NULL;
|
|
@@ -583,6 +583,10 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
|
|
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
|
|
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
|
{
|
|
+ /* DMA must never operate on areas that might be remapped. */
|
|
+ if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
|
|
+ "rejecting DMA map of vmalloc memory\n"))
|
|
+ return DMA_MAPPING_ERROR;
|
|
debug_dma_map_single(dev, ptr, size);
|
|
return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
|
|
size, dir, attrs);
|
|
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
|
|
index 1b9a51a1bccb..1f98b52118f0 100644
|
|
--- a/include/linux/hrtimer.h
|
|
+++ b/include/linux/hrtimer.h
|
|
@@ -456,12 +456,18 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
|
|
|
|
extern bool hrtimer_active(const struct hrtimer *timer);
|
|
|
|
-/*
|
|
- * Helper function to check, whether the timer is on one of the queues
|
|
+/**
|
|
+ * hrtimer_is_queued = check, whether the timer is on one of the queues
|
|
+ * @timer: Timer to check
|
|
+ *
|
|
+ * Returns: True if the timer is queued, false otherwise
|
|
+ *
|
|
+ * The function can be used lockless, but it gives only a current snapshot.
|
|
*/
|
|
-static inline int hrtimer_is_queued(struct hrtimer *timer)
|
|
+static inline bool hrtimer_is_queued(struct hrtimer *timer)
|
|
{
|
|
- return timer->state & HRTIMER_STATE_ENQUEUED;
|
|
+ /* The READ_ONCE pairs with the update functions of timer->state */
|
|
+ return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
|
|
}
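
A minimal sketch, using made-up helpers, of the convention the hrtimer hunks in this patch follow: every update of the shared state word goes through WRITE_ONCE() and every lockless test goes through READ_ONCE(), so the compiler can neither tear nor re-load the access; the reader still only gets a snapshot.

#include <linux/compiler.h>
#include <linux/types.h>

static inline void demo_set_state(unsigned int *state, unsigned int bits)
{
        WRITE_ONCE(*state, bits);               /* pairs with demo_state_queued() */
}

static inline bool demo_state_queued(unsigned int *state, unsigned int bit)
{
        return !!(READ_ONCE(*state) & bit);     /* lockless snapshot */
}
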
|
|
|
|
/*
|
|
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
|
|
index edb0f0c30904..1adf54aad2df 100644
|
|
--- a/include/linux/libfdt_env.h
|
|
+++ b/include/linux/libfdt_env.h
|
|
@@ -7,6 +7,9 @@
|
|
|
|
#include <asm/byteorder.h>
|
|
|
|
+#define INT32_MAX S32_MAX
|
|
+#define UINT32_MAX U32_MAX
|
|
+
|
|
typedef __be16 fdt16_t;
|
|
typedef __be32 fdt32_t;
|
|
typedef __be64 fdt64_t;
|
|
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
|
|
index fe6cfdcfbc26..468328b1e1dd 100644
|
|
--- a/include/linux/posix-clock.h
|
|
+++ b/include/linux/posix-clock.h
|
|
@@ -69,29 +69,32 @@ struct posix_clock_operations {
|
|
*
|
|
* @ops: Functional interface to the clock
|
|
* @cdev: Character device instance for this clock
|
|
- * @kref: Reference count.
|
|
+ * @dev: Pointer to the clock's device.
|
|
* @rwsem: Protects the 'zombie' field from concurrent access.
|
|
* @zombie: If 'zombie' is true, then the hardware has disappeared.
|
|
- * @release: A function to free the structure when the reference count reaches
|
|
- * zero. May be NULL if structure is statically allocated.
|
|
*
|
|
* Drivers should embed their struct posix_clock within a private
|
|
* structure, obtaining a reference to it during callbacks using
|
|
* container_of().
|
|
+ *
|
|
+ * Drivers should supply an initialized but not exposed struct device
|
|
+ * to posix_clock_register(). It is used to manage lifetime of the
|
|
+ * driver's private structure. It's 'release' field should be set to
|
|
+ * a release function for this private structure.
|
|
*/
|
|
struct posix_clock {
|
|
struct posix_clock_operations ops;
|
|
struct cdev cdev;
|
|
- struct kref kref;
|
|
+ struct device *dev;
|
|
struct rw_semaphore rwsem;
|
|
bool zombie;
|
|
- void (*release)(struct posix_clock *clk);
|
|
};
|
|
|
|
/**
|
|
* posix_clock_register() - register a new clock
|
|
- * @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
|
|
- * @devid: Allocated device id
|
|
+ * @clk: Pointer to the clock. Caller must provide 'ops' field
|
|
+ * @dev: Pointer to the initialized device. Caller must provide
|
|
+ * 'release' field
|
|
*
|
|
* A clock driver calls this function to register itself with the
|
|
* clock device subsystem. If 'clk' points to dynamically allocated
|
|
@@ -100,7 +103,7 @@ struct posix_clock {
|
|
*
|
|
* Returns zero on success, non-zero otherwise.
|
|
*/
|
|
-int posix_clock_register(struct posix_clock *clk, dev_t devid);
|
|
+int posix_clock_register(struct posix_clock *clk, struct device *dev);
|
|
|
|
/**
|
|
* posix_clock_unregister() - unregister a clock
|
|
diff --git a/include/linux/quota.h b/include/linux/quota.h
|
|
index f32dd270b8e3..27aab84fcbaa 100644
|
|
--- a/include/linux/quota.h
|
|
+++ b/include/linux/quota.h
|
|
@@ -263,7 +263,7 @@ enum {
|
|
};
|
|
|
|
struct dqstats {
|
|
- int stat[_DQST_DQSTAT_LAST];
|
|
+ unsigned long stat[_DQST_DQSTAT_LAST];
|
|
struct percpu_counter counter[_DQST_DQSTAT_LAST];
|
|
};
|
|
|
|
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
|
|
index bc8206a8f30e..61974c4c566b 100644
|
|
--- a/include/linux/rculist_nulls.h
|
|
+++ b/include/linux/rculist_nulls.h
|
|
@@ -100,6 +100,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
|
|
first->pprev = &n->next;
|
|
}
|
|
|
|
+/**
|
|
+ * hlist_nulls_add_tail_rcu
|
|
+ * @n: the element to add to the hash list.
|
|
+ * @h: the list to add to.
|
|
+ *
|
|
+ * Description:
|
|
+ * Adds the specified element to the specified hlist_nulls,
|
|
+ * while permitting racing traversals.
|
|
+ *
|
|
+ * The caller must take whatever precautions are necessary
|
|
+ * (such as holding appropriate locks) to avoid racing
|
|
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
|
|
+ * or hlist_nulls_del_rcu(), running on this same list.
|
|
+ * However, it is perfectly legal to run concurrently with
|
|
+ * the _rcu list-traversal primitives, such as
|
|
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
|
|
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
|
|
+ * list-traversal primitive must be guarded by rcu_read_lock().
|
|
+ */
|
|
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
|
|
+ struct hlist_nulls_head *h)
|
|
+{
|
|
+ struct hlist_nulls_node *i, *last = NULL;
|
|
+
|
|
+ /* Note: write side code, so rcu accessors are not needed. */
|
|
+ for (i = h->first; !is_a_nulls(i); i = i->next)
|
|
+ last = i;
|
|
+
|
|
+ if (last) {
|
|
+ n->next = last->next;
|
|
+ n->pprev = &last->next;
|
|
+ rcu_assign_pointer(hlist_next_rcu(last), n);
|
|
+ } else {
|
|
+ hlist_nulls_add_head_rcu(n, h);
|
|
+ }
|
|
+}
|
|
+
|
|
/**
|
|
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
|
|
* @tpos: the type * to use as a loop cursor.
|
|
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
|
|
index 1ba6e2cc2725..6ae88b0c1c31 100644
|
|
--- a/include/linux/skbuff.h
|
|
+++ b/include/linux/skbuff.h
|
|
@@ -1795,7 +1795,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
|
|
*/
|
|
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
|
|
{
|
|
- struct sk_buff *skb = list_->prev;
|
|
+ struct sk_buff *skb = READ_ONCE(list_->prev);
|
|
|
|
if (skb == (struct sk_buff *)list_)
|
|
skb = NULL;
|
|
@@ -1861,7 +1861,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
|
|
struct sk_buff *prev, struct sk_buff *next,
|
|
struct sk_buff_head *list)
|
|
{
|
|
- /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
|
|
+ /* See skb_queue_empty_lockless() and skb_peek_tail()
|
|
+ * for the opposite READ_ONCE()
|
|
+ */
|
|
WRITE_ONCE(newsk->next, next);
|
|
WRITE_ONCE(newsk->prev, prev);
|
|
WRITE_ONCE(next->prev, newsk);
|
|
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
|
|
index 659a4400517b..e93e249a4e9b 100644
|
|
--- a/include/linux/thread_info.h
|
|
+++ b/include/linux/thread_info.h
|
|
@@ -147,6 +147,8 @@ check_copy_size(const void *addr, size_t bytes, bool is_source)
|
|
__bad_copy_to();
|
|
return false;
|
|
}
|
|
+ if (WARN_ON_ONCE(bytes > INT_MAX))
|
|
+ return false;
|
|
check_object_size(addr, bytes, is_source);
|
|
return true;
|
|
}
|
|
diff --git a/include/net/dst.h b/include/net/dst.h
|
|
index 8224dad2ae94..3448cf865ede 100644
|
|
--- a/include/net/dst.h
|
|
+++ b/include/net/dst.h
|
|
@@ -516,7 +516,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
|
|
struct dst_entry *dst = skb_dst(skb);
|
|
|
|
if (dst && dst->ops->update_pmtu)
|
|
- dst->ops->update_pmtu(dst, NULL, skb, mtu);
|
|
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
|
|
+}
|
|
+
|
|
+/* update dst pmtu but not do neighbor confirm */
|
|
+static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
|
|
+{
|
|
+ struct dst_entry *dst = skb_dst(skb);
|
|
+
|
|
+ if (dst && dst->ops->update_pmtu)
|
|
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
|
|
}
|
|
|
|
static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
|
|
@@ -526,7 +535,7 @@ static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
|
|
u32 encap_mtu = dst_mtu(encap_dst);
|
|
|
|
if (skb->len > encap_mtu - headroom)
|
|
- skb_dst_update_pmtu(skb, encap_mtu - headroom);
|
|
+ skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
|
|
}
|
|
|
|
#endif /* _NET_DST_H */
|
|
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
|
|
index 5ec645f27ee3..443863c7b8da 100644
|
|
--- a/include/net/dst_ops.h
|
|
+++ b/include/net/dst_ops.h
|
|
@@ -27,7 +27,8 @@ struct dst_ops {
|
|
struct dst_entry * (*negative_advice)(struct dst_entry *);
|
|
void (*link_failure)(struct sk_buff *);
|
|
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb, u32 mtu);
|
|
+ struct sk_buff *skb, u32 mtu,
|
|
+ bool confirm_neigh);
|
|
void (*redirect)(struct dst_entry *dst, struct sock *sk,
|
|
struct sk_buff *skb);
|
|
int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
|
|
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
|
|
index af2b4c065a04..d0019d3395cf 100644
|
|
--- a/include/net/inet_hashtables.h
|
|
+++ b/include/net/inet_hashtables.h
|
|
@@ -103,13 +103,19 @@ struct inet_bind_hashbucket {
|
|
struct hlist_head chain;
|
|
};
|
|
|
|
-/*
|
|
- * Sockets can be hashed in established or listening table
|
|
+/* Sockets can be hashed in established or listening table.
|
|
+ * We must use different 'nulls' end-of-chain value for all hash buckets :
|
|
+ * A socket might transition from ESTABLISH to LISTEN state without
|
|
+ * RCU grace period. A lookup in ehash table needs to handle this case.
|
|
*/
|
|
+#define LISTENING_NULLS_BASE (1U << 29)
|
|
struct inet_listen_hashbucket {
|
|
spinlock_t lock;
|
|
unsigned int count;
|
|
- struct hlist_head head;
|
|
+ union {
|
|
+ struct hlist_head head;
|
|
+ struct hlist_nulls_head nulls_head;
|
|
+ };
|
|
};
|
|
|
|
/* This is for listening sockets, thus all sockets which possess wildcards. */
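
A simplified sketch (hypothetical function, no refcounting) of the lookup pattern that the per-bucket nulls end markers above exist to support: an RCU walker that reaches the end of a chain checks that the nulls value still identifies the bucket it started in and restarts otherwise, because a socket it followed may have moved to another chain in the meantime.

#include <linux/list_nulls.h>
#include <net/sock.h>

/* caller holds rcu_read_lock(); a real lookup would also take a reference */
static struct sock *demo_lookup(struct hlist_nulls_head *bucket,
                                unsigned long slot, unsigned short port)
{
        struct hlist_nulls_node *node;
        struct sock *sk;

begin:
        sk_nulls_for_each_rcu(sk, node, bucket) {
                if (sk->sk_num == port)
                        return sk;
        }
        /* ended on a foreign nulls marker: we drifted to another chain, retry */
        if (get_nulls_value(node) != slot)
                goto begin;
        return NULL;
}
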
|
|
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
|
|
index d80acda231ae..47e61956168d 100644
|
|
--- a/include/net/sch_generic.h
|
|
+++ b/include/net/sch_generic.h
|
|
@@ -308,6 +308,7 @@ struct tcf_proto_ops {
|
|
int (*delete)(struct tcf_proto *tp, void *arg,
|
|
bool *last, bool rtnl_held,
|
|
struct netlink_ext_ack *);
|
|
+ bool (*delete_empty)(struct tcf_proto *tp);
|
|
void (*walk)(struct tcf_proto *tp,
|
|
struct tcf_walker *arg, bool rtnl_held);
|
|
int (*reoffload)(struct tcf_proto *tp, bool add,
|
|
@@ -336,6 +337,10 @@ struct tcf_proto_ops {
|
|
int flags;
|
|
};
|
|
|
|
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
|
|
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
|
|
+ * conditions can occur when filters are inserted/deleted simultaneously.
|
|
+ */
|
|
enum tcf_proto_ops_flags {
|
|
TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
|
|
};
|
|
diff --git a/include/net/sock.h b/include/net/sock.h
|
|
index 013396e50b91..e09e2886a836 100644
|
|
--- a/include/net/sock.h
|
|
+++ b/include/net/sock.h
|
|
@@ -723,6 +723,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
|
|
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
|
|
}
|
|
|
|
+static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
|
|
+{
|
|
+ hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
|
|
+}
|
|
+
|
|
static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
|
|
{
|
|
sock_hold(sk);
|
|
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index b71b5c4f418c..533f56733ba8 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -627,6 +627,7 @@ struct iscsi_reject {
 #define ISCSI_REASON_BOOKMARK_INVALID 9
 #define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
 #define ISCSI_REASON_NEGOTIATION_RESET 11
+#define ISCSI_REASON_WAITING_FOR_LOGOUT 12

 /* Max. number of Key=Value pairs in a text message */
 #define MAX_KEY_VALUE_PAIRS 8192
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 545e3869b0e3..551b0eb7028a 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -123,8 +123,9 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 return ret;
 }

-static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
- ssize_t size, dma_addr_t *dma_handle)
+static void *__dma_alloc_from_coherent(struct device *dev,
+ struct dma_coherent_mem *mem,
+ ssize_t size, dma_addr_t *dma_handle)
 {
 int order = get_order(size);
 unsigned long flags;
@@ -143,7 +144,7 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
 /*
 * Memory was found in the coherent area.
 */
- *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
 ret = mem->virt_base + (pageno << PAGE_SHIFT);
 spin_unlock_irqrestore(&mem->spinlock, flags);
 memset(ret, 0, size);
@@ -175,17 +176,18 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 if (!mem)
 return 0;

- *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+ *ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
 return 1;
 }

-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle)
 {
 if (!dma_coherent_default_memory)
 return NULL;

- return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
- dma_handle);
+ return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
+ dma_handle);
 }

 static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 099002d84f46..4ad74f5987ea 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -420,6 +420,7 @@ void debug_dma_dump_mappings(struct device *dev)
 }

 spin_unlock_irqrestore(&bucket->lock, flags);
+ cond_resched();
 }
 }

diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b6f2f35d0bcf..70665934d53e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1466,7 +1466,7 @@ static struct ctl_table vm_table[] = {
 .procname = "drop_caches",
 .data = &sysctl_drop_caches,
 .maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0200,
 .proc_handler = drop_caches_sysctl_handler,
 .extra1 = SYSCTL_ONE,
 .extra2 = &four,
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 65605530ee34..7f31932216a1 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -966,7 +966,8 @@ static int enqueue_hrtimer(struct hrtimer *timer,

 base->cpu_base->active_bases |= 1 << base->index;

- timer->state = HRTIMER_STATE_ENQUEUED;
+ /* Pairs with the lockless read in hrtimer_is_queued() */
+ WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);

 return timerqueue_add(&base->active, &timer->node);
 }
@@ -988,7 +989,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
 struct hrtimer_cpu_base *cpu_base = base->cpu_base;
 u8 state = timer->state;

- timer->state = newstate;
+ /* Pairs with the lockless read in hrtimer_is_queued() */
+ WRITE_ONCE(timer->state, newstate);
 if (!(state & HRTIMER_STATE_ENQUEUED))
 return;

@@ -1013,8 +1015,9 @@ static void __remove_hrtimer(struct hrtimer *timer,
 static inline int
 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
 {
- if (hrtimer_is_queued(timer)) {
- u8 state = timer->state;
+ u8 state = timer->state;
+
+ if (state & HRTIMER_STATE_ENQUEUED) {
 int reprogram;

 /*
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index ec960bb939fd..200fb2d3be99 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -14,8 +14,6 @@

 #include "posix-timers.h"

-static void delete_clock(struct kref *kref);
-
 /*
 * Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
 */
@@ -125,7 +123,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
 err = 0;

 if (!err) {
- kref_get(&clk->kref);
+ get_device(clk->dev);
 fp->private_data = clk;
 }
 out:
@@ -141,7 +139,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
 if (clk->ops.release)
 err = clk->ops.release(clk);

- kref_put(&clk->kref, delete_clock);
+ put_device(clk->dev);

 fp->private_data = NULL;

@@ -161,38 +159,35 @@ static const struct file_operations posix_clock_file_operations = {
 #endif
 };

-int posix_clock_register(struct posix_clock *clk, dev_t devid)
+int posix_clock_register(struct posix_clock *clk, struct device *dev)
 {
 int err;

- kref_init(&clk->kref);
 init_rwsem(&clk->rwsem);

 cdev_init(&clk->cdev, &posix_clock_file_operations);
+ err = cdev_device_add(&clk->cdev, dev);
+ if (err) {
+ pr_err("%s unable to add device %d:%d\n",
+ dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+ return err;
+ }
 clk->cdev.owner = clk->ops.owner;
- err = cdev_add(&clk->cdev, devid, 1);
+ clk->dev = dev;

- return err;
+ return 0;
 }
 EXPORT_SYMBOL_GPL(posix_clock_register);

-static void delete_clock(struct kref *kref)
-{
- struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-
- if (clk->release)
- clk->release(clk);
-}
-
 void posix_clock_unregister(struct posix_clock *clk)
 {
- cdev_del(&clk->cdev);
+ cdev_device_del(&clk->cdev, clk->dev);

 down_write(&clk->rwsem);
 clk->zombie = true;
 up_write(&clk->rwsem);

- kref_put(&clk->kref, delete_clock);
+ put_device(clk->dev);
 }
 EXPORT_SYMBOL_GPL(posix_clock_unregister);

diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index af7800103e51..59980ecfc962 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -662,6 +662,9 @@ static unsigned int br_nf_forward_arp(void *priv,
 nf_bridge_pull_encap_header(skb);
 }

+ if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
+ return NF_DROP;
+
 if (arp_hdr(skb)->ar_pln != 4) {
 if (is_vlan_arp(skb, state->net))
 nf_bridge_push_encap_header(skb);
diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
index 2cdfc5d6c25d..8c69f0c95a8e 100644
--- a/net/bridge/br_nf_core.c
+++ b/net/bridge/br_nf_core.c
@@ -22,7 +22,8 @@
 #endif

 static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
 {
 }

diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 4096d8a74a2b..e1256e03a9a8 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1867,7 +1867,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
 }

 static int ebt_buf_add(struct ebt_entries_buf_state *state,
- void *data, unsigned int sz)
+ const void *data, unsigned int sz)
 {
 if (state->buf_kern_start == NULL)
 goto count_only;
@@ -1901,7 +1901,7 @@ enum compat_mwt {
 EBT_COMPAT_TARGET,
 };

-static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
+static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
 enum compat_mwt compat_mwt,
 struct ebt_entries_buf_state *state,
 const unsigned char *base)
@@ -1979,22 +1979,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
 /* return size of all matches, watchers or target, including necessary
 * alignment and padding.
 */
-static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
 unsigned int size_left, enum compat_mwt type,
 struct ebt_entries_buf_state *state, const void *base)
 {
+ const char *buf = (const char *)match32;
 int growth = 0;
- char *buf;

 if (size_left == 0)
 return 0;

- buf = (char *) match32;
-
- while (size_left >= sizeof(*match32)) {
+ do {
 struct ebt_entry_match *match_kern;
 int ret;

+ if (size_left < sizeof(*match32))
+ return -EINVAL;
+
 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
 if (match_kern) {
 char *tmp;
@@ -2031,22 +2032,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
 if (match_kern)
 match_kern->match_size = ret;

- /* rule should have no remaining data after target */
- if (type == EBT_COMPAT_TARGET && size_left)
- return -EINVAL;
-
 match32 = (struct compat_ebt_entry_mwt *) buf;
- }
+ } while (size_left);

 return growth;
 }

 /* called for all ebt_entry structures. */
-static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
 unsigned int *total,
 struct ebt_entries_buf_state *state)
 {
- unsigned int i, j, startoff, new_offset = 0;
+ unsigned int i, j, startoff, next_expected_off, new_offset = 0;
 /* stores match/watchers/targets & offset of next struct ebt_entry: */
 unsigned int offsets[4];
 unsigned int *offsets_update = NULL;
@@ -2132,11 +2129,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
 return ret;
 }

- startoff = state->buf_user_offset - startoff;
+ next_expected_off = state->buf_user_offset - startoff;
+ if (next_expected_off != entry->next_offset)
+ return -EINVAL;

- if (WARN_ON(*total < startoff))
+ if (*total < entry->next_offset)
 return -EINVAL;
- *total -= startoff;
+ *total -= entry->next_offset;
 return 0;
 }

diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
|
|
index aea918135ec3..08c3dc45f1a4 100644
|
|
--- a/net/decnet/dn_route.c
|
|
+++ b/net/decnet/dn_route.c
|
|
@@ -110,7 +110,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
|
|
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
|
|
static void dn_dst_link_failure(struct sk_buff *);
|
|
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb , u32 mtu);
|
|
+ struct sk_buff *skb , u32 mtu,
|
|
+ bool confirm_neigh);
|
|
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
|
|
struct sk_buff *skb);
|
|
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
|
|
@@ -251,7 +252,8 @@ static int dn_dst_gc(struct dst_ops *ops)
|
|
* advertise to the other end).
|
|
*/
|
|
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb, u32 mtu)
|
|
+ struct sk_buff *skb, u32 mtu,
|
|
+ bool confirm_neigh)
|
|
{
|
|
struct dn_route *rt = (struct dn_route *) dst;
|
|
struct neighbour *n = rt->n;
|
|
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
|
|
index 4298aae74e0e..ac95ba78b903 100644
|
|
--- a/net/ipv4/icmp.c
|
|
+++ b/net/ipv4/icmp.c
|
|
@@ -249,10 +249,11 @@ bool icmp_global_allow(void)
|
|
bool rc = false;
|
|
|
|
/* Check if token bucket is empty and cannot be refilled
|
|
- * without taking the spinlock.
|
|
+ * without taking the spinlock. The READ_ONCE() are paired
|
|
+ * with the following WRITE_ONCE() in this same function.
|
|
*/
|
|
- if (!icmp_global.credit) {
|
|
- delta = min_t(u32, now - icmp_global.stamp, HZ);
|
|
+ if (!READ_ONCE(icmp_global.credit)) {
|
|
+ delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
|
|
if (delta < HZ / 50)
|
|
return false;
|
|
}
|
|
@@ -262,14 +263,14 @@ bool icmp_global_allow(void)
|
|
if (delta >= HZ / 50) {
|
|
incr = sysctl_icmp_msgs_per_sec * delta / HZ ;
|
|
if (incr)
|
|
- icmp_global.stamp = now;
|
|
+ WRITE_ONCE(icmp_global.stamp, now);
|
|
}
|
|
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
|
|
if (credit) {
|
|
credit--;
|
|
rc = true;
|
|
}
|
|
- icmp_global.credit = credit;
|
|
+ WRITE_ONCE(icmp_global.credit, credit);
|
|
spin_unlock(&icmp_global.lock);
|
|
return rc;
|
|
}
|
|
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
|
|
index eb30fc1770de..ac05e273bc66 100644
|
|
--- a/net/ipv4/inet_connection_sock.c
|
|
+++ b/net/ipv4/inet_connection_sock.c
|
|
@@ -1086,7 +1086,7 @@ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
|
|
if (!dst)
|
|
goto out;
|
|
}
|
|
- dst->ops->update_pmtu(dst, sk, NULL, mtu);
|
|
+ dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
|
|
|
|
dst = __sk_dst_check(sk, 0);
|
|
if (!dst)
|
|
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
|
|
index 7dc79b973e6e..6a4c82f96e78 100644
|
|
--- a/net/ipv4/inet_diag.c
|
|
+++ b/net/ipv4/inet_diag.c
|
|
@@ -914,11 +914,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
|
|
|
|
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
|
|
struct inet_listen_hashbucket *ilb;
|
|
+ struct hlist_nulls_node *node;
|
|
|
|
num = 0;
|
|
ilb = &hashinfo->listening_hash[i];
|
|
spin_lock(&ilb->lock);
|
|
- sk_for_each(sk, &ilb->head) {
|
|
+ sk_nulls_for_each(sk, node, &ilb->nulls_head) {
|
|
struct inet_sock *inet = inet_sk(sk);
|
|
|
|
if (!net_eq(sock_net(sk), net))
|
|
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
|
|
index 83fb00153018..2bbaaf0c7176 100644
|
|
--- a/net/ipv4/inet_hashtables.c
|
|
+++ b/net/ipv4/inet_hashtables.c
|
|
@@ -516,10 +516,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
|
|
struct inet_listen_hashbucket *ilb)
|
|
{
|
|
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
|
|
+ const struct hlist_nulls_node *node;
|
|
struct sock *sk2;
|
|
kuid_t uid = sock_i_uid(sk);
|
|
|
|
- sk_for_each_rcu(sk2, &ilb->head) {
|
|
+ sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
|
|
if (sk2 != sk &&
|
|
sk2->sk_family == sk->sk_family &&
|
|
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
|
|
@@ -555,9 +556,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
|
|
}
|
|
if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
|
|
sk->sk_family == AF_INET6)
|
|
- hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
|
|
+ __sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
|
|
else
|
|
- hlist_add_head_rcu(&sk->sk_node, &ilb->head);
|
|
+ __sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
|
|
inet_hash2(hashinfo, sk);
|
|
ilb->count++;
|
|
sock_set_flag(sk, SOCK_RCU_FREE);
|
|
@@ -606,11 +607,9 @@ void inet_unhash(struct sock *sk)
|
|
reuseport_detach_sock(sk);
|
|
if (ilb) {
|
|
inet_unhash2(hashinfo, sk);
|
|
- __sk_del_node_init(sk);
|
|
- ilb->count--;
|
|
- } else {
|
|
- __sk_nulls_del_node_init_rcu(sk);
|
|
+ ilb->count--;
|
|
}
|
|
+ __sk_nulls_del_node_init_rcu(sk);
|
|
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
|
|
unlock:
|
|
spin_unlock_bh(lock);
|
|
@@ -750,7 +749,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
|
|
|
|
for (i = 0; i < INET_LHTABLE_SIZE; i++) {
|
|
spin_lock_init(&h->listening_hash[i].lock);
|
|
- INIT_HLIST_HEAD(&h->listening_hash[i].head);
|
|
+ INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
|
|
+ i + LISTENING_NULLS_BASE);
|
|
h->listening_hash[i].count = 0;
|
|
}
|
|
|
|
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
|
|
index be778599bfed..ff327a62c9ce 100644
|
|
--- a/net/ipv4/inetpeer.c
|
|
+++ b/net/ipv4/inetpeer.c
|
|
@@ -160,7 +160,12 @@ static void inet_peer_gc(struct inet_peer_base *base,
|
|
base->total / inet_peer_threshold * HZ;
|
|
for (i = 0; i < gc_cnt; i++) {
|
|
p = gc_stack[i];
|
|
- delta = (__u32)jiffies - p->dtime;
|
|
+
|
|
+ /* The READ_ONCE() pairs with the WRITE_ONCE()
|
|
+ * in inet_putpeer()
|
|
+ */
|
|
+ delta = (__u32)jiffies - READ_ONCE(p->dtime);
|
|
+
|
|
if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
|
|
gc_stack[i] = NULL;
|
|
}
|
|
@@ -237,7 +242,10 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
|
|
|
|
void inet_putpeer(struct inet_peer *p)
|
|
{
|
|
- p->dtime = (__u32)jiffies;
|
|
+ /* The WRITE_ONCE() pairs with itself (we run lockless)
|
|
+ * and the READ_ONCE() in inet_peer_gc()
|
|
+ */
|
|
+ WRITE_ONCE(p->dtime, (__u32)jiffies);
|
|
|
|
if (refcount_dec_and_test(&p->refcnt))
|
|
call_rcu(&p->rcu, inetpeer_free_rcu);
|
|
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
|
|
index 38c02bb62e2c..0fe2a5d3e258 100644
|
|
--- a/net/ipv4/ip_tunnel.c
|
|
+++ b/net/ipv4/ip_tunnel.c
|
|
@@ -505,7 +505,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
|
|
mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
|
|
|
|
if (skb_valid_dst(skb))
|
|
- skb_dst_update_pmtu(skb, mtu);
|
|
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
|
|
|
|
if (skb->protocol == htons(ETH_P_IP)) {
|
|
if (!skb_is_gso(skb) &&
|
|
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
|
|
index cfb025606793..fb9f6d60c27c 100644
|
|
--- a/net/ipv4/ip_vti.c
|
|
+++ b/net/ipv4/ip_vti.c
|
|
@@ -214,7 +214,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
|
|
|
|
mtu = dst_mtu(dst);
|
|
if (skb->len > mtu) {
|
|
- skb_dst_update_pmtu(skb, mtu);
|
|
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
|
|
if (skb->protocol == htons(ETH_P_IP)) {
|
|
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
|
|
htonl(mtu));
|
|
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
|
|
index 621f83434b24..fe34e9e0912a 100644
|
|
--- a/net/ipv4/route.c
|
|
+++ b/net/ipv4/route.c
|
|
@@ -139,7 +139,8 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst);
|
|
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
|
|
static void ipv4_link_failure(struct sk_buff *skb);
|
|
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb, u32 mtu);
|
|
+ struct sk_buff *skb, u32 mtu,
|
|
+ bool confirm_neigh);
|
|
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
|
|
struct sk_buff *skb);
|
|
static void ipv4_dst_destroy(struct dst_entry *dst);
|
|
@@ -1043,7 +1044,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
|
|
}
|
|
|
|
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb, u32 mtu)
|
|
+ struct sk_buff *skb, u32 mtu,
|
|
+ bool confirm_neigh)
|
|
{
|
|
struct rtable *rt = (struct rtable *) dst;
|
|
struct flowi4 fl4;
|
|
@@ -2648,7 +2650,8 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
|
|
}
|
|
|
|
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb, u32 mtu)
|
|
+ struct sk_buff *skb, u32 mtu,
|
|
+ bool confirm_neigh)
|
|
{
|
|
}
|
|
|
|
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
|
|
index 67b2dc7a1727..eda64871f983 100644
|
|
--- a/net/ipv4/tcp_ipv4.c
|
|
+++ b/net/ipv4/tcp_ipv4.c
|
|
@@ -2149,13 +2149,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
|
|
struct tcp_iter_state *st = seq->private;
|
|
struct net *net = seq_file_net(seq);
|
|
struct inet_listen_hashbucket *ilb;
|
|
+ struct hlist_nulls_node *node;
|
|
struct sock *sk = cur;
|
|
|
|
if (!sk) {
|
|
get_head:
|
|
ilb = &tcp_hashinfo.listening_hash[st->bucket];
|
|
spin_lock(&ilb->lock);
|
|
- sk = sk_head(&ilb->head);
|
|
+ sk = sk_nulls_head(&ilb->nulls_head);
|
|
st->offset = 0;
|
|
goto get_sk;
|
|
}
|
|
@@ -2163,9 +2164,9 @@ get_head:
|
|
++st->num;
|
|
++st->offset;
|
|
|
|
- sk = sk_next(sk);
|
|
+ sk = sk_nulls_next(sk);
|
|
get_sk:
|
|
- sk_for_each_from(sk) {
|
|
+ sk_nulls_for_each_from(sk, node) {
|
|
if (!net_eq(sock_net(sk), net))
|
|
continue;
|
|
if (sk->sk_family == afinfo->family)
|
|
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
|
|
index 762edd800d78..0269584e9cf7 100644
|
|
--- a/net/ipv4/tcp_output.c
|
|
+++ b/net/ipv4/tcp_output.c
|
|
@@ -72,6 +72,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
|
|
__skb_unlink(skb, &sk->sk_write_queue);
|
|
tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
|
|
|
|
+ if (tp->highest_sack == NULL)
|
|
+ tp->highest_sack = skb;
|
|
+
|
|
tp->packets_out += tcp_skb_pcount(skb);
|
|
if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
|
|
tcp_rearm_rto(sk);
|
|
@@ -2438,6 +2441,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
|
|
if (tcp_small_queue_check(sk, skb, 0))
|
|
break;
|
|
|
|
+ /* Argh, we hit an empty skb(), presumably a thread
|
|
+ * is sleeping in sendmsg()/sk_stream_wait_memory().
|
|
+ * We do not want to send a pure-ack packet and have
|
|
+ * a strange looking rtx queue with empty packet(s).
|
|
+ */
|
|
+ if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
|
|
+ break;
|
|
+
|
|
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
|
|
break;
|
|
|
|
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
|
|
index 447defbfccdd..7aa4e77161f6 100644
|
|
--- a/net/ipv4/udp.c
|
|
+++ b/net/ipv4/udp.c
|
|
@@ -1475,7 +1475,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
|
|
* queue contains some other skb
|
|
*/
|
|
rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
|
|
- if (rmem > (size + sk->sk_rcvbuf))
|
|
+ if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
|
|
goto uncharge_drop;
|
|
|
|
spin_lock(&list->lock);
|
|
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
|
|
index 35b84b52b702..9ebd54752e03 100644
|
|
--- a/net/ipv4/xfrm4_policy.c
|
|
+++ b/net/ipv4/xfrm4_policy.c
|
|
@@ -100,12 +100,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
|
|
}
|
|
|
|
static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb, u32 mtu)
|
|
+ struct sk_buff *skb, u32 mtu,
|
|
+ bool confirm_neigh)
|
|
{
|
|
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
|
|
struct dst_entry *path = xdst->route;
|
|
|
|
- path->ops->update_pmtu(path, sk, skb, mtu);
|
|
+ path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
|
|
}
|
|
|
|
static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
|
|
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
|
|
index 34ccef18b40e..f9b5690e94fd 100644
|
|
--- a/net/ipv6/addrconf.c
|
|
+++ b/net/ipv6/addrconf.c
|
|
@@ -5231,16 +5231,16 @@ static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (!netlink_strict_get_check(skb))
|
|
+ return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
|
|
+ ifa_ipv6_policy, extack);
|
|
+
|
|
ifm = nlmsg_data(nlh);
|
|
if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
|
|
NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (!netlink_strict_get_check(skb))
|
|
- return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
|
|
- ifa_ipv6_policy, extack);
|
|
-
|
|
err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
|
|
ifa_ipv6_policy, extack);
|
|
if (err)
|
|
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
|
|
index fe9cb8d1adca..e315526fa244 100644
|
|
--- a/net/ipv6/inet6_connection_sock.c
|
|
+++ b/net/ipv6/inet6_connection_sock.c
|
|
@@ -146,7 +146,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
|
|
|
|
if (IS_ERR(dst))
|
|
return NULL;
|
|
- dst->ops->update_pmtu(dst, sk, NULL, mtu);
|
|
+ dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
|
|
|
|
dst = inet6_csk_route_socket(sk, &fl6);
|
|
return IS_ERR(dst) ? NULL : dst;
|
|
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
|
|
index 923034c52ce4..189de56f5e36 100644
|
|
--- a/net/ipv6/ip6_gre.c
|
|
+++ b/net/ipv6/ip6_gre.c
|
|
@@ -1040,7 +1040,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
|
|
|
/* TooBig packet may have updated dst->dev's mtu */
|
|
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
|
|
- dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
|
|
+ dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
|
|
|
|
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
|
|
NEXTHDR_GRE);
|
|
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
|
|
index 754a484d35df..2f376dbc37d5 100644
|
|
--- a/net/ipv6/ip6_tunnel.c
|
|
+++ b/net/ipv6/ip6_tunnel.c
|
|
@@ -640,7 +640,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
|
if (rel_info > dst_mtu(skb_dst(skb2)))
|
|
goto out;
|
|
|
|
- skb_dst_update_pmtu(skb2, rel_info);
|
|
+ skb_dst_update_pmtu_no_confirm(skb2, rel_info);
|
|
}
|
|
|
|
icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
|
|
@@ -1132,7 +1132,7 @@ route_lookup:
|
|
mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
|
|
IPV6_MIN_MTU : IPV4_MIN_MTU);
|
|
|
|
- skb_dst_update_pmtu(skb, mtu);
|
|
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
|
|
if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
|
|
*pmtu = mtu;
|
|
err = -EMSGSIZE;
|
|
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
|
|
index 024db17386d2..6f08b760c2a7 100644
|
|
--- a/net/ipv6/ip6_vti.c
|
|
+++ b/net/ipv6/ip6_vti.c
|
|
@@ -479,7 +479,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
|
|
|
|
mtu = dst_mtu(dst);
|
|
if (skb->len > mtu) {
|
|
- skb_dst_update_pmtu(skb, mtu);
|
|
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
|
|
|
|
if (skb->protocol == htons(ETH_P_IPV6)) {
|
|
if (mtu < IPV6_MIN_MTU)
|
|
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
|
|
index 3f83ea851ebf..e4ed9c7b43b0 100644
|
|
--- a/net/ipv6/route.c
|
|
+++ b/net/ipv6/route.c
|
|
@@ -95,7 +95,8 @@ static int ip6_pkt_prohibit(struct sk_buff *skb);
|
|
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
|
|
static void ip6_link_failure(struct sk_buff *skb);
|
|
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb, u32 mtu);
|
|
+ struct sk_buff *skb, u32 mtu,
|
|
+ bool confirm_neigh);
|
|
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
|
|
struct sk_buff *skb);
|
|
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
|
|
@@ -264,7 +265,8 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
|
|
}
|
|
|
|
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb, u32 mtu)
|
|
+ struct sk_buff *skb, u32 mtu,
|
|
+ bool confirm_neigh)
|
|
{
|
|
}
|
|
|
|
@@ -2695,7 +2697,8 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
|
|
}
|
|
|
|
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
|
|
- const struct ipv6hdr *iph, u32 mtu)
|
|
+ const struct ipv6hdr *iph, u32 mtu,
|
|
+ bool confirm_neigh)
|
|
{
|
|
const struct in6_addr *daddr, *saddr;
|
|
struct rt6_info *rt6 = (struct rt6_info *)dst;
|
|
@@ -2713,7 +2716,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
|
|
daddr = NULL;
|
|
saddr = NULL;
|
|
}
|
|
- dst_confirm_neigh(dst, daddr);
|
|
+
|
|
+ if (confirm_neigh)
|
|
+ dst_confirm_neigh(dst, daddr);
|
|
+
|
|
mtu = max_t(u32, mtu, IPV6_MIN_MTU);
|
|
if (mtu >= dst_mtu(dst))
|
|
return;
|
|
@@ -2767,9 +2773,11 @@ out_unlock:
|
|
}
|
|
|
|
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb, u32 mtu)
|
|
+ struct sk_buff *skb, u32 mtu,
|
|
+ bool confirm_neigh)
|
|
{
|
|
- __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
|
|
+ __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
|
|
+ confirm_neigh);
|
|
}
|
|
|
|
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
|
|
@@ -2788,7 +2796,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
|
|
|
|
dst = ip6_route_output(net, NULL, &fl6);
|
|
if (!dst->error)
|
|
- __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
|
|
+ __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
|
|
dst_release(dst);
|
|
}
|
|
EXPORT_SYMBOL_GPL(ip6_update_pmtu);
|
|
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
|
|
index b2ccbc473127..98954830c40b 100644
|
|
--- a/net/ipv6/sit.c
|
|
+++ b/net/ipv6/sit.c
|
|
@@ -944,7 +944,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
|
|
}
|
|
|
|
if (tunnel->parms.iph.daddr)
|
|
- skb_dst_update_pmtu(skb, mtu);
|
|
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
|
|
|
|
if (skb->len > mtu && !skb_is_gso(skb)) {
|
|
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
|
|
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
|
|
index 699e0730ce8e..af7a4b8b1e9c 100644
|
|
--- a/net/ipv6/xfrm6_policy.c
|
|
+++ b/net/ipv6/xfrm6_policy.c
|
|
@@ -98,12 +98,13 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
|
|
}
|
|
|
|
static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
|
- struct sk_buff *skb, u32 mtu)
|
|
+ struct sk_buff *skb, u32 mtu,
|
|
+ bool confirm_neigh)
|
|
{
|
|
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
|
|
struct dst_entry *path = xdst->route;
|
|
|
|
- path->ops->update_pmtu(path, sk, skb, mtu);
|
|
+ path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
|
|
}
|
|
|
|
static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
|
|
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
|
|
index 888d3068a492..c62a131a6094 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_xmit.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
|
|
@@ -208,7 +208,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
|
|
struct rtable *ort = skb_rtable(skb);
|
|
|
|
if (!skb->dev && sk && sk_fullsock(sk))
|
|
- ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
|
|
+ ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true);
|
|
}
|
|
|
|
static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
|
|
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
|
|
index 08923b21e566..f0df0d90b8bd 100644
|
|
--- a/net/sched/act_mirred.c
|
|
+++ b/net/sched/act_mirred.c
|
|
@@ -219,8 +219,10 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
|
|
bool use_reinsert;
|
|
bool want_ingress;
|
|
bool is_redirect;
|
|
+ bool expects_nh;
|
|
int m_eaction;
|
|
int mac_len;
|
|
+ bool at_nh;
|
|
|
|
rec_level = __this_cpu_inc_return(mirred_rec_level);
|
|
if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
|
|
@@ -261,19 +263,19 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
|
|
goto out;
|
|
}
|
|
|
|
- /* If action's target direction differs than filter's direction,
|
|
- * and devices expect a mac header on xmit, then mac push/pull is
|
|
- * needed.
|
|
- */
|
|
want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
|
|
- if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) {
|
|
- if (!skb_at_tc_ingress(skb)) {
|
|
- /* caught at egress, act ingress: pull mac */
|
|
- mac_len = skb_network_header(skb) - skb_mac_header(skb);
|
|
+
|
|
+ expects_nh = want_ingress || !m_mac_header_xmit;
|
|
+ at_nh = skb->data == skb_network_header(skb);
|
|
+ if (at_nh != expects_nh) {
|
|
+ mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
|
|
+ skb_network_header(skb) - skb_mac_header(skb);
|
|
+ if (expects_nh) {
|
|
+ /* target device/action expect data at nh */
|
|
skb_pull_rcsum(skb2, mac_len);
|
|
} else {
|
|
- /* caught at ingress, act egress: push mac */
|
|
- skb_push_rcsum(skb2, skb->mac_len);
|
|
+ /* target device/action expect data at mac */
|
|
+ skb_push_rcsum(skb2, mac_len);
|
|
}
|
|
}
|
|
|
|
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
|
|
index 6a0eacafdb19..76e0d122616a 100644
|
|
--- a/net/sched/cls_api.c
|
|
+++ b/net/sched/cls_api.c
|
|
@@ -308,33 +308,12 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
|
|
tcf_proto_destroy(tp, rtnl_held, true, extack);
|
|
}
|
|
|
|
-static int walker_check_empty(struct tcf_proto *tp, void *fh,
|
|
- struct tcf_walker *arg)
|
|
+static bool tcf_proto_check_delete(struct tcf_proto *tp)
|
|
{
|
|
- if (fh) {
|
|
- arg->nonempty = true;
|
|
- return -1;
|
|
- }
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
|
|
-{
|
|
- struct tcf_walker walker = { .fn = walker_check_empty, };
|
|
-
|
|
- if (tp->ops->walk) {
|
|
- tp->ops->walk(tp, &walker, rtnl_held);
|
|
- return !walker.nonempty;
|
|
- }
|
|
- return true;
|
|
-}
|
|
+ if (tp->ops->delete_empty)
|
|
+ return tp->ops->delete_empty(tp);
|
|
|
|
-static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
|
|
-{
|
|
- spin_lock(&tp->lock);
|
|
- if (tcf_proto_is_empty(tp, rtnl_held))
|
|
- tp->deleting = true;
|
|
- spin_unlock(&tp->lock);
|
|
+ tp->deleting = true;
|
|
return tp->deleting;
|
|
}
|
|
|
|
@@ -1751,7 +1730,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
|
|
* concurrently.
|
|
* Mark tp for deletion if it is empty.
|
|
*/
|
|
- if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
|
|
+ if (!tp_iter || !tcf_proto_check_delete(tp)) {
|
|
mutex_unlock(&chain->filter_chain_lock);
|
|
return;
|
|
}
|
|
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
|
|
index 4ac110bf19c5..5cf8163710c8 100644
|
|
--- a/net/sched/cls_flower.c
|
|
+++ b/net/sched/cls_flower.c
|
|
@@ -2519,6 +2519,17 @@ static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
|
|
f->res.class = cl;
|
|
}
|
|
|
|
+static bool fl_delete_empty(struct tcf_proto *tp)
|
|
+{
|
|
+ struct cls_fl_head *head = fl_head_dereference(tp);
|
|
+
|
|
+ spin_lock(&tp->lock);
|
|
+ tp->deleting = idr_is_empty(&head->handle_idr);
|
|
+ spin_unlock(&tp->lock);
|
|
+
|
|
+ return tp->deleting;
|
|
+}
|
|
+
|
|
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
|
|
.kind = "flower",
|
|
.classify = fl_classify,
|
|
@@ -2528,6 +2539,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
|
|
.put = fl_put,
|
|
.change = fl_change,
|
|
.delete = fl_delete,
|
|
+ .delete_empty = fl_delete_empty,
|
|
.walk = fl_walk,
|
|
.reoffload = fl_reoffload,
|
|
.hw_add = fl_hw_add,
|
|
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
|
|
index 98dd87ce1510..78ecdf146882 100644
|
|
--- a/net/sched/sch_fq.c
|
|
+++ b/net/sched/sch_fq.c
|
|
@@ -301,6 +301,9 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
|
|
f->socket_hash != sk->sk_hash)) {
|
|
f->credit = q->initial_quantum;
|
|
f->socket_hash = sk->sk_hash;
|
|
+ if (q->rate_enable)
|
|
+ smp_store_release(&sk->sk_pacing_status,
|
|
+ SK_PACING_FQ);
|
|
if (fq_flow_is_throttled(f))
|
|
fq_flow_unset_throttled(q, f);
|
|
f->time_next_packet = 0ULL;
|
|
@@ -322,8 +325,12 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
|
|
|
|
fq_flow_set_detached(f);
|
|
f->sk = sk;
|
|
- if (skb->sk == sk)
|
|
+ if (skb->sk == sk) {
|
|
f->socket_hash = sk->sk_hash;
|
|
+ if (q->rate_enable)
|
|
+ smp_store_release(&sk->sk_pacing_status,
|
|
+ SK_PACING_FQ);
|
|
+ }
|
|
f->credit = q->initial_quantum;
|
|
|
|
rb_link_node(&f->fq_node, parent, p);
|
|
@@ -428,17 +435,9 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
|
f->qlen++;
|
|
qdisc_qstats_backlog_inc(sch, skb);
|
|
if (fq_flow_is_detached(f)) {
|
|
- struct sock *sk = skb->sk;
|
|
-
|
|
fq_flow_add_tail(&q->new_flows, f);
|
|
if (time_after(jiffies, f->age + q->flow_refill_delay))
|
|
f->credit = max_t(u32, f->credit, q->quantum);
|
|
- if (sk && q->rate_enable) {
|
|
- if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
|
|
- SK_PACING_FQ))
|
|
- smp_store_release(&sk->sk_pacing_status,
|
|
- SK_PACING_FQ);
|
|
- }
|
|
q->inactive_flows--;
|
|
}
|
|
|
|
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
|
|
index 6a30392068a0..c1a100d2fed3 100644
|
|
--- a/net/sctp/stream.c
|
|
+++ b/net/sctp/stream.c
|
|
@@ -84,10 +84,8 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
|
|
return 0;
|
|
|
|
ret = genradix_prealloc(&stream->out, outcnt, gfp);
|
|
- if (ret) {
|
|
- genradix_free(&stream->out);
|
|
+ if (ret)
|
|
return ret;
|
|
- }
|
|
|
|
stream->outcnt = outcnt;
|
|
return 0;
|
|
@@ -102,10 +100,8 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
|
|
return 0;
|
|
|
|
ret = genradix_prealloc(&stream->in, incnt, gfp);
|
|
- if (ret) {
|
|
- genradix_free(&stream->in);
|
|
+ if (ret)
|
|
return ret;
|
|
- }
|
|
|
|
stream->incnt = incnt;
|
|
return 0;
|
|
@@ -123,7 +119,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
|
|
* a new one with new outcnt to save memory if needed.
|
|
*/
|
|
if (outcnt == stream->outcnt)
|
|
- goto in;
|
|
+ goto handle_in;
|
|
|
|
/* Filter out chunks queued on streams that won't exist anymore */
|
|
sched->unsched_all(stream);
|
|
@@ -132,24 +128,28 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
|
|
|
|
ret = sctp_stream_alloc_out(stream, outcnt, gfp);
|
|
if (ret)
|
|
- goto out;
|
|
+ goto out_err;
|
|
|
|
for (i = 0; i < stream->outcnt; i++)
|
|
SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
|
|
|
|
-in:
|
|
+handle_in:
|
|
sctp_stream_interleave_init(stream);
|
|
if (!incnt)
|
|
goto out;
|
|
|
|
ret = sctp_stream_alloc_in(stream, incnt, gfp);
|
|
- if (ret) {
|
|
- sched->free(stream);
|
|
- genradix_free(&stream->out);
|
|
- stream->outcnt = 0;
|
|
- goto out;
|
|
- }
|
|
+ if (ret)
|
|
+ goto in_err;
|
|
+
|
|
+ goto out;
|
|
|
|
+in_err:
|
|
+ sched->free(stream);
|
|
+ genradix_free(&stream->in);
|
|
+out_err:
|
|
+ genradix_free(&stream->out);
|
|
+ stream->outcnt = 0;
|
|
out:
|
|
return ret;
|
|
}
|
|
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
|
|
index 7235a6032671..3bbe1a58ec87 100644
|
|
--- a/net/sctp/transport.c
|
|
+++ b/net/sctp/transport.c
|
|
@@ -263,7 +263,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
|
|
|
|
pf->af->from_sk(&addr, sk);
|
|
pf->to_sk_daddr(&t->ipaddr, sk);
|
|
- dst->ops->update_pmtu(dst, sk, NULL, pmtu);
|
|
+ dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
|
|
pf->to_sk_daddr(&addr, sk);
|
|
|
|
dst = sctp_transport_dst_check(t);
|
|
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
|
|
index 737b49909a7a..6a6d3b2aa5a9 100644
|
|
--- a/net/smc/af_smc.c
|
|
+++ b/net/smc/af_smc.c
|
|
@@ -854,6 +854,8 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
|
|
goto out;
|
|
|
|
sock_hold(&smc->sk); /* sock put in passive closing */
|
|
+ if (smc->use_fallback)
|
|
+ goto out;
|
|
if (flags & O_NONBLOCK) {
|
|
if (schedule_work(&smc->connect_work))
|
|
smc->connect_nonblock = 1;
|
|
@@ -1716,8 +1718,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
|
|
sk->sk_err = smc->clcsock->sk->sk_err;
|
|
sk->sk_error_report(sk);
|
|
}
|
|
- if (rc)
|
|
- return rc;
|
|
|
|
if (optlen < sizeof(int))
|
|
return -EINVAL;
|
|
@@ -1725,6 +1725,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
|
|
return -EFAULT;
|
|
|
|
lock_sock(sk);
|
|
+ if (rc || smc->use_fallback)
|
|
+ goto out;
|
|
switch (optname) {
|
|
case TCP_ULP:
|
|
case TCP_FASTOPEN:
|
|
@@ -1736,15 +1738,14 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
|
|
smc_switch_to_fallback(smc);
|
|
smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
|
|
} else {
|
|
- if (!smc->use_fallback)
|
|
- rc = -EINVAL;
|
|
+ rc = -EINVAL;
|
|
}
|
|
break;
|
|
case TCP_NODELAY:
|
|
if (sk->sk_state != SMC_INIT &&
|
|
sk->sk_state != SMC_LISTEN &&
|
|
sk->sk_state != SMC_CLOSED) {
|
|
- if (val && !smc->use_fallback)
|
|
+ if (val)
|
|
mod_delayed_work(system_wq, &smc->conn.tx_work,
|
|
0);
|
|
}
|
|
@@ -1753,7 +1754,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
|
|
if (sk->sk_state != SMC_INIT &&
|
|
sk->sk_state != SMC_LISTEN &&
|
|
sk->sk_state != SMC_CLOSED) {
|
|
- if (!val && !smc->use_fallback)
|
|
+ if (!val)
|
|
mod_delayed_work(system_wq, &smc->conn.tx_work,
|
|
0);
|
|
}
|
|
@@ -1764,6 +1765,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
|
|
default:
|
|
break;
|
|
}
|
|
+out:
|
|
release_sock(sk);
|
|
|
|
return rc;
|
|
diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
|
|
index 82160808765c..b5a5b1c548c9 100644
|
|
--- a/scripts/dtc/Makefile
|
|
+++ b/scripts/dtc/Makefile
|
|
@@ -11,7 +11,7 @@ dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o
|
|
# Source files need to get at the userspace version of libfdt_env.h to compile
|
|
HOST_EXTRACFLAGS := -I $(srctree)/$(src)/libfdt
|
|
|
|
-ifeq ($(wildcard /usr/include/yaml.h),)
|
|
+ifeq ($(shell pkg-config --exists yaml-0.1 2>/dev/null && echo yes),)
|
|
ifneq ($(CHECK_DTBS),)
|
|
$(error dtc needs libyaml for DT schema validation support. \
|
|
Install the necessary libyaml development package.)
|
|
@@ -19,7 +19,7 @@ endif
|
|
HOST_EXTRACFLAGS += -DNO_YAML
|
|
else
|
|
dtc-objs += yamltree.o
|
|
-HOSTLDLIBS_dtc := -lyaml
|
|
+HOSTLDLIBS_dtc := $(shell pkg-config yaml-0.1 --libs)
|
|
endif
|
|
|
|
# Generated files need one more search path to include headers in source tree
|
|
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
|
|
index ae6504d07fd6..fb15f09e0e38 100644
|
|
--- a/scripts/kallsyms.c
|
|
+++ b/scripts/kallsyms.c
|
|
@@ -489,6 +489,8 @@ static void build_initial_tok_table(void)
|
|
table[pos] = table[i];
|
|
learn_symbol(table[pos].sym, table[pos].len);
|
|
pos++;
|
|
+ } else {
|
|
+ free(table[i].sym);
|
|
}
|
|
}
|
|
table_cnt = pos;
|
|
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
|
|
index 59f1cc2557a7..470693239e64 100644
|
|
--- a/security/apparmor/label.c
|
|
+++ b/security/apparmor/label.c
|
|
@@ -1458,11 +1458,13 @@ static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
|
|
/* helper macro for snprint routines */
|
|
#define update_for_len(total, len, size, str) \
|
|
do { \
|
|
+ size_t ulen = len; \
|
|
+ \
|
|
AA_BUG(len < 0); \
|
|
- total += len; \
|
|
- len = min(len, size); \
|
|
- size -= len; \
|
|
- str += len; \
|
|
+ total += ulen; \
|
|
+ ulen = min(ulen, size); \
|
|
+ size -= ulen; \
|
|
+ str += ulen; \
|
|
} while (0)
|
|
|
|
/**
|
|
@@ -1597,7 +1599,7 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
|
|
struct aa_ns *prev_ns = NULL;
|
|
struct label_it i;
|
|
int count = 0, total = 0;
|
|
- size_t len;
|
|
+ ssize_t len;
|
|
|
|
AA_BUG(!str && size != 0);
|
|
AA_BUG(!label);
|
|
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
|
|
index e7832448d721..bf38fc1b59b2 100644
|
|
--- a/security/tomoyo/realpath.c
|
|
+++ b/security/tomoyo/realpath.c
|
|
@@ -217,31 +217,6 @@ out:
|
|
return ERR_PTR(-ENOMEM);
|
|
}
|
|
|
|
-/**
|
|
- * tomoyo_get_socket_name - Get the name of a socket.
|
|
- *
|
|
- * @path: Pointer to "struct path".
|
|
- * @buffer: Pointer to buffer to return value in.
|
|
- * @buflen: Sizeof @buffer.
|
|
- *
|
|
- * Returns the buffer.
|
|
- */
|
|
-static char *tomoyo_get_socket_name(const struct path *path, char * const buffer,
|
|
- const int buflen)
|
|
-{
|
|
- struct inode *inode = d_backing_inode(path->dentry);
|
|
- struct socket *sock = inode ? SOCKET_I(inode) : NULL;
|
|
- struct sock *sk = sock ? sock->sk : NULL;
|
|
-
|
|
- if (sk) {
|
|
- snprintf(buffer, buflen, "socket:[family=%u:type=%u:protocol=%u]",
|
|
- sk->sk_family, sk->sk_type, sk->sk_protocol);
|
|
- } else {
|
|
- snprintf(buffer, buflen, "socket:[unknown]");
|
|
- }
|
|
- return buffer;
|
|
-}
|
|
-
|
|
/**
|
|
* tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root.
|
|
*
|
|
@@ -279,12 +254,7 @@ char *tomoyo_realpath_from_path(const struct path *path)
|
|
break;
|
|
/* To make sure that pos is '\0' terminated. */
|
|
buf[buf_len - 1] = '\0';
|
|
- /* Get better name for socket. */
|
|
- if (sb->s_magic == SOCKFS_MAGIC) {
|
|
- pos = tomoyo_get_socket_name(path, buf, buf_len - 1);
|
|
- goto encode;
|
|
- }
|
|
- /* For "pipe:[\$]". */
|
|
+ /* For "pipe:[\$]" and "socket:[\$]". */
|
|
if (dentry->d_op && dentry->d_op->d_dname) {
|
|
pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1);
|
|
goto encode;
|
|
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
|
|
index c37a78677955..265682296836 100644
|
|
--- a/tools/perf/builtin-diff.c
|
|
+++ b/tools/perf/builtin-diff.c
|
|
@@ -575,8 +575,8 @@ static int64_t block_cycles_diff_cmp(struct hist_entry *left,
|
|
if (!pairs_left && !pairs_right)
|
|
return 0;
|
|
|
|
- l = labs(left->diff.cycles);
|
|
- r = labs(right->diff.cycles);
|
|
+ l = llabs(left->diff.cycles);
|
|
+ r = llabs(right->diff.cycles);
|
|
return r - l;
|
|
}
|
|
|
|
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
|
|
index 6dba8b728d23..3983d6ccd14d 100644
|
|
--- a/tools/perf/builtin-script.c
|
|
+++ b/tools/perf/builtin-script.c
|
|
@@ -448,7 +448,7 @@ static int perf_evsel__check_attr(struct evsel *evsel,
|
|
"selected. Hence, no address to lookup the source line number.\n");
|
|
return -EINVAL;
|
|
}
|
|
- if (PRINT_FIELD(BRSTACKINSN) &&
|
|
+ if (PRINT_FIELD(BRSTACKINSN) && !allow_user_set &&
|
|
!(perf_evlist__combined_branch_type(session->evlist) &
|
|
PERF_SAMPLE_BRANCH_ANY)) {
|
|
pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
|
|
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
|
|
index 47fe34e5f7d5..ec7640cc4c91 100644
|
|
--- a/tools/perf/util/perf_regs.h
|
|
+++ b/tools/perf/util/perf_regs.h
|
|
@@ -41,7 +41,7 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
|
|
|
|
static inline const char *perf_reg_name(int id __maybe_unused)
|
|
{
|
|
- return NULL;
|
|
+ return "unknown";
|
|
}
|
|
|
|
static inline int perf_reg_value(u64 *valp __maybe_unused,
|
|
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
|
|
index 2a9890c8395a..21fcfe621d3a 100644
|
|
--- a/tools/power/x86/intel-speed-select/isst-config.c
|
|
+++ b/tools/power/x86/intel-speed-select/isst-config.c
|
|
@@ -169,7 +169,7 @@ int get_topo_max_cpus(void)
|
|
static void set_cpu_online_offline(int cpu, int state)
|
|
{
|
|
char buffer[128];
|
|
- int fd;
|
|
+ int fd, ret;
|
|
|
|
snprintf(buffer, sizeof(buffer),
|
|
"/sys/devices/system/cpu/cpu%d/online", cpu);
|
|
@@ -179,9 +179,12 @@ static void set_cpu_online_offline(int cpu, int state)
|
|
err(-1, "%s open failed", buffer);
|
|
|
|
if (state)
|
|
- write(fd, "1\n", 2);
|
|
+ ret = write(fd, "1\n", 2);
|
|
else
|
|
- write(fd, "0\n", 2);
|
|
+ ret = write(fd, "0\n", 2);
|
|
+
|
|
+ if (ret == -1)
|
|
+ perror("Online/Offline: Operation failed\n");
|
|
|
|
close(fd);
|
|
}
|
|
diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
|
|
index 6dee5332c9d3..fde3f9cefc6d 100644
|
|
--- a/tools/power/x86/intel-speed-select/isst-core.c
|
|
+++ b/tools/power/x86/intel-speed-select/isst-core.c
|
|
@@ -553,7 +553,6 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
|
|
i);
|
|
ctdp_level = &pkg_dev->ctdp_level[i];
|
|
|
|
- ctdp_level->processed = 1;
|
|
ctdp_level->level = i;
|
|
ctdp_level->control_cpu = cpu;
|
|
ctdp_level->pkg_id = get_physical_package_id(cpu);
|
|
@@ -561,7 +560,10 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
|
|
|
|
ret = isst_get_ctdp_control(cpu, i, ctdp_level);
|
|
if (ret)
|
|
- return ret;
|
|
+ continue;
|
|
+
|
|
+ pkg_dev->processed = 1;
|
|
+ ctdp_level->processed = 1;
|
|
|
|
ret = isst_get_tdp_info(cpu, i, ctdp_level);
|
|
if (ret)
|
|
@@ -614,8 +616,6 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
|
|
}
|
|
}
|
|
|
|
- pkg_dev->processed = 1;
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
|
|
index 40346d534f78..b11575c3e886 100644
|
|
--- a/tools/power/x86/intel-speed-select/isst-display.c
|
|
+++ b/tools/power/x86/intel-speed-select/isst-display.c
|
|
@@ -314,7 +314,8 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
|
|
char value[256];
|
|
int i, base_level = 1;
|
|
|
|
- print_package_info(cpu, outf);
|
|
+ if (pkg_dev->processed)
|
|
+ print_package_info(cpu, outf);
|
|
|
|
for (i = 0; i <= pkg_dev->levels; ++i) {
|
|
struct isst_pkg_ctdp_level_info *ctdp_level;
|
|
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
|
|
index 25e23e73c72e..2ecfa1158e2b 100644
|
|
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
|
|
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
|
|
@@ -73,7 +73,7 @@ trans:
|
|
[sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1),
|
|
[dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2),
|
|
[tar_3]"i"(TAR_3), [dscr_3]"i"(DSCR_3)
|
|
- : "memory", "r0", "r1", "r3", "r4", "r5", "r6"
|
|
+ : "memory", "r0", "r3", "r4", "r5", "r6", "lr"
|
|
);
|
|
|
|
/* TM failed, analyse */
|
|
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
|
|
index f603fe5a445b..6f7fb51f0809 100644
|
|
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
|
|
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
|
|
@@ -74,8 +74,8 @@ trans:
|
|
"3: ;"
|
|
: [res] "=r" (result), [texasr] "=r" (texasr)
|
|
: [sprn_texasr] "i" (SPRN_TEXASR)
|
|
- : "memory", "r0", "r1", "r3", "r4",
|
|
- "r7", "r8", "r9", "r10", "r11"
|
|
+ : "memory", "r0", "r3", "r4",
|
|
+ "r7", "r8", "r9", "r10", "r11", "lr"
|
|
);
|
|
|
|
if (result) {
|
|
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
|
|
index e0d37f07bdeb..46ef378a15ec 100644
|
|
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
|
|
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
|
|
@@ -62,7 +62,7 @@ trans:
|
|
[sprn_ppr]"i"(SPRN_PPR), [sprn_texasr]"i"(SPRN_TEXASR),
|
|
[tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2),
|
|
[dscr_2]"i"(DSCR_2), [cptr1] "b" (&cptr[1])
|
|
- : "memory", "r0", "r1", "r3", "r4", "r5", "r6"
|
|
+ : "memory", "r0", "r3", "r4", "r5", "r6"
|
|
);
|
|
|
|
/* TM failed, analyse */
|
|
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
|
|
index 8027457b97b7..70ca01234f79 100644
|
|
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
|
|
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
|
|
@@ -62,8 +62,8 @@ trans:
|
|
"3: ;"
|
|
: [res] "=r" (result), [texasr] "=r" (texasr)
|
|
: [sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "b" (&cptr[1])
|
|
- : "memory", "r0", "r1", "r3", "r4",
|
|
- "r7", "r8", "r9", "r10", "r11"
|
|
+ : "memory", "r0", "r3", "r4",
|
|
+ "r7", "r8", "r9", "r10", "r11", "lr"
|
|
);
|
|
|
|
if (result) {
|
|
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
|
|
index 56fbf9f6bbf3..07c388147b75 100644
|
|
--- a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
|
|
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
|
|
@@ -10,10 +10,12 @@
|
|
*/
|
|
|
|
#define _GNU_SOURCE
|
|
+#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <signal.h>
|
|
|
|
#include "utils.h"
|
|
+#include "tm.h"
|
|
|
|
void trap_signal_handler(int signo, siginfo_t *si, void *uc)
|
|
{
|
|
@@ -29,6 +31,8 @@ int tm_signal_sigreturn_nt(void)
|
|
{
|
|
struct sigaction trap_sa;
|
|
|
|
+ SKIP_IF(!have_htm());
|
|
+
|
|
trap_sa.sa_flags = SA_SIGINFO;
|
|
trap_sa.sa_sigaction = trap_signal_handler;
|
|
|
|
diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
|
|
index 1c0d76cb5adf..93b90a9b1eeb 100644
|
|
--- a/tools/testing/selftests/vm/config
|
|
+++ b/tools/testing/selftests/vm/config
|
|
@@ -1,2 +1,3 @@
|
|
CONFIG_SYSVIPC=y
|
|
CONFIG_USERFAULTFD=y
|
|
+CONFIG_TEST_VMALLOC=m
|