mirror of
https://github.com/Fishwaldo/build.git
synced 2025-03-31 19:21:43 +00:00
54008 lines
1.7 MiB
54008 lines
1.7 MiB
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
|
|
index aefd358a5ca36..bb5c9dc4d270b 100644
|
|
--- a/Documentation/admin-guide/kernel-parameters.txt
|
|
+++ b/Documentation/admin-guide/kernel-parameters.txt
|
|
@@ -2096,6 +2096,9 @@
|
|
off
|
|
Disables hypervisor mitigations and doesn't
|
|
emit any warnings.
|
|
+ It also drops the swap size and available
|
|
+ RAM limit restriction on both hypervisor and
|
|
+ bare metal.
|
|
|
|
Default is 'flush'.
|
|
|
|
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
|
|
index b85dd80510b02..9af977384168e 100644
|
|
--- a/Documentation/admin-guide/l1tf.rst
|
|
+++ b/Documentation/admin-guide/l1tf.rst
|
|
@@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are:
|
|
|
|
off Disables hypervisor mitigations and doesn't emit any
|
|
warnings.
|
|
+ It also drops the swap size and available RAM limit restrictions
|
|
+ on both hypervisor and bare metal.
|
|
+
|
|
============ =============================================================
|
|
|
|
The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
|
|
@@ -576,7 +579,8 @@ Default mitigations
|
|
The kernel default mitigations for vulnerable processors are:
|
|
|
|
- PTE inversion to protect against malicious user space. This is done
|
|
- unconditionally and cannot be controlled.
|
|
+ unconditionally and cannot be controlled. The swap storage is limited
|
|
+ to ~16TB.
|
|
|
|
- L1D conditional flushing on VMENTER when EPT is enabled for
|
|
a guest.
|
|
diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt
|
|
index aededdbc262b2..f9a7c984274ce 100644
|
|
--- a/Documentation/devicetree/bindings/eeprom/at24.txt
|
|
+++ b/Documentation/devicetree/bindings/eeprom/at24.txt
|
|
@@ -27,6 +27,7 @@ Required properties:
|
|
"atmel,24c256",
|
|
"atmel,24c512",
|
|
"atmel,24c1024",
|
|
+ "atmel,24c2048",
|
|
|
|
If <manufacturer> is not "atmel", then a fallback must be used
|
|
with the same <model> and "atmel" as manufacturer.
|
|
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
|
|
index 12a5e6e693b6e..2a4e63f5122cf 100644
|
|
--- a/Documentation/filesystems/proc.txt
|
|
+++ b/Documentation/filesystems/proc.txt
|
|
@@ -496,7 +496,9 @@ manner. The codes are the following:
|
|
|
|
Note that there is no guarantee that every flag and associated mnemonic will
|
|
be present in all further kernel releases. Things get changed, the flags may
|
|
-be vanished or the reverse -- new added.
|
|
+be vanished or the reverse -- new added. Interpretation of their meaning
|
|
+might change in future as well. So each consumer of these flags has to
|
|
+follow each specific kernel version for the exact semantic.
|
|
|
|
This file is only present if the CONFIG_MMU kernel configuration option is
|
|
enabled.
|
|
diff --git a/Makefile b/Makefile
|
|
index 7a2a9a175756c..c83abc1e689b4 100644
|
|
--- a/Makefile
|
|
+++ b/Makefile
|
|
@@ -1,7 +1,7 @@
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
VERSION = 4
|
|
PATCHLEVEL = 20
|
|
-SUBLEVEL = 0
|
|
+SUBLEVEL = 13
|
|
EXTRAVERSION =
|
|
NAME = Shy Crocodile
|
|
|
|
@@ -967,6 +967,7 @@ ifdef CONFIG_STACK_VALIDATION
|
|
endif
|
|
endif
|
|
|
|
+PHONY += prepare0
|
|
|
|
ifeq ($(KBUILD_EXTMOD),)
|
|
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
|
|
@@ -1034,6 +1035,8 @@ ifdef CONFIG_GDB_SCRIPTS
|
|
endif
|
|
+$(call if_changed,link-vmlinux)
|
|
|
|
+targets := vmlinux
|
|
+
|
|
# Build samples along the rest of the kernel. This needs headers_install.
|
|
ifdef CONFIG_SAMPLES
|
|
vmlinux-dirs += samples
|
|
@@ -1075,8 +1078,7 @@ scripts: scripts_basic scripts_dtc asm-generic gcc-plugins $(autoksyms_h)
|
|
# archprepare is used in arch Makefiles and when processed asm symlink,
|
|
# version.h and scripts_basic is processed / created.
|
|
|
|
-# Listed in dependency order
|
|
-PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
|
|
+PHONY += prepare archprepare prepare1 prepare2 prepare3
|
|
|
|
# prepare3 is used to check if we are building in a separate output directory,
|
|
# and if so do:
|
|
@@ -1545,9 +1547,6 @@ else # KBUILD_EXTMOD
|
|
|
|
# We are always building modules
|
|
KBUILD_MODULES := 1
|
|
-PHONY += crmodverdir
|
|
-crmodverdir:
|
|
- $(cmd_crmodverdir)
|
|
|
|
PHONY += $(objtree)/Module.symvers
|
|
$(objtree)/Module.symvers:
|
|
@@ -1559,7 +1558,7 @@ $(objtree)/Module.symvers:
|
|
|
|
module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD))
|
|
PHONY += $(module-dirs) modules
|
|
-$(module-dirs): crmodverdir $(objtree)/Module.symvers
|
|
+$(module-dirs): prepare $(objtree)/Module.symvers
|
|
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
|
|
|
|
modules: $(module-dirs)
|
|
@@ -1600,7 +1599,8 @@ help:
|
|
|
|
# Dummies...
|
|
PHONY += prepare scripts
|
|
-prepare: ;
|
|
+prepare:
|
|
+ $(cmd_crmodverdir)
|
|
scripts: ;
|
|
endif # KBUILD_EXTMOD
|
|
|
|
@@ -1724,17 +1724,14 @@ endif
|
|
|
|
# Modules
|
|
/: prepare scripts FORCE
|
|
- $(cmd_crmodverdir)
|
|
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
|
|
$(build)=$(build-dir)
|
|
# Make sure the latest headers are built for Documentation
|
|
Documentation/ samples/: headers_install
|
|
%/: prepare scripts FORCE
|
|
- $(cmd_crmodverdir)
|
|
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
|
|
$(build)=$(build-dir)
|
|
%.ko: prepare scripts FORCE
|
|
- $(cmd_crmodverdir)
|
|
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
|
|
$(build)=$(build-dir) $(@:.ko=.o)
|
|
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
|
|
@@ -1758,13 +1755,12 @@ quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
|
|
cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \
|
|
$(if $(KBUILD_MODULES),; rm -f $(MODVERDIR)/*)
|
|
|
|
-# read all saved command lines
|
|
-cmd_files := $(wildcard .*.cmd)
|
|
+# read saved command lines for existing targets
|
|
+existing-targets := $(wildcard $(sort $(targets)))
|
|
|
|
-ifneq ($(cmd_files),)
|
|
- $(cmd_files): ; # Do not try to update included dependency files
|
|
- include $(cmd_files)
|
|
-endif
|
|
+cmd_files := $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
|
|
+$(cmd_files): ; # Do not try to update included dependency files
|
|
+-include $(cmd_files)
|
|
|
|
endif # ifeq ($(config-targets),1)
|
|
endif # ifeq ($(mixed-targets),1)
|
|
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
|
|
index 4d17cacd14622..432402c8e47f5 100644
|
|
--- a/arch/alpha/include/asm/irq.h
|
|
+++ b/arch/alpha/include/asm/irq.h
|
|
@@ -56,15 +56,15 @@
|
|
|
|
#elif defined(CONFIG_ALPHA_DP264) || \
|
|
defined(CONFIG_ALPHA_LYNX) || \
|
|
- defined(CONFIG_ALPHA_SHARK) || \
|
|
- defined(CONFIG_ALPHA_EIGER)
|
|
+ defined(CONFIG_ALPHA_SHARK)
|
|
# define NR_IRQS 64
|
|
|
|
#elif defined(CONFIG_ALPHA_TITAN)
|
|
#define NR_IRQS 80
|
|
|
|
#elif defined(CONFIG_ALPHA_RAWHIDE) || \
|
|
- defined(CONFIG_ALPHA_TAKARA)
|
|
+ defined(CONFIG_ALPHA_TAKARA) || \
|
|
+ defined(CONFIG_ALPHA_EIGER)
|
|
# define NR_IRQS 128
|
|
|
|
#elif defined(CONFIG_ALPHA_WILDFIRE)
|
|
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
|
|
index d73dc473fbb94..188fc9256baf1 100644
|
|
--- a/arch/alpha/mm/fault.c
|
|
+++ b/arch/alpha/mm/fault.c
|
|
@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
|
|
/* Macro for exception fixup code to access integer registers. */
|
|
#define dpf_reg(r) \
|
|
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
|
|
- (r) <= 18 ? (r)+8 : (r)-10])
|
|
+ (r) <= 18 ? (r)+10 : (r)-10])
|
|
|
|
asmlinkage void
|
|
do_page_fault(unsigned long address, unsigned long mmcsr,
|
|
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
|
|
index 6dd7835573308..dadb494d83fd4 100644
|
|
--- a/arch/arc/Kconfig
|
|
+++ b/arch/arc/Kconfig
|
|
@@ -26,6 +26,7 @@ config ARC
|
|
select GENERIC_IRQ_SHOW
|
|
select GENERIC_PCI_IOMAP
|
|
select GENERIC_PENDING_IRQ if SMP
|
|
+ select GENERIC_SCHED_CLOCK
|
|
select GENERIC_SMP_IDLE_THREAD
|
|
select HAVE_ARCH_KGDB
|
|
select HAVE_ARCH_TRACEHOOK
|
|
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
|
|
index f393b663413e4..2ad77fb43639c 100644
|
|
--- a/arch/arc/include/asm/cache.h
|
|
+++ b/arch/arc/include/asm/cache.h
|
|
@@ -52,6 +52,17 @@
|
|
#define cache_line_size() SMP_CACHE_BYTES
|
|
#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
|
|
|
|
+/*
|
|
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
|
|
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit
|
|
+ * alignment for any atomic64_t embedded in buffer.
|
|
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
|
|
+ * value of 4 (and not 8) in ARC ABI.
|
|
+ */
|
|
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
|
|
+#define ARCH_SLAB_MINALIGN 8
|
|
+#endif
|
|
+
|
|
extern void arc_cache_init(void);
|
|
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
|
|
extern void read_decode_cache_bcr(void);
|
|
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
|
|
index 9185541035cc3..6958545390f0f 100644
|
|
--- a/arch/arc/include/asm/perf_event.h
|
|
+++ b/arch/arc/include/asm/perf_event.h
|
|
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
|
|
|
|
/* counts condition */
|
|
[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
|
|
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
|
|
+ /* All jump instructions that are taken */
|
|
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
|
|
[PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
|
|
#ifdef CONFIG_ISA_ARCV2
|
|
[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
|
|
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
|
|
index 8b90d25a15cca..1f945d0f40daa 100644
|
|
--- a/arch/arc/kernel/head.S
|
|
+++ b/arch/arc/kernel/head.S
|
|
@@ -17,6 +17,7 @@
|
|
#include <asm/entry.h>
|
|
#include <asm/arcregs.h>
|
|
#include <asm/cache.h>
|
|
+#include <asm/irqflags.h>
|
|
|
|
.macro CPU_EARLY_SETUP
|
|
|
|
@@ -47,6 +48,15 @@
|
|
sr r5, [ARC_REG_DC_CTRL]
|
|
|
|
1:
|
|
+
|
|
+#ifdef CONFIG_ISA_ARCV2
|
|
+ ; Unaligned access is disabled at reset, so re-enable early as
|
|
+ ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
|
|
+ ; by default
|
|
+ lr r5, [status32]
|
|
+ bset r5, r5, STATUS_AD_BIT
|
|
+ kflag r5
|
|
+#endif
|
|
.endm
|
|
|
|
.section .init.text, "ax",@progbits
|
|
@@ -93,9 +103,9 @@ ENTRY(stext)
|
|
#ifdef CONFIG_ARC_UBOOT_SUPPORT
|
|
; Uboot - kernel ABI
|
|
; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
|
|
- ; r1 = magic number (board identity, unused as of now
|
|
+ ; r1 = magic number (always zero as of now)
|
|
; r2 = pointer to uboot provided cmdline or external DTB in mem
|
|
- ; These are handled later in setup_arch()
|
|
+ ; These are handled later in handle_uboot_args()
|
|
st r0, [@uboot_tag]
|
|
st r2, [@uboot_arg]
|
|
#endif
|
|
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
|
|
index eea8c5ce63350..80dd1a716ca72 100644
|
|
--- a/arch/arc/kernel/setup.c
|
|
+++ b/arch/arc/kernel/setup.c
|
|
@@ -452,43 +452,80 @@ void setup_processor(void)
|
|
arc_chk_core_config();
|
|
}
|
|
|
|
-static inline int is_kernel(unsigned long addr)
|
|
+static inline bool uboot_arg_invalid(unsigned long addr)
|
|
{
|
|
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
|
|
- return 1;
|
|
- return 0;
|
|
+ /*
|
|
+ * Check that it is a untranslated address (although MMU is not enabled
|
|
+ * yet, it being a high address ensures this is not by fluke)
|
|
+ */
|
|
+ if (addr < PAGE_OFFSET)
|
|
+ return true;
|
|
+
|
|
+ /* Check that address doesn't clobber resident kernel image */
|
|
+ return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
|
|
}
|
|
|
|
-void __init setup_arch(char **cmdline_p)
|
|
+#define IGNORE_ARGS "Ignore U-boot args: "
|
|
+
|
|
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
|
|
+#define UBOOT_TAG_NONE 0
|
|
+#define UBOOT_TAG_CMDLINE 1
|
|
+#define UBOOT_TAG_DTB 2
|
|
+
|
|
+void __init handle_uboot_args(void)
|
|
{
|
|
+ bool use_embedded_dtb = true;
|
|
+ bool append_cmdline = false;
|
|
+
|
|
#ifdef CONFIG_ARC_UBOOT_SUPPORT
|
|
- /* make sure that uboot passed pointer to cmdline/dtb is valid */
|
|
- if (uboot_tag && is_kernel((unsigned long)uboot_arg))
|
|
- panic("Invalid uboot arg\n");
|
|
+ /* check that we know this tag */
|
|
+ if (uboot_tag != UBOOT_TAG_NONE &&
|
|
+ uboot_tag != UBOOT_TAG_CMDLINE &&
|
|
+ uboot_tag != UBOOT_TAG_DTB) {
|
|
+ pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
|
|
+ goto ignore_uboot_args;
|
|
+ }
|
|
+
|
|
+ if (uboot_tag != UBOOT_TAG_NONE &&
|
|
+ uboot_arg_invalid((unsigned long)uboot_arg)) {
|
|
+ pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
|
|
+ goto ignore_uboot_args;
|
|
+ }
|
|
+
|
|
+ /* see if U-boot passed an external Device Tree blob */
|
|
+ if (uboot_tag == UBOOT_TAG_DTB) {
|
|
+ machine_desc = setup_machine_fdt((void *)uboot_arg);
|
|
|
|
- /* See if u-boot passed an external Device Tree blob */
|
|
- machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
|
|
- if (!machine_desc)
|
|
+ /* external Device Tree blob is invalid - use embedded one */
|
|
+ use_embedded_dtb = !machine_desc;
|
|
+ }
|
|
+
|
|
+ if (uboot_tag == UBOOT_TAG_CMDLINE)
|
|
+ append_cmdline = true;
|
|
+
|
|
+ignore_uboot_args:
|
|
#endif
|
|
- {
|
|
- /* No, so try the embedded one */
|
|
+
|
|
+ if (use_embedded_dtb) {
|
|
machine_desc = setup_machine_fdt(__dtb_start);
|
|
if (!machine_desc)
|
|
panic("Embedded DT invalid\n");
|
|
+ }
|
|
|
|
- /*
|
|
- * If we are here, it is established that @uboot_arg didn't
|
|
- * point to DT blob. Instead if u-boot says it is cmdline,
|
|
- * append to embedded DT cmdline.
|
|
- * setup_machine_fdt() would have populated @boot_command_line
|
|
- */
|
|
- if (uboot_tag == 1) {
|
|
- /* Ensure a whitespace between the 2 cmdlines */
|
|
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
|
|
- strlcat(boot_command_line, uboot_arg,
|
|
- COMMAND_LINE_SIZE);
|
|
- }
|
|
+ /*
|
|
+ * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
|
|
+ * append processing can only happen after.
|
|
+ */
|
|
+ if (append_cmdline) {
|
|
+ /* Ensure a whitespace between the 2 cmdlines */
|
|
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
|
|
+ strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
|
|
}
|
|
+}
|
|
+
|
|
+void __init setup_arch(char **cmdline_p)
|
|
+{
|
|
+ handle_uboot_args();
|
|
|
|
/* Save unparsed command line copy for /proc/cmdline */
|
|
*cmdline_p = boot_command_line;
|
|
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
|
|
index 62ad4bcb841aa..f230bb7092fdb 100644
|
|
--- a/arch/arc/lib/memset-archs.S
|
|
+++ b/arch/arc/lib/memset-archs.S
|
|
@@ -7,11 +7,39 @@
|
|
*/
|
|
|
|
#include <linux/linkage.h>
|
|
+#include <asm/cache.h>
|
|
|
|
-#undef PREALLOC_NOT_AVAIL
|
|
+/*
|
|
+ * The memset implementation below is optimized to use prefetchw and prealloc
|
|
+ * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
|
|
+ * If you want to implement optimized memset for other possible L1 data cache
|
|
+ * line lengths (32B and 128B) you should rewrite code carefully checking
|
|
+ * we don't call any prefetchw/prealloc instruction for L1 cache lines which
|
|
+ * don't belongs to memset area.
|
|
+ */
|
|
+
|
|
+#if L1_CACHE_SHIFT == 6
|
|
+
|
|
+.macro PREALLOC_INSTR reg, off
|
|
+ prealloc [\reg, \off]
|
|
+.endm
|
|
+
|
|
+.macro PREFETCHW_INSTR reg, off
|
|
+ prefetchw [\reg, \off]
|
|
+.endm
|
|
+
|
|
+#else
|
|
+
|
|
+.macro PREALLOC_INSTR
|
|
+.endm
|
|
+
|
|
+.macro PREFETCHW_INSTR
|
|
+.endm
|
|
+
|
|
+#endif
|
|
|
|
ENTRY_CFI(memset)
|
|
- prefetchw [r0] ; Prefetch the write location
|
|
+ PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
|
|
mov.f 0, r2
|
|
;;; if size is zero
|
|
jz.d [blink]
|
|
@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
|
|
|
|
lpnz @.Lset64bytes
|
|
;; LOOP START
|
|
-#ifdef PREALLOC_NOT_AVAIL
|
|
- prefetchw [r3, 64] ;Prefetch the next write location
|
|
-#else
|
|
- prealloc [r3, 64]
|
|
-#endif
|
|
+ PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
|
|
+
|
|
#ifdef CONFIG_ARC_HAS_LL64
|
|
std.ab r4, [r3, 8]
|
|
std.ab r4, [r3, 8]
|
|
@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
|
|
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
|
|
lpnz .Lset32bytes
|
|
;; LOOP START
|
|
- prefetchw [r3, 32] ;Prefetch the next write location
|
|
#ifdef CONFIG_ARC_HAS_LL64
|
|
std.ab r4, [r3, 8]
|
|
std.ab r4, [r3, 8]
|
|
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
|
|
index f8fe5668b30fd..a56e6a8ed259d 100644
|
|
--- a/arch/arc/mm/init.c
|
|
+++ b/arch/arc/mm/init.c
|
|
@@ -137,7 +137,8 @@ void __init setup_arch_memory(void)
|
|
*/
|
|
|
|
memblock_add_node(low_mem_start, low_mem_sz, 0);
|
|
- memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
|
|
+ memblock_reserve(CONFIG_LINUX_LINK_BASE,
|
|
+ __pa(_end) - CONFIG_LINUX_LINK_BASE);
|
|
|
|
#ifdef CONFIG_BLK_DEV_INITRD
|
|
if (initrd_start)
|
|
diff --git a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
|
|
index df1227613d48e..c2ece0b91885e 100644
|
|
--- a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
|
|
+++ b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
|
|
@@ -13,7 +13,7 @@
|
|
bootargs = "console=ttyS4,115200 earlyprintk";
|
|
};
|
|
|
|
- memory {
|
|
+ memory@80000000 {
|
|
reg = <0x80000000 0x40000000>;
|
|
};
|
|
|
|
diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
|
|
index 7a291de02543d..22dade6393d06 100644
|
|
--- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
|
|
+++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
|
|
@@ -13,7 +13,7 @@
|
|
bootargs = "earlyprintk";
|
|
};
|
|
|
|
- memory {
|
|
+ memory@80000000 {
|
|
reg = <0x80000000 0x20000000>;
|
|
};
|
|
|
|
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
|
|
index d598b63913625..024e52a6cd0f8 100644
|
|
--- a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
|
|
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
|
|
@@ -14,7 +14,7 @@
|
|
bootargs = "console=ttyS4,115200 earlyprintk";
|
|
};
|
|
|
|
- memory {
|
|
+ memory@80000000 {
|
|
reg = <0x80000000 0x40000000>;
|
|
};
|
|
|
|
@@ -322,4 +322,3 @@
|
|
&adc {
|
|
status = "okay";
|
|
};
|
|
-
|
|
diff --git a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
|
|
index 43ed13963d354..33d704541de62 100644
|
|
--- a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
|
|
+++ b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
|
|
@@ -17,7 +17,7 @@
|
|
bootargs = "console=ttyS4,115200 earlyprintk";
|
|
};
|
|
|
|
- memory {
|
|
+ memory@80000000 {
|
|
reg = <0x80000000 0x20000000>;
|
|
};
|
|
|
|
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
|
|
index a3c9b346721d4..f04bc3e153322 100644
|
|
--- a/arch/arm/boot/dts/da850-evm.dts
|
|
+++ b/arch/arm/boot/dts/da850-evm.dts
|
|
@@ -94,6 +94,28 @@
|
|
regulator-boot-on;
|
|
};
|
|
|
|
+ baseboard_3v3: fixedregulator-3v3 {
|
|
+ /* TPS73701DCQ */
|
|
+ compatible = "regulator-fixed";
|
|
+ regulator-name = "baseboard_3v3";
|
|
+ regulator-min-microvolt = <3300000>;
|
|
+ regulator-max-microvolt = <3300000>;
|
|
+ vin-supply = <&vbat>;
|
|
+ regulator-always-on;
|
|
+ regulator-boot-on;
|
|
+ };
|
|
+
|
|
+ baseboard_1v8: fixedregulator-1v8 {
|
|
+ /* TPS73701DCQ */
|
|
+ compatible = "regulator-fixed";
|
|
+ regulator-name = "baseboard_1v8";
|
|
+ regulator-min-microvolt = <1800000>;
|
|
+ regulator-max-microvolt = <1800000>;
|
|
+ vin-supply = <&vbat>;
|
|
+ regulator-always-on;
|
|
+ regulator-boot-on;
|
|
+ };
|
|
+
|
|
backlight_lcd: backlight-regulator {
|
|
compatible = "regulator-fixed";
|
|
regulator-name = "lcd_backlight_pwr";
|
|
@@ -105,7 +127,7 @@
|
|
|
|
sound {
|
|
compatible = "simple-audio-card";
|
|
- simple-audio-card,name = "DA850/OMAP-L138 EVM";
|
|
+ simple-audio-card,name = "DA850-OMAPL138 EVM";
|
|
simple-audio-card,widgets =
|
|
"Line", "Line In",
|
|
"Line", "Line Out";
|
|
@@ -210,10 +232,9 @@
|
|
|
|
/* Regulators */
|
|
IOVDD-supply = <&vdcdc2_reg>;
|
|
- /* Derived from VBAT: Baseboard 3.3V / 1.8V */
|
|
- AVDD-supply = <&vbat>;
|
|
- DRVDD-supply = <&vbat>;
|
|
- DVDD-supply = <&vbat>;
|
|
+ AVDD-supply = <&baseboard_3v3>;
|
|
+ DRVDD-supply = <&baseboard_3v3>;
|
|
+ DVDD-supply = <&baseboard_1v8>;
|
|
};
|
|
tca6416: gpio@20 {
|
|
compatible = "ti,tca6416";
|
|
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
|
|
index 0177e3ed20feb..3a2fa6e035a38 100644
|
|
--- a/arch/arm/boot/dts/da850-lcdk.dts
|
|
+++ b/arch/arm/boot/dts/da850-lcdk.dts
|
|
@@ -39,9 +39,39 @@
|
|
};
|
|
};
|
|
|
|
+ vcc_5vd: fixedregulator-vcc_5vd {
|
|
+ compatible = "regulator-fixed";
|
|
+ regulator-name = "vcc_5vd";
|
|
+ regulator-min-microvolt = <5000000>;
|
|
+ regulator-max-microvolt = <5000000>;
|
|
+ regulator-boot-on;
|
|
+ };
|
|
+
|
|
+ vcc_3v3d: fixedregulator-vcc_3v3d {
|
|
+ /* TPS650250 - VDCDC1 */
|
|
+ compatible = "regulator-fixed";
|
|
+ regulator-name = "vcc_3v3d";
|
|
+ regulator-min-microvolt = <3300000>;
|
|
+ regulator-max-microvolt = <3300000>;
|
|
+ vin-supply = <&vcc_5vd>;
|
|
+ regulator-always-on;
|
|
+ regulator-boot-on;
|
|
+ };
|
|
+
|
|
+ vcc_1v8d: fixedregulator-vcc_1v8d {
|
|
+ /* TPS650250 - VDCDC2 */
|
|
+ compatible = "regulator-fixed";
|
|
+ regulator-name = "vcc_1v8d";
|
|
+ regulator-min-microvolt = <1800000>;
|
|
+ regulator-max-microvolt = <1800000>;
|
|
+ vin-supply = <&vcc_5vd>;
|
|
+ regulator-always-on;
|
|
+ regulator-boot-on;
|
|
+ };
|
|
+
|
|
sound {
|
|
compatible = "simple-audio-card";
|
|
- simple-audio-card,name = "DA850/OMAP-L138 LCDK";
|
|
+ simple-audio-card,name = "DA850-OMAPL138 LCDK";
|
|
simple-audio-card,widgets =
|
|
"Line", "Line In",
|
|
"Line", "Line Out";
|
|
@@ -221,6 +251,12 @@
|
|
compatible = "ti,tlv320aic3106";
|
|
reg = <0x18>;
|
|
status = "okay";
|
|
+
|
|
+ /* Regulators */
|
|
+ IOVDD-supply = <&vcc_3v3d>;
|
|
+ AVDD-supply = <&vcc_3v3d>;
|
|
+ DRVDD-supply = <&vcc_3v3d>;
|
|
+ DVDD-supply = <&vcc_1v8d>;
|
|
};
|
|
};
|
|
|
|
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
|
|
index 47aa53ba6b922..559659b399d04 100644
|
|
--- a/arch/arm/boot/dts/da850.dtsi
|
|
+++ b/arch/arm/boot/dts/da850.dtsi
|
|
@@ -476,7 +476,7 @@
|
|
clocksource: timer@20000 {
|
|
compatible = "ti,da830-timer";
|
|
reg = <0x20000 0x1000>;
|
|
- interrupts = <12>, <13>;
|
|
+ interrupts = <21>, <22>;
|
|
interrupt-names = "tint12", "tint34";
|
|
clocks = <&pll0_auxclk>;
|
|
};
|
|
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
|
|
index 03611d50c5a9e..e84544b220b9e 100644
|
|
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
|
|
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
|
|
@@ -26,8 +26,7 @@
|
|
"Speakers", "SPKL",
|
|
"Speakers", "SPKR";
|
|
|
|
- assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
|
|
- <&clock CLK_MOUT_EPLL>,
|
|
+ assigned-clocks = <&clock CLK_MOUT_EPLL>,
|
|
<&clock CLK_MOUT_MAU_EPLL>,
|
|
<&clock CLK_MOUT_USER_MAU_EPLL>,
|
|
<&clock_audss EXYNOS_MOUT_AUDSS>,
|
|
@@ -36,15 +35,13 @@
|
|
<&clock_audss EXYNOS_DOUT_AUD_BUS>,
|
|
<&clock_audss EXYNOS_DOUT_I2S>;
|
|
|
|
- assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
|
|
- <&clock CLK_FOUT_EPLL>,
|
|
+ assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
|
|
<&clock CLK_MOUT_EPLL>,
|
|
<&clock CLK_MOUT_MAU_EPLL>,
|
|
<&clock CLK_MAU_EPLL>,
|
|
<&clock_audss EXYNOS_MOUT_AUDSS>;
|
|
|
|
assigned-clock-rates = <0>,
|
|
- <0>,
|
|
<0>,
|
|
<0>,
|
|
<0>,
|
|
@@ -84,4 +81,6 @@
|
|
|
|
&i2s0 {
|
|
status = "okay";
|
|
+ assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
|
|
+ assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
|
|
};
|
|
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
|
|
index 4a30cc849b00a..122174ea9e0a3 100644
|
|
--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
|
|
+++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
|
|
@@ -33,8 +33,7 @@
|
|
compatible = "samsung,odroid-xu3-audio";
|
|
model = "Odroid-XU4";
|
|
|
|
- assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
|
|
- <&clock CLK_MOUT_EPLL>,
|
|
+ assigned-clocks = <&clock CLK_MOUT_EPLL>,
|
|
<&clock CLK_MOUT_MAU_EPLL>,
|
|
<&clock CLK_MOUT_USER_MAU_EPLL>,
|
|
<&clock_audss EXYNOS_MOUT_AUDSS>,
|
|
@@ -43,15 +42,13 @@
|
|
<&clock_audss EXYNOS_DOUT_AUD_BUS>,
|
|
<&clock_audss EXYNOS_DOUT_I2S>;
|
|
|
|
- assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
|
|
- <&clock CLK_FOUT_EPLL>,
|
|
+ assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
|
|
<&clock CLK_MOUT_EPLL>,
|
|
<&clock CLK_MOUT_MAU_EPLL>,
|
|
<&clock CLK_MAU_EPLL>,
|
|
<&clock_audss EXYNOS_MOUT_AUDSS>;
|
|
|
|
assigned-clock-rates = <0>,
|
|
- <0>,
|
|
<0>,
|
|
<0>,
|
|
<0>,
|
|
@@ -79,6 +76,8 @@
|
|
|
|
&i2s0 {
|
|
status = "okay";
|
|
+ assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
|
|
+ assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
|
|
};
|
|
|
|
&pwm {
|
|
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
|
|
index 6f258b50eb442..502a361d1fe90 100644
|
|
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
|
|
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
|
|
@@ -274,20 +274,16 @@
|
|
read-only;
|
|
};
|
|
/*
|
|
- * Between the boot loader and the rootfs is the kernel
|
|
- * in a custom Storlink format flashed from the boot
|
|
- * menu. The rootfs is in squashfs format.
|
|
+ * This firmware image contains the kernel catenated
|
|
+ * with the squashfs root filesystem. For some reason
|
|
+ * this is called "upgrade" on the vendor system.
|
|
*/
|
|
- partition@1800c0 {
|
|
- label = "rootfs";
|
|
- reg = <0x001800c0 0x01dbff40>;
|
|
- read-only;
|
|
- };
|
|
- partition@1f40000 {
|
|
+ partition@40000 {
|
|
label = "upgrade";
|
|
- reg = <0x01f40000 0x00040000>;
|
|
+ reg = <0x00040000 0x01f40000>;
|
|
read-only;
|
|
};
|
|
+ /* RGDB, Residental Gateway Database? */
|
|
partition@1f80000 {
|
|
label = "rgdb";
|
|
reg = <0x01f80000 0x00040000>;
|
|
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
|
|
index 69d753cac89ae..44985f61c0baf 100644
|
|
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
|
|
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
|
|
@@ -477,6 +477,15 @@
|
|
};
|
|
|
|
&gpio1 {
|
|
+ gpio-line-names = "", "", "", "",
|
|
+ "", "", "", "",
|
|
+ "", "hp-amp-shutdown-b", "", "",
|
|
+ "", "", "", "",
|
|
+ "", "", "", "",
|
|
+ "", "", "", "",
|
|
+ "", "", "", "",
|
|
+ "", "", "", "";
|
|
+
|
|
unused-sd3-wp-gpio {
|
|
/*
|
|
* See pinctrl_esdhc1 below for more details on this
|
|
@@ -495,9 +504,6 @@
|
|
hpa1: amp@60 {
|
|
compatible = "ti,tpa6130a2";
|
|
reg = <0x60>;
|
|
- pinctrl-names = "default";
|
|
- pinctrl-0 = <&pinctrl_ampgpio>;
|
|
- power-gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
|
|
Vdd-supply = <®_3p3v>;
|
|
};
|
|
|
|
@@ -671,7 +677,10 @@
|
|
};
|
|
|
|
&iomuxc {
|
|
- pinctrl_ampgpio: ampgpiogrp {
|
|
+ pinctrl-names = "default";
|
|
+ pinctrl-0 = <&pinctrl_hog>;
|
|
+
|
|
+ pinctrl_hog: hoggrp {
|
|
fsl,pins = <
|
|
MX51_PAD_GPIO1_9__GPIO1_9 0x5e
|
|
>;
|
|
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
|
|
index cbaf06f2f78e2..eb917462b219b 100644
|
|
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
|
|
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
|
|
@@ -36,8 +36,8 @@
|
|
compatible = "gpio-fan";
|
|
pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
|
|
pinctrl-names = "default";
|
|
- gpios = <&gpio1 14 GPIO_ACTIVE_LOW
|
|
- &gpio1 13 GPIO_ACTIVE_LOW>;
|
|
+ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
|
|
+ &gpio1 13 GPIO_ACTIVE_HIGH>;
|
|
gpio-fan,speed-map = <0 0
|
|
3000 1
|
|
6000 2>;
|
|
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
|
|
index 766bbb8495b60..47e5b63339d18 100644
|
|
--- a/arch/arm/boot/dts/mmp2.dtsi
|
|
+++ b/arch/arm/boot/dts/mmp2.dtsi
|
|
@@ -220,12 +220,15 @@
|
|
status = "disabled";
|
|
};
|
|
|
|
- twsi2: i2c@d4025000 {
|
|
+ twsi2: i2c@d4031000 {
|
|
compatible = "mrvl,mmp-twsi";
|
|
- reg = <0xd4025000 0x1000>;
|
|
- interrupts = <58>;
|
|
+ reg = <0xd4031000 0x1000>;
|
|
+ interrupt-parent = <&intcmux17>;
|
|
+ interrupts = <0>;
|
|
clocks = <&soc_clocks MMP2_CLK_TWSI1>;
|
|
resets = <&soc_clocks MMP2_CLK_TWSI1>;
|
|
+ #address-cells = <1>;
|
|
+ #size-cells = <0>;
|
|
status = "disabled";
|
|
};
|
|
|
|
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
|
|
index 490726b522162..9dc7ec7655cb8 100644
|
|
--- a/arch/arm/boot/dts/omap4-sdp.dts
|
|
+++ b/arch/arm/boot/dts/omap4-sdp.dts
|
|
@@ -33,6 +33,7 @@
|
|
gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>; /* gpio line 48 */
|
|
enable-active-high;
|
|
regulator-boot-on;
|
|
+ startup-delay-us = <25000>;
|
|
};
|
|
|
|
vbat: fixedregulator-vbat {
|
|
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
|
|
index bf7ca00f4c214..c2dc4199b4ec2 100644
|
|
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
|
|
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
|
|
@@ -317,7 +317,8 @@
|
|
|
|
palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
|
|
pinctrl-single,pins = <
|
|
- OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
|
|
+ /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
|
|
+ OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
|
|
>;
|
|
};
|
|
|
|
@@ -385,7 +386,8 @@
|
|
|
|
palmas: palmas@48 {
|
|
compatible = "ti,palmas";
|
|
- interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
|
|
+ /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
|
|
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
|
|
reg = <0x48>;
|
|
interrupt-controller;
|
|
#interrupt-cells = <2>;
|
|
@@ -651,7 +653,8 @@
|
|
pinctrl-names = "default";
|
|
pinctrl-0 = <&twl6040_pins>;
|
|
|
|
- interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
|
|
+ /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
|
|
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
|
|
|
|
/* audpwron gpio defined in the board specific dts */
|
|
|
|
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
|
|
index 5e21fb430a65d..e78d3718f145d 100644
|
|
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
|
|
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
|
|
@@ -181,6 +181,13 @@
|
|
OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */
|
|
>;
|
|
};
|
|
+
|
|
+ palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
|
|
+ pinctrl-single,pins = <
|
|
+ /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
|
|
+ OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
|
|
+ >;
|
|
+ };
|
|
};
|
|
|
|
&omap5_pmx_core {
|
|
@@ -414,8 +421,11 @@
|
|
|
|
palmas: palmas@48 {
|
|
compatible = "ti,palmas";
|
|
- interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
|
|
reg = <0x48>;
|
|
+ pinctrl-0 = <&palmas_sys_nirq_pins>;
|
|
+ pinctrl-names = "default";
|
|
+ /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
|
|
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
|
|
interrupt-controller;
|
|
#interrupt-cells = <2>;
|
|
ti,system-power-controller;
|
|
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
|
|
index 12a6172263c0b..3bf82232b1bed 100644
|
|
--- a/arch/arm/kernel/smp.c
|
|
+++ b/arch/arm/kernel/smp.c
|
|
@@ -724,6 +724,21 @@ void smp_send_stop(void)
|
|
pr_warn("SMP: failed to stop secondary CPUs\n");
|
|
}
|
|
|
|
+/* In case panic() and panic() called at the same time on CPU1 and CPU2,
|
|
+ * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop()
|
|
+ * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online,
|
|
+ * kdump fails. So split out the panic_smp_self_stop() and add
|
|
+ * set_cpu_online(smp_processor_id(), false).
|
|
+ */
|
|
+void panic_smp_self_stop(void)
|
|
+{
|
|
+ pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
|
|
+ smp_processor_id());
|
|
+ set_cpu_online(smp_processor_id(), false);
|
|
+ while (1)
|
|
+ cpu_relax();
|
|
+}
|
|
+
|
|
/*
|
|
* not supported here
|
|
*/
|
|
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
|
|
index 318394ed5c7a9..5e11ad3164e08 100644
|
|
--- a/arch/arm/mach-cns3xxx/pcie.c
|
|
+++ b/arch/arm/mach-cns3xxx/pcie.c
|
|
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
|
|
} else /* remote PCI bus */
|
|
base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
|
|
|
|
- return base + (where & 0xffc) + (devfn << 12);
|
|
+ return base + where + (devfn << 12);
|
|
}
|
|
|
|
static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
|
|
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
|
|
index 7d8ab36ff83d9..b3a3f5a59b5c7 100644
|
|
--- a/arch/arm/mach-davinci/board-da830-evm.c
|
|
+++ b/arch/arm/mach-davinci/board-da830-evm.c
|
|
@@ -207,9 +207,9 @@ static struct gpiod_lookup_table mmc_gpios_table = {
|
|
.dev_id = "da830-mmc.0",
|
|
.table = {
|
|
/* gpio chip 1 contains gpio range 32-63 */
|
|
- GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd",
|
|
+ GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_CD_PIN, "cd",
|
|
GPIO_ACTIVE_LOW),
|
|
- GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp",
|
|
+ GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_WP_PIN, "wp",
|
|
GPIO_ACTIVE_LOW),
|
|
},
|
|
};
|
|
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
|
|
index e1a949b47306d..bf2549e1894bc 100644
|
|
--- a/arch/arm/mach-davinci/board-da850-evm.c
|
|
+++ b/arch/arm/mach-davinci/board-da850-evm.c
|
|
@@ -780,9 +780,9 @@ static struct gpiod_lookup_table mmc_gpios_table = {
|
|
.dev_id = "da830-mmc.0",
|
|
.table = {
|
|
/* gpio chip 2 contains gpio range 64-95 */
|
|
- GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
|
|
+ GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_CD_PIN, "cd",
|
|
GPIO_ACTIVE_LOW),
|
|
- GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
|
|
+ GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_WP_PIN, "wp",
|
|
GPIO_ACTIVE_HIGH),
|
|
},
|
|
};
|
|
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
|
|
index f53a461a606f6..f7fa960c23e38 100644
|
|
--- a/arch/arm/mach-davinci/board-dm355-evm.c
|
|
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
|
|
@@ -117,9 +117,9 @@ static struct platform_device davinci_nand_device = {
|
|
static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
|
|
.dev_id = "i2c_davinci.1",
|
|
.table = {
|
|
- GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SDA_PIN, "sda",
|
|
+ GPIO_LOOKUP("davinci_gpio", DM355_I2C_SDA_PIN, "sda",
|
|
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
|
|
- GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SCL_PIN, "scl",
|
|
+ GPIO_LOOKUP("davinci_gpio", DM355_I2C_SCL_PIN, "scl",
|
|
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
|
|
},
|
|
};
|
|
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
|
|
index e4a8f9225d166..f752d828e42fd 100644
|
|
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
|
|
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
|
|
@@ -638,9 +638,9 @@ static struct i2c_board_info __initdata i2c_info[] = {
|
|
static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
|
|
.dev_id = "i2c_davinci.1",
|
|
.table = {
|
|
- GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SDA_PIN, "sda",
|
|
+ GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SDA_PIN, "sda",
|
|
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
|
|
- GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SCL_PIN, "scl",
|
|
+ GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SCL_PIN, "scl",
|
|
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
|
|
},
|
|
};
|
|
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
|
|
index 8e8d51f4a2762..94c4f126ef866 100644
|
|
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
|
|
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
|
|
@@ -134,9 +134,9 @@ static const short hawk_mmcsd0_pins[] = {
|
|
static struct gpiod_lookup_table mmc_gpios_table = {
|
|
.dev_id = "da830-mmc.0",
|
|
.table = {
|
|
- GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd",
|
|
+ GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_CD_PIN, "cd",
|
|
GPIO_ACTIVE_LOW),
|
|
- GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp",
|
|
+ GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_WP_PIN, "wp",
|
|
GPIO_ACTIVE_LOW),
|
|
},
|
|
};
|
|
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
|
|
index a109f64824136..0f916c245a2e9 100644
|
|
--- a/arch/arm/mach-integrator/impd1.c
|
|
+++ b/arch/arm/mach-integrator/impd1.c
|
|
@@ -393,7 +393,11 @@ static int __ref impd1_probe(struct lm_device *dev)
|
|
sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
|
|
GFP_KERNEL);
|
|
chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
|
|
- mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
|
|
+ mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
|
|
+ "lm%x:00700", dev->id);
|
|
+ if (!lookup || !chipname || !mmciname)
|
|
+ return -ENOMEM;
|
|
+
|
|
lookup->dev_id = mmciname;
|
|
/*
|
|
* Offsets on GPIO block 1:
|
|
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
|
|
index 3b73813c6b043..23e8c93515d4e 100644
|
|
--- a/arch/arm/mach-iop32x/n2100.c
|
|
+++ b/arch/arm/mach-iop32x/n2100.c
|
|
@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
|
|
/*
|
|
* N2100 PCI.
|
|
*/
|
|
-static int __init
|
|
-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
|
|
+static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
|
|
{
|
|
int irq;
|
|
|
|
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
|
|
index 17886744dbe69..55bf73710a59d 100644
|
|
--- a/arch/arm/mach-omap1/board-ams-delta.c
|
|
+++ b/arch/arm/mach-omap1/board-ams-delta.c
|
|
@@ -247,8 +247,8 @@ static struct platform_device latch2_gpio_device = {
|
|
#define LATCH2_PIN_SCARD_CMDVCC 11
|
|
#define LATCH2_PIN_MODEM_NRESET 12
|
|
#define LATCH2_PIN_MODEM_CODEC 13
|
|
-#define LATCH2_PIN_HOOKFLASH1 14
|
|
-#define LATCH2_PIN_HOOKFLASH2 15
|
|
+#define LATCH2_PIN_AUDIO_MUTE 14
|
|
+#define LATCH2_PIN_HOOKFLASH 15
|
|
|
|
static struct regulator_consumer_supply modem_nreset_consumers[] = {
|
|
REGULATOR_SUPPLY("RESET#", "serial8250.1"),
|
|
@@ -588,6 +588,8 @@ static int gpiochip_match_by_label(struct gpio_chip *chip, void *data)
|
|
static struct gpiod_hog ams_delta_gpio_hogs[] = {
|
|
GPIO_HOG(LATCH2_LABEL, LATCH2_PIN_KEYBRD_DATAOUT, "keybrd_dataout",
|
|
GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW),
|
|
+ GPIO_HOG(LATCH2_LABEL, LATCH2_PIN_AUDIO_MUTE, "audio_mute",
|
|
+ GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW),
|
|
{},
|
|
};
|
|
|
|
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
|
|
index fc5fb776a7101..17558be4bf0a5 100644
|
|
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
|
|
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
|
|
@@ -50,6 +50,9 @@
|
|
#define OMAP4_NR_BANKS 4
|
|
#define OMAP4_NR_IRQS 128
|
|
|
|
+#define SYS_NIRQ1_EXT_SYS_IRQ_1 7
|
|
+#define SYS_NIRQ2_EXT_SYS_IRQ_2 119
|
|
+
|
|
static void __iomem *wakeupgen_base;
|
|
static void __iomem *sar_base;
|
|
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
|
|
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
|
|
irq_chip_unmask_parent(d);
|
|
}
|
|
|
|
+/*
|
|
+ * The sys_nirq pins bypass peripheral modules and are wired directly
|
|
+ * to MPUSS wakeupgen. They get automatically inverted for GIC.
|
|
+ */
|
|
+static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
|
|
+{
|
|
+ bool inverted = false;
|
|
+
|
|
+ switch (type) {
|
|
+ case IRQ_TYPE_LEVEL_LOW:
|
|
+ type &= ~IRQ_TYPE_LEVEL_MASK;
|
|
+ type |= IRQ_TYPE_LEVEL_HIGH;
|
|
+ inverted = true;
|
|
+ break;
|
|
+ case IRQ_TYPE_EDGE_FALLING:
|
|
+ type &= ~IRQ_TYPE_EDGE_BOTH;
|
|
+ type |= IRQ_TYPE_EDGE_RISING;
|
|
+ inverted = true;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
|
|
+ d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
|
|
+ pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
|
|
+ d->hwirq);
|
|
+
|
|
+ return irq_chip_set_type_parent(d, type);
|
|
+}
|
|
+
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
|
|
|
|
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
|
|
.irq_mask = wakeupgen_mask,
|
|
.irq_unmask = wakeupgen_unmask,
|
|
.irq_retrigger = irq_chip_retrigger_hierarchy,
|
|
- .irq_set_type = irq_chip_set_type_parent,
|
|
+ .irq_set_type = wakeupgen_irq_set_type,
|
|
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
|
|
#ifdef CONFIG_SMP
|
|
.irq_set_affinity = irq_chip_set_affinity_parent,
|
|
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
|
|
index 083dcd9942ce5..921c9aaee63f5 100644
|
|
--- a/arch/arm/mach-omap2/omap_hwmod.c
|
|
+++ b/arch/arm/mach-omap2/omap_hwmod.c
|
|
@@ -2413,7 +2413,7 @@ static int __init _init(struct omap_hwmod *oh, void *data)
|
|
* a stub; implementing this properly requires iclk autoidle usecounting in
|
|
* the clock code. No return value.
|
|
*/
|
|
-static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
|
|
+static void _setup_iclk_autoidle(struct omap_hwmod *oh)
|
|
{
|
|
struct omap_hwmod_ocp_if *os;
|
|
|
|
@@ -2444,7 +2444,7 @@ static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
|
|
* reset. Returns 0 upon success or a negative error code upon
|
|
* failure.
|
|
*/
|
|
-static int __init _setup_reset(struct omap_hwmod *oh)
|
|
+static int _setup_reset(struct omap_hwmod *oh)
|
|
{
|
|
int r;
|
|
|
|
@@ -2505,7 +2505,7 @@ static int __init _setup_reset(struct omap_hwmod *oh)
|
|
*
|
|
* No return value.
|
|
*/
|
|
-static void __init _setup_postsetup(struct omap_hwmod *oh)
|
|
+static void _setup_postsetup(struct omap_hwmod *oh)
|
|
{
|
|
u8 postsetup_state;
|
|
|
|
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
|
|
index c5c0ab8ac9f91..024c1fbcc55ae 100644
|
|
--- a/arch/arm/mach-pxa/cm-x300.c
|
|
+++ b/arch/arm/mach-pxa/cm-x300.c
|
|
@@ -558,7 +558,7 @@ static struct pxa3xx_u2d_platform_data cm_x300_u2d_platform_data = {
|
|
.exit = cm_x300_u2d_exit,
|
|
};
|
|
|
|
-static void cm_x300_init_u2d(void)
|
|
+static void __init cm_x300_init_u2d(void)
|
|
{
|
|
pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data);
|
|
}
|
|
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
|
|
index 9e132b3e48c68..9960ea158829b 100644
|
|
--- a/arch/arm/mach-pxa/littleton.c
|
|
+++ b/arch/arm/mach-pxa/littleton.c
|
|
@@ -184,7 +184,7 @@ static struct pxafb_mach_info littleton_lcd_info = {
|
|
.lcd_conn = LCD_COLOR_TFT_16BPP,
|
|
};
|
|
|
|
-static void littleton_init_lcd(void)
|
|
+static void __init littleton_init_lcd(void)
|
|
{
|
|
pxa_set_fb_info(NULL, &littleton_lcd_info);
|
|
}
|
|
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
|
|
index d53ea12fc7666..54a32f0433a2e 100644
|
|
--- a/arch/arm/mach-pxa/zeus.c
|
|
+++ b/arch/arm/mach-pxa/zeus.c
|
|
@@ -576,7 +576,7 @@ static struct pxaohci_platform_data zeus_ohci_platform_data = {
|
|
.flags = ENABLE_PORT_ALL | POWER_SENSE_LOW,
|
|
};
|
|
|
|
-static void zeus_register_ohci(void)
|
|
+static void __init zeus_register_ohci(void)
|
|
{
|
|
/* Port 2 is shared between host and client interface. */
|
|
UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
|
|
diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
|
|
index 028e50c6383fa..a32c3b631484a 100644
|
|
--- a/arch/arm/mach-tango/pm.c
|
|
+++ b/arch/arm/mach-tango/pm.c
|
|
@@ -3,6 +3,7 @@
|
|
#include <linux/suspend.h>
|
|
#include <asm/suspend.h>
|
|
#include "smc.h"
|
|
+#include "pm.h"
|
|
|
|
static int tango_pm_powerdown(unsigned long arg)
|
|
{
|
|
@@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
|
|
.valid = suspend_valid_only_mem,
|
|
};
|
|
|
|
-static int __init tango_pm_init(void)
|
|
+void __init tango_pm_init(void)
|
|
{
|
|
suspend_set_ops(&tango_pm_ops);
|
|
- return 0;
|
|
}
|
|
-
|
|
-late_initcall(tango_pm_init);
|
|
diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
|
|
new file mode 100644
|
|
index 0000000000000..35ea705a0ee23
|
|
--- /dev/null
|
|
+++ b/arch/arm/mach-tango/pm.h
|
|
@@ -0,0 +1,7 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+
|
|
+#ifdef CONFIG_SUSPEND
|
|
+void __init tango_pm_init(void);
|
|
+#else
|
|
+#define tango_pm_init NULL
|
|
+#endif
|
|
diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
|
|
index 677dd7b5efd90..824f90737b044 100644
|
|
--- a/arch/arm/mach-tango/setup.c
|
|
+++ b/arch/arm/mach-tango/setup.c
|
|
@@ -2,6 +2,7 @@
|
|
#include <asm/mach/arch.h>
|
|
#include <asm/hardware/cache-l2x0.h>
|
|
#include "smc.h"
|
|
+#include "pm.h"
|
|
|
|
static void tango_l2c_write(unsigned long val, unsigned int reg)
|
|
{
|
|
@@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
|
|
.dt_compat = tango_dt_compat,
|
|
.l2c_aux_mask = ~0,
|
|
.l2c_write_sec = tango_l2c_write,
|
|
+ .init_late = tango_pm_init,
|
|
MACHINE_END
|
|
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
|
|
index 19516fbc2c55a..5461d589a1e25 100644
|
|
--- a/arch/arm/mm/proc-macros.S
|
|
+++ b/arch/arm/mm/proc-macros.S
|
|
@@ -278,7 +278,7 @@
|
|
* If we are building for big.Little with branch predictor hardening,
|
|
* we need the processor function tables to remain available after boot.
|
|
*/
|
|
-#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
|
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
|
.section ".rodata"
|
|
#endif
|
|
.type \name\()_processor_functions, #object
|
|
@@ -316,7 +316,7 @@ ENTRY(\name\()_processor_functions)
|
|
.endif
|
|
|
|
.size \name\()_processor_functions, . - \name\()_processor_functions
|
|
-#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
|
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
|
.previous
|
|
#endif
|
|
.endm
|
|
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
|
|
index 2c118a6ab3587..0dc23fc227ed2 100644
|
|
--- a/arch/arm/probes/kprobes/opt-arm.c
|
|
+++ b/arch/arm/probes/kprobes/opt-arm.c
|
|
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
|
|
}
|
|
|
|
/* Copy arch-dep-instance from template. */
|
|
- memcpy(code, (unsigned char *)optprobe_template_entry,
|
|
+ memcpy(code, (unsigned long *)&optprobe_template_entry,
|
|
TMPL_END_IDX * sizeof(kprobe_opcode_t));
|
|
|
|
/* Adjust buffer according to instruction. */
|
|
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
|
|
index 6cb9fc7e9382d..8978f60779c41 100644
|
|
--- a/arch/arm64/Makefile
|
|
+++ b/arch/arm64/Makefile
|
|
@@ -18,7 +18,7 @@ ifeq ($(CONFIG_RELOCATABLE), y)
|
|
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
|
|
# for relative relocs, since this leads to better Image compression
|
|
# with the relocation offsets always being zero.
|
|
-LDFLAGS_vmlinux += -pie -shared -Bsymbolic \
|
|
+LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro \
|
|
$(call ld-option, --no-apply-dynamic-relocs)
|
|
endif
|
|
|
|
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
|
|
index b0c64f75792c1..8974b5a1d3b1e 100644
|
|
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
|
|
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
|
|
@@ -188,6 +188,7 @@
|
|
reg = <0x3a3>;
|
|
interrupt-parent = <&r_intc>;
|
|
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
|
|
+ x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */
|
|
};
|
|
};
|
|
|
|
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
|
|
index df017dbd2e572..b1a42e99cb678 100644
|
|
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
|
|
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
|
|
@@ -143,7 +143,7 @@
|
|
compatible = "amlogic,meson-axg-dwmac", "snps,dwmac";
|
|
reg = <0x0 0xff3f0000 0x0 0x10000
|
|
0x0 0xff634540 0x0 0x8>;
|
|
- interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
|
|
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
|
|
interrupt-names = "macirq";
|
|
clocks = <&clkc CLKID_ETH>,
|
|
<&clkc CLKID_FCLK_DIV2>,
|
|
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
|
|
index f1e5cdbade5ed..58e6bcaac1d86 100644
|
|
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
|
|
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
|
|
@@ -462,7 +462,7 @@
|
|
compatible = "amlogic,meson-gx-dwmac", "amlogic,meson-gxbb-dwmac", "snps,dwmac";
|
|
reg = <0x0 0xc9410000 0x0 0x10000
|
|
0x0 0xc8834540 0x0 0x4>;
|
|
- interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
|
|
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
|
|
interrupt-names = "macirq";
|
|
status = "disabled";
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
|
|
index 54954b314a452..f8d1cedbe6007 100644
|
|
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
|
|
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
|
|
@@ -143,7 +143,6 @@
|
|
interrupt-parent = <&gpio_intc>;
|
|
/* MAC_INTR on GPIOZ_15 */
|
|
interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
|
|
- eee-broken-1000t;
|
|
};
|
|
};
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
|
|
index 70325b273bd2b..ec09bb5792b71 100644
|
|
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
|
|
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
|
|
@@ -142,7 +142,6 @@
|
|
eth_phy0: ethernet-phy@0 {
|
|
/* Realtek RTL8211F (0x001cc916) */
|
|
reg = <0>;
|
|
- eee-broken-1000t;
|
|
};
|
|
};
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
|
|
index 7d94c1fa592a0..7f799cb5668e2 100644
|
|
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
|
|
@@ -28,6 +28,23 @@
|
|
method = "smc";
|
|
};
|
|
|
|
+ reserved-memory {
|
|
+ #address-cells = <2>;
|
|
+ #size-cells = <2>;
|
|
+ ranges;
|
|
+
|
|
+ /*
|
|
+ * This area matches the mapping done with a
|
|
+ * mainline U-Boot, and should be updated by the
|
|
+ * bootloader.
|
|
+ */
|
|
+
|
|
+ psci-area@4000000 {
|
|
+ reg = <0x0 0x4000000 0x0 0x200000>;
|
|
+ no-map;
|
|
+ };
|
|
+ };
|
|
+
|
|
ap806 {
|
|
#address-cells = <2>;
|
|
#size-cells = <2>;
|
|
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
|
|
index dc20145dd393d..c6509a02480d8 100644
|
|
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
|
|
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
|
|
@@ -40,6 +40,7 @@
|
|
pinctrl-0 = <&usb30_host_drv>;
|
|
regulator-name = "vcc_host_5v";
|
|
regulator-always-on;
|
|
+ regulator-boot-on;
|
|
vin-supply = <&vcc_sys>;
|
|
};
|
|
|
|
@@ -51,6 +52,7 @@
|
|
pinctrl-0 = <&usb20_host_drv>;
|
|
regulator-name = "vcc_host1_5v";
|
|
regulator-always-on;
|
|
+ regulator-boot-on;
|
|
vin-supply = <&vcc_sys>;
|
|
};
|
|
|
|
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
|
|
index 1d35f5406b5e2..5bd4d69914bde 100644
|
|
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
|
|
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
|
|
@@ -293,12 +293,12 @@
|
|
};
|
|
};
|
|
|
|
- vcc2v8_dvp: LDO_REG2 {
|
|
- regulator-name = "vcc2v8_dvp";
|
|
+ vcc3v0_touch: LDO_REG2 {
|
|
+ regulator-name = "vcc3v0_touch";
|
|
regulator-always-on;
|
|
regulator-boot-on;
|
|
- regulator-min-microvolt = <2800000>;
|
|
- regulator-max-microvolt = <2800000>;
|
|
+ regulator-min-microvolt = <3000000>;
|
|
+ regulator-max-microvolt = <3000000>;
|
|
regulator-state-mem {
|
|
regulator-off-in-suspend;
|
|
};
|
|
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
|
|
index 6142402c2eb4e..08b216c200c98 100644
|
|
--- a/arch/arm64/include/asm/assembler.h
|
|
+++ b/arch/arm64/include/asm/assembler.h
|
|
@@ -377,27 +377,33 @@ alternative_endif
|
|
* size: size of the region
|
|
* Corrupts: kaddr, size, tmp1, tmp2
|
|
*/
|
|
+ .macro __dcache_op_workaround_clean_cache, op, kaddr
|
|
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
|
|
+ dc \op, \kaddr
|
|
+alternative_else
|
|
+ dc civac, \kaddr
|
|
+alternative_endif
|
|
+ .endm
|
|
+
|
|
.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
|
|
dcache_line_size \tmp1, \tmp2
|
|
add \size, \kaddr, \size
|
|
sub \tmp2, \tmp1, #1
|
|
bic \kaddr, \kaddr, \tmp2
|
|
9998:
|
|
- .if (\op == cvau || \op == cvac)
|
|
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
|
|
- dc \op, \kaddr
|
|
-alternative_else
|
|
- dc civac, \kaddr
|
|
-alternative_endif
|
|
- .elseif (\op == cvap)
|
|
-alternative_if ARM64_HAS_DCPOP
|
|
- sys 3, c7, c12, 1, \kaddr // dc cvap
|
|
-alternative_else
|
|
- dc cvac, \kaddr
|
|
-alternative_endif
|
|
+ .ifc \op, cvau
|
|
+ __dcache_op_workaround_clean_cache \op, \kaddr
|
|
+ .else
|
|
+ .ifc \op, cvac
|
|
+ __dcache_op_workaround_clean_cache \op, \kaddr
|
|
+ .else
|
|
+ .ifc \op, cvap
|
|
+ sys 3, c7, c12, 1, \kaddr // dc cvap
|
|
.else
|
|
dc \op, \kaddr
|
|
.endif
|
|
+ .endif
|
|
+ .endif
|
|
add \kaddr, \kaddr, \tmp1
|
|
cmp \kaddr, \size
|
|
b.lo 9998b
|
|
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
|
|
index 9f8b915af3a71..ee723835c1f4b 100644
|
|
--- a/arch/arm64/include/asm/io.h
|
|
+++ b/arch/arm64/include/asm/io.h
|
|
@@ -104,7 +104,23 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
|
|
}
|
|
|
|
/* IO barriers */
|
|
-#define __iormb() rmb()
|
|
+#define __iormb(v) \
|
|
+({ \
|
|
+ unsigned long tmp; \
|
|
+ \
|
|
+ rmb(); \
|
|
+ \
|
|
+ /* \
|
|
+ * Create a dummy control dependency from the IO read to any \
|
|
+ * later instructions. This ensures that a subsequent call to \
|
|
+ * udelay() will be ordered due to the ISB in get_cycles(). \
|
|
+ */ \
|
|
+ asm volatile("eor %0, %1, %1\n" \
|
|
+ "cbnz %0, ." \
|
|
+ : "=r" (tmp) : "r" ((unsigned long)(v)) \
|
|
+ : "memory"); \
|
|
+})
|
|
+
|
|
#define __iowmb() wmb()
|
|
|
|
#define mmiowb() do { } while (0)
|
|
@@ -129,10 +145,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
|
|
* following Normal memory access. Writes are ordered relative to any prior
|
|
* Normal memory access.
|
|
*/
|
|
-#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
|
|
-#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
|
|
-#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
|
|
-#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; })
|
|
+#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; })
|
|
+#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
|
|
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
|
|
+#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
|
|
|
|
#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
|
|
#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
|
|
@@ -183,9 +199,9 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
|
|
/*
|
|
* io{read,write}{16,32,64}be() macros
|
|
*/
|
|
-#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
|
|
-#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
|
|
-#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
|
|
+#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; })
|
|
+#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; })
|
|
+#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; })
|
|
|
|
#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
|
|
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
|
|
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
|
|
index 6f602af5263c2..bc2327d4a505b 100644
|
|
--- a/arch/arm64/include/asm/kvm_arm.h
|
|
+++ b/arch/arm64/include/asm/kvm_arm.h
|
|
@@ -24,6 +24,8 @@
|
|
|
|
/* Hyp Configuration Register (HCR) bits */
|
|
#define HCR_FWB (UL(1) << 46)
|
|
+#define HCR_API (UL(1) << 41)
|
|
+#define HCR_APK (UL(1) << 40)
|
|
#define HCR_TEA (UL(1) << 37)
|
|
#define HCR_TERR (UL(1) << 36)
|
|
#define HCR_TLOR (UL(1) << 35)
|
|
@@ -87,6 +89,7 @@
|
|
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
|
|
HCR_FMO | HCR_IMO)
|
|
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
|
|
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
|
|
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
|
|
|
|
/* TCR_EL2 Registers bits */
|
|
@@ -104,7 +107,7 @@
|
|
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
|
|
|
|
/* VTCR_EL2 Registers bits */
|
|
-#define VTCR_EL2_RES1 (1 << 31)
|
|
+#define VTCR_EL2_RES1 (1U << 31)
|
|
#define VTCR_EL2_HD (1 << 22)
|
|
#define VTCR_EL2_HA (1 << 21)
|
|
#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
|
|
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
|
|
index f0a5c9531e8bb..c67081301035f 100644
|
|
--- a/arch/arm64/include/asm/memory.h
|
|
+++ b/arch/arm64/include/asm/memory.h
|
|
@@ -67,12 +67,17 @@
|
|
/*
|
|
* KASAN requires 1/8th of the kernel virtual address space for the shadow
|
|
* region. KASAN can bloat the stack significantly, so double the (minimum)
|
|
- * stack size when KASAN is in use.
|
|
+ * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
|
|
+ * on.
|
|
*/
|
|
#ifdef CONFIG_KASAN
|
|
#define KASAN_SHADOW_SCALE_SHIFT 3
|
|
#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
|
|
+#ifdef CONFIG_KASAN_EXTRA
|
|
+#define KASAN_THREAD_SHIFT 2
|
|
+#else
|
|
#define KASAN_THREAD_SHIFT 1
|
|
+#endif /* CONFIG_KASAN_EXTRA */
|
|
#else
|
|
#define KASAN_SHADOW_SIZE (0)
|
|
#define KASAN_THREAD_SHIFT 0
|
|
@@ -298,6 +303,17 @@ static inline void *phys_to_virt(phys_addr_t x)
|
|
#define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \
|
|
_virt_addr_valid(kaddr))
|
|
|
|
+/*
|
|
+ * Given that the GIC architecture permits ITS implementations that can only be
|
|
+ * configured with a LPI table address once, GICv3 systems with many CPUs may
|
|
+ * end up reserving a lot of different regions after a kexec for their LPI
|
|
+ * tables (one per CPU), as we are forced to reuse the same memory after kexec
|
|
+ * (and thus reserve it persistently with EFI beforehand)
|
|
+ */
|
|
+#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
|
|
+# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + 2*(NR_CPUS + 1))
|
|
+#endif
|
|
+
|
|
#include <asm-generic/memory_model.h>
|
|
|
|
#endif
|
|
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
|
|
index b13ca091f8332..85d5c10262047 100644
|
|
--- a/arch/arm64/include/asm/unistd.h
|
|
+++ b/arch/arm64/include/asm/unistd.h
|
|
@@ -40,8 +40,9 @@
|
|
* The following SVCs are ARM private.
|
|
*/
|
|
#define __ARM_NR_COMPAT_BASE 0x0f0000
|
|
-#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
|
|
-#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
|
|
+#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2)
|
|
+#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
|
|
+#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
|
|
|
|
#define __NR_compat_syscalls 399
|
|
#endif
|
|
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
|
|
index a36227fdb0847..65ef8b0fdb0ea 100644
|
|
--- a/arch/arm64/include/uapi/asm/ptrace.h
|
|
+++ b/arch/arm64/include/uapi/asm/ptrace.h
|
|
@@ -131,7 +131,7 @@ struct user_sve_header {
|
|
|
|
/* Offset from the start of struct user_sve_header to the register data */
|
|
#define SVE_PT_REGS_OFFSET \
|
|
- ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
|
|
+ ((sizeof(struct user_sve_header) + (SVE_VQ_BYTES - 1)) \
|
|
/ SVE_VQ_BYTES * SVE_VQ_BYTES)
|
|
|
|
/*
|
|
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
|
|
index 6ad715d67df89..99622e5ad21b7 100644
|
|
--- a/arch/arm64/kernel/cpu_errata.c
|
|
+++ b/arch/arm64/kernel/cpu_errata.c
|
|
@@ -135,7 +135,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
|
|
const char *hyp_vecs_start,
|
|
const char *hyp_vecs_end)
|
|
{
|
|
- static DEFINE_SPINLOCK(bp_lock);
|
|
+ static DEFINE_RAW_SPINLOCK(bp_lock);
|
|
int cpu, slot = -1;
|
|
|
|
/*
|
|
@@ -147,7 +147,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
|
|
return;
|
|
}
|
|
|
|
- spin_lock(&bp_lock);
|
|
+ raw_spin_lock(&bp_lock);
|
|
for_each_possible_cpu(cpu) {
|
|
if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
|
|
slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
|
|
@@ -163,7 +163,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
|
|
|
|
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
|
|
__this_cpu_write(bp_hardening_data.fn, fn);
|
|
- spin_unlock(&bp_lock);
|
|
+ raw_spin_unlock(&bp_lock);
|
|
}
|
|
#else
|
|
#define __smccc_workaround_1_smc_start NULL
|
|
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
|
|
index 1175f5827ae17..295951f3172ea 100644
|
|
--- a/arch/arm64/kernel/entry-ftrace.S
|
|
+++ b/arch/arm64/kernel/entry-ftrace.S
|
|
@@ -79,7 +79,6 @@
|
|
.macro mcount_get_lr reg
|
|
ldr \reg, [x29]
|
|
ldr \reg, [\reg, #8]
|
|
- mcount_adjust_addr \reg, \reg
|
|
.endm
|
|
|
|
.macro mcount_get_lr_addr reg
|
|
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
|
|
index 4471f570a2952..b207a2ce4bc6e 100644
|
|
--- a/arch/arm64/kernel/head.S
|
|
+++ b/arch/arm64/kernel/head.S
|
|
@@ -496,10 +496,9 @@ ENTRY(el2_setup)
|
|
#endif
|
|
|
|
/* Hyp configuration. */
|
|
- mov x0, #HCR_RW // 64-bit EL1
|
|
+ mov_q x0, HCR_HOST_NVHE_FLAGS
|
|
cbz x2, set_hcr
|
|
- orr x0, x0, #HCR_TGE // Enable Host Extensions
|
|
- orr x0, x0, #HCR_E2H
|
|
+ mov_q x0, HCR_HOST_VHE_FLAGS
|
|
set_hcr:
|
|
msr hcr_el2, x0
|
|
isb
|
|
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
|
|
index 29cdc99688f33..9859e1178e6be 100644
|
|
--- a/arch/arm64/kernel/hibernate.c
|
|
+++ b/arch/arm64/kernel/hibernate.c
|
|
@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
|
|
dcache_clean_range(__idmap_text_start, __idmap_text_end);
|
|
|
|
/* Clean kvm setup code to PoC? */
|
|
- if (el2_reset_needed())
|
|
+ if (el2_reset_needed()) {
|
|
dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
|
|
+ dcache_clean_range(__hyp_text_start, __hyp_text_end);
|
|
+ }
|
|
|
|
/* make the crash dump kernel image protected again */
|
|
crash_post_resume();
|
|
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
|
|
index e1261fbaa374e..17f325ba831e8 100644
|
|
--- a/arch/arm64/kernel/hyp-stub.S
|
|
+++ b/arch/arm64/kernel/hyp-stub.S
|
|
@@ -28,6 +28,8 @@
|
|
#include <asm/virt.h>
|
|
|
|
.text
|
|
+ .pushsection .hyp.text, "ax"
|
|
+
|
|
.align 11
|
|
|
|
ENTRY(__hyp_stub_vectors)
|
|
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
|
|
index f0e6ab8abe9c9..b09b6f75f7591 100644
|
|
--- a/arch/arm64/kernel/kaslr.c
|
|
+++ b/arch/arm64/kernel/kaslr.c
|
|
@@ -14,6 +14,7 @@
|
|
#include <linux/sched.h>
|
|
#include <linux/types.h>
|
|
|
|
+#include <asm/cacheflush.h>
|
|
#include <asm/fixmap.h>
|
|
#include <asm/kernel-pgtable.h>
|
|
#include <asm/memory.h>
|
|
@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
|
|
return ret;
|
|
}
|
|
|
|
-static __init const u8 *get_cmdline(void *fdt)
|
|
+static __init const u8 *kaslr_get_cmdline(void *fdt)
|
|
{
|
|
static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
|
|
|
|
@@ -87,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
|
|
* we end up running with module randomization disabled.
|
|
*/
|
|
module_alloc_base = (u64)_etext - MODULES_VSIZE;
|
|
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
|
|
|
|
/*
|
|
* Try to map the FDT early. If this fails, we simply bail,
|
|
@@ -109,7 +111,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
|
|
* Check if 'nokaslr' appears on the command line, and
|
|
* return 0 if that is the case.
|
|
*/
|
|
- cmdline = get_cmdline(fdt);
|
|
+ cmdline = kaslr_get_cmdline(fdt);
|
|
str = strstr(cmdline, "nokaslr");
|
|
if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
|
|
return 0;
|
|
@@ -169,5 +171,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
|
|
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
|
|
module_alloc_base &= PAGE_MASK;
|
|
|
|
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
|
|
+ __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
|
|
+
|
|
return offset;
|
|
}
|
|
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
|
|
index e213f8e867f65..8a91ac067d44c 100644
|
|
--- a/arch/arm64/kernel/perf_event.c
|
|
+++ b/arch/arm64/kernel/perf_event.c
|
|
@@ -1274,6 +1274,7 @@ static struct platform_driver armv8_pmu_driver = {
|
|
.driver = {
|
|
.name = ARMV8_PMU_PDEV_NAME,
|
|
.of_match_table = armv8_pmu_of_device_ids,
|
|
+ .suppress_bind_attrs = true,
|
|
},
|
|
.probe = armv8_pmu_device_probe,
|
|
};
|
|
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
|
|
index f4fc1e0544b73..953e316521fca 100644
|
|
--- a/arch/arm64/kernel/setup.c
|
|
+++ b/arch/arm64/kernel/setup.c
|
|
@@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p)
|
|
arm64_memblock_init();
|
|
|
|
paging_init();
|
|
- efi_apply_persistent_mem_reservations();
|
|
|
|
acpi_table_upgrade();
|
|
|
|
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
|
|
index 32653d156747e..bc348ab3dd6b4 100644
|
|
--- a/arch/arm64/kernel/sys_compat.c
|
|
+++ b/arch/arm64/kernel/sys_compat.c
|
|
@@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
|
|
/*
|
|
* Handle all unrecognised system calls.
|
|
*/
|
|
-long compat_arm_syscall(struct pt_regs *regs)
|
|
+long compat_arm_syscall(struct pt_regs *regs, int scno)
|
|
{
|
|
- unsigned int no = regs->regs[7];
|
|
void __user *addr;
|
|
|
|
- switch (no) {
|
|
+ switch (scno) {
|
|
/*
|
|
* Flush a region from virtual address 'r0' to virtual address 'r1'
|
|
* _exclusive_. There is no alignment requirement on either address;
|
|
@@ -102,12 +101,12 @@ long compat_arm_syscall(struct pt_regs *regs)
|
|
|
|
default:
|
|
/*
|
|
- * Calls 9f00xx..9f07ff are defined to return -ENOSYS
|
|
+ * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
|
|
* if not implemented, rather than raising SIGILL. This
|
|
* way the calling program can gracefully determine whether
|
|
* a feature is supported.
|
|
*/
|
|
- if ((no & 0xffff) <= 0x7ff)
|
|
+ if (scno < __ARM_NR_COMPAT_END)
|
|
return -ENOSYS;
|
|
break;
|
|
}
|
|
@@ -116,6 +115,6 @@ long compat_arm_syscall(struct pt_regs *regs)
|
|
(compat_thumb_mode(regs) ? 2 : 4);
|
|
|
|
arm64_notify_die("Oops - bad compat syscall(2)", regs,
|
|
- SIGILL, ILL_ILLTRP, addr, no);
|
|
+ SIGILL, ILL_ILLTRP, addr, scno);
|
|
return 0;
|
|
}
|
|
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
|
|
index 032d223128815..5610ac01c1ec0 100644
|
|
--- a/arch/arm64/kernel/syscall.c
|
|
+++ b/arch/arm64/kernel/syscall.c
|
|
@@ -13,16 +13,15 @@
|
|
#include <asm/thread_info.h>
|
|
#include <asm/unistd.h>
|
|
|
|
-long compat_arm_syscall(struct pt_regs *regs);
|
|
-
|
|
+long compat_arm_syscall(struct pt_regs *regs, int scno);
|
|
long sys_ni_syscall(void);
|
|
|
|
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
|
|
+static long do_ni_syscall(struct pt_regs *regs, int scno)
|
|
{
|
|
#ifdef CONFIG_COMPAT
|
|
long ret;
|
|
if (is_compat_task()) {
|
|
- ret = compat_arm_syscall(regs);
|
|
+ ret = compat_arm_syscall(regs, scno);
|
|
if (ret != -ENOSYS)
|
|
return ret;
|
|
}
|
|
@@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
|
|
syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
|
|
ret = __invoke_syscall(regs, syscall_fn);
|
|
} else {
|
|
- ret = do_ni_syscall(regs);
|
|
+ ret = do_ni_syscall(regs, scno);
|
|
}
|
|
|
|
regs->regs[0] = ret;
|
|
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
|
|
index 03b00007553d9..7fa0083749078 100644
|
|
--- a/arch/arm64/kernel/vmlinux.lds.S
|
|
+++ b/arch/arm64/kernel/vmlinux.lds.S
|
|
@@ -99,7 +99,8 @@ SECTIONS
|
|
*(.discard)
|
|
*(.discard.*)
|
|
*(.interp .dynamic)
|
|
- *(.dynsym .dynstr .hash)
|
|
+ *(.dynsym .dynstr .hash .gnu.hash)
|
|
+ *(.eh_frame)
|
|
}
|
|
|
|
. = KIMAGE_VADDR + TEXT_OFFSET;
|
|
@@ -192,12 +193,12 @@ SECTIONS
|
|
|
|
PERCPU_SECTION(L1_CACHE_BYTES)
|
|
|
|
- .rela : ALIGN(8) {
|
|
+ .rela.dyn : ALIGN(8) {
|
|
*(.rela .rela*)
|
|
}
|
|
|
|
- __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
|
|
- __rela_size = SIZEOF(.rela);
|
|
+ __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
|
|
+ __rela_size = SIZEOF(.rela.dyn);
|
|
|
|
. = ALIGN(SEGMENT_ALIGN);
|
|
__initdata_end = .;
|
|
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
|
|
index 7cc175c88a37e..f6e02cc4d856f 100644
|
|
--- a/arch/arm64/kvm/hyp/switch.c
|
|
+++ b/arch/arm64/kvm/hyp/switch.c
|
|
@@ -157,7 +157,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
|
|
mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
|
|
|
|
write_sysreg(mdcr_el2, mdcr_el2);
|
|
- write_sysreg(HCR_RW, hcr_el2);
|
|
+ write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
|
|
write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
|
|
}
|
|
|
|
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
|
|
index 4dbd9c69a96d0..7fcc9c1a5f45c 100644
|
|
--- a/arch/arm64/kvm/hyp/tlb.c
|
|
+++ b/arch/arm64/kvm/hyp/tlb.c
|
|
@@ -15,14 +15,19 @@
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
*/
|
|
|
|
+#include <linux/irqflags.h>
|
|
+
|
|
#include <asm/kvm_hyp.h>
|
|
#include <asm/kvm_mmu.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
|
|
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
|
|
+ unsigned long *flags)
|
|
{
|
|
u64 val;
|
|
|
|
+ local_irq_save(*flags);
|
|
+
|
|
/*
|
|
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
|
|
* most TLB operations target EL2/EL0. In order to affect the
|
|
@@ -37,7 +42,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
|
|
isb();
|
|
}
|
|
|
|
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
|
|
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
|
|
+ unsigned long *flags)
|
|
{
|
|
__load_guest_stage2(kvm);
|
|
isb();
|
|
@@ -48,7 +54,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
|
|
__tlb_switch_to_guest_vhe,
|
|
ARM64_HAS_VIRT_HOST_EXTN);
|
|
|
|
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
|
|
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
|
|
+ unsigned long flags)
|
|
{
|
|
/*
|
|
* We're done with the TLB operation, let's restore the host's
|
|
@@ -56,9 +63,12 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
|
|
*/
|
|
write_sysreg(0, vttbr_el2);
|
|
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
|
|
+ isb();
|
|
+ local_irq_restore(flags);
|
|
}
|
|
|
|
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
|
|
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
|
|
+ unsigned long flags)
|
|
{
|
|
write_sysreg(0, vttbr_el2);
|
|
}
|
|
@@ -70,11 +80,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
|
|
|
|
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
|
|
{
|
|
+ unsigned long flags;
|
|
+
|
|
dsb(ishst);
|
|
|
|
/* Switch to requested VMID */
|
|
kvm = kern_hyp_va(kvm);
|
|
- __tlb_switch_to_guest()(kvm);
|
|
+ __tlb_switch_to_guest()(kvm, &flags);
|
|
|
|
/*
|
|
* We could do so much better if we had the VA as well.
|
|
@@ -117,36 +129,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
|
|
if (!has_vhe() && icache_is_vpipt())
|
|
__flush_icache_all();
|
|
|
|
- __tlb_switch_to_host()(kvm);
|
|
+ __tlb_switch_to_host()(kvm, flags);
|
|
}
|
|
|
|
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
|
|
{
|
|
+ unsigned long flags;
|
|
+
|
|
dsb(ishst);
|
|
|
|
/* Switch to requested VMID */
|
|
kvm = kern_hyp_va(kvm);
|
|
- __tlb_switch_to_guest()(kvm);
|
|
+ __tlb_switch_to_guest()(kvm, &flags);
|
|
|
|
__tlbi(vmalls12e1is);
|
|
dsb(ish);
|
|
isb();
|
|
|
|
- __tlb_switch_to_host()(kvm);
|
|
+ __tlb_switch_to_host()(kvm, flags);
|
|
}
|
|
|
|
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
|
|
{
|
|
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
|
|
+ unsigned long flags;
|
|
|
|
/* Switch to requested VMID */
|
|
- __tlb_switch_to_guest()(kvm);
|
|
+ __tlb_switch_to_guest()(kvm, &flags);
|
|
|
|
__tlbi(vmalle1);
|
|
dsb(nsh);
|
|
isb();
|
|
|
|
- __tlb_switch_to_host()(kvm);
|
|
+ __tlb_switch_to_host()(kvm, flags);
|
|
}
|
|
|
|
void __hyp_text __kvm_flush_vm_context(void)
|
|
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
|
|
index 0c22ede52f906..a194fd0e837fb 100644
|
|
--- a/arch/arm64/mm/cache.S
|
|
+++ b/arch/arm64/mm/cache.S
|
|
@@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area)
|
|
* - size - size in question
|
|
*/
|
|
ENTRY(__clean_dcache_area_pop)
|
|
+ alternative_if_not ARM64_HAS_DCPOP
|
|
+ b __clean_dcache_area_poc
|
|
+ alternative_else_nop_endif
|
|
dcache_by_line_op cvap, sy, x0, x1, x2, x3
|
|
ret
|
|
ENDPIPROC(__clean_dcache_area_pop)
|
|
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
|
|
index 30695a8681074..5c9073bace83a 100644
|
|
--- a/arch/arm64/mm/flush.c
|
|
+++ b/arch/arm64/mm/flush.c
|
|
@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
|
|
__clean_dcache_area_pou(kaddr, len);
|
|
__flush_icache_all();
|
|
} else {
|
|
- flush_icache_range(addr, addr + len);
|
|
+ /*
|
|
+ * Don't issue kick_all_cpus_sync() after I-cache invalidation
|
|
+ * for user mappings.
|
|
+ */
|
|
+ __flush_icache_range(addr, addr + len);
|
|
}
|
|
}
|
|
|
|
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
|
|
index 63527e585aace..fcb2ca30b6f14 100644
|
|
--- a/arch/arm64/mm/kasan_init.c
|
|
+++ b/arch/arm64/mm/kasan_init.c
|
|
@@ -39,7 +39,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
|
|
{
|
|
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
|
|
__pa(MAX_DMA_ADDRESS),
|
|
- MEMBLOCK_ALLOC_ACCESSIBLE, node);
|
|
+ MEMBLOCK_ALLOC_KASAN, node);
|
|
return __pa(p);
|
|
}
|
|
|
|
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
|
|
index ecae6b358f95f..c1dfa9c10e36c 100644
|
|
--- a/arch/csky/include/asm/io.h
|
|
+++ b/arch/csky/include/asm/io.h
|
|
@@ -15,6 +15,31 @@ extern void iounmap(void *addr);
|
|
extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
|
|
size_t size, unsigned long flags);
|
|
|
|
+/*
|
|
+ * I/O memory access primitives. Reads are ordered relative to any
|
|
+ * following Normal memory access. Writes are ordered relative to any prior
|
|
+ * Normal memory access.
|
|
+ *
|
|
+ * For CACHEV1 (807, 810), store instruction could fast retire, so we need
|
|
+ * another mb() to prevent st fast retire.
|
|
+ *
|
|
+ * For CACHEV2 (860), store instruction with PAGE_ATTR_NO_BUFFERABLE won't
|
|
+ * fast retire.
|
|
+ */
|
|
+#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; })
|
|
+#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; })
|
|
+#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; })
|
|
+
|
|
+#ifdef CONFIG_CPU_HAS_CACHEV2
|
|
+#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); })
|
|
+#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); })
|
|
+#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); })
|
|
+#else
|
|
+#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); })
|
|
+#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); })
|
|
+#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); })
|
|
+#endif
|
|
+
|
|
#define ioremap_nocache(phy, sz) ioremap(phy, sz)
|
|
#define ioremap_wc ioremap_nocache
|
|
#define ioremap_wt ioremap_nocache
|
|
diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c
|
|
index 65abab0c7a478..b5ad7d9de18cf 100644
|
|
--- a/arch/csky/kernel/module.c
|
|
+++ b/arch/csky/kernel/module.c
|
|
@@ -12,7 +12,7 @@
|
|
#include <linux/spinlock.h>
|
|
#include <asm/pgtable.h>
|
|
|
|
-#if defined(__CSKYABIV2__)
|
|
+#ifdef CONFIG_CPU_CK810
|
|
#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000)
|
|
#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0)
|
|
|
|
@@ -25,6 +25,26 @@
|
|
*(uint16_t *)(addr) = 0xE8Fa; \
|
|
*((uint16_t *)(addr) + 1) = 0x0000; \
|
|
} while (0)
|
|
+
|
|
+static void jsri_2_lrw_jsr(uint32_t *location)
|
|
+{
|
|
+ uint16_t *location_tmp = (uint16_t *)location;
|
|
+
|
|
+ if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
|
|
+ return;
|
|
+
|
|
+ if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
|
|
+ /* jsri 0x... --> lrw r26, 0x... */
|
|
+ CHANGE_JSRI_TO_LRW(location);
|
|
+ /* lsli r0, r0 --> jsr r26 */
|
|
+ SET_JSR32_R26(location + 1);
|
|
+ }
|
|
+}
|
|
+#else
|
|
+static void inline jsri_2_lrw_jsr(uint32_t *location)
|
|
+{
|
|
+ return;
|
|
+}
|
|
#endif
|
|
|
|
int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
|
|
@@ -35,9 +55,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
|
|
Elf32_Sym *sym;
|
|
uint32_t *location;
|
|
short *temp;
|
|
-#if defined(__CSKYABIV2__)
|
|
- uint16_t *location_tmp;
|
|
-#endif
|
|
|
|
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
|
|
/* This is where to make the change */
|
|
@@ -59,18 +76,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
|
|
case R_CSKY_PCRELJSR_IMM11BY2:
|
|
break;
|
|
case R_CSKY_PCRELJSR_IMM26BY2:
|
|
-#if defined(__CSKYABIV2__)
|
|
- location_tmp = (uint16_t *)location;
|
|
- if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
|
|
- break;
|
|
-
|
|
- if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
|
|
- /* jsri 0x... --> lrw r26, 0x... */
|
|
- CHANGE_JSRI_TO_LRW(location);
|
|
- /* lsli r0, r0 --> jsr r26 */
|
|
- SET_JSR32_R26(location + 1);
|
|
- }
|
|
-#endif
|
|
+ jsri_2_lrw_jsr(location);
|
|
break;
|
|
case R_CSKY_ADDR_HI16:
|
|
temp = ((short *)location) + 1;
|
|
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
|
|
index 8272ea4c72645..bfb3d8451c0af 100644
|
|
--- a/arch/mips/Kconfig
|
|
+++ b/arch/mips/Kconfig
|
|
@@ -794,6 +794,7 @@ config SIBYTE_SWARM
|
|
select SYS_SUPPORTS_HIGHMEM
|
|
select SYS_SUPPORTS_LITTLE_ENDIAN
|
|
select ZONE_DMA32 if 64BIT
|
|
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
|
|
|
|
config SIBYTE_LITTLESUR
|
|
bool "Sibyte BCM91250C2-LittleSur"
|
|
@@ -814,6 +815,7 @@ config SIBYTE_SENTOSA
|
|
select SYS_HAS_CPU_SB1
|
|
select SYS_SUPPORTS_BIG_ENDIAN
|
|
select SYS_SUPPORTS_LITTLE_ENDIAN
|
|
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
|
|
|
|
config SIBYTE_BIGSUR
|
|
bool "Sibyte BCM91480B-BigSur"
|
|
@@ -826,6 +828,7 @@ config SIBYTE_BIGSUR
|
|
select SYS_SUPPORTS_HIGHMEM
|
|
select SYS_SUPPORTS_LITTLE_ENDIAN
|
|
select ZONE_DMA32 if 64BIT
|
|
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
|
|
|
|
config SNI_RM
|
|
bool "SNI RM200/300/400"
|
|
@@ -3184,6 +3187,7 @@ config MIPS32_O32
|
|
config MIPS32_N32
|
|
bool "Kernel support for n32 binaries"
|
|
depends on 64BIT
|
|
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
|
|
select COMPAT
|
|
select MIPS32_COMPAT
|
|
select SYSVIPC_COMPAT if SYSVIPC
|
|
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
|
|
index 6054d49e608ee..fe3773539effe 100644
|
|
--- a/arch/mips/bcm47xx/setup.c
|
|
+++ b/arch/mips/bcm47xx/setup.c
|
|
@@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
|
|
pm_power_off = bcm47xx_machine_halt;
|
|
}
|
|
|
|
+#ifdef CONFIG_BCM47XX_BCMA
|
|
+static struct device * __init bcm47xx_setup_device(void)
|
|
+{
|
|
+ struct device *dev;
|
|
+ int err;
|
|
+
|
|
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
|
+ if (!dev)
|
|
+ return NULL;
|
|
+
|
|
+ err = dev_set_name(dev, "bcm47xx_soc");
|
|
+ if (err) {
|
|
+ pr_err("Failed to set SoC device name: %d\n", err);
|
|
+ kfree(dev);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
|
|
+ if (err)
|
|
+ pr_err("Failed to set SoC DMA mask: %d\n", err);
|
|
+
|
|
+ return dev;
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* This finishes bus initialization doing things that were not possible without
|
|
* kmalloc. Make sure to call it late enough (after mm_init).
|
|
@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
|
|
if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
|
|
int err;
|
|
|
|
+ bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
|
|
+ if (!bcm47xx_bus.bcma.dev)
|
|
+ panic("Failed to setup SoC device\n");
|
|
+
|
|
err = bcma_host_soc_init(&bcm47xx_bus.bcma);
|
|
if (err)
|
|
panic("Failed to initialize BCMA bus (err %d)", err);
|
|
@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
|
|
#endif
|
|
#ifdef CONFIG_BCM47XX_BCMA
|
|
case BCM47XX_BUS_TYPE_BCMA:
|
|
+ if (device_register(bcm47xx_bus.bcma.dev))
|
|
+ pr_err("Failed to register SoC device\n");
|
|
bcma_bus_register(&bcm47xx_bus.bcma.bus);
|
|
break;
|
|
#endif
|
|
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
|
|
index 37fe58c19a90f..542c3ede97222 100644
|
|
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
|
|
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
|
|
@@ -13,6 +13,7 @@
|
|
#include <stdint.h>
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
+#include "../../../../include/linux/sizes.h"
|
|
|
|
int main(int argc, char *argv[])
|
|
{
|
|
@@ -45,11 +46,11 @@ int main(int argc, char *argv[])
|
|
vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
|
|
|
|
/*
|
|
- * Align with 16 bytes: "greater than that used for any standard data
|
|
- * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
|
|
+ * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
|
|
+ * which may be as large as 64KB depending on the kernel configuration.
|
|
*/
|
|
|
|
- vmlinuz_load_addr += (16 - vmlinux_size % 16);
|
|
+ vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
|
|
|
|
printf("0x%llx\n", vmlinuz_load_addr);
|
|
|
|
diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
|
|
index 65af3f6ba81c3..84328afa3a55c 100644
|
|
--- a/arch/mips/boot/dts/img/boston.dts
|
|
+++ b/arch/mips/boot/dts/img/boston.dts
|
|
@@ -141,6 +141,12 @@
|
|
#size-cells = <2>;
|
|
#interrupt-cells = <1>;
|
|
|
|
+ eg20t_phub@2,0,0 {
|
|
+ compatible = "pci8086,8801";
|
|
+ reg = <0x00020000 0 0 0 0>;
|
|
+ intel,eg20t-prefetch = <0>;
|
|
+ };
|
|
+
|
|
eg20t_mac@2,0,1 {
|
|
compatible = "pci8086,8802";
|
|
reg = <0x00020100 0 0 0 0>;
|
|
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
|
|
index 2152b7ba65fbc..cc8dbea0911fc 100644
|
|
--- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
|
|
+++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
|
|
@@ -90,11 +90,11 @@
|
|
interrupts = <0>;
|
|
};
|
|
|
|
- axi_i2c: i2c@10A00000 {
|
|
+ axi_i2c: i2c@10a00000 {
|
|
compatible = "xlnx,xps-iic-2.00.a";
|
|
interrupt-parent = <&axi_intc>;
|
|
interrupts = <4>;
|
|
- reg = < 0x10A00000 0x10000 >;
|
|
+ reg = < 0x10a00000 0x10000 >;
|
|
clocks = <&ext>;
|
|
xlnx,clk-freq = <0x5f5e100>;
|
|
xlnx,family = "Artix7";
|
|
@@ -106,9 +106,9 @@
|
|
#address-cells = <1>;
|
|
#size-cells = <0>;
|
|
|
|
- ad7420@4B {
|
|
+ ad7420@4b {
|
|
compatible = "adi,adt7420";
|
|
- reg = <0x4B>;
|
|
+ reg = <0x4b>;
|
|
};
|
|
} ;
|
|
};
|
|
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
|
|
index 6c79e8a16a268..3ddbb98dff848 100644
|
|
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
|
|
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
|
|
@@ -286,7 +286,8 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
|
|
case 3:
|
|
return CVMX_HELPER_INTERFACE_MODE_LOOP;
|
|
case 4:
|
|
- return CVMX_HELPER_INTERFACE_MODE_RGMII;
|
|
+ /* TODO: Implement support for AGL (RGMII). */
|
|
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
|
|
default:
|
|
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
|
|
}
|
|
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
|
|
index dfb95cffef3e2..a3cf68538f3d7 100644
|
|
--- a/arch/mips/cavium-octeon/setup.c
|
|
+++ b/arch/mips/cavium-octeon/setup.c
|
|
@@ -96,7 +96,7 @@ static void octeon_kexec_smp_down(void *ignored)
|
|
" sync \n"
|
|
" synci ($0) \n");
|
|
|
|
- relocated_kexec_smp_wait(NULL);
|
|
+ kexec_reboot();
|
|
}
|
|
#endif
|
|
|
|
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
|
|
index 951c4231bdb85..4c47b3fd958b6 100644
|
|
--- a/arch/mips/configs/ath79_defconfig
|
|
+++ b/arch/mips/configs/ath79_defconfig
|
|
@@ -71,6 +71,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
|
|
# CONFIG_SERIAL_8250_PCI is not set
|
|
CONFIG_SERIAL_8250_NR_UARTS=1
|
|
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
|
|
+CONFIG_SERIAL_OF_PLATFORM=y
|
|
CONFIG_SERIAL_AR933X=y
|
|
CONFIG_SERIAL_AR933X_CONSOLE=y
|
|
# CONFIG_HW_RANDOM is not set
|
|
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
|
|
index d4ea7a5b60cf4..9e805317847d8 100644
|
|
--- a/arch/mips/include/asm/atomic.h
|
|
+++ b/arch/mips/include/asm/atomic.h
|
|
@@ -306,7 +306,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
|
|
{ \
|
|
long result; \
|
|
\
|
|
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
|
|
+ if (kernel_uses_llsc) { \
|
|
long temp; \
|
|
\
|
|
__asm__ __volatile__( \
|
|
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
|
|
index a41059d47d31c..ed7ffe4e63a32 100644
|
|
--- a/arch/mips/include/asm/cpu-info.h
|
|
+++ b/arch/mips/include/asm/cpu-info.h
|
|
@@ -50,7 +50,7 @@ struct guest_info {
|
|
#define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
|
|
|
|
struct cpuinfo_mips {
|
|
- unsigned long asid_cache;
|
|
+ u64 asid_cache;
|
|
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
|
|
unsigned long asid_mask;
|
|
#endif
|
|
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
|
|
index dacbdb84516a0..532b49b1dbb3e 100644
|
|
--- a/arch/mips/include/asm/cpu.h
|
|
+++ b/arch/mips/include/asm/cpu.h
|
|
@@ -248,8 +248,9 @@
|
|
#define PRID_REV_LOONGSON3A_R1 0x0005
|
|
#define PRID_REV_LOONGSON3B_R1 0x0006
|
|
#define PRID_REV_LOONGSON3B_R2 0x0007
|
|
-#define PRID_REV_LOONGSON3A_R2 0x0008
|
|
+#define PRID_REV_LOONGSON3A_R2_0 0x0008
|
|
#define PRID_REV_LOONGSON3A_R3_0 0x0009
|
|
+#define PRID_REV_LOONGSON3A_R2_1 0x000c
|
|
#define PRID_REV_LOONGSON3A_R3_1 0x000d
|
|
|
|
/*
|
|
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
|
|
index e9cc62cfac99d..ff50aeb1a933f 100644
|
|
--- a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
|
|
+++ b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
|
|
@@ -4,8 +4,6 @@
|
|
|
|
struct jz4740_mmc_platform_data {
|
|
int gpio_power;
|
|
- int gpio_card_detect;
|
|
- int gpio_read_only;
|
|
unsigned card_detect_active_low:1;
|
|
unsigned read_only_active_low:1;
|
|
unsigned power_active_low:1;
|
|
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
|
|
index cbac603ced19c..b5e288a12dfe2 100644
|
|
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
|
|
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
|
|
@@ -31,7 +31,7 @@
|
|
/* Enable STFill Buffer */
|
|
mfc0 t0, CP0_PRID
|
|
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
|
|
- slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2)
|
|
+ slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0)
|
|
bnez t0, 1f
|
|
mfc0 t0, CP0_CONFIG6
|
|
or t0, 0x100
|
|
@@ -60,7 +60,7 @@
|
|
/* Enable STFill Buffer */
|
|
mfc0 t0, CP0_PRID
|
|
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
|
|
- slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2)
|
|
+ slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0)
|
|
bnez t0, 1f
|
|
mfc0 t0, CP0_CONFIG6
|
|
or t0, 0x100
|
|
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
|
|
index c9f7e231e66bb..59c8b11c090ee 100644
|
|
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
|
|
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
|
|
@@ -21,6 +21,7 @@
|
|
#define NODE3_ADDRSPACE_OFFSET 0x300000000000UL
|
|
|
|
#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
|
|
+#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
|
|
|
|
#define LEVELS_PER_SLICE 128
|
|
|
|
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
|
|
index 0740be7d5d4ac..24d6b42345fb8 100644
|
|
--- a/arch/mips/include/asm/mmu.h
|
|
+++ b/arch/mips/include/asm/mmu.h
|
|
@@ -7,7 +7,7 @@
|
|
#include <linux/wait.h>
|
|
|
|
typedef struct {
|
|
- unsigned long asid[NR_CPUS];
|
|
+ u64 asid[NR_CPUS];
|
|
void *vdso;
|
|
atomic_t fp_mode_switching;
|
|
|
|
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
|
|
index 94414561de0e7..a589585be21be 100644
|
|
--- a/arch/mips/include/asm/mmu_context.h
|
|
+++ b/arch/mips/include/asm/mmu_context.h
|
|
@@ -76,14 +76,14 @@ extern unsigned long pgd_current[];
|
|
* All unused by hardware upper bits will be considered
|
|
* as a software asid extension.
|
|
*/
|
|
-static unsigned long asid_version_mask(unsigned int cpu)
|
|
+static inline u64 asid_version_mask(unsigned int cpu)
|
|
{
|
|
unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
|
|
|
|
- return ~(asid_mask | (asid_mask - 1));
|
|
+ return ~(u64)(asid_mask | (asid_mask - 1));
|
|
}
|
|
|
|
-static unsigned long asid_first_version(unsigned int cpu)
|
|
+static inline u64 asid_first_version(unsigned int cpu)
|
|
{
|
|
return ~asid_version_mask(cpu) + 1;
|
|
}
|
|
@@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
|
|
static inline void
|
|
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
|
|
{
|
|
- unsigned long asid = asid_cache(cpu);
|
|
+ u64 asid = asid_cache(cpu);
|
|
|
|
if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
|
|
if (cpu_has_vtag_icache)
|
|
flush_icache_all();
|
|
local_flush_tlb_all(); /* start new asid cycle */
|
|
- if (!asid) /* fix version if needed */
|
|
- asid = asid_first_version(cpu);
|
|
}
|
|
|
|
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
|
|
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
|
|
index f085fba41da50..b826b8473e956 100644
|
|
--- a/arch/mips/include/asm/mmzone.h
|
|
+++ b/arch/mips/include/asm/mmzone.h
|
|
@@ -7,7 +7,18 @@
|
|
#define _ASM_MMZONE_H_
|
|
|
|
#include <asm/page.h>
|
|
-#include <mmzone.h>
|
|
+
|
|
+#ifdef CONFIG_NEED_MULTIPLE_NODES
|
|
+# include <mmzone.h>
|
|
+#endif
|
|
+
|
|
+#ifndef pa_to_nid
|
|
+#define pa_to_nid(addr) 0
|
|
+#endif
|
|
+
|
|
+#ifndef nid_to_addrbase
|
|
+#define nid_to_addrbase(nid) 0
|
|
+#endif
|
|
|
|
#ifdef CONFIG_DISCONTIGMEM
|
|
|
|
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
|
|
index 0036ea0c71735..93a9dce31f255 100644
|
|
--- a/arch/mips/include/asm/pgtable-64.h
|
|
+++ b/arch/mips/include/asm/pgtable-64.h
|
|
@@ -265,6 +265,11 @@ static inline int pmd_bad(pmd_t pmd)
|
|
|
|
static inline int pmd_present(pmd_t pmd)
|
|
{
|
|
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
|
|
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
|
|
+ return pmd_val(pmd) & _PAGE_PRESENT;
|
|
+#endif
|
|
+
|
|
return pmd_val(pmd) != (unsigned long) invalid_pte_table;
|
|
}
|
|
|
|
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
|
|
index d19b2d65336b9..7f4a32d3345a0 100644
|
|
--- a/arch/mips/include/asm/r4kcache.h
|
|
+++ b/arch/mips/include/asm/r4kcache.h
|
|
@@ -20,6 +20,7 @@
|
|
#include <asm/cpu-features.h>
|
|
#include <asm/cpu-type.h>
|
|
#include <asm/mipsmtregs.h>
|
|
+#include <asm/mmzone.h>
|
|
#include <linux/uaccess.h> /* for uaccess_kernel() */
|
|
|
|
extern void (*r4k_blast_dcache)(void);
|
|
@@ -674,4 +675,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
|
|
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
|
|
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
|
|
|
|
+/* Currently, this is very specific to Loongson-3 */
|
|
+#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
|
|
+static inline void blast_##pfx##cache##lsize##_node(long node) \
|
|
+{ \
|
|
+ unsigned long start = CAC_BASE | nid_to_addrbase(node); \
|
|
+ unsigned long end = start + current_cpu_data.desc.waysize; \
|
|
+ unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
|
|
+ unsigned long ws_end = current_cpu_data.desc.ways << \
|
|
+ current_cpu_data.desc.waybit; \
|
|
+ unsigned long ws, addr; \
|
|
+ \
|
|
+ for (ws = 0; ws < ws_end; ws += ws_inc) \
|
|
+ for (addr = start; addr < end; addr += lsize * 32) \
|
|
+ cache##lsize##_unroll32(addr|ws, indexop); \
|
|
+}
|
|
+
|
|
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
|
|
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
|
|
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
|
|
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
|
|
+
|
|
#endif /* _ASM_R4KCACHE_H */
|
|
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
|
|
index c05dcf5ab414e..273ef58f4d435 100644
|
|
--- a/arch/mips/include/uapi/asm/inst.h
|
|
+++ b/arch/mips/include/uapi/asm/inst.h
|
|
@@ -369,8 +369,8 @@ enum mm_32a_minor_op {
|
|
mm_ext_op = 0x02c,
|
|
mm_pool32axf_op = 0x03c,
|
|
mm_srl32_op = 0x040,
|
|
+ mm_srlv32_op = 0x050,
|
|
mm_sra_op = 0x080,
|
|
- mm_srlv32_op = 0x090,
|
|
mm_rotr_op = 0x0c0,
|
|
mm_lwxs_op = 0x118,
|
|
mm_addu32_op = 0x150,
|
|
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
|
|
index 4c41ed0a637e5..415a08376c362 100644
|
|
--- a/arch/mips/jazz/jazzdma.c
|
|
+++ b/arch/mips/jazz/jazzdma.c
|
|
@@ -74,14 +74,15 @@ static int __init vdma_init(void)
|
|
get_order(VDMA_PGTBL_SIZE));
|
|
BUG_ON(!pgtbl);
|
|
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
|
|
- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
|
|
+ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
|
|
|
|
/*
|
|
* Clear the R4030 translation table
|
|
*/
|
|
vdma_pgtbl_init();
|
|
|
|
- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
|
|
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
|
|
+ CPHYSADDR((unsigned long)pgtbl));
|
|
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
|
|
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
|
|
|
|
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
|
|
index af0c8ace01416..705593d40d120 100644
|
|
--- a/arch/mips/jz4740/board-qi_lb60.c
|
|
+++ b/arch/mips/jz4740/board-qi_lb60.c
|
|
@@ -43,7 +43,6 @@
|
|
#include "clock.h"
|
|
|
|
/* GPIOs */
|
|
-#define QI_LB60_GPIO_SD_CD JZ_GPIO_PORTD(0)
|
|
#define QI_LB60_GPIO_SD_VCC_EN_N JZ_GPIO_PORTD(2)
|
|
|
|
#define QI_LB60_GPIO_KEYOUT(x) (JZ_GPIO_PORTC(10) + (x))
|
|
@@ -386,12 +385,18 @@ static struct platform_device qi_lb60_gpio_keys = {
|
|
};
|
|
|
|
static struct jz4740_mmc_platform_data qi_lb60_mmc_pdata = {
|
|
- .gpio_card_detect = QI_LB60_GPIO_SD_CD,
|
|
- .gpio_read_only = -1,
|
|
.gpio_power = QI_LB60_GPIO_SD_VCC_EN_N,
|
|
.power_active_low = 1,
|
|
};
|
|
|
|
+static struct gpiod_lookup_table qi_lb60_mmc_gpio_table = {
|
|
+ .dev_id = "jz4740-mmc.0",
|
|
+ .table = {
|
|
+ GPIO_LOOKUP("GPIOD", 0, "cd", GPIO_ACTIVE_HIGH),
|
|
+ { },
|
|
+ },
|
|
+};
|
|
+
|
|
/* beeper */
|
|
static struct pwm_lookup qi_lb60_pwm_lookup[] = {
|
|
PWM_LOOKUP("jz4740-pwm", 4, "pwm-beeper", NULL, 0,
|
|
@@ -500,6 +505,7 @@ static int __init qi_lb60_init_platform_devices(void)
|
|
gpiod_add_lookup_table(&qi_lb60_audio_gpio_table);
|
|
gpiod_add_lookup_table(&qi_lb60_nand_gpio_table);
|
|
gpiod_add_lookup_table(&qi_lb60_spigpio_gpio_table);
|
|
+ gpiod_add_lookup_table(&qi_lb60_mmc_gpio_table);
|
|
|
|
spi_register_board_info(qi_lb60_spi_board_info,
|
|
ARRAY_SIZE(qi_lb60_spi_board_info));
|
|
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
|
|
index d535fc706a8b3..f70cf6447cfb9 100644
|
|
--- a/arch/mips/kernel/cpu-probe.c
|
|
+++ b/arch/mips/kernel/cpu-probe.c
|
|
@@ -1843,7 +1843,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
|
|
switch (c->processor_id & PRID_IMP_MASK) {
|
|
case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
|
|
switch (c->processor_id & PRID_REV_MASK) {
|
|
- case PRID_REV_LOONGSON3A_R2:
|
|
+ case PRID_REV_LOONGSON3A_R2_0:
|
|
+ case PRID_REV_LOONGSON3A_R2_1:
|
|
c->cputype = CPU_LOONGSON3;
|
|
__cpu_name[cpu] = "ICT Loongson-3";
|
|
set_elf_platform(cpu, "loongson3a");
|
|
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
|
|
index 046846999efdb..909b7a87c89c1 100644
|
|
--- a/arch/mips/kernel/idle.c
|
|
+++ b/arch/mips/kernel/idle.c
|
|
@@ -183,7 +183,7 @@ void __init check_wait(void)
|
|
cpu_wait = r4k_wait;
|
|
break;
|
|
case CPU_LOONGSON3:
|
|
- if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
|
|
+ if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
|
|
cpu_wait = r4k_wait;
|
|
break;
|
|
|
|
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
|
|
index 8f5bd04f320a9..7f3f136572dec 100644
|
|
--- a/arch/mips/kernel/mips-cm.c
|
|
+++ b/arch/mips/kernel/mips-cm.c
|
|
@@ -457,5 +457,5 @@ void mips_cm_error_report(void)
|
|
}
|
|
|
|
/* reprime cause register */
|
|
- write_gcr_error_cause(0);
|
|
+ write_gcr_error_cause(cm_error);
|
|
}
|
|
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
|
|
index 48a9c6b90e079..9df3ebdc7b0f7 100644
|
|
--- a/arch/mips/kernel/vdso.c
|
|
+++ b/arch/mips/kernel/vdso.c
|
|
@@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|
|
|
/* Map delay slot emulation page */
|
|
base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
|
|
- VM_READ|VM_WRITE|VM_EXEC|
|
|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
|
|
+ VM_READ | VM_EXEC |
|
|
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
|
|
0, NULL);
|
|
if (IS_ERR_VALUE(base)) {
|
|
ret = base;
|
|
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
|
|
index f0bc3312ed110..c4ef1c31e0c4f 100644
|
|
--- a/arch/mips/lantiq/irq.c
|
|
+++ b/arch/mips/lantiq/irq.c
|
|
@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
|
|
.irq_set_type = ltq_eiu_settype,
|
|
};
|
|
|
|
-static void ltq_hw_irqdispatch(int module)
|
|
+static void ltq_hw_irq_handler(struct irq_desc *desc)
|
|
{
|
|
+ int module = irq_desc_get_irq(desc) - 2;
|
|
u32 irq;
|
|
+ int hwirq;
|
|
|
|
irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
|
|
if (irq == 0)
|
|
@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
|
|
* other bits might be bogus
|
|
*/
|
|
irq = __fls(irq);
|
|
- do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
|
|
+ hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
|
|
+ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
|
|
|
|
/* if this is a EBU irq, we need to ack it or get a deadlock */
|
|
if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
|
|
@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
|
|
LTQ_EBU_PCC_ISTAT);
|
|
}
|
|
|
|
-#define DEFINE_HWx_IRQDISPATCH(x) \
|
|
- static void ltq_hw ## x ## _irqdispatch(void) \
|
|
- { \
|
|
- ltq_hw_irqdispatch(x); \
|
|
- }
|
|
-DEFINE_HWx_IRQDISPATCH(0)
|
|
-DEFINE_HWx_IRQDISPATCH(1)
|
|
-DEFINE_HWx_IRQDISPATCH(2)
|
|
-DEFINE_HWx_IRQDISPATCH(3)
|
|
-DEFINE_HWx_IRQDISPATCH(4)
|
|
-
|
|
-#if MIPS_CPU_TIMER_IRQ == 7
|
|
-static void ltq_hw5_irqdispatch(void)
|
|
-{
|
|
- do_IRQ(MIPS_CPU_TIMER_IRQ);
|
|
-}
|
|
-#else
|
|
-DEFINE_HWx_IRQDISPATCH(5)
|
|
-#endif
|
|
-
|
|
-static void ltq_hw_irq_handler(struct irq_desc *desc)
|
|
-{
|
|
- ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
|
|
-}
|
|
-
|
|
-asmlinkage void plat_irq_dispatch(void)
|
|
-{
|
|
- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
|
|
- int irq;
|
|
-
|
|
- if (!pending) {
|
|
- spurious_interrupt();
|
|
- return;
|
|
- }
|
|
-
|
|
- pending >>= CAUSEB_IP;
|
|
- while (pending) {
|
|
- irq = fls(pending) - 1;
|
|
- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
|
|
- pending &= ~BIT(irq);
|
|
- }
|
|
-}
|
|
-
|
|
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
|
|
{
|
|
struct irq_chip *chip = <q_irq_type;
|
|
@@ -343,28 +303,10 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
|
|
for (i = 0; i < MAX_IM; i++)
|
|
irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
|
|
|
|
- if (cpu_has_vint) {
|
|
- pr_info("Setting up vectored interrupts\n");
|
|
- set_vi_handler(2, ltq_hw0_irqdispatch);
|
|
- set_vi_handler(3, ltq_hw1_irqdispatch);
|
|
- set_vi_handler(4, ltq_hw2_irqdispatch);
|
|
- set_vi_handler(5, ltq_hw3_irqdispatch);
|
|
- set_vi_handler(6, ltq_hw4_irqdispatch);
|
|
- set_vi_handler(7, ltq_hw5_irqdispatch);
|
|
- }
|
|
-
|
|
ltq_domain = irq_domain_add_linear(node,
|
|
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
|
|
&irq_domain_ops, 0);
|
|
|
|
-#ifndef CONFIG_MIPS_MT_SMP
|
|
- set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
|
|
- IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
|
|
-#else
|
|
- set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
|
|
- IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
|
|
-#endif
|
|
-
|
|
/* tell oprofile which irq to use */
|
|
ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
|
|
|
|
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
|
|
index 8f68ee02a8c24..72e5f8fb2b357 100644
|
|
--- a/arch/mips/loongson64/common/env.c
|
|
+++ b/arch/mips/loongson64/common/env.c
|
|
@@ -197,7 +197,8 @@ void __init prom_init_env(void)
|
|
cpu_clock_freq = 797000000;
|
|
break;
|
|
case PRID_REV_LOONGSON3A_R1:
|
|
- case PRID_REV_LOONGSON3A_R2:
|
|
+ case PRID_REV_LOONGSON3A_R2_0:
|
|
+ case PRID_REV_LOONGSON3A_R2_1:
|
|
case PRID_REV_LOONGSON3A_R3_0:
|
|
case PRID_REV_LOONGSON3A_R3_1:
|
|
cpu_clock_freq = 900000000;
|
|
diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c
|
|
index a60715e11306b..b26892ce871c8 100644
|
|
--- a/arch/mips/loongson64/common/reset.c
|
|
+++ b/arch/mips/loongson64/common/reset.c
|
|
@@ -59,7 +59,12 @@ static void loongson_poweroff(void)
|
|
{
|
|
#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
|
|
mach_prepare_shutdown();
|
|
- unreachable();
|
|
+
|
|
+ /*
|
|
+ * It needs a wait loop here, but mips/kernel/reset.c already calls
|
|
+ * a generic delay loop, machine_hang(), so simply return.
|
|
+ */
|
|
+ return;
|
|
#else
|
|
void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
|
|
|
|
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
|
|
index b5c1e0aa955e6..8fba0aa48bf4e 100644
|
|
--- a/arch/mips/loongson64/loongson-3/smp.c
|
|
+++ b/arch/mips/loongson64/loongson-3/smp.c
|
|
@@ -682,7 +682,8 @@ void play_dead(void)
|
|
play_dead_at_ckseg1 =
|
|
(void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
|
|
break;
|
|
- case PRID_REV_LOONGSON3A_R2:
|
|
+ case PRID_REV_LOONGSON3A_R2_0:
|
|
+ case PRID_REV_LOONGSON3A_R2_1:
|
|
case PRID_REV_LOONGSON3A_R3_0:
|
|
case PRID_REV_LOONGSON3A_R3_1:
|
|
play_dead_at_ckseg1 =
|
|
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
|
|
index 5450f4d1c920e..e2d46cb93ca98 100644
|
|
--- a/arch/mips/math-emu/dsemul.c
|
|
+++ b/arch/mips/math-emu/dsemul.c
|
|
@@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
|
|
{
|
|
int isa16 = get_isa16_mode(regs->cp0_epc);
|
|
mips_instruction break_math;
|
|
- struct emuframe __user *fr;
|
|
- int err, fr_idx;
|
|
+ unsigned long fr_uaddr;
|
|
+ struct emuframe fr;
|
|
+ int fr_idx, ret;
|
|
|
|
/* NOP is easy */
|
|
if (ir == 0)
|
|
@@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
|
|
fr_idx = alloc_emuframe();
|
|
if (fr_idx == BD_EMUFRAME_NONE)
|
|
return SIGBUS;
|
|
- fr = &dsemul_page()[fr_idx];
|
|
|
|
/* Retrieve the appropriately encoded break instruction */
|
|
break_math = BREAK_MATH(isa16);
|
|
|
|
/* Write the instructions to the frame */
|
|
if (isa16) {
|
|
- err = __put_user(ir >> 16,
|
|
- (u16 __user *)(&fr->emul));
|
|
- err |= __put_user(ir & 0xffff,
|
|
- (u16 __user *)((long)(&fr->emul) + 2));
|
|
- err |= __put_user(break_math >> 16,
|
|
- (u16 __user *)(&fr->badinst));
|
|
- err |= __put_user(break_math & 0xffff,
|
|
- (u16 __user *)((long)(&fr->badinst) + 2));
|
|
+ union mips_instruction _emul = {
|
|
+ .halfword = { ir >> 16, ir }
|
|
+ };
|
|
+ union mips_instruction _badinst = {
|
|
+ .halfword = { break_math >> 16, break_math }
|
|
+ };
|
|
+
|
|
+ fr.emul = _emul.word;
|
|
+ fr.badinst = _badinst.word;
|
|
} else {
|
|
- err = __put_user(ir, &fr->emul);
|
|
- err |= __put_user(break_math, &fr->badinst);
|
|
+ fr.emul = ir;
|
|
+ fr.badinst = break_math;
|
|
}
|
|
|
|
- if (unlikely(err)) {
|
|
+ /* Write the frame to user memory */
|
|
+ fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
|
|
+ ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
|
|
+ FOLL_FORCE | FOLL_WRITE);
|
|
+ if (unlikely(ret != sizeof(fr))) {
|
|
MIPS_FPU_EMU_INC_STATS(errors);
|
|
free_emuframe(fr_idx, current->mm);
|
|
return SIGBUS;
|
|
@@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
|
|
atomic_set(¤t->thread.bd_emu_frame, fr_idx);
|
|
|
|
/* Change user register context to execute the frame */
|
|
- regs->cp0_epc = (unsigned long)&fr->emul | isa16;
|
|
-
|
|
- /* Ensure the icache observes our newly written frame */
|
|
- flush_cache_sigtramp((unsigned long)&fr->emul);
|
|
+ regs->cp0_epc = fr_uaddr | isa16;
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
|
|
index 3466fcdae0ca2..01848cdf20741 100644
|
|
--- a/arch/mips/mm/c-r3k.c
|
|
+++ b/arch/mips/mm/c-r3k.c
|
|
@@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
|
|
pmd_t *pmdp;
|
|
pte_t *ptep;
|
|
|
|
- pr_debug("cpage[%08lx,%08lx]\n",
|
|
+ pr_debug("cpage[%08llx,%08lx]\n",
|
|
cpu_context(smp_processor_id(), mm), addr);
|
|
|
|
/* No ASID => no such page in the cache. */
|
|
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
|
|
index 05bd77727fb95..96d666a0f4a07 100644
|
|
--- a/arch/mips/mm/c-r4k.c
|
|
+++ b/arch/mips/mm/c-r4k.c
|
|
@@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void)
|
|
r4k_blast_scache = blast_scache128;
|
|
}
|
|
|
|
+static void (*r4k_blast_scache_node)(long node);
|
|
+
|
|
+static void r4k_blast_scache_node_setup(void)
|
|
+{
|
|
+ unsigned long sc_lsize = cpu_scache_line_size();
|
|
+
|
|
+ if (current_cpu_type() != CPU_LOONGSON3)
|
|
+ r4k_blast_scache_node = (void *)cache_noop;
|
|
+ else if (sc_lsize == 16)
|
|
+ r4k_blast_scache_node = blast_scache16_node;
|
|
+ else if (sc_lsize == 32)
|
|
+ r4k_blast_scache_node = blast_scache32_node;
|
|
+ else if (sc_lsize == 64)
|
|
+ r4k_blast_scache_node = blast_scache64_node;
|
|
+ else if (sc_lsize == 128)
|
|
+ r4k_blast_scache_node = blast_scache128_node;
|
|
+}
|
|
+
|
|
static inline void local_r4k___flush_cache_all(void * args)
|
|
{
|
|
switch (current_cpu_type()) {
|
|
case CPU_LOONGSON2:
|
|
- case CPU_LOONGSON3:
|
|
case CPU_R4000SC:
|
|
case CPU_R4000MC:
|
|
case CPU_R4400SC:
|
|
@@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args)
|
|
r4k_blast_scache();
|
|
break;
|
|
|
|
+ case CPU_LOONGSON3:
|
|
+ /* Use get_ebase_cpunum() for both NUMA=y/n */
|
|
+ r4k_blast_scache_node(get_ebase_cpunum() >> 2);
|
|
+ break;
|
|
+
|
|
case CPU_BMIPS5000:
|
|
r4k_blast_scache();
|
|
__sync();
|
|
@@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
|
|
|
|
preempt_disable();
|
|
if (cpu_has_inclusive_pcaches) {
|
|
- if (size >= scache_size)
|
|
- r4k_blast_scache();
|
|
- else
|
|
+ if (size >= scache_size) {
|
|
+ if (current_cpu_type() != CPU_LOONGSON3)
|
|
+ r4k_blast_scache();
|
|
+ else
|
|
+ r4k_blast_scache_node(pa_to_nid(addr));
|
|
+ } else {
|
|
blast_scache_range(addr, addr + size);
|
|
+ }
|
|
preempt_enable();
|
|
__sync();
|
|
return;
|
|
@@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
|
|
|
|
preempt_disable();
|
|
if (cpu_has_inclusive_pcaches) {
|
|
- if (size >= scache_size)
|
|
- r4k_blast_scache();
|
|
- else {
|
|
+ if (size >= scache_size) {
|
|
+ if (current_cpu_type() != CPU_LOONGSON3)
|
|
+ r4k_blast_scache();
|
|
+ else
|
|
+ r4k_blast_scache_node(pa_to_nid(addr));
|
|
+ } else {
|
|
/*
|
|
* There is no clearly documented alignment requirement
|
|
* for the cache instruction on MIPS processors and
|
|
@@ -1352,7 +1381,7 @@ static void probe_pcache(void)
|
|
c->dcache.ways *
|
|
c->dcache.linesz;
|
|
c->dcache.waybit = 0;
|
|
- if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
|
|
+ if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
|
|
c->options |= MIPS_CPU_PREFETCH;
|
|
break;
|
|
|
|
@@ -1918,6 +1947,7 @@ void r4k_cache_init(void)
|
|
r4k_blast_scache_page_setup();
|
|
r4k_blast_scache_page_indexed_setup();
|
|
r4k_blast_scache_setup();
|
|
+ r4k_blast_scache_node_setup();
|
|
#ifdef CONFIG_EVA
|
|
r4k_blast_dcache_user_page_setup();
|
|
r4k_blast_icache_user_page_setup();
|
|
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
|
|
index aeb7b1b0f2024..252c00985c973 100644
|
|
--- a/arch/mips/net/ebpf_jit.c
|
|
+++ b/arch/mips/net/ebpf_jit.c
|
|
@@ -343,12 +343,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
|
|
const struct bpf_prog *prog = ctx->skf;
|
|
int stack_adjust = ctx->stack_size;
|
|
int store_offset = stack_adjust - 8;
|
|
+ enum reg_val_type td;
|
|
int r0 = MIPS_R_V0;
|
|
|
|
- if (dest_reg == MIPS_R_RA &&
|
|
- get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
|
|
+ if (dest_reg == MIPS_R_RA) {
|
|
/* Don't let zero extended value escape. */
|
|
- emit_instr(ctx, sll, r0, r0, 0);
|
|
+ td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
|
|
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
|
|
+ emit_instr(ctx, sll, r0, r0, 0);
|
|
+ }
|
|
|
|
if (ctx->flags & EBPF_SAVE_RA) {
|
|
emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
|
|
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
|
|
index 2a5bb849b10ef..288b58b00dc84 100644
|
|
--- a/arch/mips/pci/msi-octeon.c
|
|
+++ b/arch/mips/pci/msi-octeon.c
|
|
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
|
|
int irq;
|
|
struct irq_chip *msi;
|
|
|
|
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
|
|
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
|
|
+ return 0;
|
|
+ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
|
|
msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
|
|
msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
|
|
msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
|
|
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
|
|
index 5017d5843c5ac..fc29b85cfa926 100644
|
|
--- a/arch/mips/pci/pci-octeon.c
|
|
+++ b/arch/mips/pci/pci-octeon.c
|
|
@@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
|
|
if (octeon_has_feature(OCTEON_FEATURE_PCIE))
|
|
return 0;
|
|
|
|
+ if (!octeon_is_pci_host()) {
|
|
+ pr_notice("Not in host mode, PCI Controller not initialized\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
/* Point pcibios_map_irq() to the PCI version of it */
|
|
octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
|
|
|
|
@@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
|
|
else
|
|
octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
|
|
|
|
- if (!octeon_is_pci_host()) {
|
|
- pr_notice("Not in host mode, PCI Controller not initialized\n");
|
|
- return 0;
|
|
- }
|
|
-
|
|
/* PCI I/O and PCI MEM values */
|
|
set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
|
|
ioport_resource.start = 0;
|
|
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
|
|
index 1f9cb0e3c79a6..613d617634333 100644
|
|
--- a/arch/mips/ralink/Kconfig
|
|
+++ b/arch/mips/ralink/Kconfig
|
|
@@ -38,6 +38,7 @@ choice
|
|
|
|
config SOC_MT7620
|
|
bool "MT7620/8"
|
|
+ select CPU_MIPSR2_IRQ_VI
|
|
select HW_HAS_PCI
|
|
|
|
config SOC_MT7621
|
|
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
|
|
index b3d6bf23a6620..3ef3fb6581369 100644
|
|
--- a/arch/mips/sibyte/common/Makefile
|
|
+++ b/arch/mips/sibyte/common/Makefile
|
|
@@ -1,4 +1,5 @@
|
|
obj-y := cfe.o
|
|
+obj-$(CONFIG_SWIOTLB) += dma.o
|
|
obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
|
|
obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
|
|
obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
|
|
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
|
|
new file mode 100644
|
|
index 0000000000000..eb47a94f3583e
|
|
--- /dev/null
|
|
+++ b/arch/mips/sibyte/common/dma.c
|
|
@@ -0,0 +1,14 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * DMA support for Broadcom SiByte platforms.
|
|
+ *
|
|
+ * Copyright (c) 2018 Maciej W. Rozycki
|
|
+ */
|
|
+
|
|
+#include <linux/swiotlb.h>
|
|
+#include <asm/bootinfo.h>
|
|
+
|
|
+void __init plat_swiotlb_setup(void)
|
|
+{
|
|
+ swiotlb_init(1);
|
|
+}
|
|
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
|
|
index 58a0315ad743d..67e44466d5a4b 100644
|
|
--- a/arch/mips/vdso/Makefile
|
|
+++ b/arch/mips/vdso/Makefile
|
|
@@ -8,6 +8,7 @@ ccflags-vdso := \
|
|
$(filter -E%,$(KBUILD_CFLAGS)) \
|
|
$(filter -mmicromips,$(KBUILD_CFLAGS)) \
|
|
$(filter -march=%,$(KBUILD_CFLAGS)) \
|
|
+ $(filter -m%-float,$(KBUILD_CFLAGS)) \
|
|
-D__VDSO__
|
|
|
|
ifdef CONFIG_CC_IS_CLANG
|
|
@@ -128,7 +129,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
|
|
$(call cmd,force_checksrc)
|
|
$(call if_changed_rule,cc_o_c)
|
|
|
|
-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
|
|
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
|
|
$(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
|
|
$(call if_changed_dep,cpp_lds_S)
|
|
|
|
@@ -168,7 +169,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
|
|
$(call cmd,force_checksrc)
|
|
$(call if_changed_rule,cc_o_c)
|
|
|
|
-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
|
|
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
|
|
$(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
|
|
$(call if_changed_dep,cpp_lds_S)
|
|
|
|
diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile
|
|
index 6b68558522238..7c5c15ad854aa 100644
|
|
--- a/arch/nds32/mm/Makefile
|
|
+++ b/arch/nds32/mm/Makefile
|
|
@@ -4,4 +4,8 @@ obj-y := extable.o tlb.o \
|
|
|
|
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
|
|
obj-$(CONFIG_HIGHMEM) += highmem.o
|
|
-CFLAGS_proc-n13.o += -fomit-frame-pointer
|
|
+
|
|
+ifdef CONFIG_FUNCTION_TRACER
|
|
+CFLAGS_REMOVE_proc.o = $(CC_FLAGS_FTRACE)
|
|
+endif
|
|
+CFLAGS_proc.o += -fomit-frame-pointer
|
|
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
|
|
index 2582df1c529bb..0964c236e3e5a 100644
|
|
--- a/arch/parisc/kernel/ptrace.c
|
|
+++ b/arch/parisc/kernel/ptrace.c
|
|
@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
|
|
|
|
long do_syscall_trace_enter(struct pt_regs *regs)
|
|
{
|
|
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
|
|
- tracehook_report_syscall_entry(regs)) {
|
|
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
|
|
+ int rc = tracehook_report_syscall_entry(regs);
|
|
+
|
|
/*
|
|
- * Tracing decided this syscall should not happen or the
|
|
- * debugger stored an invalid system call number. Skip
|
|
- * the system call and the system call restart handling.
|
|
+ * As tracesys_next does not set %r28 to -ENOSYS
|
|
+ * when %r20 is set to -1, initialize it here.
|
|
*/
|
|
- regs->gr[20] = -1UL;
|
|
- goto out;
|
|
+ regs->gr[28] = -ENOSYS;
|
|
+
|
|
+ if (rc) {
|
|
+ /*
|
|
+ * A nonzero return code from
|
|
+ * tracehook_report_syscall_entry() tells us
|
|
+ * to prevent the syscall execution. Skip
|
|
+ * the syscall call and the syscall restart handling.
|
|
+ *
|
|
+ * Note that the tracer may also just change
|
|
+ * regs->gr[20] to an invalid syscall number,
|
|
+ * that is handled by tracesys_next.
|
|
+ */
|
|
+ regs->gr[20] = -1UL;
|
|
+ return -1;
|
|
+ }
|
|
}
|
|
|
|
/* Do the secure computing check after ptrace. */
|
|
@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
|
|
regs->gr[24] & 0xffffffff,
|
|
regs->gr[23] & 0xffffffff);
|
|
|
|
-out:
|
|
/*
|
|
* Sign extend the syscall number to 64bit since it may have been
|
|
* modified by a compat ptrace call
|
|
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
|
|
index 2d7cffcaa476e..059187a3ded74 100644
|
|
--- a/arch/parisc/mm/init.c
|
|
+++ b/arch/parisc/mm/init.c
|
|
@@ -512,8 +512,8 @@ static void __init map_pages(unsigned long start_vaddr,
|
|
|
|
void __init set_kernel_text_rw(int enable_read_write)
|
|
{
|
|
- unsigned long start = (unsigned long)__init_begin;
|
|
- unsigned long end = (unsigned long)_etext;
|
|
+ unsigned long start = (unsigned long) _text;
|
|
+ unsigned long end = (unsigned long) &data_start;
|
|
|
|
map_pages(start, __pa(start), end-start,
|
|
PAGE_KERNEL_RWX, enable_read_write ? 1:0);
|
|
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
|
|
index 6c99e846a8c95..5ff63d53b31c2 100644
|
|
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
|
|
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
|
|
@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud)
|
|
|
|
static inline int pud_present(pud_t pud)
|
|
{
|
|
- return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
|
|
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
|
|
}
|
|
|
|
extern struct page *pud_page(pud_t pud);
|
|
@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd)
|
|
|
|
static inline int pgd_present(pgd_t pgd)
|
|
{
|
|
- return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
|
|
+ return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
|
|
}
|
|
|
|
static inline pte_t pgd_pte(pgd_t pgd)
|
|
@@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
|
|
|
|
#define pmd_move_must_withdraw pmd_move_must_withdraw
|
|
struct spinlock;
|
|
-static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
|
|
- struct spinlock *old_pmd_ptl,
|
|
- struct vm_area_struct *vma)
|
|
-{
|
|
- if (radix_enabled())
|
|
- return false;
|
|
- /*
|
|
- * Archs like ppc64 use pgtable to store per pmd
|
|
- * specific information. So when we switch the pmd,
|
|
- * we should also withdraw and deposit the pgtable
|
|
- */
|
|
- return true;
|
|
-}
|
|
-
|
|
-
|
|
+extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
|
|
+ struct spinlock *old_pmd_ptl,
|
|
+ struct vm_area_struct *vma);
|
|
+/*
|
|
+ * Hash translation mode use the deposited table to store hash pte
|
|
+ * slot information.
|
|
+ */
|
|
#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
|
|
static inline bool arch_needs_pgtable_deposit(void)
|
|
{
|
|
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
|
|
index 1e7a33592e297..15bc07a31c467 100644
|
|
--- a/arch/powerpc/include/asm/fadump.h
|
|
+++ b/arch/powerpc/include/asm/fadump.h
|
|
@@ -200,7 +200,7 @@ struct fad_crash_memory_ranges {
|
|
unsigned long long size;
|
|
};
|
|
|
|
-extern int is_fadump_boot_memory_area(u64 addr, ulong size);
|
|
+extern int is_fadump_memory_area(u64 addr, ulong size);
|
|
extern int early_init_dt_scan_fw_dump(unsigned long node,
|
|
const char *uname, int depth, void *data);
|
|
extern int fadump_reserve_mem(void);
|
|
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
|
|
index 15bea9a0f2604..ebc0b916dcf90 100644
|
|
--- a/arch/powerpc/include/asm/uaccess.h
|
|
+++ b/arch/powerpc/include/asm/uaccess.h
|
|
@@ -63,7 +63,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
|
|
#endif
|
|
|
|
#define access_ok(type, addr, size) \
|
|
- (__chk_user_ptr(addr), \
|
|
+ (__chk_user_ptr(addr), (void)(type), \
|
|
__access_ok((__force unsigned long)(addr), (size), get_fs()))
|
|
|
|
/*
|
|
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
|
|
index 761b28b1427d8..7fd9b3e1fa39a 100644
|
|
--- a/arch/powerpc/kernel/fadump.c
|
|
+++ b/arch/powerpc/kernel/fadump.c
|
|
@@ -118,13 +118,19 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
|
|
|
|
/*
|
|
* If fadump is registered, check if the memory provided
|
|
- * falls within boot memory area.
|
|
+ * falls within boot memory area and reserved memory area.
|
|
*/
|
|
-int is_fadump_boot_memory_area(u64 addr, ulong size)
|
|
+int is_fadump_memory_area(u64 addr, ulong size)
|
|
{
|
|
+ u64 d_start = fw_dump.reserve_dump_area_start;
|
|
+ u64 d_end = d_start + fw_dump.reserve_dump_area_size;
|
|
+
|
|
if (!fw_dump.dump_registered)
|
|
return 0;
|
|
|
|
+ if (((addr + size) > d_start) && (addr <= d_end))
|
|
+ return 1;
|
|
+
|
|
return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size;
|
|
}
|
|
|
|
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
|
|
index 3b67b9533c82f..438512759e827 100644
|
|
--- a/arch/powerpc/kernel/head_8xx.S
|
|
+++ b/arch/powerpc/kernel/head_8xx.S
|
|
@@ -927,11 +927,12 @@ start_here:
|
|
|
|
/* set up the PTE pointers for the Abatron bdiGDB.
|
|
*/
|
|
- tovirt(r6,r6)
|
|
lis r5, abatron_pteptrs@h
|
|
ori r5, r5, abatron_pteptrs@l
|
|
stw r5, 0xf0(0) /* Must match your Abatron config file */
|
|
tophys(r5,r5)
|
|
+ lis r6, swapper_pg_dir@h
|
|
+ ori r6, r6, swapper_pg_dir@l
|
|
stw r6, 0(r5)
|
|
|
|
/* Now turn on the MMU for real! */
|
|
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
|
|
index f6f469fc4073e..1b395b85132be 100644
|
|
--- a/arch/powerpc/kernel/security.c
|
|
+++ b/arch/powerpc/kernel/security.c
|
|
@@ -22,7 +22,7 @@ enum count_cache_flush_type {
|
|
COUNT_CACHE_FLUSH_SW = 0x2,
|
|
COUNT_CACHE_FLUSH_HW = 0x4,
|
|
};
|
|
-static enum count_cache_flush_type count_cache_flush_type;
|
|
+static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
|
|
|
|
bool barrier_nospec_enabled;
|
|
static bool no_nospec;
|
|
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
|
|
index e6474a45cef50..1355fab5f6763 100644
|
|
--- a/arch/powerpc/kernel/signal_32.c
|
|
+++ b/arch/powerpc/kernel/signal_32.c
|
|
@@ -848,7 +848,23 @@ static long restore_tm_user_regs(struct pt_regs *regs,
|
|
/* If TM bits are set to the reserved value, it's an invalid context */
|
|
if (MSR_TM_RESV(msr_hi))
|
|
return 1;
|
|
- /* Pull in the MSR TM bits from the user context */
|
|
+
|
|
+ /*
|
|
+ * Disabling preemption, since it is unsafe to be preempted
|
|
+ * with MSR[TS] set without recheckpointing.
|
|
+ */
|
|
+ preempt_disable();
|
|
+
|
|
+ /*
|
|
+ * CAUTION:
|
|
+ * After regs->MSR[TS] being updated, make sure that get_user(),
|
|
+ * put_user() or similar functions are *not* called. These
|
|
+ * functions can generate page faults which will cause the process
|
|
+ * to be de-scheduled with MSR[TS] set but without calling
|
|
+ * tm_recheckpoint(). This can cause a bug.
|
|
+ *
|
|
+ * Pull in the MSR TM bits from the user context
|
|
+ */
|
|
regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
|
|
/* Now, recheckpoint. This loads up all of the checkpointed (older)
|
|
* registers, including FP and V[S]Rs. After recheckpointing, the
|
|
@@ -873,6 +889,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
|
|
}
|
|
#endif
|
|
|
|
+ preempt_enable();
|
|
+
|
|
return 0;
|
|
}
|
|
#endif
|
|
@@ -1140,11 +1158,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
|
|
{
|
|
struct rt_sigframe __user *rt_sf;
|
|
struct pt_regs *regs = current_pt_regs();
|
|
+ int tm_restore = 0;
|
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
|
struct ucontext __user *uc_transact;
|
|
unsigned long msr_hi;
|
|
unsigned long tmp;
|
|
- int tm_restore = 0;
|
|
#endif
|
|
/* Always make any pending restarted system calls return -EINTR */
|
|
current->restart_block.fn = do_no_restart_syscall;
|
|
@@ -1192,11 +1210,19 @@ SYSCALL_DEFINE0(rt_sigreturn)
|
|
goto bad;
|
|
}
|
|
}
|
|
- if (!tm_restore)
|
|
- /* Fall through, for non-TM restore */
|
|
+ if (!tm_restore) {
|
|
+ /*
|
|
+ * Unset regs->msr because ucontext MSR TS is not
|
|
+ * set, and recheckpoint was not called. This avoid
|
|
+ * hitting a TM Bad thing at RFID
|
|
+ */
|
|
+ regs->msr &= ~MSR_TS_MASK;
|
|
+ }
|
|
+ /* Fall through, for non-TM restore */
|
|
#endif
|
|
- if (do_setcontext(&rt_sf->uc, regs, 1))
|
|
- goto bad;
|
|
+ if (!tm_restore)
|
|
+ if (do_setcontext(&rt_sf->uc, regs, 1))
|
|
+ goto bad;
|
|
|
|
/*
|
|
* It's not clear whether or why it is desirable to save the
|
|
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
|
|
index 83d51bf586c7e..14b0f5b6a373d 100644
|
|
--- a/arch/powerpc/kernel/signal_64.c
|
|
+++ b/arch/powerpc/kernel/signal_64.c
|
|
@@ -467,20 +467,6 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
|
|
if (MSR_TM_RESV(msr))
|
|
return -EINVAL;
|
|
|
|
- /* pull in MSR TS bits from user context */
|
|
- regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
|
|
-
|
|
- /*
|
|
- * Ensure that TM is enabled in regs->msr before we leave the signal
|
|
- * handler. It could be the case that (a) user disabled the TM bit
|
|
- * through the manipulation of the MSR bits in uc_mcontext or (b) the
|
|
- * TM bit was disabled because a sufficient number of context switches
|
|
- * happened whilst in the signal handler and load_tm overflowed,
|
|
- * disabling the TM bit. In either case we can end up with an illegal
|
|
- * TM state leading to a TM Bad Thing when we return to userspace.
|
|
- */
|
|
- regs->msr |= MSR_TM;
|
|
-
|
|
/* pull in MSR LE from user context */
|
|
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
|
|
|
|
@@ -572,6 +558,34 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
|
|
tm_enable();
|
|
/* Make sure the transaction is marked as failed */
|
|
tsk->thread.tm_texasr |= TEXASR_FS;
|
|
+
|
|
+ /*
|
|
+ * Disabling preemption, since it is unsafe to be preempted
|
|
+ * with MSR[TS] set without recheckpointing.
|
|
+ */
|
|
+ preempt_disable();
|
|
+
|
|
+ /* pull in MSR TS bits from user context */
|
|
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
|
|
+
|
|
+ /*
|
|
+ * Ensure that TM is enabled in regs->msr before we leave the signal
|
|
+ * handler. It could be the case that (a) user disabled the TM bit
|
|
+ * through the manipulation of the MSR bits in uc_mcontext or (b) the
|
|
+ * TM bit was disabled because a sufficient number of context switches
|
|
+ * happened whilst in the signal handler and load_tm overflowed,
|
|
+ * disabling the TM bit. In either case we can end up with an illegal
|
|
+ * TM state leading to a TM Bad Thing when we return to userspace.
|
|
+ *
|
|
+ * CAUTION:
|
|
+ * After regs->MSR[TS] being updated, make sure that get_user(),
|
|
+ * put_user() or similar functions are *not* called. These
|
|
+ * functions can generate page faults which will cause the process
|
|
+ * to be de-scheduled with MSR[TS] set but without calling
|
|
+ * tm_recheckpoint(). This can cause a bug.
|
|
+ */
|
|
+ regs->msr |= MSR_TM;
|
|
+
|
|
/* This loads the checkpointed FP/VEC state, if used */
|
|
tm_recheckpoint(&tsk->thread);
|
|
|
|
@@ -585,6 +599,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
|
|
regs->msr |= MSR_VEC;
|
|
}
|
|
|
|
+ preempt_enable();
|
|
+
|
|
return err;
|
|
}
|
|
#endif
|
|
@@ -739,12 +755,25 @@ SYSCALL_DEFINE0(rt_sigreturn)
|
|
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
|
|
&uc_transact->uc_mcontext))
|
|
goto badframe;
|
|
- }
|
|
- else
|
|
- /* Fall through, for non-TM restore */
|
|
+ } else
|
|
#endif
|
|
- if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
|
|
- goto badframe;
|
|
+ {
|
|
+ /*
|
|
+ * Fall through, for non-TM restore
|
|
+ *
|
|
+ * Unset MSR[TS] on the thread regs since MSR from user
|
|
+ * context does not have MSR active, and recheckpoint was
|
|
+ * not called since restore_tm_sigcontexts() was not called
|
|
+ * also.
|
|
+ *
|
|
+ * If not unsetting it, the code can RFID to userspace with
|
|
+ * MSR[TS] set, but without CPU in the proper state,
|
|
+ * causing a TM bad thing.
|
|
+ */
|
|
+ current->thread.regs->msr &= ~MSR_TS_MASK;
|
|
+ if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
|
|
+ goto badframe;
|
|
+ }
|
|
|
|
if (restore_altstack(&uc->uc_stack))
|
|
goto badframe;
|
|
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
|
|
index 434581bcd5b42..1148c3c60c3b9 100644
|
|
--- a/arch/powerpc/kernel/vmlinux.lds.S
|
|
+++ b/arch/powerpc/kernel/vmlinux.lds.S
|
|
@@ -308,6 +308,10 @@ SECTIONS
|
|
#ifdef CONFIG_PPC32
|
|
.data : AT(ADDR(.data) - LOAD_OFFSET) {
|
|
DATA_DATA
|
|
+#ifdef CONFIG_UBSAN
|
|
+ *(.data..Lubsan_data*)
|
|
+ *(.data..Lubsan_type*)
|
|
+#endif
|
|
*(.data.rel*)
|
|
*(SDATA_MAIN)
|
|
*(.sdata2)
|
|
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
|
|
index c615617e78acc..a18afda3d0f0b 100644
|
|
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
|
|
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
|
|
@@ -743,12 +743,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
|
|
srcu_idx = srcu_read_lock(&kvm->srcu);
|
|
slots = kvm_memslots(kvm);
|
|
kvm_for_each_memslot(memslot, slots) {
|
|
+ /* Mutual exclusion with kvm_unmap_hva_range etc. */
|
|
+ spin_lock(&kvm->mmu_lock);
|
|
/*
|
|
* This assumes it is acceptable to lose reference and
|
|
* change bits across a reset.
|
|
*/
|
|
memset(memslot->arch.rmap, 0,
|
|
memslot->npages * sizeof(*memslot->arch.rmap));
|
|
+ spin_unlock(&kvm->mmu_lock);
|
|
}
|
|
srcu_read_unlock(&kvm->srcu, srcu_idx);
|
|
}
|
|
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
|
|
index a56f8413758ab..ab43306c4ea13 100644
|
|
--- a/arch/powerpc/kvm/book3s_hv.c
|
|
+++ b/arch/powerpc/kvm/book3s_hv.c
|
|
@@ -4532,12 +4532,15 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
|
|
{
|
|
if (nesting_enabled(kvm))
|
|
kvmhv_release_all_nested(kvm);
|
|
+ kvmppc_rmap_reset(kvm);
|
|
+ kvm->arch.process_table = 0;
|
|
+ /* Mutual exclusion with kvm_unmap_hva_range etc. */
|
|
+ spin_lock(&kvm->mmu_lock);
|
|
+ kvm->arch.radix = 0;
|
|
+ spin_unlock(&kvm->mmu_lock);
|
|
kvmppc_free_radix(kvm);
|
|
kvmppc_update_lpcr(kvm, LPCR_VPM1,
|
|
LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
|
|
- kvmppc_rmap_reset(kvm);
|
|
- kvm->arch.radix = 0;
|
|
- kvm->arch.process_table = 0;
|
|
return 0;
|
|
}
|
|
|
|
@@ -4549,12 +4552,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
|
|
err = kvmppc_init_vm_radix(kvm);
|
|
if (err)
|
|
return err;
|
|
-
|
|
+ kvmppc_rmap_reset(kvm);
|
|
+ /* Mutual exclusion with kvm_unmap_hva_range etc. */
|
|
+ spin_lock(&kvm->mmu_lock);
|
|
+ kvm->arch.radix = 1;
|
|
+ spin_unlock(&kvm->mmu_lock);
|
|
kvmppc_free_hpt(&kvm->arch.hpt);
|
|
kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
|
|
LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
|
|
- kvmppc_rmap_reset(kvm);
|
|
- kvm->arch.radix = 1;
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
|
|
index 401d2ecbebc50..f8176ae3a5a7c 100644
|
|
--- a/arch/powerpc/kvm/book3s_hv_nested.c
|
|
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
|
|
@@ -1220,6 +1220,8 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
|
|
return ret;
|
|
shift = kvmppc_radix_level_to_shift(level);
|
|
}
|
|
+ /* Align gfn to the start of the page */
|
|
+ gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;
|
|
|
|
/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */
|
|
|
|
@@ -1227,6 +1229,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
|
|
perm |= gpte.may_read ? 0UL : _PAGE_READ;
|
|
perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
|
|
perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
|
|
+ /* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
|
|
+ perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
|
|
+ perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
|
|
pte = __pte(pte_val(pte) & ~perm);
|
|
|
|
/* What size pte can we insert? */
|
|
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
|
|
index 2869a299c4edc..75e2e471442fe 100644
|
|
--- a/arch/powerpc/kvm/powerpc.c
|
|
+++ b/arch/powerpc/kvm/powerpc.c
|
|
@@ -543,8 +543,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
case KVM_CAP_SPAPR_TCE:
|
|
case KVM_CAP_SPAPR_TCE_64:
|
|
- /* fallthrough */
|
|
+ r = 1;
|
|
+ break;
|
|
case KVM_CAP_SPAPR_TCE_VFIO:
|
|
+ r = !!cpu_has_feature(CPU_FTR_HVMODE);
|
|
+ break;
|
|
case KVM_CAP_PPC_RTAS:
|
|
case KVM_CAP_PPC_FIXUP_HCALL:
|
|
case KVM_CAP_PPC_ENABLE_HCALL:
|
|
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
|
|
index 1697e903bbf28..50e5c790d11e3 100644
|
|
--- a/arch/powerpc/mm/fault.c
|
|
+++ b/arch/powerpc/mm/fault.c
|
|
@@ -226,7 +226,9 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
|
|
static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
|
|
unsigned long address)
|
|
{
|
|
- if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
|
|
+ /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
|
|
+ if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
|
|
+ DSISR_PROTFAULT))) {
|
|
printk_ratelimited(KERN_CRIT "kernel tried to execute"
|
|
" exec-protected page (%lx) -"
|
|
"exploit attempt? (uid: %d)\n",
|
|
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
|
|
index 9f93c9f985c5f..30d89a37fe626 100644
|
|
--- a/arch/powerpc/mm/pgtable-book3s64.c
|
|
+++ b/arch/powerpc/mm/pgtable-book3s64.c
|
|
@@ -482,3 +482,25 @@ void arch_report_meminfo(struct seq_file *m)
|
|
atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
|
|
}
|
|
#endif /* CONFIG_PROC_FS */
|
|
+
|
|
+/*
|
|
+ * For hash translation mode, we use the deposited table to store hash slot
|
|
+ * information and they are stored at PTRS_PER_PMD offset from related pmd
|
|
+ * location. Hence a pmd move requires deposit and withdraw.
|
|
+ *
|
|
+ * For radix translation with split pmd ptl, we store the deposited table in the
|
|
+ * pmd page. Hence if we have different pmd page we need to withdraw during pmd
|
|
+ * move.
|
|
+ *
|
|
+ * With hash we use deposited table always irrespective of anon or not.
|
|
+ * With radix we use deposited table only for anonymous mapping.
|
|
+ */
|
|
+int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
|
|
+ struct spinlock *old_pmd_ptl,
|
|
+ struct vm_area_struct *vma)
|
|
+{
|
|
+ if (radix_enabled())
|
|
+ return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
|
|
+
|
|
+ return true;
|
|
+}
|
|
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
|
|
index 177de814286fc..6a2f65d3d088c 100644
|
|
--- a/arch/powerpc/perf/isa207-common.c
|
|
+++ b/arch/powerpc/perf/isa207-common.c
|
|
@@ -226,8 +226,13 @@ void isa207_get_mem_weight(u64 *weight)
|
|
u64 mmcra = mfspr(SPRN_MMCRA);
|
|
u64 exp = MMCRA_THR_CTR_EXP(mmcra);
|
|
u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
|
|
+ u64 sier = mfspr(SPRN_SIER);
|
|
+ u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
|
|
|
|
- *weight = mantissa << (2 * exp);
|
|
+ if (val == 0 || val == 7)
|
|
+ *weight = 0;
|
|
+ else
|
|
+ *weight = mantissa << (2 * exp);
|
|
}
|
|
|
|
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
|
|
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
|
|
index f5bbd4563342b..3632de52db0af 100644
|
|
--- a/arch/powerpc/platforms/4xx/ocm.c
|
|
+++ b/arch/powerpc/platforms/4xx/ocm.c
|
|
@@ -179,7 +179,7 @@ static void __init ocm_init_node(int count, struct device_node *node)
|
|
/* ioremap the non-cached region */
|
|
if (ocm->nc.memtotal) {
|
|
ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal,
|
|
- _PAGE_EXEC | PAGE_KERNEL_NCG);
|
|
+ _PAGE_EXEC | pgprot_val(PAGE_KERNEL_NCG));
|
|
|
|
if (!ocm->nc.virt) {
|
|
printk(KERN_ERR
|
|
@@ -194,7 +194,7 @@ static void __init ocm_init_node(int count, struct device_node *node)
|
|
|
|
if (ocm->c.memtotal) {
|
|
ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal,
|
|
- _PAGE_EXEC | PAGE_KERNEL);
|
|
+ _PAGE_EXEC | pgprot_val(PAGE_KERNEL));
|
|
|
|
if (!ocm->c.virt) {
|
|
printk(KERN_ERR
|
|
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
|
|
index fe9691040f54c..7639b21687559 100644
|
|
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
|
|
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
|
|
@@ -299,7 +299,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
|
|
if (alloc_userspace_copy) {
|
|
offset = 0;
|
|
uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
|
|
- levels, tce_table_size, &offset,
|
|
+ tmplevels, tce_table_size, &offset,
|
|
&total_allocated_uas);
|
|
if (!uas)
|
|
goto free_tces_exit;
|
|
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
|
|
index 7625546caefd4..17958043e7f7b 100644
|
|
--- a/arch/powerpc/platforms/pseries/dlpar.c
|
|
+++ b/arch/powerpc/platforms/pseries/dlpar.c
|
|
@@ -270,6 +270,8 @@ int dlpar_detach_node(struct device_node *dn)
|
|
if (rc)
|
|
return rc;
|
|
|
|
+ of_node_put(dn);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
index 2a983b5a52e1c..2318ab29d5dd0 100644
|
|
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
@@ -355,8 +355,11 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
|
|
phys_addr = lmb->base_addr;
|
|
|
|
#ifdef CONFIG_FA_DUMP
|
|
- /* Don't hot-remove memory that falls in fadump boot memory area */
|
|
- if (is_fadump_boot_memory_area(phys_addr, block_sz))
|
|
+ /*
|
|
+ * Don't hot-remove memory that falls in fadump boot memory area
|
|
+ * and memory that is reserved for capturing old kernel memory.
|
|
+ */
|
|
+ if (is_fadump_memory_area(phys_addr, block_sz))
|
|
return false;
|
|
#endif
|
|
|
|
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
|
|
index 7d6457ab5d345..bba281b1fe1b0 100644
|
|
--- a/arch/powerpc/platforms/pseries/papr_scm.c
|
|
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
|
|
@@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
|
|
{
|
|
unsigned long ret[PLPAR_HCALL_BUFSIZE];
|
|
uint64_t rc, token;
|
|
+ uint64_t saved = 0;
|
|
|
|
/*
|
|
* When the hypervisor cannot map all the requested memory in a single
|
|
@@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
|
|
rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
|
|
p->blocks, BIND_ANY_ADDR, token);
|
|
token = ret[0];
|
|
+ if (!saved)
|
|
+ saved = ret[1];
|
|
cond_resched();
|
|
} while (rc == H_BUSY);
|
|
|
|
@@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
|
|
return -ENXIO;
|
|
}
|
|
|
|
- p->bound_addr = ret[1];
|
|
+ p->bound_addr = saved;
|
|
|
|
dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
|
|
|
|
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
|
|
index 36b8dc47a3c32..b566203d09c55 100644
|
|
--- a/arch/powerpc/xmon/xmon.c
|
|
+++ b/arch/powerpc/xmon/xmon.c
|
|
@@ -75,6 +75,9 @@ static int xmon_gate;
|
|
#define xmon_owner 0
|
|
#endif /* CONFIG_SMP */
|
|
|
|
+#ifdef CONFIG_PPC_PSERIES
|
|
+static int set_indicator_token = RTAS_UNKNOWN_SERVICE;
|
|
+#endif
|
|
static unsigned long in_xmon __read_mostly = 0;
|
|
static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT);
|
|
|
|
@@ -358,7 +361,6 @@ static inline void disable_surveillance(void)
|
|
#ifdef CONFIG_PPC_PSERIES
|
|
/* Since this can't be a module, args should end up below 4GB. */
|
|
static struct rtas_args args;
|
|
- int token;
|
|
|
|
/*
|
|
* At this point we have got all the cpus we can into
|
|
@@ -367,11 +369,11 @@ static inline void disable_surveillance(void)
|
|
* If we did try to take rtas.lock there would be a
|
|
* real possibility of deadlock.
|
|
*/
|
|
- token = rtas_token("set-indicator");
|
|
- if (token == RTAS_UNKNOWN_SERVICE)
|
|
+ if (set_indicator_token == RTAS_UNKNOWN_SERVICE)
|
|
return;
|
|
|
|
- rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0);
|
|
+ rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL,
|
|
+ SURVEILLANCE_TOKEN, 0, 0);
|
|
|
|
#endif /* CONFIG_PPC_PSERIES */
|
|
}
|
|
@@ -3688,6 +3690,14 @@ static void xmon_init(int enable)
|
|
__debugger_iabr_match = xmon_iabr_match;
|
|
__debugger_break_match = xmon_break_match;
|
|
__debugger_fault_handler = xmon_fault_handler;
|
|
+
|
|
+#ifdef CONFIG_PPC_PSERIES
|
|
+ /*
|
|
+ * Get the token here to avoid trying to get a lock
|
|
+ * during the crash, causing a deadlock.
|
|
+ */
|
|
+ set_indicator_token = rtas_token("set-indicator");
|
|
+#endif
|
|
} else {
|
|
__debugger = NULL;
|
|
__debugger_ipi = NULL;
|
|
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
|
|
index 2fa2942be221e..470755cb75584 100644
|
|
--- a/arch/riscv/include/asm/pgtable-bits.h
|
|
+++ b/arch/riscv/include/asm/pgtable-bits.h
|
|
@@ -35,6 +35,12 @@
|
|
#define _PAGE_SPECIAL _PAGE_SOFT
|
|
#define _PAGE_TABLE _PAGE_PRESENT
|
|
|
|
+/*
|
|
+ * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
|
|
+ * distinguish them from swapped out pages
|
|
+ */
|
|
+#define _PAGE_PROT_NONE _PAGE_READ
|
|
+
|
|
#define _PAGE_PFN_SHIFT 10
|
|
|
|
/* Set of bits to preserve across pte_modify() */
|
|
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
|
|
index 16301966d65b6..a8179a8c1491c 100644
|
|
--- a/arch/riscv/include/asm/pgtable.h
|
|
+++ b/arch/riscv/include/asm/pgtable.h
|
|
@@ -44,7 +44,7 @@
|
|
/* Page protection bits */
|
|
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
|
|
|
|
-#define PAGE_NONE __pgprot(0)
|
|
+#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
|
|
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
|
|
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
|
|
#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
|
|
@@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
|
|
|
|
static inline int pmd_present(pmd_t pmd)
|
|
{
|
|
- return (pmd_val(pmd) & _PAGE_PRESENT);
|
|
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
|
|
}
|
|
|
|
static inline int pmd_none(pmd_t pmd)
|
|
@@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
|
|
|
|
static inline int pte_present(pte_t pte)
|
|
{
|
|
- return (pte_val(pte) & _PAGE_PRESENT);
|
|
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
|
|
}
|
|
|
|
static inline int pte_none(pte_t pte)
|
|
@@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
|
|
*
|
|
* Format of swap PTE:
|
|
* bit 0: _PAGE_PRESENT (zero)
|
|
- * bit 1: reserved for future use (zero)
|
|
+ * bit 1: _PAGE_PROT_NONE (zero)
|
|
* bits 2 to 6: swap type
|
|
* bits 7 to XLEN-1: swap offset
|
|
*/
|
|
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
|
|
index 60f1e02eed360..6c898d540d9da 100644
|
|
--- a/arch/riscv/kernel/ptrace.c
|
|
+++ b/arch/riscv/kernel/ptrace.c
|
|
@@ -172,6 +172,6 @@ void do_syscall_trace_exit(struct pt_regs *regs)
|
|
|
|
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
|
|
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
|
|
- trace_sys_exit(regs, regs->regs[0]);
|
|
+ trace_sys_exit(regs, regs_return_value(regs));
|
|
#endif
|
|
}
|
|
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
|
|
index ccbb53e220240..8d04e6f3f7964 100644
|
|
--- a/arch/s390/include/asm/mmu_context.h
|
|
+++ b/arch/s390/include/asm/mmu_context.h
|
|
@@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
|
|
atomic_set(&mm->context.flush_count, 0);
|
|
mm->context.gmap_asce = 0;
|
|
mm->context.flush_mm = 0;
|
|
- mm->context.compat_mm = 0;
|
|
+ mm->context.compat_mm = test_thread_flag(TIF_31BIT);
|
|
#ifdef CONFIG_PGSTE
|
|
mm->context.alloc_pgste = page_table_allocate_pgste ||
|
|
test_thread_flag(TIF_PGSTE) ||
|
|
@@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
|
{
|
|
int cpu = smp_processor_id();
|
|
|
|
- if (prev == next)
|
|
- return;
|
|
S390_lowcore.user_asce = next->context.asce;
|
|
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
|
|
/* Clear previous user-ASCE from CR1 and CR7 */
|
|
@@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
|
__ctl_load(S390_lowcore.vdso_asce, 7, 7);
|
|
clear_cpu_flag(CIF_ASCE_SECONDARY);
|
|
}
|
|
- cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
|
|
+ if (prev != next)
|
|
+ cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
|
|
}
|
|
|
|
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
|
|
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
|
|
index 42c81a95e97ba..494c34c507161 100644
|
|
--- a/arch/s390/include/uapi/asm/zcrypt.h
|
|
+++ b/arch/s390/include/uapi/asm/zcrypt.h
|
|
@@ -150,8 +150,8 @@ struct ica_xcRB {
|
|
* @cprb_len: CPRB header length [0x0020]
|
|
* @cprb_ver_id: CPRB version id. [0x04]
|
|
* @pad_000: Alignment pad bytes
|
|
- * @flags: Admin cmd [0x80] or functional cmd [0x00]
|
|
- * @func_id: Function id / subtype [0x5434]
|
|
+ * @flags: Admin bit [0x80], Special bit [0x20]
|
|
+ * @func_id: Function id / subtype [0x5434] "T4"
|
|
* @source_id: Source id [originator id]
|
|
* @target_id: Target id [usage/ctrl domain id]
|
|
* @ret_code: Return code
|
|
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
|
|
index af5c2b3f70656..a8c7789b246b4 100644
|
|
--- a/arch/s390/kernel/early.c
|
|
+++ b/arch/s390/kernel/early.c
|
|
@@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void)
|
|
if (stsi(vmms, 3, 2, 2) || !vmms->count)
|
|
return;
|
|
|
|
- /* Running under KVM? If not we assume z/VM */
|
|
+ /* Detect known hypervisors */
|
|
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
|
|
S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
|
|
- else
|
|
+ else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
|
|
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
|
|
}
|
|
|
|
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
|
|
index 72dd23ef771b6..7ed90a7591357 100644
|
|
--- a/arch/s390/kernel/setup.c
|
|
+++ b/arch/s390/kernel/setup.c
|
|
@@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p)
|
|
pr_info("Linux is running under KVM in 64-bit mode\n");
|
|
else if (MACHINE_IS_LPAR)
|
|
pr_info("Linux is running natively in 64-bit mode\n");
|
|
+ else
|
|
+ pr_info("Linux is running as a guest in 64-bit mode\n");
|
|
|
|
/* Have one command line that is parsed and saved in /proc/cmdline */
|
|
/* boot_command_line has been already set up in early.c */
|
|
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
|
|
index f82b3d3c36e2d..b198ece2aad63 100644
|
|
--- a/arch/s390/kernel/smp.c
|
|
+++ b/arch/s390/kernel/smp.c
|
|
@@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
|
|
*/
|
|
void smp_call_ipl_cpu(void (*func)(void *), void *data)
|
|
{
|
|
+ struct lowcore *lc = pcpu_devices->lowcore;
|
|
+
|
|
+ if (pcpu_devices[0].address == stap())
|
|
+ lc = &S390_lowcore;
|
|
+
|
|
pcpu_delegate(&pcpu_devices[0], func, data,
|
|
- pcpu_devices->lowcore->nodat_stack);
|
|
+ lc->nodat_stack);
|
|
}
|
|
|
|
int smp_find_processor_id(u16 address)
|
|
@@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev,
|
|
{
|
|
int rc;
|
|
|
|
+ rc = lock_device_hotplug_sysfs();
|
|
+ if (rc)
|
|
+ return rc;
|
|
rc = smp_rescan_cpus();
|
|
+ unlock_device_hotplug();
|
|
return rc ? rc : count;
|
|
}
|
|
static DEVICE_ATTR_WO(rescan);
|
|
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
|
|
index 537f97fde37f9..b6796e616812d 100644
|
|
--- a/arch/s390/kernel/swsusp.S
|
|
+++ b/arch/s390/kernel/swsusp.S
|
|
@@ -30,10 +30,10 @@
|
|
.section .text
|
|
ENTRY(swsusp_arch_suspend)
|
|
lg %r1,__LC_NODAT_STACK
|
|
- aghi %r1,-STACK_FRAME_OVERHEAD
|
|
stmg %r6,%r15,__SF_GPRS(%r1)
|
|
+ aghi %r1,-STACK_FRAME_OVERHEAD
|
|
stg %r15,__SF_BACKCHAIN(%r1)
|
|
- lgr %r1,%r15
|
|
+ lgr %r15,%r1
|
|
|
|
/* Store FPU registers */
|
|
brasl %r14,save_fpu_regs
|
|
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
|
|
index ebe748a9f472f..4ff354887db41 100644
|
|
--- a/arch/s390/kernel/vdso.c
|
|
+++ b/arch/s390/kernel/vdso.c
|
|
@@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|
|
|
vdso_pages = vdso64_pages;
|
|
#ifdef CONFIG_COMPAT
|
|
- if (is_compat_task()) {
|
|
+ mm->context.compat_mm = is_compat_task();
|
|
+ if (mm->context.compat_mm)
|
|
vdso_pages = vdso32_pages;
|
|
- mm->context.compat_mm = 1;
|
|
- }
|
|
#endif
|
|
/*
|
|
* vDSO has a problem and was disabled, just don't "enable" it for
|
|
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
|
|
index a153257bf7d98..d62fa148558b9 100644
|
|
--- a/arch/s390/kvm/vsie.c
|
|
+++ b/arch/s390/kvm/vsie.c
|
|
@@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
|
|
scb_s->crycbd = 0;
|
|
|
|
apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
|
|
- if (!apie_h && !key_msk)
|
|
+ if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
|
|
return 0;
|
|
|
|
if (!crycb_addr)
|
|
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
|
|
index 19b2d2a9b43db..eeb7450db18c0 100644
|
|
--- a/arch/s390/pci/pci_clp.c
|
|
+++ b/arch/s390/pci/pci_clp.c
|
|
@@ -436,7 +436,7 @@ int clp_get_state(u32 fid, enum zpci_state *state)
|
|
struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
|
|
int rc;
|
|
|
|
- rrb = clp_alloc_block(GFP_KERNEL);
|
|
+ rrb = clp_alloc_block(GFP_ATOMIC);
|
|
if (!rrb)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
|
|
index e59c577ed8715..c70bc7809ddae 100644
|
|
--- a/arch/sh/boards/mach-kfr2r09/setup.c
|
|
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
|
|
@@ -25,7 +25,6 @@
|
|
#include <linux/memblock.h>
|
|
#include <linux/mfd/tmio.h>
|
|
#include <linux/mmc/host.h>
|
|
-#include <linux/mtd/onenand.h>
|
|
#include <linux/mtd/physmap.h>
|
|
#include <linux/platform_data/lv5207lp.h>
|
|
#include <linux/platform_device.h>
|
|
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
|
|
index 7485398d07370..9c04562310b36 100644
|
|
--- a/arch/um/include/asm/pgtable.h
|
|
+++ b/arch/um/include/asm/pgtable.h
|
|
@@ -197,12 +197,17 @@ static inline pte_t pte_mkold(pte_t pte)
|
|
|
|
static inline pte_t pte_wrprotect(pte_t pte)
|
|
{
|
|
- pte_clear_bits(pte, _PAGE_RW);
|
|
+ if (likely(pte_get_bits(pte, _PAGE_RW)))
|
|
+ pte_clear_bits(pte, _PAGE_RW);
|
|
+ else
|
|
+ return pte;
|
|
return(pte_mknewprot(pte));
|
|
}
|
|
|
|
static inline pte_t pte_mkread(pte_t pte)
|
|
{
|
|
+ if (unlikely(pte_get_bits(pte, _PAGE_USER)))
|
|
+ return pte;
|
|
pte_set_bits(pte, _PAGE_USER);
|
|
return(pte_mknewprot(pte));
|
|
}
|
|
@@ -221,6 +226,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
|
|
|
|
static inline pte_t pte_mkwrite(pte_t pte)
|
|
{
|
|
+ if (unlikely(pte_get_bits(pte, _PAGE_RW)))
|
|
+ return pte;
|
|
pte_set_bits(pte, _PAGE_RW);
|
|
return(pte_mknewprot(pte));
|
|
}
|
|
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
|
|
index 8eaf8952c408c..39913770a44d5 100644
|
|
--- a/arch/x86/entry/entry_64_compat.S
|
|
+++ b/arch/x86/entry/entry_64_compat.S
|
|
@@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)
|
|
|
|
/* Need to switch before accessing the thread stack. */
|
|
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
|
|
- movq %rsp, %rdi
|
|
+ /* In the Xen PV case we already run on the thread stack. */
|
|
+ ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
|
|
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
|
|
|
|
pushq 6*8(%rdi) /* regs->ss */
|
|
@@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
|
|
pushq 3*8(%rdi) /* regs->cs */
|
|
pushq 2*8(%rdi) /* regs->ip */
|
|
pushq 1*8(%rdi) /* regs->orig_ax */
|
|
-
|
|
pushq (%rdi) /* pt_regs->di */
|
|
+.Lint80_keep_stack:
|
|
+
|
|
pushq %rsi /* pt_regs->si */
|
|
xorl %esi, %esi /* nospec si */
|
|
pushq %rdx /* pt_regs->dx */
|
|
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
|
|
index 374a19712e200..b684f0294f35d 100644
|
|
--- a/arch/x86/events/core.c
|
|
+++ b/arch/x86/events/core.c
|
|
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
|
|
x86_pmu.check_microcode();
|
|
}
|
|
|
|
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
|
|
+{
|
|
+ if (x86_pmu.check_period && x86_pmu.check_period(event, value))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (value && x86_pmu.limit_period) {
|
|
+ if (x86_pmu.limit_period(event, value) > value)
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static struct pmu pmu = {
|
|
.pmu_enable = x86_pmu_enable,
|
|
.pmu_disable = x86_pmu_disable,
|
|
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
|
|
.event_idx = x86_pmu_event_idx,
|
|
.sched_task = x86_pmu_sched_task,
|
|
.task_ctx_size = sizeof(struct x86_perf_task_context),
|
|
+ .check_period = x86_pmu_check_period,
|
|
};
|
|
|
|
void arch_perf_update_userpage(struct perf_event *event,
|
|
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
|
|
index ecc3e34ca955f..ede20c44cc692 100644
|
|
--- a/arch/x86/events/intel/core.c
|
|
+++ b/arch/x86/events/intel/core.c
|
|
@@ -3558,6 +3558,14 @@ static void free_excl_cntrs(int cpu)
|
|
}
|
|
|
|
static void intel_pmu_cpu_dying(int cpu)
|
|
+{
|
|
+ fini_debug_store_on_cpu(cpu);
|
|
+
|
|
+ if (x86_pmu.counter_freezing)
|
|
+ disable_counter_freeze();
|
|
+}
|
|
+
|
|
+static void intel_pmu_cpu_dead(int cpu)
|
|
{
|
|
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
|
|
struct intel_shared_regs *pc;
|
|
@@ -3570,11 +3578,6 @@ static void intel_pmu_cpu_dying(int cpu)
|
|
}
|
|
|
|
free_excl_cntrs(cpu);
|
|
-
|
|
- fini_debug_store_on_cpu(cpu);
|
|
-
|
|
- if (x86_pmu.counter_freezing)
|
|
- disable_counter_freeze();
|
|
}
|
|
|
|
static void intel_pmu_sched_task(struct perf_event_context *ctx,
|
|
@@ -3584,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
|
|
intel_pmu_lbr_sched_task(ctx, sched_in);
|
|
}
|
|
|
|
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
|
|
+{
|
|
+ return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
|
|
+}
|
|
+
|
|
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
|
|
|
|
PMU_FORMAT_ATTR(ldlat, "config1:0-15");
|
|
@@ -3663,6 +3671,9 @@ static __initconst const struct x86_pmu core_pmu = {
|
|
.cpu_prepare = intel_pmu_cpu_prepare,
|
|
.cpu_starting = intel_pmu_cpu_starting,
|
|
.cpu_dying = intel_pmu_cpu_dying,
|
|
+ .cpu_dead = intel_pmu_cpu_dead,
|
|
+
|
|
+ .check_period = intel_pmu_check_period,
|
|
};
|
|
|
|
static struct attribute *intel_pmu_attrs[];
|
|
@@ -3703,8 +3714,12 @@ static __initconst const struct x86_pmu intel_pmu = {
|
|
.cpu_prepare = intel_pmu_cpu_prepare,
|
|
.cpu_starting = intel_pmu_cpu_starting,
|
|
.cpu_dying = intel_pmu_cpu_dying,
|
|
+ .cpu_dead = intel_pmu_cpu_dead,
|
|
+
|
|
.guest_get_msrs = intel_guest_get_msrs,
|
|
.sched_task = intel_pmu_sched_task,
|
|
+
|
|
+ .check_period = intel_pmu_check_period,
|
|
};
|
|
|
|
static __init void intel_clovertown_quirk(void)
|
|
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
|
|
index c07bee31abe85..b10e04387f380 100644
|
|
--- a/arch/x86/events/intel/uncore_snbep.c
|
|
+++ b/arch/x86/events/intel/uncore_snbep.c
|
|
@@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
|
|
.id_table = snbep_uncore_pci_ids,
|
|
};
|
|
|
|
+#define NODE_ID_MASK 0x7
|
|
+
|
|
/*
|
|
* build pci bus to socket mapping
|
|
*/
|
|
@@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
|
|
err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
|
|
if (err)
|
|
break;
|
|
- nodeid = config;
|
|
+ nodeid = config & NODE_ID_MASK;
|
|
/* get the Node ID mapping */
|
|
err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
|
|
if (err)
|
|
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
|
|
index 78d7b7031bfcc..d46fd6754d920 100644
|
|
--- a/arch/x86/events/perf_event.h
|
|
+++ b/arch/x86/events/perf_event.h
|
|
@@ -646,6 +646,11 @@ struct x86_pmu {
|
|
* Intel host/guest support (KVM)
|
|
*/
|
|
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
|
|
+
|
|
+ /*
|
|
+ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
|
|
+ */
|
|
+ int (*check_period) (struct perf_event *event, u64 period);
|
|
};
|
|
|
|
struct x86_perf_task_context {
|
|
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)
|
|
|
|
#ifdef CONFIG_CPU_SUP_INTEL
|
|
|
|
-static inline bool intel_pmu_has_bts(struct perf_event *event)
|
|
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
|
|
{
|
|
struct hw_perf_event *hwc = &event->hw;
|
|
unsigned int hw_event, bts_event;
|
|
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
|
|
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
|
|
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
|
|
|
|
- return hw_event == bts_event && hwc->sample_period == 1;
|
|
+ return hw_event == bts_event && period == 1;
|
|
+}
|
|
+
|
|
+static inline bool intel_pmu_has_bts(struct perf_event *event)
|
|
+{
|
|
+ struct hw_perf_event *hwc = &event->hw;
|
|
+
|
|
+ return intel_pmu_has_bts_period(event, hwc->sample_period);
|
|
}
|
|
|
|
int intel_pmu_save_and_restart(struct perf_event *event);
|
|
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
|
|
index 8e02b30cf08e1..3ebd77770f98b 100644
|
|
--- a/arch/x86/ia32/ia32_aout.c
|
|
+++ b/arch/x86/ia32/ia32_aout.c
|
|
@@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
|
|
/*
|
|
* fill in the user structure for a core dump..
|
|
*/
|
|
-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
|
|
+static void fill_dump(struct pt_regs *regs, struct user32 *dump)
|
|
{
|
|
u32 fs, gs;
|
|
memset(dump, 0, sizeof(*dump));
|
|
@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm)
|
|
fs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
has_dumped = 1;
|
|
+
|
|
+ fill_dump(cprm->regs, &dump);
|
|
+
|
|
strncpy(dump.u_comm, current->comm, sizeof(current->comm));
|
|
dump.u_ar0 = offsetof(struct user32, regs);
|
|
dump.signal = cprm->siginfo->si_signo;
|
|
- dump_thread32(cprm->regs, &dump);
|
|
|
|
/*
|
|
* If the size of the dump file exceeds the rlimit, then see
|
|
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
|
|
index 69dcdf195b611..fa2c93cb42a27 100644
|
|
--- a/arch/x86/include/asm/fpu/internal.h
|
|
+++ b/arch/x86/include/asm/fpu/internal.h
|
|
@@ -106,6 +106,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
|
|
#define user_insn(insn, output, input...) \
|
|
({ \
|
|
int err; \
|
|
+ \
|
|
+ might_fault(); \
|
|
+ \
|
|
asm volatile(ASM_STAC "\n" \
|
|
"1:" #insn "\n\t" \
|
|
"2: " ASM_CLAC "\n" \
|
|
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
|
|
index fbda5a917c5b7..5a0cbc717997b 100644
|
|
--- a/arch/x86/include/asm/kvm_host.h
|
|
+++ b/arch/x86/include/asm/kvm_host.h
|
|
@@ -299,6 +299,7 @@ union kvm_mmu_extended_role {
|
|
unsigned int cr4_smap:1;
|
|
unsigned int cr4_smep:1;
|
|
unsigned int cr4_la57:1;
|
|
+ unsigned int maxphyaddr:6;
|
|
};
|
|
};
|
|
|
|
@@ -397,6 +398,7 @@ struct kvm_mmu {
|
|
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
|
|
u64 *spte, const void *pte);
|
|
hpa_t root_hpa;
|
|
+ gpa_t root_cr3;
|
|
union kvm_mmu_role mmu_role;
|
|
u8 root_level;
|
|
u8 shadow_root_level;
|
|
@@ -1492,7 +1494,7 @@ asmlinkage void kvm_spurious_fault(void);
|
|
"cmpb $0, kvm_rebooting \n\t" \
|
|
"jne 668b \n\t" \
|
|
__ASM_SIZE(push) " $666b \n\t" \
|
|
- "call kvm_spurious_fault \n\t" \
|
|
+ "jmp kvm_spurious_fault \n\t" \
|
|
".popsection \n\t" \
|
|
_ASM_EXTABLE(666b, 667b)
|
|
|
|
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
|
|
index 0ca50611e8cec..19d18fae6ec66 100644
|
|
--- a/arch/x86/include/asm/mmu_context.h
|
|
+++ b/arch/x86/include/asm/mmu_context.h
|
|
@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
|
|
|
|
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
|
|
|
|
+/*
|
|
+ * Init a new mm. Used on mm copies, like at fork()
|
|
+ * and on mm's that are brand-new, like at execve().
|
|
+ */
|
|
static inline int init_new_context(struct task_struct *tsk,
|
|
struct mm_struct *mm)
|
|
{
|
|
@@ -228,8 +232,22 @@ do { \
|
|
} while (0)
|
|
#endif
|
|
|
|
+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
|
|
+ struct mm_struct *mm)
|
|
+{
|
|
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
|
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
|
|
+ return;
|
|
+
|
|
+ /* Duplicate the oldmm pkey state in mm: */
|
|
+ mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
|
|
+ mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
|
|
+#endif
|
|
+}
|
|
+
|
|
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
|
|
{
|
|
+ arch_dup_pkeys(oldmm, mm);
|
|
paravirt_arch_dup_mmap(oldmm, mm);
|
|
return ldt_dup_context(oldmm, mm);
|
|
}
|
|
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
|
|
index 3de69330e6c50..afbc87206886e 100644
|
|
--- a/arch/x86/include/asm/traps.h
|
|
+++ b/arch/x86/include/asm/traps.h
|
|
@@ -104,9 +104,9 @@ extern int panic_on_unrecovered_nmi;
|
|
|
|
void math_emulate(struct math_emu_info *);
|
|
#ifndef CONFIG_X86_32
|
|
-asmlinkage void smp_thermal_interrupt(void);
|
|
-asmlinkage void smp_threshold_interrupt(void);
|
|
-asmlinkage void smp_deferred_error_interrupt(void);
|
|
+asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
|
|
+asmlinkage void smp_threshold_interrupt(struct pt_regs *regs);
|
|
+asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
|
|
#endif
|
|
|
|
extern void ist_enter(struct pt_regs *regs);
|
|
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
|
|
index e652a7cc61863..3f697a9e3f59b 100644
|
|
--- a/arch/x86/include/asm/uv/bios.h
|
|
+++ b/arch/x86/include/asm/uv/bios.h
|
|
@@ -48,7 +48,8 @@ enum {
|
|
BIOS_STATUS_SUCCESS = 0,
|
|
BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
|
|
BIOS_STATUS_EINVAL = -EINVAL,
|
|
- BIOS_STATUS_UNAVAIL = -EBUSY
|
|
+ BIOS_STATUS_UNAVAIL = -EBUSY,
|
|
+ BIOS_STATUS_ABORT = -EINTR,
|
|
};
|
|
|
|
/* Address map parameters */
|
|
@@ -167,4 +168,9 @@ extern long system_serial_number;
|
|
|
|
extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
|
|
|
|
+/*
|
|
+ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
|
|
+ */
|
|
+extern struct semaphore __efi_uv_runtime_lock;
|
|
+
|
|
#endif /* _ASM_X86_UV_BIOS_H */
|
|
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
|
|
index 500278f5308ee..d8b0b0e7be328 100644
|
|
--- a/arch/x86/kernel/cpu/bugs.c
|
|
+++ b/arch/x86/kernel/cpu/bugs.c
|
|
@@ -69,7 +69,7 @@ void __init check_bugs(void)
|
|
* identify_boot_cpu() initialized SMT support information, let the
|
|
* core code know.
|
|
*/
|
|
- cpu_smt_check_topology_early();
|
|
+ cpu_smt_check_topology();
|
|
|
|
if (!IS_ENABLED(CONFIG_SMP)) {
|
|
pr_info("CPU: ");
|
|
@@ -213,7 +213,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
|
|
static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
|
|
SPECTRE_V2_USER_NONE;
|
|
|
|
-#ifdef RETPOLINE
|
|
+#ifdef CONFIG_RETPOLINE
|
|
static bool spectre_v2_bad_module;
|
|
|
|
bool retpoline_module_ok(bool has_retpoline)
|
|
@@ -1002,7 +1002,8 @@ static void __init l1tf_select_mitigation(void)
|
|
#endif
|
|
|
|
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
|
|
- if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
|
|
+ if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
|
|
+ e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
|
|
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
|
|
pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
|
|
half_pa);
|
|
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
|
|
index 44272b7107ad9..2d0a565fd0bbc 100644
|
|
--- a/arch/x86/kernel/cpu/intel_rdt.c
|
|
+++ b/arch/x86/kernel/cpu/intel_rdt.c
|
|
@@ -421,7 +421,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
|
|
struct list_head *l;
|
|
|
|
if (id < 0)
|
|
- return ERR_PTR(id);
|
|
+ return ERR_PTR(-ENODEV);
|
|
|
|
list_for_each(l, &r->domains) {
|
|
d = list_entry(l, struct rdt_domain, list);
|
|
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
|
|
index efa4a519f5e55..c8b72aff55e00 100644
|
|
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
|
|
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
|
|
@@ -467,7 +467,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
|
|
|
|
r = &rdt_resources_all[resid];
|
|
d = rdt_find_domain(r, domid, NULL);
|
|
- if (!d) {
|
|
+ if (IS_ERR_OR_NULL(d)) {
|
|
ret = -ENOENT;
|
|
goto out;
|
|
}
|
|
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
|
|
index f27b8115ffa2a..951c613676882 100644
|
|
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
|
|
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
|
|
@@ -1029,7 +1029,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
|
|
* peer RDT CDP resource. Hence the WARN.
|
|
*/
|
|
_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
|
|
- if (WARN_ON(!_d_cdp)) {
|
|
+ if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
|
|
_r_cdp = NULL;
|
|
ret = -EINVAL;
|
|
}
|
|
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
|
|
index 36d2696c9563e..84089c2342c0d 100644
|
|
--- a/arch/x86/kernel/cpu/mcheck/mce.c
|
|
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
|
|
@@ -786,6 +786,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
|
|
quirk_no_way_out(i, m, regs);
|
|
|
|
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
|
|
+ m->bank = i;
|
|
mce_read_aux(m, i);
|
|
*msg = tmp;
|
|
return 1;
|
|
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
|
|
index e12454e21b8a5..9f915a8791cc7 100644
|
|
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
|
|
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
|
|
@@ -23,6 +23,7 @@
|
|
#include <linux/string.h>
|
|
|
|
#include <asm/amd_nb.h>
|
|
+#include <asm/traps.h>
|
|
#include <asm/apic.h>
|
|
#include <asm/mce.h>
|
|
#include <asm/msr.h>
|
|
@@ -99,7 +100,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
|
|
[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
|
|
};
|
|
|
|
-const char *smca_get_name(enum smca_bank_types t)
|
|
+static const char *smca_get_name(enum smca_bank_types t)
|
|
{
|
|
if (t >= N_SMCA_BANK_TYPES)
|
|
return NULL;
|
|
@@ -824,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
|
|
mce_log(&m);
|
|
}
|
|
|
|
-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
|
|
+asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
|
|
{
|
|
entering_irq();
|
|
trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
|
|
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
|
|
index 2da67b70ba989..ee229ceee745c 100644
|
|
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
|
|
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
|
|
@@ -25,6 +25,7 @@
|
|
#include <linux/cpu.h>
|
|
|
|
#include <asm/processor.h>
|
|
+#include <asm/traps.h>
|
|
#include <asm/apic.h>
|
|
#include <asm/mce.h>
|
|
#include <asm/msr.h>
|
|
@@ -390,7 +391,7 @@ static void unexpected_thermal_interrupt(void)
|
|
|
|
static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
|
|
|
|
-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
|
|
+asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
|
|
{
|
|
entering_irq();
|
|
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
|
|
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
|
|
index 2b584b319eff3..c21e0a1efd0fb 100644
|
|
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
|
|
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
|
|
@@ -6,6 +6,7 @@
|
|
#include <linux/kernel.h>
|
|
|
|
#include <asm/irq_vectors.h>
|
|
+#include <asm/traps.h>
|
|
#include <asm/apic.h>
|
|
#include <asm/mce.h>
|
|
#include <asm/trace/irq_vectors.h>
|
|
@@ -18,7 +19,7 @@ static void default_threshold_interrupt(void)
|
|
|
|
void (*mce_threshold_vector)(void) = default_threshold_interrupt;
|
|
|
|
-asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
|
|
+asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
|
|
{
|
|
entering_irq();
|
|
trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
|
|
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
|
|
index ba4bfb7f6a369..5c93a65ee1e5c 100644
|
|
--- a/arch/x86/kernel/kvm.c
|
|
+++ b/arch/x86/kernel/kvm.c
|
|
@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
|
|
#else
|
|
u64 ipi_bitmap = 0;
|
|
#endif
|
|
+ long ret;
|
|
|
|
if (cpumask_empty(mask))
|
|
return;
|
|
@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
|
|
} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
|
|
max = apic_id < max ? max : apic_id;
|
|
} else {
|
|
- kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
|
|
+ ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
|
|
(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
|
|
+ WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
|
|
min = max = apic_id;
|
|
ipi_bitmap = 0;
|
|
}
|
|
@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
|
|
}
|
|
|
|
if (ipi_bitmap) {
|
|
- kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
|
|
+ ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
|
|
(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
|
|
+ WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
|
|
}
|
|
|
|
local_irq_restore(flags);
|
|
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
|
|
index a9134d1910b96..ccd1f2a8e5577 100644
|
|
--- a/arch/x86/kernel/smpboot.c
|
|
+++ b/arch/x86/kernel/smpboot.c
|
|
@@ -1347,7 +1347,7 @@ void __init calculate_max_logical_packages(void)
|
|
* extrapolate the boot cpu's data to all packages.
|
|
*/
|
|
ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
|
|
- __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
|
|
+ __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
|
|
pr_info("Max logical packages: %u\n", __max_logical_packages);
|
|
}
|
|
|
|
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
|
|
index 7bcfa61375c09..98d13c6a64be0 100644
|
|
--- a/arch/x86/kvm/cpuid.c
|
|
+++ b/arch/x86/kvm/cpuid.c
|
|
@@ -337,6 +337,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
|
|
unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
|
|
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
|
|
unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
|
|
+ unsigned f_la57 = 0;
|
|
|
|
/* cpuid 1.edx */
|
|
const u32 kvm_cpuid_1_edx_x86_features =
|
|
@@ -491,7 +492,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
|
|
// TSC_ADJUST is emulated
|
|
entry->ebx |= F(TSC_ADJUST);
|
|
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
|
|
+ f_la57 = entry->ecx & F(LA57);
|
|
cpuid_mask(&entry->ecx, CPUID_7_ECX);
|
|
+ /* Set LA57 based on hardware capability. */
|
|
+ entry->ecx |= f_la57;
|
|
entry->ecx |= f_umip;
|
|
/* PKU is not yet implemented for shadow paging. */
|
|
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
|
|
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
|
|
index 7c03c0f35444f..e763e5445e3ca 100644
|
|
--- a/arch/x86/kvm/mmu.c
|
|
+++ b/arch/x86/kvm/mmu.c
|
|
@@ -3517,6 +3517,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
|
|
&invalid_list);
|
|
mmu->root_hpa = INVALID_PAGE;
|
|
}
|
|
+ mmu->root_cr3 = 0;
|
|
}
|
|
|
|
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
|
|
@@ -3572,6 +3573,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
|
|
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
|
|
} else
|
|
BUG();
|
|
+ vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
|
|
|
|
return 0;
|
|
}
|
|
@@ -3580,10 +3582,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
|
|
{
|
|
struct kvm_mmu_page *sp;
|
|
u64 pdptr, pm_mask;
|
|
- gfn_t root_gfn;
|
|
+ gfn_t root_gfn, root_cr3;
|
|
int i;
|
|
|
|
- root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
|
|
+ root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
|
|
+ root_gfn = root_cr3 >> PAGE_SHIFT;
|
|
|
|
if (mmu_check_root(vcpu, root_gfn))
|
|
return 1;
|
|
@@ -3608,7 +3611,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
|
|
++sp->root_count;
|
|
spin_unlock(&vcpu->kvm->mmu_lock);
|
|
vcpu->arch.mmu->root_hpa = root;
|
|
- return 0;
|
|
+ goto set_root_cr3;
|
|
}
|
|
|
|
/*
|
|
@@ -3674,6 +3677,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
|
|
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
|
|
}
|
|
|
|
+set_root_cr3:
|
|
+ vcpu->arch.mmu->root_cr3 = root_cr3;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -4125,7 +4131,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
|
|
struct kvm_mmu_root_info root;
|
|
struct kvm_mmu *mmu = vcpu->arch.mmu;
|
|
|
|
- root.cr3 = mmu->get_cr3(vcpu);
|
|
+ root.cr3 = mmu->root_cr3;
|
|
root.hpa = mmu->root_hpa;
|
|
|
|
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
|
|
@@ -4138,6 +4144,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
|
|
}
|
|
|
|
mmu->root_hpa = root.hpa;
|
|
+ mmu->root_cr3 = root.cr3;
|
|
|
|
return i < KVM_MMU_NUM_PREV_ROOTS;
|
|
}
|
|
@@ -4731,6 +4738,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
|
|
ext.cr4_pse = !!is_pse(vcpu);
|
|
ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
|
|
ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
|
|
+ ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
|
|
|
|
ext.valid = 1;
|
|
|
|
@@ -5477,11 +5485,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
|
|
vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
|
|
|
|
vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
|
|
+ vcpu->arch.root_mmu.root_cr3 = 0;
|
|
vcpu->arch.root_mmu.translate_gpa = translate_gpa;
|
|
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
|
|
vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
|
|
|
|
vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
|
|
+ vcpu->arch.guest_mmu.root_cr3 = 0;
|
|
vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
|
|
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
|
|
vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
|
|
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
|
|
index 101f53ccf5718..13baba9d1cc1a 100644
|
|
--- a/arch/x86/kvm/svm.c
|
|
+++ b/arch/x86/kvm/svm.c
|
|
@@ -5836,6 +5836,13 @@ static bool svm_cpu_has_accelerated_tpr(void)
|
|
|
|
static bool svm_has_emulated_msr(int index)
|
|
{
|
|
+ switch (index) {
|
|
+ case MSR_IA32_MCG_EXT_CTL:
|
|
+ return false;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
return true;
|
|
}
|
|
|
|
@@ -6248,6 +6255,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
|
int asid, ret;
|
|
|
|
ret = -EBUSY;
|
|
+ if (unlikely(sev->active))
|
|
+ return ret;
|
|
+
|
|
asid = sev_asid_new();
|
|
if (asid < 0)
|
|
return ret;
|
|
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
|
|
index 8d5d984541bea..bbd0520867a81 100644
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -28,6 +28,7 @@
|
|
#include <linux/mm.h>
|
|
#include <linux/highmem.h>
|
|
#include <linux/sched.h>
|
|
+#include <linux/sched/smt.h>
|
|
#include <linux/moduleparam.h>
|
|
#include <linux/mod_devicetable.h>
|
|
#include <linux/trace_events.h>
|
|
@@ -2778,7 +2779,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
|
|
if (!entry_only)
|
|
j = find_msr(&m->host, msr);
|
|
|
|
- if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
|
|
+ if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
|
|
+ (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
|
|
printk_once(KERN_WARNING "Not enough msr switch entries. "
|
|
"Can't add msr %x\n", msr);
|
|
return;
|
|
@@ -3619,9 +3621,11 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
|
|
* secondary cpu-based controls. Do not include those that
|
|
* depend on CPUID bits, they are added later by vmx_cpuid_update.
|
|
*/
|
|
- rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
|
|
- msrs->secondary_ctls_low,
|
|
- msrs->secondary_ctls_high);
|
|
+ if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
|
|
+ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
|
|
+ msrs->secondary_ctls_low,
|
|
+ msrs->secondary_ctls_high);
|
|
+
|
|
msrs->secondary_ctls_low = 0;
|
|
msrs->secondary_ctls_high &=
|
|
SECONDARY_EXEC_DESC |
|
|
@@ -8031,13 +8035,16 @@ static __init int hardware_setup(void)
|
|
|
|
kvm_mce_cap_supported |= MCG_LMCE_P;
|
|
|
|
- return alloc_kvm_area();
|
|
+ r = alloc_kvm_area();
|
|
+ if (r)
|
|
+ goto out;
|
|
+ return 0;
|
|
|
|
out:
|
|
for (i = 0; i < VMX_BITMAP_NR; i++)
|
|
free_page((unsigned long)vmx_bitmap[i]);
|
|
|
|
- return r;
|
|
+ return r;
|
|
}
|
|
|
|
static __exit void hardware_unsetup(void)
|
|
@@ -8312,11 +8319,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
|
|
if (r < 0)
|
|
goto out_vmcs02;
|
|
|
|
- vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
|
|
+ vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
|
|
if (!vmx->nested.cached_vmcs12)
|
|
goto out_cached_vmcs12;
|
|
|
|
- vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
|
|
+ vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
|
|
if (!vmx->nested.cached_shadow_vmcs12)
|
|
goto out_cached_shadow_vmcs12;
|
|
|
|
@@ -8506,6 +8513,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
|
|
if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
|
|
return;
|
|
|
|
+ hrtimer_cancel(&vmx->nested.preemption_timer);
|
|
vmx->nested.vmxon = false;
|
|
vmx->nested.smm.vmxon = false;
|
|
free_vpid(vmx->nested.vpid02);
|
|
@@ -11639,7 +11647,7 @@ static int vmx_vm_init(struct kvm *kvm)
|
|
* Warn upon starting the first VM in a potentially
|
|
* insecure environment.
|
|
*/
|
|
- if (cpu_smt_control == CPU_SMT_ENABLED)
|
|
+ if (sched_smt_active())
|
|
pr_warn_once(L1TF_MSG_SMT);
|
|
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
|
|
pr_warn_once(L1TF_MSG_L1D);
|
|
@@ -14850,13 +14858,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
|
|
copy_shadow_to_vmcs12(vmx);
|
|
}
|
|
|
|
- if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
|
|
+ /*
|
|
+ * Copy over the full allocated size of vmcs12 rather than just the size
|
|
+ * of the struct.
|
|
+ */
|
|
+ if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
|
|
return -EFAULT;
|
|
|
|
if (nested_cpu_has_shadow_vmcs(vmcs12) &&
|
|
vmcs12->vmcs_link_pointer != -1ull) {
|
|
if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
|
|
- get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
|
|
+ get_shadow_vmcs12(vcpu), VMCS12_SIZE))
|
|
return -EFAULT;
|
|
}
|
|
|
|
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
|
|
index f049ecfac7bb8..6d69503ca43e8 100644
|
|
--- a/arch/x86/kvm/x86.c
|
|
+++ b/arch/x86/kvm/x86.c
|
|
@@ -5041,6 +5041,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
|
|
{
|
|
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
|
|
|
|
+ /*
|
|
+ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
|
|
+ * is returned, but our callers are not ready for that and they blindly
|
|
+ * call kvm_inject_page_fault. Ensure that they at least do not leak
|
|
+ * uninitialized kernel stack memory into cr2 and error code.
|
|
+ */
|
|
+ memset(exception, 0, sizeof(*exception));
|
|
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
|
|
exception);
|
|
}
|
|
@@ -6407,8 +6414,7 @@ restart:
|
|
toggle_interruptibility(vcpu, ctxt->interruptibility);
|
|
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
|
|
kvm_rip_write(vcpu, ctxt->eip);
|
|
- if (r == EMULATE_DONE &&
|
|
- (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
|
|
+ if (r == EMULATE_DONE && ctxt->tf)
|
|
kvm_vcpu_do_singlestep(vcpu, &r);
|
|
if (!ctxt->have_exception ||
|
|
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
|
|
@@ -6998,10 +7004,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
|
|
case KVM_HC_CLOCK_PAIRING:
|
|
ret = kvm_pv_clock_pairing(vcpu, a0, a1);
|
|
break;
|
|
+#endif
|
|
case KVM_HC_SEND_IPI:
|
|
ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
|
|
break;
|
|
-#endif
|
|
default:
|
|
ret = -KVM_ENOSYS;
|
|
break;
|
|
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
|
|
index 79778ab200e49..a536651164584 100644
|
|
--- a/arch/x86/lib/kaslr.c
|
|
+++ b/arch/x86/lib/kaslr.c
|
|
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
|
|
u16 status, timer;
|
|
|
|
do {
|
|
- outb(I8254_PORT_CONTROL,
|
|
- I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
|
|
+ outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
|
|
+ I8254_PORT_CONTROL);
|
|
status = inb(I8254_PORT_COUNTER0);
|
|
timer = inb(I8254_PORT_COUNTER0);
|
|
timer |= inb(I8254_PORT_COUNTER0) << 8;
|
|
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
|
|
index ef99f3892e1fa..427a955a2cf2c 100644
|
|
--- a/arch/x86/mm/init.c
|
|
+++ b/arch/x86/mm/init.c
|
|
@@ -931,7 +931,7 @@ unsigned long max_swapfile_size(void)
|
|
|
|
pages = generic_max_swapfile_size();
|
|
|
|
- if (boot_cpu_has_bug(X86_BUG_L1TF)) {
|
|
+ if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
|
|
/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
|
|
unsigned long long l1tf_limit = l1tf_pfn_limit();
|
|
/*
|
|
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
|
|
index 5fab264948c2a..de95db8ac52f9 100644
|
|
--- a/arch/x86/mm/init_64.c
|
|
+++ b/arch/x86/mm/init_64.c
|
|
@@ -584,7 +584,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
|
|
paddr_end,
|
|
page_size_mask,
|
|
prot);
|
|
- __flush_tlb_all();
|
|
continue;
|
|
}
|
|
/*
|
|
@@ -627,7 +626,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
|
|
pud_populate(&init_mm, pud, pmd);
|
|
spin_unlock(&init_mm.page_table_lock);
|
|
}
|
|
- __flush_tlb_all();
|
|
|
|
update_page_count(PG_LEVEL_1G, pages);
|
|
|
|
@@ -668,7 +666,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
|
|
paddr_last = phys_pud_init(pud, paddr,
|
|
paddr_end,
|
|
page_size_mask);
|
|
- __flush_tlb_all();
|
|
continue;
|
|
}
|
|
|
|
@@ -680,7 +677,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
|
|
p4d_populate(&init_mm, p4d, pud);
|
|
spin_unlock(&init_mm.page_table_lock);
|
|
}
|
|
- __flush_tlb_all();
|
|
|
|
return paddr_last;
|
|
}
|
|
@@ -733,8 +729,6 @@ kernel_physical_mapping_init(unsigned long paddr_start,
|
|
if (pgd_changed)
|
|
sync_global_pgds(vaddr_start, vaddr_end - 1);
|
|
|
|
- __flush_tlb_all();
|
|
-
|
|
return paddr_last;
|
|
}
|
|
|
|
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
|
|
index 526536c81ddc4..ca1e8e6dccc8a 100644
|
|
--- a/arch/x86/pci/broadcom_bus.c
|
|
+++ b/arch/x86/pci/broadcom_bus.c
|
|
@@ -50,8 +50,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
|
|
word1 = read_pci_config_16(bus, slot, func, 0xc0);
|
|
word2 = read_pci_config_16(bus, slot, func, 0xc2);
|
|
if (word1 != word2) {
|
|
- res.start = (word1 << 16) | 0x0000;
|
|
- res.end = (word2 << 16) | 0xffff;
|
|
+ res.start = ((resource_size_t) word1 << 16) | 0x0000;
|
|
+ res.end = ((resource_size_t) word2 << 16) | 0xffff;
|
|
res.flags = IORESOURCE_MEM;
|
|
update_res(info, res.start, res.end, res.flags, 0);
|
|
}
|
|
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
|
|
index 4a6a5a26c5829..eb33432f2f241 100644
|
|
--- a/arch/x86/platform/uv/bios_uv.c
|
|
+++ b/arch/x86/platform/uv/bios_uv.c
|
|
@@ -29,7 +29,8 @@
|
|
|
|
struct uv_systab *uv_systab;
|
|
|
|
-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
|
|
+static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
|
|
+ u64 a4, u64 a5)
|
|
{
|
|
struct uv_systab *tab = uv_systab;
|
|
s64 ret;
|
|
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
|
|
|
|
return ret;
|
|
}
|
|
+
|
|
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
|
|
+{
|
|
+ s64 ret;
|
|
+
|
|
+ if (down_interruptible(&__efi_uv_runtime_lock))
|
|
+ return BIOS_STATUS_ABORT;
|
|
+
|
|
+ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
|
|
+ up(&__efi_uv_runtime_lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
EXPORT_SYMBOL_GPL(uv_bios_call);
|
|
|
|
s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
|
|
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
|
|
unsigned long bios_flags;
|
|
s64 ret;
|
|
|
|
+ if (down_interruptible(&__efi_uv_runtime_lock))
|
|
+ return BIOS_STATUS_ABORT;
|
|
+
|
|
local_irq_save(bios_flags);
|
|
- ret = uv_bios_call(which, a1, a2, a3, a4, a5);
|
|
+ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
|
|
local_irq_restore(bios_flags);
|
|
|
|
+ up(&__efi_uv_runtime_lock);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
|
|
index 2f6787fc71066..c54a493e139a7 100644
|
|
--- a/arch/x86/xen/enlighten_pv.c
|
|
+++ b/arch/x86/xen/enlighten_pv.c
|
|
@@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
|
|
val = native_read_msr_safe(msr, err);
|
|
switch (msr) {
|
|
case MSR_IA32_APICBASE:
|
|
-#ifdef CONFIG_X86_X2APIC
|
|
- if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
|
|
-#endif
|
|
- val &= ~X2APIC_ENABLE;
|
|
+ val &= ~X2APIC_ENABLE;
|
|
break;
|
|
}
|
|
return val;
|
|
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
|
|
index 72bf446c3fee3..6e29794573b72 100644
|
|
--- a/arch/x86/xen/time.c
|
|
+++ b/arch/x86/xen/time.c
|
|
@@ -361,8 +361,6 @@ void xen_timer_resume(void)
|
|
{
|
|
int cpu;
|
|
|
|
- pvclock_resume();
|
|
-
|
|
if (xen_clockevent != &xen_vcpuop_clockevent)
|
|
return;
|
|
|
|
@@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
|
|
};
|
|
|
|
static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
|
|
+static u64 xen_clock_value_saved;
|
|
|
|
void xen_save_time_memory_area(void)
|
|
{
|
|
struct vcpu_register_time_memory_area t;
|
|
int ret;
|
|
|
|
+ xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
|
|
+
|
|
if (!xen_clock)
|
|
return;
|
|
|
|
@@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
|
|
int ret;
|
|
|
|
if (!xen_clock)
|
|
- return;
|
|
+ goto out;
|
|
|
|
t.addr.v = &xen_clock->pvti;
|
|
|
|
@@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
|
|
if (ret != 0)
|
|
pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
|
|
ret);
|
|
+
|
|
+out:
|
|
+ /* Need pvclock_resume() before using xen_clocksource_read(). */
|
|
+ pvclock_resume();
|
|
+ xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
|
|
}
|
|
|
|
static void xen_setup_vsyscall_time_info(void)
|
|
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
|
|
index 1090528825ec6..e46ae07bab059 100644
|
|
--- a/arch/xtensa/boot/dts/xtfpga.dtsi
|
|
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
|
|
@@ -103,7 +103,7 @@
|
|
};
|
|
};
|
|
|
|
- spi0: spi-master@0d0a0000 {
|
|
+ spi0: spi@0d0a0000 {
|
|
compatible = "cdns,xtfpga-spi";
|
|
#address-cells = <1>;
|
|
#size-cells = <0>;
|
|
diff --git a/block/blk-flush.c b/block/blk-flush.c
|
|
index 8b44b86779daa..87fc49daa2b49 100644
|
|
--- a/block/blk-flush.c
|
|
+++ b/block/blk-flush.c
|
|
@@ -424,7 +424,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
|
|
blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
|
|
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
|
|
|
|
- blk_mq_run_hw_queue(hctx, true);
|
|
+ blk_mq_sched_restart(hctx);
|
|
}
|
|
|
|
/**
|
|
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
|
|
index 29bfe8017a2d8..da1de190a3b13 100644
|
|
--- a/block/blk-mq-sched.c
|
|
+++ b/block/blk-mq-sched.c
|
|
@@ -54,13 +54,14 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
|
|
* Mark a hardware queue as needing a restart. For shared queues, maintain
|
|
* a count of how many hardware queues are marked for restart.
|
|
*/
|
|
-static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
|
|
+void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
|
|
{
|
|
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
|
|
return;
|
|
|
|
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
|
|
}
|
|
+EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
|
|
|
|
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
|
|
{
|
|
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
|
|
index 8a9544203173f..38e06e23821fe 100644
|
|
--- a/block/blk-mq-sched.h
|
|
+++ b/block/blk-mq-sched.h
|
|
@@ -15,6 +15,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
|
|
struct request **merged_request);
|
|
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
|
|
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
|
|
+void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
|
|
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
|
|
|
|
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
|
|
diff --git a/block/blk-stat.h b/block/blk-stat.h
|
|
index f4a1568e81a41..17b47a86eefb3 100644
|
|
--- a/block/blk-stat.h
|
|
+++ b/block/blk-stat.h
|
|
@@ -145,6 +145,11 @@ static inline void blk_stat_activate_nsecs(struct blk_stat_callback *cb,
|
|
mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs));
|
|
}
|
|
|
|
+static inline void blk_stat_deactivate(struct blk_stat_callback *cb)
|
|
+{
|
|
+ del_timer_sync(&cb->timer);
|
|
+}
|
|
+
|
|
/**
|
|
* blk_stat_activate_msecs() - Gather block statistics during a time window in
|
|
* milliseconds.
|
|
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
|
|
index 8ac93fcbaa2ea..0c62bf4eca757 100644
|
|
--- a/block/blk-wbt.c
|
|
+++ b/block/blk-wbt.c
|
|
@@ -760,8 +760,10 @@ void wbt_disable_default(struct request_queue *q)
|
|
if (!rqos)
|
|
return;
|
|
rwb = RQWB(rqos);
|
|
- if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
|
|
+ if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
|
|
+ blk_stat_deactivate(rwb->cb);
|
|
rwb->wb_normal = 0;
|
|
+ }
|
|
}
|
|
EXPORT_SYMBOL_GPL(wbt_disable_default);
|
|
|
|
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
|
|
index 099a9e05854c3..d5e21ce44d2cc 100644
|
|
--- a/block/mq-deadline.c
|
|
+++ b/block/mq-deadline.c
|
|
@@ -373,9 +373,16 @@ done:
|
|
|
|
/*
|
|
* One confusing aspect here is that we get called for a specific
|
|
- * hardware queue, but we return a request that may not be for a
|
|
+ * hardware queue, but we may return a request that is for a
|
|
* different hardware queue. This is because mq-deadline has shared
|
|
* state for all hardware queues, in terms of sorting, FIFOs, etc.
|
|
+ *
|
|
+ * For a zoned block device, __dd_dispatch_request() may return NULL
|
|
+ * if all the queued write requests are directed at zones that are already
|
|
+ * locked due to on-going write requests. In this case, make sure to mark
|
|
+ * the queue as needing a restart to ensure that the queue is run again
|
|
+ * and the pending writes dispatched once the target zones for the ongoing
|
|
+ * write requests are unlocked in dd_finish_request().
|
|
*/
|
|
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
|
|
{
|
|
@@ -384,6 +391,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
|
|
|
|
spin_lock(&dd->lock);
|
|
rq = __dd_dispatch_request(dd);
|
|
+ if (!rq && blk_queue_is_zoned(hctx->queue) &&
|
|
+ !list_empty(&dd->fifo_list[WRITE]))
|
|
+ blk_mq_sched_mark_restart_hctx(hctx);
|
|
spin_unlock(&dd->lock);
|
|
|
|
return rq;
|
|
diff --git a/block/partition-generic.c b/block/partition-generic.c
|
|
index d3d14e81fb12d..5f8db5c5140f4 100644
|
|
--- a/block/partition-generic.c
|
|
+++ b/block/partition-generic.c
|
|
@@ -249,9 +249,10 @@ struct device_type part_type = {
|
|
.uevent = part_uevent,
|
|
};
|
|
|
|
-static void delete_partition_rcu_cb(struct rcu_head *head)
|
|
+static void delete_partition_work_fn(struct work_struct *work)
|
|
{
|
|
- struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
|
|
+ struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct,
|
|
+ rcu_work);
|
|
|
|
part->start_sect = 0;
|
|
part->nr_sects = 0;
|
|
@@ -262,7 +263,8 @@ static void delete_partition_rcu_cb(struct rcu_head *head)
|
|
void __delete_partition(struct percpu_ref *ref)
|
|
{
|
|
struct hd_struct *part = container_of(ref, struct hd_struct, ref);
|
|
- call_rcu(&part->rcu_head, delete_partition_rcu_cb);
|
|
+ INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn);
|
|
+ queue_rcu_work(system_wq, &part->rcu_work);
|
|
}
|
|
|
|
/*
|
|
diff --git a/crypto/Kconfig b/crypto/Kconfig
|
|
index 05c91eb10ca1f..4f24acac3d482 100644
|
|
--- a/crypto/Kconfig
|
|
+++ b/crypto/Kconfig
|
|
@@ -1006,7 +1006,8 @@ config CRYPTO_AES_TI
|
|
8 for decryption), this implementation only uses just two S-boxes of
|
|
256 bytes each, and attempts to eliminate data dependent latencies by
|
|
prefetching the entire table into the cache at the start of each
|
|
- block.
|
|
+ block. Interrupts are also disabled to avoid races where cachelines
|
|
+ are evicted when the CPU is interrupted to do something else.
|
|
|
|
config CRYPTO_AES_586
|
|
tristate "AES cipher algorithms (i586)"
|
|
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
|
|
index 03023b2290e8e..1ff9785b30f55 100644
|
|
--- a/crypto/aes_ti.c
|
|
+++ b/crypto/aes_ti.c
|
|
@@ -269,6 +269,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|
const u32 *rkp = ctx->key_enc + 4;
|
|
int rounds = 6 + ctx->key_length / 4;
|
|
u32 st0[4], st1[4];
|
|
+ unsigned long flags;
|
|
int round;
|
|
|
|
st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
|
|
@@ -276,6 +277,12 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|
st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
|
|
st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
|
|
|
|
+ /*
|
|
+ * Temporarily disable interrupts to avoid races where cachelines are
|
|
+ * evicted when the CPU is interrupted to do something else.
|
|
+ */
|
|
+ local_irq_save(flags);
|
|
+
|
|
st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
|
|
st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
|
|
st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
|
|
@@ -300,6 +307,8 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|
put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
|
|
put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
|
|
put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
|
|
+
|
|
+ local_irq_restore(flags);
|
|
}
|
|
|
|
static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|
@@ -308,6 +317,7 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|
const u32 *rkp = ctx->key_dec + 4;
|
|
int rounds = 6 + ctx->key_length / 4;
|
|
u32 st0[4], st1[4];
|
|
+ unsigned long flags;
|
|
int round;
|
|
|
|
st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
|
|
@@ -315,6 +325,12 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|
st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
|
|
st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
|
|
|
|
+ /*
|
|
+ * Temporarily disable interrupts to avoid races where cachelines are
|
|
+ * evicted when the CPU is interrupted to do something else.
|
|
+ */
|
|
+ local_irq_save(flags);
|
|
+
|
|
st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
|
|
st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
|
|
st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
|
|
@@ -339,6 +355,8 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|
put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
|
|
put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
|
|
put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
|
|
+
|
|
+ local_irq_restore(flags);
|
|
}
|
|
|
|
static struct crypto_alg aes_alg = {
|
|
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
|
|
index 17eb09d222ff4..ec78a04eb136e 100644
|
|
--- a/crypto/af_alg.c
|
|
+++ b/crypto/af_alg.c
|
|
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
|
|
|
|
int af_alg_release(struct socket *sock)
|
|
{
|
|
- if (sock->sk)
|
|
+ if (sock->sk) {
|
|
sock_put(sock->sk);
|
|
+ sock->sk = NULL;
|
|
+ }
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(af_alg_release);
|
|
diff --git a/crypto/authenc.c b/crypto/authenc.c
|
|
index 37f54d1b2f669..4be293a4b5f0f 100644
|
|
--- a/crypto/authenc.c
|
|
+++ b/crypto/authenc.c
|
|
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
|
|
return -EINVAL;
|
|
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
|
|
return -EINVAL;
|
|
- if (RTA_PAYLOAD(rta) < sizeof(*param))
|
|
+
|
|
+ /*
|
|
+ * RTA_OK() didn't align the rtattr's payload when validating that it
|
|
+ * fits in the buffer. Yet, the keys should start on the next 4-byte
|
|
+ * aligned boundary. To avoid confusion, require that the rtattr
|
|
+ * payload be exactly the param struct, which has a 4-byte aligned size.
|
|
+ */
|
|
+ if (RTA_PAYLOAD(rta) != sizeof(*param))
|
|
return -EINVAL;
|
|
+ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
|
|
|
|
param = RTA_DATA(rta);
|
|
keys->enckeylen = be32_to_cpu(param->enckeylen);
|
|
|
|
- key += RTA_ALIGN(rta->rta_len);
|
|
- keylen -= RTA_ALIGN(rta->rta_len);
|
|
+ key += rta->rta_len;
|
|
+ keylen -= rta->rta_len;
|
|
|
|
if (keylen < keys->enckeylen)
|
|
return -EINVAL;
|
|
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
|
|
index 80a25cc04aec7..4741fe89ba2cd 100644
|
|
--- a/crypto/authencesn.c
|
|
+++ b/crypto/authencesn.c
|
|
@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
|
|
struct aead_request *req = areq->data;
|
|
|
|
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
|
|
- aead_request_complete(req, err);
|
|
+ authenc_esn_request_complete(req, err);
|
|
}
|
|
|
|
static int crypto_authenc_esn_decrypt(struct aead_request *req)
|
|
diff --git a/crypto/cfb.c b/crypto/cfb.c
|
|
index 20987d0e09d89..e81e456734985 100644
|
|
--- a/crypto/cfb.c
|
|
+++ b/crypto/cfb.c
|
|
@@ -144,7 +144,7 @@ static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
|
|
|
|
do {
|
|
crypto_cfb_encrypt_one(tfm, iv, dst);
|
|
- crypto_xor(dst, iv, bsize);
|
|
+ crypto_xor(dst, src, bsize);
|
|
iv = src;
|
|
|
|
src += bsize;
|
|
diff --git a/crypto/ecc.c b/crypto/ecc.c
|
|
index 8facafd678026..adcce310f6462 100644
|
|
--- a/crypto/ecc.c
|
|
+++ b/crypto/ecc.c
|
|
@@ -842,15 +842,23 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
|
|
|
|
static void ecc_point_mult(struct ecc_point *result,
|
|
const struct ecc_point *point, const u64 *scalar,
|
|
- u64 *initial_z, u64 *curve_prime,
|
|
+ u64 *initial_z, const struct ecc_curve *curve,
|
|
unsigned int ndigits)
|
|
{
|
|
/* R0 and R1 */
|
|
u64 rx[2][ECC_MAX_DIGITS];
|
|
u64 ry[2][ECC_MAX_DIGITS];
|
|
u64 z[ECC_MAX_DIGITS];
|
|
+ u64 sk[2][ECC_MAX_DIGITS];
|
|
+ u64 *curve_prime = curve->p;
|
|
int i, nb;
|
|
- int num_bits = vli_num_bits(scalar, ndigits);
|
|
+ int num_bits;
|
|
+ int carry;
|
|
+
|
|
+ carry = vli_add(sk[0], scalar, curve->n, ndigits);
|
|
+ vli_add(sk[1], sk[0], curve->n, ndigits);
|
|
+ scalar = sk[!carry];
|
|
+ num_bits = sizeof(u64) * ndigits * 8 + 1;
|
|
|
|
vli_set(rx[1], point->x, ndigits);
|
|
vli_set(ry[1], point->y, ndigits);
|
|
@@ -1004,7 +1012,7 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
|
|
goto out;
|
|
}
|
|
|
|
- ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
|
|
+ ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
|
|
if (ecc_point_is_zero(pk)) {
|
|
ret = -EAGAIN;
|
|
goto err_free_point;
|
|
@@ -1090,7 +1098,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
|
|
goto err_alloc_product;
|
|
}
|
|
|
|
- ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
|
|
+ ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
|
|
|
|
ecc_swap_digits(product->x, secret, ndigits);
|
|
|
|
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
|
|
index 9a5c60f08aad8..c0cf87ae7ef6d 100644
|
|
--- a/crypto/sm3_generic.c
|
|
+++ b/crypto/sm3_generic.c
|
|
@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
|
|
|
|
for (i = 0; i <= 63; i++) {
|
|
|
|
- ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
|
|
+ ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
|
|
|
|
ss2 = ss1 ^ rol32(a, 12);
|
|
|
|
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
|
|
index c20c9f5c18f22..1026173d721a0 100644
|
|
--- a/crypto/tcrypt.c
|
|
+++ b/crypto/tcrypt.c
|
|
@@ -1736,6 +1736,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
|
|
ret += tcrypt_test("ctr(aes)");
|
|
ret += tcrypt_test("rfc3686(ctr(aes))");
|
|
ret += tcrypt_test("ofb(aes)");
|
|
+ ret += tcrypt_test("cfb(aes)");
|
|
break;
|
|
|
|
case 11:
|
|
@@ -2060,6 +2061,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
|
|
speed_template_16_24_32);
|
|
test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
|
|
speed_template_16_24_32);
|
|
+ test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
|
|
+ speed_template_16_24_32);
|
|
+ test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
|
|
+ speed_template_16_24_32);
|
|
break;
|
|
|
|
case 201:
|
|
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
|
|
index b1f79c6bf4096..84937ceb4bd80 100644
|
|
--- a/crypto/testmgr.c
|
|
+++ b/crypto/testmgr.c
|
|
@@ -2690,6 +2690,13 @@ static const struct alg_test_desc alg_test_descs[] = {
|
|
.dec = __VECS(aes_ccm_dec_tv_template)
|
|
}
|
|
}
|
|
+ }, {
|
|
+ .alg = "cfb(aes)",
|
|
+ .test = alg_test_skcipher,
|
|
+ .fips_allowed = 1,
|
|
+ .suite = {
|
|
+ .cipher = __VECS(aes_cfb_tv_template)
|
|
+ },
|
|
}, {
|
|
.alg = "chacha20",
|
|
.test = alg_test_skcipher,
|
|
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
|
|
index 1fe7b97ba03f9..b5b0d29761cec 100644
|
|
--- a/crypto/testmgr.h
|
|
+++ b/crypto/testmgr.h
|
|
@@ -11449,6 +11449,82 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
|
|
},
|
|
};
|
|
|
|
+static const struct cipher_testvec aes_cfb_tv_template[] = {
|
|
+ { /* From NIST SP800-38A */
|
|
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
|
|
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
|
|
+ .klen = 16,
|
|
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
|
|
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
|
|
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
|
|
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
|
|
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
|
|
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
|
|
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
|
|
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
|
|
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
|
|
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
|
|
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
|
|
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
|
|
+ "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
|
|
+ "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
|
|
+ "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
|
|
+ "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
|
|
+ "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
|
|
+ "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
|
|
+ .len = 64,
|
|
+ }, {
|
|
+ .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
|
|
+ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
|
|
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
|
|
+ .klen = 24,
|
|
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
|
|
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
|
|
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
|
|
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
|
|
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
|
|
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
|
|
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
|
|
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
|
|
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
|
|
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
|
|
+ .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
|
|
+ "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
|
|
+ "\x67\xce\x7f\x7f\x81\x17\x36\x21"
|
|
+ "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
|
|
+ "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
|
|
+ "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
|
|
+ "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
|
|
+ "\x42\xae\x8f\xba\x58\x4b\x09\xff",
|
|
+ .len = 64,
|
|
+ }, {
|
|
+ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
|
|
+ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
|
|
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
|
|
+ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
|
|
+ .klen = 32,
|
|
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
|
|
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
|
|
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
|
|
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
|
|
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
|
|
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
|
|
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
|
|
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
|
|
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
|
|
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
|
|
+ .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
|
|
+ "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
|
|
+ "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
|
|
+ "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
|
|
+ "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
|
|
+ "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
|
|
+ "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
|
|
+ "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
|
|
+ .len = 64,
|
|
+ },
|
|
+};
|
|
+
|
|
static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
|
|
{ /* Input data from RFC 2410 Case 1 */
|
|
#ifdef __LITTLE_ENDIAN
|
|
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
|
|
index 02c6fd9caff7d..f008ba7c9cedc 100644
|
|
--- a/drivers/acpi/apei/ghes.c
|
|
+++ b/drivers/acpi/apei/ghes.c
|
|
@@ -691,6 +691,8 @@ static void __ghes_panic(struct ghes *ghes)
|
|
{
|
|
__ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);
|
|
|
|
+ ghes_clear_estatus(ghes);
|
|
+
|
|
/* reboot to log the error! */
|
|
if (!panic_timeout)
|
|
panic_timeout = ghes_panic_timeout;
|
|
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
|
|
index 70f4e80b9246a..a1a22e9c03289 100644
|
|
--- a/drivers/acpi/arm64/iort.c
|
|
+++ b/drivers/acpi/arm64/iort.c
|
|
@@ -951,9 +951,10 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
|
|
{
|
|
struct acpi_iort_node *node;
|
|
struct acpi_iort_root_complex *rc;
|
|
+ struct pci_bus *pbus = to_pci_dev(dev)->bus;
|
|
|
|
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
|
|
- iort_match_node_callback, dev);
|
|
+ iort_match_node_callback, &pbus->dev);
|
|
if (!node || node->revision < 1)
|
|
return -ENODEV;
|
|
|
|
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
|
|
index bb3d96dea6dba..26d4164d394fb 100644
|
|
--- a/drivers/acpi/bus.c
|
|
+++ b/drivers/acpi/bus.c
|
|
@@ -1054,18 +1054,6 @@ void __init acpi_early_init(void)
|
|
goto error0;
|
|
}
|
|
|
|
- /*
|
|
- * ACPI 2.0 requires the EC driver to be loaded and work before
|
|
- * the EC device is found in the namespace (i.e. before
|
|
- * acpi_load_tables() is called).
|
|
- *
|
|
- * This is accomplished by looking for the ECDT table, and getting
|
|
- * the EC parameters out of that.
|
|
- *
|
|
- * Ignore the result. Not having an ECDT is not fatal.
|
|
- */
|
|
- status = acpi_ec_ecdt_probe();
|
|
-
|
|
#ifdef CONFIG_X86
|
|
if (!acpi_ioapic) {
|
|
/* compatible (0) means level (3) */
|
|
@@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void)
|
|
goto error1;
|
|
}
|
|
|
|
+ /*
|
|
+ * ACPI 2.0 requires the EC driver to be loaded and work before the EC
|
|
+ * device is found in the namespace.
|
|
+ *
|
|
+ * This is accomplished by looking for the ECDT table and getting the EC
|
|
+ * parameters out of that.
|
|
+ *
|
|
+ * Do that before calling acpi_initialize_objects() which may trigger EC
|
|
+ * address space accesses.
|
|
+ */
|
|
+ acpi_ec_ecdt_probe();
|
|
+
|
|
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
|
|
if (ACPI_FAILURE(status)) {
|
|
printk(KERN_ERR PREFIX
|
|
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
|
|
index 5912d30020c71..2a2d7ec772526 100644
|
|
--- a/drivers/acpi/nfit/core.c
|
|
+++ b/drivers/acpi/nfit/core.c
|
|
@@ -394,6 +394,32 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
|
|
return id;
|
|
}
|
|
|
|
+static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
|
|
+ struct nd_cmd_pkg *call_pkg)
|
|
+{
|
|
+ if (call_pkg) {
|
|
+ int i;
|
|
+
|
|
+ if (nfit_mem->family != call_pkg->nd_family)
|
|
+ return -ENOTTY;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
|
|
+ if (call_pkg->nd_reserved2[i])
|
|
+ return -EINVAL;
|
|
+ return call_pkg->nd_command;
|
|
+ }
|
|
+
|
|
+ /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
|
|
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
|
|
+ return cmd;
|
|
+
|
|
+ /*
|
|
+ * Force function number validation to fail since 0 is never
|
|
+ * published as a valid function in dsm_mask.
|
|
+ */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
|
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
|
|
{
|
|
@@ -407,30 +433,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
|
unsigned long cmd_mask, dsm_mask;
|
|
u32 offset, fw_status = 0;
|
|
acpi_handle handle;
|
|
- unsigned int func;
|
|
const guid_t *guid;
|
|
- int rc, i;
|
|
+ int func, rc, i;
|
|
|
|
if (cmd_rc)
|
|
*cmd_rc = -EINVAL;
|
|
- func = cmd;
|
|
- if (cmd == ND_CMD_CALL) {
|
|
- call_pkg = buf;
|
|
- func = call_pkg->nd_command;
|
|
-
|
|
- for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
|
|
- if (call_pkg->nd_reserved2[i])
|
|
- return -EINVAL;
|
|
- }
|
|
|
|
if (nvdimm) {
|
|
struct acpi_device *adev = nfit_mem->adev;
|
|
|
|
if (!adev)
|
|
return -ENOTTY;
|
|
- if (call_pkg && nfit_mem->family != call_pkg->nd_family)
|
|
- return -ENOTTY;
|
|
|
|
+ if (cmd == ND_CMD_CALL)
|
|
+ call_pkg = buf;
|
|
+ func = cmd_to_func(nfit_mem, cmd, call_pkg);
|
|
+ if (func < 0)
|
|
+ return func;
|
|
dimm_name = nvdimm_name(nvdimm);
|
|
cmd_name = nvdimm_cmd_name(cmd);
|
|
cmd_mask = nvdimm_cmd_mask(nvdimm);
|
|
@@ -441,6 +460,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
|
} else {
|
|
struct acpi_device *adev = to_acpi_dev(acpi_desc);
|
|
|
|
+ func = cmd;
|
|
cmd_name = nvdimm_bus_cmd_name(cmd);
|
|
cmd_mask = nd_desc->cmd_mask;
|
|
dsm_mask = cmd_mask;
|
|
@@ -455,7 +475,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
|
if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
|
|
return -ENOTTY;
|
|
|
|
- if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
|
|
+ /*
|
|
+ * Check for a valid command. For ND_CMD_CALL, we also have to
|
|
+ * make sure that the DSM function is supported.
|
|
+ */
|
|
+ if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
|
|
+ return -ENOTTY;
|
|
+ else if (!test_bit(cmd, &cmd_mask))
|
|
return -ENOTTY;
|
|
|
|
in_obj.type = ACPI_TYPE_PACKAGE;
|
|
@@ -698,6 +724,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
|
|
struct acpi_nfit_memory_map *memdev;
|
|
struct acpi_nfit_desc *acpi_desc;
|
|
struct nfit_mem *nfit_mem;
|
|
+ u16 physical_id;
|
|
|
|
mutex_lock(&acpi_desc_lock);
|
|
list_for_each_entry(acpi_desc, &acpi_descs, list) {
|
|
@@ -705,10 +732,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
|
|
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
|
|
memdev = __to_nfit_memdev(nfit_mem);
|
|
if (memdev->device_handle == device_handle) {
|
|
+ *flags = memdev->flags;
|
|
+ physical_id = memdev->physical_id;
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
|
mutex_unlock(&acpi_desc_lock);
|
|
- *flags = memdev->flags;
|
|
- return memdev->physical_id;
|
|
+ return physical_id;
|
|
}
|
|
}
|
|
mutex_unlock(&acpi_desc->init_mutex);
|
|
@@ -1844,6 +1872,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
|
|
return 0;
|
|
}
|
|
|
|
+ /*
|
|
+ * Function 0 is the command interrogation function, don't
|
|
+ * export it to potential userspace use, and enable it to be
|
|
+ * used as an error value in acpi_nfit_ctl().
|
|
+ */
|
|
+ dsm_mask &= ~1UL;
|
|
+
|
|
guid = to_nfit_uuid(nfit_mem->family);
|
|
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
|
|
if (acpi_check_dsm(adev_dimm->handle, guid,
|
|
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
|
|
index 274699463b4f1..7bbbf8256a41a 100644
|
|
--- a/drivers/acpi/numa.c
|
|
+++ b/drivers/acpi/numa.c
|
|
@@ -146,9 +146,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
|
|
{
|
|
struct acpi_srat_mem_affinity *p =
|
|
(struct acpi_srat_mem_affinity *)header;
|
|
- pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
|
|
- (unsigned long)p->base_address,
|
|
- (unsigned long)p->length,
|
|
+ pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
|
|
+ (unsigned long long)p->base_address,
|
|
+ (unsigned long long)p->length,
|
|
p->proximity_domain,
|
|
(p->flags & ACPI_SRAT_MEM_ENABLED) ?
|
|
"enabled" : "disabled",
|
|
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
|
|
index 2579675b7082b..e7c0006e66028 100644
|
|
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
|
|
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
|
|
@@ -20,8 +20,11 @@
|
|
#define GPI1_LDO_ON (3 << 0)
|
|
#define GPI1_LDO_OFF (4 << 0)
|
|
|
|
-#define AXP288_ADC_TS_PIN_GPADC 0xf2
|
|
-#define AXP288_ADC_TS_PIN_ON 0xf3
|
|
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
|
|
+#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
|
|
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
|
|
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
|
|
+#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
|
|
|
|
static struct pmic_table power_table[] = {
|
|
{
|
|
@@ -212,22 +215,44 @@ out:
|
|
*/
|
|
static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
|
|
{
|
|
+ int ret, adc_ts_pin_ctrl;
|
|
u8 buf[2];
|
|
- int ret;
|
|
|
|
- ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
|
|
- AXP288_ADC_TS_PIN_GPADC);
|
|
+ /*
|
|
+ * The current-source used for the battery temp-sensor (TS) is shared
|
|
+ * with the GPADC. For proper fuel-gauge and charger operation the TS
|
|
+ * current-source needs to be permanently on. But to read the GPADC we
|
|
+ * need to temporary switch the TS current-source to ondemand, so that
|
|
+ * the GPADC can use it, otherwise we will always read an all 0 value.
|
|
+ *
|
|
+ * Note that the switching from on to on-ondemand is not necessary
|
|
+ * when the TS current-source is off (this happens on devices which
|
|
+ * do not use the TS-pin).
|
|
+ */
|
|
+ ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- /* After switching to the GPADC pin give things some time to settle */
|
|
- usleep_range(6000, 10000);
|
|
+ if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
|
|
+ ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
|
|
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
|
|
+ AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /* Wait a bit after switching the current-source */
|
|
+ usleep_range(6000, 10000);
|
|
+ }
|
|
|
|
ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
|
|
if (ret == 0)
|
|
ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
|
|
|
|
- regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
|
|
+ if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
|
|
+ regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
|
|
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
|
|
+ AXP288_ADC_TS_CURRENT_ON);
|
|
+ }
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
|
|
index 1b475bc1ae169..665e93ca0b40f 100644
|
|
--- a/drivers/acpi/power.c
|
|
+++ b/drivers/acpi/power.c
|
|
@@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list)
|
|
}
|
|
}
|
|
|
|
+static bool acpi_power_resource_is_dup(union acpi_object *package,
|
|
+ unsigned int start, unsigned int i)
|
|
+{
|
|
+ acpi_handle rhandle, dup;
|
|
+ unsigned int j;
|
|
+
|
|
+ /* The caller is expected to check the package element types */
|
|
+ rhandle = package->package.elements[i].reference.handle;
|
|
+ for (j = start; j < i; j++) {
|
|
+ dup = package->package.elements[j].reference.handle;
|
|
+ if (dup == rhandle)
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
|
|
struct list_head *list)
|
|
{
|
|
@@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
|
|
err = -ENODEV;
|
|
break;
|
|
}
|
|
+
|
|
+ /* Some ACPI tables contain duplicate power resource references */
|
|
+ if (acpi_power_resource_is_dup(package, start, i))
|
|
+ continue;
|
|
+
|
|
err = acpi_add_power_resource(rhandle);
|
|
if (err)
|
|
break;
|
|
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
|
|
index 9d52743080a4f..c336784d0bcbe 100644
|
|
--- a/drivers/acpi/spcr.c
|
|
+++ b/drivers/acpi/spcr.c
|
|
@@ -148,6 +148,13 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
|
|
}
|
|
|
|
switch (table->baud_rate) {
|
|
+ case 0:
|
|
+ /*
|
|
+ * SPCR 1.04 defines 0 as a preconfigured state of UART.
|
|
+ * Assume firmware or bootloader configures console correctly.
|
|
+ */
|
|
+ baud_rate = 0;
|
|
+ break;
|
|
case 3:
|
|
baud_rate = 9600;
|
|
break;
|
|
@@ -196,6 +203,10 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
|
|
* UART so don't attempt to change to the baud rate state
|
|
* in the table because driver cannot calculate the dividers
|
|
*/
|
|
+ baud_rate = 0;
|
|
+ }
|
|
+
|
|
+ if (!baud_rate) {
|
|
snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype,
|
|
table->serial_port.address);
|
|
} else {
|
|
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
|
|
index 9f1000d2a40c7..b834ee335d9a8 100644
|
|
--- a/drivers/android/binder.c
|
|
+++ b/drivers/android/binder.c
|
|
@@ -72,6 +72,7 @@
|
|
#include <linux/spinlock.h>
|
|
#include <linux/ratelimit.h>
|
|
#include <linux/syscalls.h>
|
|
+#include <linux/task_work.h>
|
|
|
|
#include <uapi/linux/android/binder.h>
|
|
|
|
@@ -2160,6 +2161,64 @@ static bool binder_validate_fixup(struct binder_buffer *b,
|
|
return (fixup_offset >= last_min_offset);
|
|
}
|
|
|
|
+/**
|
|
+ * struct binder_task_work_cb - for deferred close
|
|
+ *
|
|
+ * @twork: callback_head for task work
|
|
+ * @fd: fd to close
|
|
+ *
|
|
+ * Structure to pass task work to be handled after
|
|
+ * returning from binder_ioctl() via task_work_add().
|
|
+ */
|
|
+struct binder_task_work_cb {
|
|
+ struct callback_head twork;
|
|
+ struct file *file;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * binder_do_fd_close() - close list of file descriptors
|
|
+ * @twork: callback head for task work
|
|
+ *
|
|
+ * It is not safe to call ksys_close() during the binder_ioctl()
|
|
+ * function if there is a chance that binder's own file descriptor
|
|
+ * might be closed. This is to meet the requirements for using
|
|
+ * fdget() (see comments for __fget_light()). Therefore use
|
|
+ * task_work_add() to schedule the close operation once we have
|
|
+ * returned from binder_ioctl(). This function is a callback
|
|
+ * for that mechanism and does the actual ksys_close() on the
|
|
+ * given file descriptor.
|
|
+ */
|
|
+static void binder_do_fd_close(struct callback_head *twork)
|
|
+{
|
|
+ struct binder_task_work_cb *twcb = container_of(twork,
|
|
+ struct binder_task_work_cb, twork);
|
|
+
|
|
+ fput(twcb->file);
|
|
+ kfree(twcb);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * binder_deferred_fd_close() - schedule a close for the given file-descriptor
|
|
+ * @fd: file-descriptor to close
|
|
+ *
|
|
+ * See comments in binder_do_fd_close(). This function is used to schedule
|
|
+ * a file-descriptor to be closed after returning from binder_ioctl().
|
|
+ */
|
|
+static void binder_deferred_fd_close(int fd)
|
|
+{
|
|
+ struct binder_task_work_cb *twcb;
|
|
+
|
|
+ twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
|
|
+ if (!twcb)
|
|
+ return;
|
|
+ init_task_work(&twcb->twork, binder_do_fd_close);
|
|
+ __close_fd_get_file(fd, &twcb->file);
|
|
+ if (twcb->file)
|
|
+ task_work_add(current, &twcb->twork, true);
|
|
+ else
|
|
+ kfree(twcb);
|
|
+}
|
|
+
|
|
static void binder_transaction_buffer_release(struct binder_proc *proc,
|
|
struct binder_buffer *buffer,
|
|
binder_size_t *failed_at)
|
|
@@ -2299,7 +2358,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
|
|
}
|
|
fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
|
|
for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
|
|
- ksys_close(fd_array[fd_index]);
|
|
+ binder_deferred_fd_close(fd_array[fd_index]);
|
|
} break;
|
|
default:
|
|
pr_err("transaction release %d bad object type %x\n",
|
|
@@ -3912,7 +3971,7 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
|
|
} else if (ret) {
|
|
u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
|
|
|
|
- ksys_close(*fdp);
|
|
+ binder_deferred_fd_close(*fdp);
|
|
}
|
|
list_del(&fixup->fixup_entry);
|
|
kfree(fixup);
|
|
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
|
|
index b8c3f9e6af899..adf28788cab52 100644
|
|
--- a/drivers/ata/libata-core.c
|
|
+++ b/drivers/ata/libata-core.c
|
|
@@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
|
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
|
|
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
|
|
{ "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
|
|
+ { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
|
|
|
|
/* devices that don't properly handle queued TRIM commands */
|
|
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
|
|
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
|
|
index 4b1ff5bc256a3..59b2317acea99 100644
|
|
--- a/drivers/ata/sata_rcar.c
|
|
+++ b/drivers/ata/sata_rcar.c
|
|
@@ -891,7 +891,9 @@ static int sata_rcar_probe(struct platform_device *pdev)
|
|
int ret = 0;
|
|
|
|
irq = platform_get_irq(pdev, 0);
|
|
- if (irq <= 0)
|
|
+ if (irq < 0)
|
|
+ return irq;
|
|
+ if (!irq)
|
|
return -EINVAL;
|
|
|
|
priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL);
|
|
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
|
|
index 29f102dcfec49..329ce9072ee9f 100644
|
|
--- a/drivers/atm/he.c
|
|
+++ b/drivers/atm/he.c
|
|
@@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
|
|
instead of '/ 512', use '>> 9' to prevent a call
|
|
to divdu3 on x86 platforms
|
|
*/
|
|
- rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
|
|
+ rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
|
|
|
|
if (rate_cps < 10)
|
|
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
|
|
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
|
|
index 8bfd27ec73d60..e06a57936cc96 100644
|
|
--- a/drivers/base/bus.c
|
|
+++ b/drivers/base/bus.c
|
|
@@ -31,6 +31,9 @@ static struct kset *system_kset;
|
|
|
|
#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
|
|
|
|
+#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
|
|
+ struct driver_attribute driver_attr_##_name = \
|
|
+ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
|
|
|
|
static int __must_check bus_rescan_devices_helper(struct device *dev,
|
|
void *data);
|
|
@@ -195,7 +198,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
|
|
bus_put(bus);
|
|
return err;
|
|
}
|
|
-static DRIVER_ATTR_WO(unbind);
|
|
+static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
|
|
|
|
/*
|
|
* Manually attach a device to a driver.
|
|
@@ -231,7 +234,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
|
|
bus_put(bus);
|
|
return err;
|
|
}
|
|
-static DRIVER_ATTR_WO(bind);
|
|
+static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
|
|
|
|
static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
|
|
{
|
|
@@ -611,8 +614,10 @@ static void remove_probe_files(struct bus_type *bus)
|
|
static ssize_t uevent_store(struct device_driver *drv, const char *buf,
|
|
size_t count)
|
|
{
|
|
- kobject_synth_uevent(&drv->p->kobj, buf, count);
|
|
- return count;
|
|
+ int rc;
|
|
+
|
|
+ rc = kobject_synth_uevent(&drv->p->kobj, buf, count);
|
|
+ return rc ? rc : count;
|
|
}
|
|
static DRIVER_ATTR_WO(uevent);
|
|
|
|
@@ -828,8 +833,10 @@ static void klist_devices_put(struct klist_node *n)
|
|
static ssize_t bus_uevent_store(struct bus_type *bus,
|
|
const char *buf, size_t count)
|
|
{
|
|
- kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
|
|
- return count;
|
|
+ int rc;
|
|
+
|
|
+ rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
|
|
+ return rc ? rc : count;
|
|
}
|
|
static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
|
|
|
|
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
|
|
index cf78fa6d470d4..a7359535caf5d 100644
|
|
--- a/drivers/base/cacheinfo.c
|
|
+++ b/drivers/base/cacheinfo.c
|
|
@@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
|
|
ct_idx = get_cacheinfo_idx(this_leaf->type);
|
|
propname = cache_type_info[ct_idx].size_prop;
|
|
|
|
- if (of_property_read_u32(np, propname, &this_leaf->size))
|
|
- this_leaf->size = 0;
|
|
+ of_property_read_u32(np, propname, &this_leaf->size);
|
|
}
|
|
|
|
/* not cache_line_size() because that's a macro in include/linux/cache.h */
|
|
@@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
|
|
ct_idx = get_cacheinfo_idx(this_leaf->type);
|
|
propname = cache_type_info[ct_idx].nr_sets_prop;
|
|
|
|
- if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
|
|
- this_leaf->number_of_sets = 0;
|
|
+ of_property_read_u32(np, propname, &this_leaf->number_of_sets);
|
|
}
|
|
|
|
static void cache_associativity(struct cacheinfo *this_leaf)
|
|
diff --git a/drivers/base/core.c b/drivers/base/core.c
|
|
index 04bbcd779e114..92e2c32c22270 100644
|
|
--- a/drivers/base/core.c
|
|
+++ b/drivers/base/core.c
|
|
@@ -1067,8 +1067,14 @@ out:
|
|
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
- if (kobject_synth_uevent(&dev->kobj, buf, count))
|
|
+ int rc;
|
|
+
|
|
+ rc = kobject_synth_uevent(&dev->kobj, buf, count);
|
|
+
|
|
+ if (rc) {
|
|
dev_err(dev, "uevent: failed to send synthetic uevent\n");
|
|
+ return rc;
|
|
+ }
|
|
|
|
return count;
|
|
}
|
|
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
|
|
index 169412ee4ae80..9d6604b50d75e 100644
|
|
--- a/drivers/base/dd.c
|
|
+++ b/drivers/base/dd.c
|
|
@@ -928,16 +928,13 @@ static void __device_release_driver(struct device *dev, struct device *parent)
|
|
|
|
drv = dev->driver;
|
|
if (drv) {
|
|
- if (driver_allows_async_probing(drv))
|
|
- async_synchronize_full();
|
|
-
|
|
while (device_links_busy(dev)) {
|
|
device_unlock(dev);
|
|
- if (parent)
|
|
+ if (parent && dev->bus->need_parent_lock)
|
|
device_unlock(parent);
|
|
|
|
device_links_unbind_consumers(dev);
|
|
- if (parent)
|
|
+ if (parent && dev->bus->need_parent_lock)
|
|
device_lock(parent);
|
|
|
|
device_lock(dev);
|
|
@@ -1036,6 +1033,9 @@ void driver_detach(struct device_driver *drv)
|
|
struct device_private *dev_prv;
|
|
struct device *dev;
|
|
|
|
+ if (driver_allows_async_probing(drv))
|
|
+ async_synchronize_full();
|
|
+
|
|
for (;;) {
|
|
spin_lock(&drv->p->klist_devices.k_lock);
|
|
if (list_empty(&drv->p->klist_devices.k_list)) {
|
|
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
|
|
index f39a920496fbb..8da314b81eabb 100644
|
|
--- a/drivers/base/platform-msi.c
|
|
+++ b/drivers/base/platform-msi.c
|
|
@@ -368,14 +368,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
|
|
unsigned int nvec)
|
|
{
|
|
struct platform_msi_priv_data *data = domain->host_data;
|
|
- struct msi_desc *desc;
|
|
- for_each_msi_entry(desc, data->dev) {
|
|
+ struct msi_desc *desc, *tmp;
|
|
+ for_each_msi_entry_safe(desc, tmp, data->dev) {
|
|
if (WARN_ON(!desc->irq || desc->nvec_used != 1))
|
|
return;
|
|
if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
|
|
continue;
|
|
|
|
irq_domain_free_irqs_common(domain, desc->irq, 1);
|
|
+ list_del(&desc->list);
|
|
+ free_msi_entry(desc);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
|
|
index d15703b1ffe84..7145031731504 100644
|
|
--- a/drivers/block/drbd/drbd_nl.c
|
|
+++ b/drivers/block/drbd/drbd_nl.c
|
|
@@ -668,14 +668,15 @@ drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int for
|
|
if (rv == SS_TWO_PRIMARIES) {
|
|
/* Maybe the peer is detected as dead very soon...
|
|
retry at most once more in this case. */
|
|
- int timeo;
|
|
- rcu_read_lock();
|
|
- nc = rcu_dereference(connection->net_conf);
|
|
- timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
|
|
- rcu_read_unlock();
|
|
- schedule_timeout_interruptible(timeo);
|
|
- if (try < max_tries)
|
|
+ if (try < max_tries) {
|
|
+ int timeo;
|
|
try = max_tries - 1;
|
|
+ rcu_read_lock();
|
|
+ nc = rcu_dereference(connection->net_conf);
|
|
+ timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
|
|
+ rcu_read_unlock();
|
|
+ schedule_timeout_interruptible(timeo);
|
|
+ }
|
|
continue;
|
|
}
|
|
if (rv < SS_SUCCESS) {
|
|
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
|
|
index 61c392752fe4b..e3da52b524d3b 100644
|
|
--- a/drivers/block/drbd/drbd_receiver.c
|
|
+++ b/drivers/block/drbd/drbd_receiver.c
|
|
@@ -3364,7 +3364,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
|
|
enum drbd_conns rv = C_MASK;
|
|
enum drbd_disk_state mydisk;
|
|
struct net_conf *nc;
|
|
- int hg, rule_nr, rr_conflict, tentative;
|
|
+ int hg, rule_nr, rr_conflict, tentative, always_asbp;
|
|
|
|
mydisk = device->state.disk;
|
|
if (mydisk == D_NEGOTIATING)
|
|
@@ -3415,8 +3415,12 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
|
|
|
|
rcu_read_lock();
|
|
nc = rcu_dereference(peer_device->connection->net_conf);
|
|
+ always_asbp = nc->always_asbp;
|
|
+ rr_conflict = nc->rr_conflict;
|
|
+ tentative = nc->tentative;
|
|
+ rcu_read_unlock();
|
|
|
|
- if (hg == 100 || (hg == -100 && nc->always_asbp)) {
|
|
+ if (hg == 100 || (hg == -100 && always_asbp)) {
|
|
int pcount = (device->state.role == R_PRIMARY)
|
|
+ (peer_role == R_PRIMARY);
|
|
int forced = (hg == -100);
|
|
@@ -3455,9 +3459,6 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
|
|
"Sync from %s node\n",
|
|
(hg < 0) ? "peer" : "this");
|
|
}
|
|
- rr_conflict = nc->rr_conflict;
|
|
- tentative = nc->tentative;
|
|
- rcu_read_unlock();
|
|
|
|
if (hg == -100) {
|
|
/* FIXME this log message is not correct if we end up here
|
|
@@ -4142,7 +4143,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
|
|
kfree(device->p_uuid);
|
|
device->p_uuid = p_uuid;
|
|
|
|
- if (device->state.conn < C_CONNECTED &&
|
|
+ if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
|
|
device->state.disk < D_INCONSISTENT &&
|
|
device->state.role == R_PRIMARY &&
|
|
(device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
|
|
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
|
|
index cb0cc86850762..84b055aa81ba8 100644
|
|
--- a/drivers/block/loop.c
|
|
+++ b/drivers/block/loop.c
|
|
@@ -83,7 +83,7 @@
|
|
#include <linux/uaccess.h>
|
|
|
|
static DEFINE_IDR(loop_index_idr);
|
|
-static DEFINE_MUTEX(loop_index_mutex);
|
|
+static DEFINE_MUTEX(loop_ctl_mutex);
|
|
|
|
static int max_part;
|
|
static int part_shift;
|
|
@@ -630,18 +630,7 @@ static void loop_reread_partitions(struct loop_device *lo,
|
|
{
|
|
int rc;
|
|
|
|
- /*
|
|
- * bd_mutex has been held already in release path, so don't
|
|
- * acquire it if this function is called in such case.
|
|
- *
|
|
- * If the reread partition isn't from release path, lo_refcnt
|
|
- * must be at least one and it can only become zero when the
|
|
- * current holder is released.
|
|
- */
|
|
- if (!atomic_read(&lo->lo_refcnt))
|
|
- rc = __blkdev_reread_part(bdev);
|
|
- else
|
|
- rc = blkdev_reread_part(bdev);
|
|
+ rc = blkdev_reread_part(bdev);
|
|
if (rc)
|
|
pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
|
|
__func__, lo->lo_number, lo->lo_file_name, rc);
|
|
@@ -688,26 +677,30 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
|
|
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
|
|
unsigned int arg)
|
|
{
|
|
- struct file *file, *old_file;
|
|
+ struct file *file = NULL, *old_file;
|
|
int error;
|
|
+ bool partscan;
|
|
|
|
+ error = mutex_lock_killable(&loop_ctl_mutex);
|
|
+ if (error)
|
|
+ return error;
|
|
error = -ENXIO;
|
|
if (lo->lo_state != Lo_bound)
|
|
- goto out;
|
|
+ goto out_err;
|
|
|
|
/* the loop device has to be read-only */
|
|
error = -EINVAL;
|
|
if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
|
|
- goto out;
|
|
+ goto out_err;
|
|
|
|
error = -EBADF;
|
|
file = fget(arg);
|
|
if (!file)
|
|
- goto out;
|
|
+ goto out_err;
|
|
|
|
error = loop_validate_file(file, bdev);
|
|
if (error)
|
|
- goto out_putf;
|
|
+ goto out_err;
|
|
|
|
old_file = lo->lo_backing_file;
|
|
|
|
@@ -715,7 +708,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
|
|
|
|
/* size of the new backing store needs to be the same */
|
|
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
|
|
- goto out_putf;
|
|
+ goto out_err;
|
|
|
|
/* and ... switch */
|
|
blk_mq_freeze_queue(lo->lo_queue);
|
|
@@ -726,15 +719,22 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
|
|
lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
|
|
loop_update_dio(lo);
|
|
blk_mq_unfreeze_queue(lo->lo_queue);
|
|
-
|
|
+ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+ /*
|
|
+ * We must drop file reference outside of loop_ctl_mutex as dropping
|
|
+ * the file ref can take bd_mutex which creates circular locking
|
|
+ * dependency.
|
|
+ */
|
|
fput(old_file);
|
|
- if (lo->lo_flags & LO_FLAGS_PARTSCAN)
|
|
+ if (partscan)
|
|
loop_reread_partitions(lo, bdev);
|
|
return 0;
|
|
|
|
- out_putf:
|
|
- fput(file);
|
|
- out:
|
|
+out_err:
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+ if (file)
|
|
+ fput(file);
|
|
return error;
|
|
}
|
|
|
|
@@ -909,6 +909,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
|
|
int lo_flags = 0;
|
|
int error;
|
|
loff_t size;
|
|
+ bool partscan;
|
|
|
|
/* This is safe, since we have a reference from open(). */
|
|
__module_get(THIS_MODULE);
|
|
@@ -918,13 +919,17 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
|
|
if (!file)
|
|
goto out;
|
|
|
|
+ error = mutex_lock_killable(&loop_ctl_mutex);
|
|
+ if (error)
|
|
+ goto out_putf;
|
|
+
|
|
error = -EBUSY;
|
|
if (lo->lo_state != Lo_unbound)
|
|
- goto out_putf;
|
|
+ goto out_unlock;
|
|
|
|
error = loop_validate_file(file, bdev);
|
|
if (error)
|
|
- goto out_putf;
|
|
+ goto out_unlock;
|
|
|
|
mapping = file->f_mapping;
|
|
inode = mapping->host;
|
|
@@ -936,10 +941,10 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
|
|
error = -EFBIG;
|
|
size = get_loop_size(lo, file);
|
|
if ((loff_t)(sector_t)size != size)
|
|
- goto out_putf;
|
|
+ goto out_unlock;
|
|
error = loop_prepare_queue(lo);
|
|
if (error)
|
|
- goto out_putf;
|
|
+ goto out_unlock;
|
|
|
|
error = 0;
|
|
|
|
@@ -971,18 +976,22 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
|
|
lo->lo_state = Lo_bound;
|
|
if (part_shift)
|
|
lo->lo_flags |= LO_FLAGS_PARTSCAN;
|
|
- if (lo->lo_flags & LO_FLAGS_PARTSCAN)
|
|
- loop_reread_partitions(lo, bdev);
|
|
+ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
|
|
|
|
/* Grab the block_device to prevent its destruction after we
|
|
- * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
|
|
+ * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
|
|
*/
|
|
bdgrab(bdev);
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+ if (partscan)
|
|
+ loop_reread_partitions(lo, bdev);
|
|
return 0;
|
|
|
|
- out_putf:
|
|
+out_unlock:
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+out_putf:
|
|
fput(file);
|
|
- out:
|
|
+out:
|
|
/* This is safe: open() is still holding a reference. */
|
|
module_put(THIS_MODULE);
|
|
return error;
|
|
@@ -1025,39 +1034,31 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
|
|
return err;
|
|
}
|
|
|
|
-static int loop_clr_fd(struct loop_device *lo)
|
|
+static int __loop_clr_fd(struct loop_device *lo, bool release)
|
|
{
|
|
- struct file *filp = lo->lo_backing_file;
|
|
+ struct file *filp = NULL;
|
|
gfp_t gfp = lo->old_gfp_mask;
|
|
struct block_device *bdev = lo->lo_device;
|
|
+ int err = 0;
|
|
+ bool partscan = false;
|
|
+ int lo_number;
|
|
|
|
- if (lo->lo_state != Lo_bound)
|
|
- return -ENXIO;
|
|
-
|
|
- /*
|
|
- * If we've explicitly asked to tear down the loop device,
|
|
- * and it has an elevated reference count, set it for auto-teardown when
|
|
- * the last reference goes away. This stops $!~#$@ udev from
|
|
- * preventing teardown because it decided that it needs to run blkid on
|
|
- * the loopback device whenever they appear. xfstests is notorious for
|
|
- * failing tests because blkid via udev races with a losetup
|
|
- * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
|
|
- * command to fail with EBUSY.
|
|
- */
|
|
- if (atomic_read(&lo->lo_refcnt) > 1) {
|
|
- lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
- return 0;
|
|
+ mutex_lock(&loop_ctl_mutex);
|
|
+ if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
|
|
+ err = -ENXIO;
|
|
+ goto out_unlock;
|
|
}
|
|
|
|
- if (filp == NULL)
|
|
- return -EINVAL;
|
|
+ filp = lo->lo_backing_file;
|
|
+ if (filp == NULL) {
|
|
+ err = -EINVAL;
|
|
+ goto out_unlock;
|
|
+ }
|
|
|
|
/* freeze request queue during the transition */
|
|
blk_mq_freeze_queue(lo->lo_queue);
|
|
|
|
spin_lock_irq(&lo->lo_lock);
|
|
- lo->lo_state = Lo_rundown;
|
|
lo->lo_backing_file = NULL;
|
|
spin_unlock_irq(&lo->lo_lock);
|
|
|
|
@@ -1093,21 +1094,73 @@ static int loop_clr_fd(struct loop_device *lo)
|
|
module_put(THIS_MODULE);
|
|
blk_mq_unfreeze_queue(lo->lo_queue);
|
|
|
|
- if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
|
|
- loop_reread_partitions(lo, bdev);
|
|
+ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
|
|
+ lo_number = lo->lo_number;
|
|
lo->lo_flags = 0;
|
|
if (!part_shift)
|
|
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
|
|
loop_unprepare_queue(lo);
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
+out_unlock:
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+ if (partscan) {
|
|
+ /*
|
|
+ * bd_mutex has been held already in release path, so don't
|
|
+ * acquire it if this function is called in such case.
|
|
+ *
|
|
+ * If the reread partition isn't from release path, lo_refcnt
|
|
+ * must be at least one and it can only become zero when the
|
|
+ * current holder is released.
|
|
+ */
|
|
+ if (release)
|
|
+ err = __blkdev_reread_part(bdev);
|
|
+ else
|
|
+ err = blkdev_reread_part(bdev);
|
|
+ pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
|
|
+ __func__, lo_number, err);
|
|
+ /* Device is gone, no point in returning error */
|
|
+ err = 0;
|
|
+ }
|
|
/*
|
|
- * Need not hold lo_ctl_mutex to fput backing file.
|
|
- * Calling fput holding lo_ctl_mutex triggers a circular
|
|
+ * Need not hold loop_ctl_mutex to fput backing file.
|
|
+ * Calling fput holding loop_ctl_mutex triggers a circular
|
|
* lock dependency possibility warning as fput can take
|
|
- * bd_mutex which is usually taken before lo_ctl_mutex.
|
|
+ * bd_mutex which is usually taken before loop_ctl_mutex.
|
|
*/
|
|
- fput(filp);
|
|
- return 0;
|
|
+ if (filp)
|
|
+ fput(filp);
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int loop_clr_fd(struct loop_device *lo)
|
|
+{
|
|
+ int err;
|
|
+
|
|
+ err = mutex_lock_killable(&loop_ctl_mutex);
|
|
+ if (err)
|
|
+ return err;
|
|
+ if (lo->lo_state != Lo_bound) {
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+ return -ENXIO;
|
|
+ }
|
|
+ /*
|
|
+ * If we've explicitly asked to tear down the loop device,
|
|
+ * and it has an elevated reference count, set it for auto-teardown when
|
|
+ * the last reference goes away. This stops $!~#$@ udev from
|
|
+ * preventing teardown because it decided that it needs to run blkid on
|
|
+ * the loopback device whenever they appear. xfstests is notorious for
|
|
+ * failing tests because blkid via udev races with a losetup
|
|
+ * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
|
|
+ * command to fail with EBUSY.
|
|
+ */
|
|
+ if (atomic_read(&lo->lo_refcnt) > 1) {
|
|
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+ return 0;
|
|
+ }
|
|
+ lo->lo_state = Lo_rundown;
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+
|
|
+ return __loop_clr_fd(lo, false);
|
|
}
|
|
|
|
static int
|
|
@@ -1116,47 +1169,72 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
|
|
int err;
|
|
struct loop_func_table *xfer;
|
|
kuid_t uid = current_uid();
|
|
+ struct block_device *bdev;
|
|
+ bool partscan = false;
|
|
|
|
+ err = mutex_lock_killable(&loop_ctl_mutex);
|
|
+ if (err)
|
|
+ return err;
|
|
if (lo->lo_encrypt_key_size &&
|
|
!uid_eq(lo->lo_key_owner, uid) &&
|
|
- !capable(CAP_SYS_ADMIN))
|
|
- return -EPERM;
|
|
- if (lo->lo_state != Lo_bound)
|
|
- return -ENXIO;
|
|
- if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
|
|
- return -EINVAL;
|
|
+ !capable(CAP_SYS_ADMIN)) {
|
|
+ err = -EPERM;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+ if (lo->lo_state != Lo_bound) {
|
|
+ err = -ENXIO;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+ if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
|
|
+ err = -EINVAL;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+
|
|
+ if (lo->lo_offset != info->lo_offset ||
|
|
+ lo->lo_sizelimit != info->lo_sizelimit) {
|
|
+ sync_blockdev(lo->lo_device);
|
|
+ kill_bdev(lo->lo_device);
|
|
+ }
|
|
|
|
/* I/O need to be drained during transfer transition */
|
|
blk_mq_freeze_queue(lo->lo_queue);
|
|
|
|
err = loop_release_xfer(lo);
|
|
if (err)
|
|
- goto exit;
|
|
+ goto out_unfreeze;
|
|
|
|
if (info->lo_encrypt_type) {
|
|
unsigned int type = info->lo_encrypt_type;
|
|
|
|
if (type >= MAX_LO_CRYPT) {
|
|
err = -EINVAL;
|
|
- goto exit;
|
|
+ goto out_unfreeze;
|
|
}
|
|
xfer = xfer_funcs[type];
|
|
if (xfer == NULL) {
|
|
err = -EINVAL;
|
|
- goto exit;
|
|
+ goto out_unfreeze;
|
|
}
|
|
} else
|
|
xfer = NULL;
|
|
|
|
err = loop_init_xfer(lo, xfer, info);
|
|
if (err)
|
|
- goto exit;
|
|
+ goto out_unfreeze;
|
|
|
|
if (lo->lo_offset != info->lo_offset ||
|
|
lo->lo_sizelimit != info->lo_sizelimit) {
|
|
+ /* kill_bdev should have truncated all the pages */
|
|
+ if (lo->lo_device->bd_inode->i_mapping->nrpages) {
|
|
+ err = -EAGAIN;
|
|
+ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
|
|
+ __func__, lo->lo_number, lo->lo_file_name,
|
|
+ lo->lo_device->bd_inode->i_mapping->nrpages);
|
|
+ goto out_unfreeze;
|
|
+ }
|
|
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
|
|
err = -EFBIG;
|
|
- goto exit;
|
|
+ goto out_unfreeze;
|
|
}
|
|
}
|
|
|
|
@@ -1188,15 +1266,20 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
|
|
/* update dio if lo_offset or transfer is changed */
|
|
__loop_update_dio(lo, lo->use_dio);
|
|
|
|
- exit:
|
|
+out_unfreeze:
|
|
blk_mq_unfreeze_queue(lo->lo_queue);
|
|
|
|
if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
|
|
!(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
|
|
lo->lo_flags |= LO_FLAGS_PARTSCAN;
|
|
lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
|
|
- loop_reread_partitions(lo, lo->lo_device);
|
|
+ bdev = lo->lo_device;
|
|
+ partscan = true;
|
|
}
|
|
+out_unlock:
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+ if (partscan)
|
|
+ loop_reread_partitions(lo, bdev);
|
|
|
|
return err;
|
|
}
|
|
@@ -1204,12 +1287,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
|
|
static int
|
|
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
|
|
{
|
|
- struct file *file;
|
|
+ struct path path;
|
|
struct kstat stat;
|
|
int ret;
|
|
|
|
+ ret = mutex_lock_killable(&loop_ctl_mutex);
|
|
+ if (ret)
|
|
+ return ret;
|
|
if (lo->lo_state != Lo_bound) {
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
return -ENXIO;
|
|
}
|
|
|
|
@@ -1228,17 +1314,17 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
|
|
lo->lo_encrypt_key_size);
|
|
}
|
|
|
|
- /* Drop lo_ctl_mutex while we call into the filesystem. */
|
|
- file = get_file(lo->lo_backing_file);
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
- ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
|
|
- AT_STATX_SYNC_AS_STAT);
|
|
+ /* Drop loop_ctl_mutex while we call into the filesystem. */
|
|
+ path = lo->lo_backing_file->f_path;
|
|
+ path_get(&path);
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+ ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
|
|
if (!ret) {
|
|
info->lo_device = huge_encode_dev(stat.dev);
|
|
info->lo_inode = stat.ino;
|
|
info->lo_rdevice = huge_encode_dev(stat.rdev);
|
|
}
|
|
- fput(file);
|
|
+ path_put(&path);
|
|
return ret;
|
|
}
|
|
|
|
@@ -1322,10 +1408,8 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
|
|
struct loop_info64 info64;
|
|
int err;
|
|
|
|
- if (!arg) {
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
+ if (!arg)
|
|
return -EINVAL;
|
|
- }
|
|
err = loop_get_status(lo, &info64);
|
|
if (!err)
|
|
err = loop_info64_to_old(&info64, &info);
|
|
@@ -1340,10 +1424,8 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
|
|
struct loop_info64 info64;
|
|
int err;
|
|
|
|
- if (!arg) {
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
+ if (!arg)
|
|
return -EINVAL;
|
|
- }
|
|
err = loop_get_status(lo, &info64);
|
|
if (!err && copy_to_user(arg, &info64, sizeof(info64)))
|
|
err = -EFAULT;
|
|
@@ -1375,22 +1457,64 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
|
|
|
|
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
|
|
{
|
|
+ int err = 0;
|
|
+
|
|
if (lo->lo_state != Lo_bound)
|
|
return -ENXIO;
|
|
|
|
if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
|
|
return -EINVAL;
|
|
|
|
+ if (lo->lo_queue->limits.logical_block_size != arg) {
|
|
+ sync_blockdev(lo->lo_device);
|
|
+ kill_bdev(lo->lo_device);
|
|
+ }
|
|
+
|
|
blk_mq_freeze_queue(lo->lo_queue);
|
|
|
|
+ /* kill_bdev should have truncated all the pages */
|
|
+ if (lo->lo_queue->limits.logical_block_size != arg &&
|
|
+ lo->lo_device->bd_inode->i_mapping->nrpages) {
|
|
+ err = -EAGAIN;
|
|
+ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
|
|
+ __func__, lo->lo_number, lo->lo_file_name,
|
|
+ lo->lo_device->bd_inode->i_mapping->nrpages);
|
|
+ goto out_unfreeze;
|
|
+ }
|
|
+
|
|
blk_queue_logical_block_size(lo->lo_queue, arg);
|
|
blk_queue_physical_block_size(lo->lo_queue, arg);
|
|
blk_queue_io_min(lo->lo_queue, arg);
|
|
loop_update_dio(lo);
|
|
-
|
|
+out_unfreeze:
|
|
blk_mq_unfreeze_queue(lo->lo_queue);
|
|
|
|
- return 0;
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
|
|
+ unsigned long arg)
|
|
+{
|
|
+ int err;
|
|
+
|
|
+ err = mutex_lock_killable(&loop_ctl_mutex);
|
|
+ if (err)
|
|
+ return err;
|
|
+ switch (cmd) {
|
|
+ case LOOP_SET_CAPACITY:
|
|
+ err = loop_set_capacity(lo);
|
|
+ break;
|
|
+ case LOOP_SET_DIRECT_IO:
|
|
+ err = loop_set_dio(lo, arg);
|
|
+ break;
|
|
+ case LOOP_SET_BLOCK_SIZE:
|
|
+ err = loop_set_block_size(lo, arg);
|
|
+ break;
|
|
+ default:
|
|
+ err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
|
|
+ }
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
+ return err;
|
|
}
|
|
|
|
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
|
|
@@ -1399,64 +1523,42 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
|
|
struct loop_device *lo = bdev->bd_disk->private_data;
|
|
int err;
|
|
|
|
- err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1);
|
|
- if (err)
|
|
- goto out_unlocked;
|
|
-
|
|
switch (cmd) {
|
|
case LOOP_SET_FD:
|
|
- err = loop_set_fd(lo, mode, bdev, arg);
|
|
- break;
|
|
+ return loop_set_fd(lo, mode, bdev, arg);
|
|
case LOOP_CHANGE_FD:
|
|
- err = loop_change_fd(lo, bdev, arg);
|
|
- break;
|
|
+ return loop_change_fd(lo, bdev, arg);
|
|
case LOOP_CLR_FD:
|
|
- /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
|
|
- err = loop_clr_fd(lo);
|
|
- if (!err)
|
|
- goto out_unlocked;
|
|
- break;
|
|
+ return loop_clr_fd(lo);
|
|
case LOOP_SET_STATUS:
|
|
err = -EPERM;
|
|
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
|
|
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
|
|
err = loop_set_status_old(lo,
|
|
(struct loop_info __user *)arg);
|
|
+ }
|
|
break;
|
|
case LOOP_GET_STATUS:
|
|
- err = loop_get_status_old(lo, (struct loop_info __user *) arg);
|
|
- /* loop_get_status() unlocks lo_ctl_mutex */
|
|
- goto out_unlocked;
|
|
+ return loop_get_status_old(lo, (struct loop_info __user *) arg);
|
|
case LOOP_SET_STATUS64:
|
|
err = -EPERM;
|
|
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
|
|
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
|
|
err = loop_set_status64(lo,
|
|
(struct loop_info64 __user *) arg);
|
|
+ }
|
|
break;
|
|
case LOOP_GET_STATUS64:
|
|
- err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
|
|
- /* loop_get_status() unlocks lo_ctl_mutex */
|
|
- goto out_unlocked;
|
|
+ return loop_get_status64(lo, (struct loop_info64 __user *) arg);
|
|
case LOOP_SET_CAPACITY:
|
|
- err = -EPERM;
|
|
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
|
|
- err = loop_set_capacity(lo);
|
|
- break;
|
|
case LOOP_SET_DIRECT_IO:
|
|
- err = -EPERM;
|
|
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
|
|
- err = loop_set_dio(lo, arg);
|
|
- break;
|
|
case LOOP_SET_BLOCK_SIZE:
|
|
- err = -EPERM;
|
|
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
|
|
- err = loop_set_block_size(lo, arg);
|
|
- break;
|
|
+ if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
|
|
+ return -EPERM;
|
|
+ /* Fall through */
|
|
default:
|
|
- err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
|
|
+ err = lo_simple_ioctl(lo, cmd, arg);
|
|
+ break;
|
|
}
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
|
|
-out_unlocked:
|
|
return err;
|
|
}
|
|
|
|
@@ -1570,10 +1672,8 @@ loop_get_status_compat(struct loop_device *lo,
|
|
struct loop_info64 info64;
|
|
int err;
|
|
|
|
- if (!arg) {
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
+ if (!arg)
|
|
return -EINVAL;
|
|
- }
|
|
err = loop_get_status(lo, &info64);
|
|
if (!err)
|
|
err = loop_info64_to_compat(&info64, arg);
|
|
@@ -1588,20 +1688,12 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
|
|
|
|
switch(cmd) {
|
|
case LOOP_SET_STATUS:
|
|
- err = mutex_lock_killable(&lo->lo_ctl_mutex);
|
|
- if (!err) {
|
|
- err = loop_set_status_compat(lo,
|
|
- (const struct compat_loop_info __user *)arg);
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
- }
|
|
+ err = loop_set_status_compat(lo,
|
|
+ (const struct compat_loop_info __user *)arg);
|
|
break;
|
|
case LOOP_GET_STATUS:
|
|
- err = mutex_lock_killable(&lo->lo_ctl_mutex);
|
|
- if (!err) {
|
|
- err = loop_get_status_compat(lo,
|
|
- (struct compat_loop_info __user *)arg);
|
|
- /* loop_get_status() unlocks lo_ctl_mutex */
|
|
- }
|
|
+ err = loop_get_status_compat(lo,
|
|
+ (struct compat_loop_info __user *)arg);
|
|
break;
|
|
case LOOP_SET_CAPACITY:
|
|
case LOOP_CLR_FD:
|
|
@@ -1625,9 +1717,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
|
|
static int lo_open(struct block_device *bdev, fmode_t mode)
|
|
{
|
|
struct loop_device *lo;
|
|
- int err = 0;
|
|
+ int err;
|
|
|
|
- mutex_lock(&loop_index_mutex);
|
|
+ err = mutex_lock_killable(&loop_ctl_mutex);
|
|
+ if (err)
|
|
+ return err;
|
|
lo = bdev->bd_disk->private_data;
|
|
if (!lo) {
|
|
err = -ENXIO;
|
|
@@ -1636,26 +1730,30 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
|
|
|
|
atomic_inc(&lo->lo_refcnt);
|
|
out:
|
|
- mutex_unlock(&loop_index_mutex);
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
return err;
|
|
}
|
|
|
|
-static void __lo_release(struct loop_device *lo)
|
|
+static void lo_release(struct gendisk *disk, fmode_t mode)
|
|
{
|
|
- int err;
|
|
+ struct loop_device *lo;
|
|
|
|
+ mutex_lock(&loop_ctl_mutex);
|
|
+ lo = disk->private_data;
|
|
if (atomic_dec_return(&lo->lo_refcnt))
|
|
- return;
|
|
+ goto out_unlock;
|
|
|
|
- mutex_lock(&lo->lo_ctl_mutex);
|
|
if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
|
|
+ if (lo->lo_state != Lo_bound)
|
|
+ goto out_unlock;
|
|
+ lo->lo_state = Lo_rundown;
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
/*
|
|
* In autoclear mode, stop the loop thread
|
|
* and remove configuration after last close.
|
|
*/
|
|
- err = loop_clr_fd(lo);
|
|
- if (!err)
|
|
- return;
|
|
+ __loop_clr_fd(lo, true);
|
|
+ return;
|
|
} else if (lo->lo_state == Lo_bound) {
|
|
/*
|
|
* Otherwise keep thread (if running) and config,
|
|
@@ -1665,14 +1763,8 @@ static void __lo_release(struct loop_device *lo)
|
|
blk_mq_unfreeze_queue(lo->lo_queue);
|
|
}
|
|
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
-}
|
|
-
|
|
-static void lo_release(struct gendisk *disk, fmode_t mode)
|
|
-{
|
|
- mutex_lock(&loop_index_mutex);
|
|
- __lo_release(disk->private_data);
|
|
- mutex_unlock(&loop_index_mutex);
|
|
+out_unlock:
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
}
|
|
|
|
static const struct block_device_operations lo_fops = {
|
|
@@ -1711,10 +1803,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
|
|
struct loop_device *lo = ptr;
|
|
struct loop_func_table *xfer = data;
|
|
|
|
- mutex_lock(&lo->lo_ctl_mutex);
|
|
+ mutex_lock(&loop_ctl_mutex);
|
|
if (lo->lo_encryption == xfer)
|
|
loop_release_xfer(lo);
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
return 0;
|
|
}
|
|
|
|
@@ -1895,7 +1987,6 @@ static int loop_add(struct loop_device **l, int i)
|
|
if (!part_shift)
|
|
disk->flags |= GENHD_FL_NO_PART_SCAN;
|
|
disk->flags |= GENHD_FL_EXT_DEVT;
|
|
- mutex_init(&lo->lo_ctl_mutex);
|
|
atomic_set(&lo->lo_refcnt, 0);
|
|
lo->lo_number = i;
|
|
spin_lock_init(&lo->lo_lock);
|
|
@@ -1974,7 +2065,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
|
|
struct kobject *kobj;
|
|
int err;
|
|
|
|
- mutex_lock(&loop_index_mutex);
|
|
+ mutex_lock(&loop_ctl_mutex);
|
|
err = loop_lookup(&lo, MINOR(dev) >> part_shift);
|
|
if (err < 0)
|
|
err = loop_add(&lo, MINOR(dev) >> part_shift);
|
|
@@ -1982,7 +2073,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
|
|
kobj = NULL;
|
|
else
|
|
kobj = get_disk_and_module(lo->lo_disk);
|
|
- mutex_unlock(&loop_index_mutex);
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
|
|
*part = 0;
|
|
return kobj;
|
|
@@ -1992,9 +2083,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
|
|
unsigned long parm)
|
|
{
|
|
struct loop_device *lo;
|
|
- int ret = -ENOSYS;
|
|
+ int ret;
|
|
+
|
|
+ ret = mutex_lock_killable(&loop_ctl_mutex);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
- mutex_lock(&loop_index_mutex);
|
|
+ ret = -ENOSYS;
|
|
switch (cmd) {
|
|
case LOOP_CTL_ADD:
|
|
ret = loop_lookup(&lo, parm);
|
|
@@ -2008,21 +2103,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
|
|
ret = loop_lookup(&lo, parm);
|
|
if (ret < 0)
|
|
break;
|
|
- ret = mutex_lock_killable(&lo->lo_ctl_mutex);
|
|
- if (ret)
|
|
- break;
|
|
if (lo->lo_state != Lo_unbound) {
|
|
ret = -EBUSY;
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
break;
|
|
}
|
|
if (atomic_read(&lo->lo_refcnt) > 0) {
|
|
ret = -EBUSY;
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
break;
|
|
}
|
|
lo->lo_disk->private_data = NULL;
|
|
- mutex_unlock(&lo->lo_ctl_mutex);
|
|
idr_remove(&loop_index_idr, lo->lo_number);
|
|
loop_remove(lo);
|
|
break;
|
|
@@ -2032,7 +2121,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
|
|
break;
|
|
ret = loop_add(&lo, -1);
|
|
}
|
|
- mutex_unlock(&loop_index_mutex);
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
|
|
return ret;
|
|
}
|
|
@@ -2116,10 +2205,10 @@ static int __init loop_init(void)
|
|
THIS_MODULE, loop_probe, NULL, NULL);
|
|
|
|
/* pre-create number of devices given by config or max_loop */
|
|
- mutex_lock(&loop_index_mutex);
|
|
+ mutex_lock(&loop_ctl_mutex);
|
|
for (i = 0; i < nr; i++)
|
|
loop_add(&lo, i);
|
|
- mutex_unlock(&loop_index_mutex);
|
|
+ mutex_unlock(&loop_ctl_mutex);
|
|
|
|
printk(KERN_INFO "loop: module loaded\n");
|
|
return 0;
|
|
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
|
|
index 4d42c7af7de75..af75a5ee40944 100644
|
|
--- a/drivers/block/loop.h
|
|
+++ b/drivers/block/loop.h
|
|
@@ -54,7 +54,6 @@ struct loop_device {
|
|
|
|
spinlock_t lo_lock;
|
|
int lo_state;
|
|
- struct mutex lo_ctl_mutex;
|
|
struct kthread_worker worker;
|
|
struct task_struct *worker_task;
|
|
bool use_dio;
|
|
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
|
|
index 4d4d6129ff662..c964315c7b0b3 100644
|
|
--- a/drivers/block/nbd.c
|
|
+++ b/drivers/block/nbd.c
|
|
@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
|
|
blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
|
|
set_capacity(nbd->disk, config->bytesize >> 9);
|
|
if (bdev) {
|
|
- if (bdev->bd_disk)
|
|
+ if (bdev->bd_disk) {
|
|
bd_set_size(bdev, config->bytesize);
|
|
- else
|
|
+ set_blocksize(bdev, config->blksize);
|
|
+ } else
|
|
bdev->bd_invalidated = 1;
|
|
bdput(bdev);
|
|
}
|
|
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
|
|
index 8e5140bbf2419..1e92b61d0bd51 100644
|
|
--- a/drivers/block/rbd.c
|
|
+++ b/drivers/block/rbd.c
|
|
@@ -5986,7 +5986,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
|
|
struct list_head *tmp;
|
|
int dev_id;
|
|
char opt_buf[6];
|
|
- bool already = false;
|
|
bool force = false;
|
|
int ret;
|
|
|
|
@@ -6019,13 +6018,13 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
|
|
spin_lock_irq(&rbd_dev->lock);
|
|
if (rbd_dev->open_count && !force)
|
|
ret = -EBUSY;
|
|
- else
|
|
- already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
|
|
- &rbd_dev->flags);
|
|
+ else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
|
|
+ &rbd_dev->flags))
|
|
+ ret = -EINPROGRESS;
|
|
spin_unlock_irq(&rbd_dev->lock);
|
|
}
|
|
spin_unlock(&rbd_dev_list_lock);
|
|
- if (ret < 0 || already)
|
|
+ if (ret)
|
|
return ret;
|
|
|
|
if (force) {
|
|
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
|
|
index b54fa6726303b..6b7b0d8a2acbc 100644
|
|
--- a/drivers/block/sunvdc.c
|
|
+++ b/drivers/block/sunvdc.c
|
|
@@ -45,6 +45,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
|
|
#define WAITING_FOR_GEN_CMD 0x04
|
|
#define WAITING_FOR_ANY -1
|
|
|
|
+#define VDC_MAX_RETRIES 10
|
|
+
|
|
static struct workqueue_struct *sunvdc_wq;
|
|
|
|
struct vdc_req_entry {
|
|
@@ -431,6 +433,7 @@ static int __vdc_tx_trigger(struct vdc_port *port)
|
|
.end_idx = dr->prod,
|
|
};
|
|
int err, delay;
|
|
+ int retries = 0;
|
|
|
|
hdr.seq = dr->snd_nxt;
|
|
delay = 1;
|
|
@@ -443,6 +446,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
|
|
udelay(delay);
|
|
if ((delay <<= 1) > 128)
|
|
delay = 128;
|
|
+ if (retries++ > VDC_MAX_RETRIES)
|
|
+ break;
|
|
} while (err == -EAGAIN);
|
|
|
|
if (err == -ENOTCONN)
|
|
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
|
|
index c1c676a33e4a6..1046459f172be 100644
|
|
--- a/drivers/block/swim3.c
|
|
+++ b/drivers/block/swim3.c
|
|
@@ -995,7 +995,11 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
|
|
struct swim3 __iomem *sw = fs->swim3;
|
|
|
|
mutex_lock(&swim3_mutex);
|
|
- if (fs->ref_count > 0 && --fs->ref_count == 0) {
|
|
+ if (fs->ref_count > 0)
|
|
+ --fs->ref_count;
|
|
+ else if (fs->ref_count == -1)
|
|
+ fs->ref_count = 0;
|
|
+ if (fs->ref_count == 0) {
|
|
swim3_action(fs, MOTOR_OFF);
|
|
out_8(&sw->control_bic, 0xff);
|
|
swim3_select(fs, RELAX);
|
|
@@ -1087,8 +1091,6 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
|
|
struct floppy_state *fs = &floppy_states[index];
|
|
int rc = -EBUSY;
|
|
|
|
- /* Do this first for message macros */
|
|
- memset(fs, 0, sizeof(*fs));
|
|
fs->mdev = mdev;
|
|
fs->index = index;
|
|
|
|
@@ -1188,14 +1190,15 @@ static int swim3_attach(struct macio_dev *mdev,
|
|
return rc;
|
|
}
|
|
|
|
- fs = &floppy_states[floppy_count];
|
|
-
|
|
disk = alloc_disk(1);
|
|
if (disk == NULL) {
|
|
rc = -ENOMEM;
|
|
goto out_unregister;
|
|
}
|
|
|
|
+ fs = &floppy_states[floppy_count];
|
|
+ memset(fs, 0, sizeof(*fs));
|
|
+
|
|
disk->queue = blk_mq_init_sq_queue(&fs->tag_set, &swim3_mq_ops, 2,
|
|
BLK_MQ_F_SHOULD_MERGE);
|
|
if (IS_ERR(disk->queue)) {
|
|
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
|
|
index 4879595200e1c..d1459cc1159ff 100644
|
|
--- a/drivers/block/zram/zram_drv.c
|
|
+++ b/drivers/block/zram/zram_drv.c
|
|
@@ -53,6 +53,11 @@ static size_t huge_class_size;
|
|
|
|
static void zram_free_page(struct zram *zram, size_t index);
|
|
|
|
+static int zram_slot_trylock(struct zram *zram, u32 index)
|
|
+{
|
|
+ return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].value);
|
|
+}
|
|
+
|
|
static void zram_slot_lock(struct zram *zram, u32 index)
|
|
{
|
|
bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
|
|
@@ -382,8 +387,10 @@ static ssize_t backing_dev_store(struct device *dev,
|
|
|
|
bdev = bdgrab(I_BDEV(inode));
|
|
err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
|
|
- if (err < 0)
|
|
+ if (err < 0) {
|
|
+ bdev = NULL;
|
|
goto out;
|
|
+ }
|
|
|
|
nr_pages = i_size_read(inode) >> PAGE_SHIFT;
|
|
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
|
|
@@ -399,7 +406,6 @@ static ssize_t backing_dev_store(struct device *dev,
|
|
goto out;
|
|
|
|
reset_bdev(zram);
|
|
- spin_lock_init(&zram->bitmap_lock);
|
|
|
|
zram->old_block_size = old_block_size;
|
|
zram->bdev = bdev;
|
|
@@ -443,29 +449,24 @@ out:
|
|
|
|
static unsigned long get_entry_bdev(struct zram *zram)
|
|
{
|
|
- unsigned long entry;
|
|
-
|
|
- spin_lock(&zram->bitmap_lock);
|
|
+ unsigned long blk_idx = 1;
|
|
+retry:
|
|
/* skip 0 bit to confuse zram.handle = 0 */
|
|
- entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
|
|
- if (entry == zram->nr_pages) {
|
|
- spin_unlock(&zram->bitmap_lock);
|
|
+ blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
|
|
+ if (blk_idx == zram->nr_pages)
|
|
return 0;
|
|
- }
|
|
|
|
- set_bit(entry, zram->bitmap);
|
|
- spin_unlock(&zram->bitmap_lock);
|
|
+ if (test_and_set_bit(blk_idx, zram->bitmap))
|
|
+ goto retry;
|
|
|
|
- return entry;
|
|
+ return blk_idx;
|
|
}
|
|
|
|
static void put_entry_bdev(struct zram *zram, unsigned long entry)
|
|
{
|
|
int was_set;
|
|
|
|
- spin_lock(&zram->bitmap_lock);
|
|
was_set = test_and_clear_bit(entry, zram->bitmap);
|
|
- spin_unlock(&zram->bitmap_lock);
|
|
WARN_ON_ONCE(!was_set);
|
|
}
|
|
|
|
@@ -886,9 +887,10 @@ static ssize_t debug_stat_show(struct device *dev,
|
|
|
|
down_read(&zram->init_lock);
|
|
ret = scnprintf(buf, PAGE_SIZE,
|
|
- "version: %d\n%8llu\n",
|
|
+ "version: %d\n%8llu %8llu\n",
|
|
version,
|
|
- (u64)atomic64_read(&zram->stats.writestall));
|
|
+ (u64)atomic64_read(&zram->stats.writestall),
|
|
+ (u64)atomic64_read(&zram->stats.miss_free));
|
|
up_read(&zram->init_lock);
|
|
|
|
return ret;
|
|
@@ -1400,10 +1402,14 @@ static void zram_slot_free_notify(struct block_device *bdev,
|
|
|
|
zram = bdev->bd_disk->private_data;
|
|
|
|
- zram_slot_lock(zram, index);
|
|
+ atomic64_inc(&zram->stats.notify_free);
|
|
+ if (!zram_slot_trylock(zram, index)) {
|
|
+ atomic64_inc(&zram->stats.miss_free);
|
|
+ return;
|
|
+ }
|
|
+
|
|
zram_free_page(zram, index);
|
|
zram_slot_unlock(zram, index);
|
|
- atomic64_inc(&zram->stats.notify_free);
|
|
}
|
|
|
|
static int zram_rw_page(struct block_device *bdev, sector_t sector,
|
|
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
|
|
index 72c8584b6dfff..d1095dfdffa81 100644
|
|
--- a/drivers/block/zram/zram_drv.h
|
|
+++ b/drivers/block/zram/zram_drv.h
|
|
@@ -79,6 +79,7 @@ struct zram_stats {
|
|
atomic64_t pages_stored; /* no. of pages currently stored */
|
|
atomic_long_t max_used_pages; /* no. of maximum pages stored */
|
|
atomic64_t writestall; /* no. of write slow paths */
|
|
+ atomic64_t miss_free; /* no. of missed free */
|
|
};
|
|
|
|
struct zram {
|
|
@@ -110,7 +111,6 @@ struct zram {
|
|
unsigned int old_block_size;
|
|
unsigned long *bitmap;
|
|
unsigned long nr_pages;
|
|
- spinlock_t bitmap_lock;
|
|
#endif
|
|
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
|
|
struct dentry *debugfs_dir;
|
|
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
|
|
index 7439a7eb50acb..05c8a7ed859c5 100644
|
|
--- a/drivers/bluetooth/btusb.c
|
|
+++ b/drivers/bluetooth/btusb.c
|
|
@@ -344,6 +344,7 @@ static const struct usb_device_id blacklist_table[] = {
|
|
/* Intel Bluetooth devices */
|
|
{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
|
|
{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
|
|
+ { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
|
|
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
|
|
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
|
|
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
|
|
@@ -2055,6 +2056,35 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
|
|
return -EILSEQ;
|
|
}
|
|
|
|
+static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
|
|
+ struct intel_boot_params *params,
|
|
+ char *fw_name, size_t len,
|
|
+ const char *suffix)
|
|
+{
|
|
+ switch (ver->hw_variant) {
|
|
+ case 0x0b: /* SfP */
|
|
+ case 0x0c: /* WsP */
|
|
+ snprintf(fw_name, len, "intel/ibt-%u-%u.%s",
|
|
+ le16_to_cpu(ver->hw_variant),
|
|
+ le16_to_cpu(params->dev_revid),
|
|
+ suffix);
|
|
+ break;
|
|
+ case 0x11: /* JfP */
|
|
+ case 0x12: /* ThP */
|
|
+ case 0x13: /* HrP */
|
|
+ case 0x14: /* CcP */
|
|
+ snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s",
|
|
+ le16_to_cpu(ver->hw_variant),
|
|
+ le16_to_cpu(ver->hw_revision),
|
|
+ le16_to_cpu(ver->fw_revision),
|
|
+ suffix);
|
|
+ break;
|
|
+ default:
|
|
+ return false;
|
|
+ }
|
|
+ return true;
|
|
+}
|
|
+
|
|
static int btusb_setup_intel_new(struct hci_dev *hdev)
|
|
{
|
|
struct btusb_data *data = hci_get_drvdata(hdev);
|
|
@@ -2106,7 +2136,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
|
|
case 0x11: /* JfP */
|
|
case 0x12: /* ThP */
|
|
case 0x13: /* HrP */
|
|
- case 0x14: /* QnJ, IcP */
|
|
+ case 0x14: /* CcP */
|
|
break;
|
|
default:
|
|
bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
|
|
@@ -2190,23 +2220,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
|
|
* ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
|
|
*
|
|
*/
|
|
- switch (ver.hw_variant) {
|
|
- case 0x0b: /* SfP */
|
|
- case 0x0c: /* WsP */
|
|
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
|
|
- le16_to_cpu(ver.hw_variant),
|
|
- le16_to_cpu(params.dev_revid));
|
|
- break;
|
|
- case 0x11: /* JfP */
|
|
- case 0x12: /* ThP */
|
|
- case 0x13: /* HrP */
|
|
- case 0x14: /* QnJ, IcP */
|
|
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
|
|
- le16_to_cpu(ver.hw_variant),
|
|
- le16_to_cpu(ver.hw_revision),
|
|
- le16_to_cpu(ver.fw_revision));
|
|
- break;
|
|
- default:
|
|
+ err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, fwname,
|
|
+ sizeof(fwname), "sfi");
|
|
+ if (!err) {
|
|
bt_dev_err(hdev, "Unsupported Intel firmware naming");
|
|
return -EINVAL;
|
|
}
|
|
@@ -2222,23 +2238,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
|
|
/* Save the DDC file name for later use to apply once the firmware
|
|
* downloading is done.
|
|
*/
|
|
- switch (ver.hw_variant) {
|
|
- case 0x0b: /* SfP */
|
|
- case 0x0c: /* WsP */
|
|
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
|
|
- le16_to_cpu(ver.hw_variant),
|
|
- le16_to_cpu(params.dev_revid));
|
|
- break;
|
|
- case 0x11: /* JfP */
|
|
- case 0x12: /* ThP */
|
|
- case 0x13: /* HrP */
|
|
- case 0x14: /* QnJ, IcP */
|
|
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
|
|
- le16_to_cpu(ver.hw_variant),
|
|
- le16_to_cpu(ver.hw_revision),
|
|
- le16_to_cpu(ver.fw_revision));
|
|
- break;
|
|
- default:
|
|
+ err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, fwname,
|
|
+ sizeof(fwname), "ddc");
|
|
+ if (!err) {
|
|
bt_dev_err(hdev, "Unsupported Intel firmware naming");
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
|
|
index ddbd8c6a0cebd..8001323691349 100644
|
|
--- a/drivers/bluetooth/hci_bcm.c
|
|
+++ b/drivers/bluetooth/hci_bcm.c
|
|
@@ -907,6 +907,10 @@ static int bcm_get_resources(struct bcm_device *dev)
|
|
|
|
dev->clk = devm_clk_get(dev->dev, NULL);
|
|
|
|
+ /* Handle deferred probing */
|
|
+ if (dev->clk == ERR_PTR(-EPROBE_DEFER))
|
|
+ return PTR_ERR(dev->clk);
|
|
+
|
|
dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
|
|
GPIOD_OUT_LOW);
|
|
if (IS_ERR(dev->device_wakeup))
|
|
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
|
|
index a5b8afe3609c1..f8b7345fe1cb6 100644
|
|
--- a/drivers/cdrom/gdrom.c
|
|
+++ b/drivers/cdrom/gdrom.c
|
|
@@ -873,6 +873,7 @@ static void __exit exit_gdrom(void)
|
|
platform_device_unregister(pd);
|
|
platform_driver_unregister(&gdrom_driver);
|
|
kfree(gd.toc);
|
|
+ kfree(gd.cd_info);
|
|
}
|
|
|
|
module_init(init_gdrom);
|
|
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
|
|
index a74ce885b5412..c518659b4d9fe 100644
|
|
--- a/drivers/char/ipmi/ipmi_msghandler.c
|
|
+++ b/drivers/char/ipmi/ipmi_msghandler.c
|
|
@@ -32,6 +32,7 @@
|
|
#include <linux/moduleparam.h>
|
|
#include <linux/workqueue.h>
|
|
#include <linux/uuid.h>
|
|
+#include <linux/nospec.h>
|
|
|
|
#define IPMI_DRIVER_VERSION "39.2"
|
|
|
|
@@ -62,7 +63,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data,
|
|
{ }
|
|
#endif
|
|
|
|
-static int initialized;
|
|
+static bool initialized;
|
|
+static bool drvregistered;
|
|
|
|
enum ipmi_panic_event_op {
|
|
IPMI_SEND_PANIC_EVENT_NONE,
|
|
@@ -612,7 +614,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
|
|
|
|
static LIST_HEAD(ipmi_interfaces);
|
|
static DEFINE_MUTEX(ipmi_interfaces_mutex);
|
|
-DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
|
|
+struct srcu_struct ipmi_interfaces_srcu;
|
|
|
|
/*
|
|
* List of watchers that want to know when smi's are added and deleted.
|
|
@@ -720,7 +722,15 @@ struct watcher_entry {
|
|
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
|
|
{
|
|
struct ipmi_smi *intf;
|
|
- int index;
|
|
+ int index, rv;
|
|
+
|
|
+ /*
|
|
+ * Make sure the driver is actually initialized, this handles
|
|
+ * problems with initialization order.
|
|
+ */
|
|
+ rv = ipmi_init_msghandler();
|
|
+ if (rv)
|
|
+ return rv;
|
|
|
|
mutex_lock(&smi_watchers_mutex);
|
|
|
|
@@ -884,7 +894,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
|
|
|
|
if (user) {
|
|
user->handler->ipmi_recv_hndl(msg, user->handler_data);
|
|
- release_ipmi_user(msg->user, index);
|
|
+ release_ipmi_user(user, index);
|
|
} else {
|
|
/* User went away, give up. */
|
|
ipmi_free_recv_msg(msg);
|
|
@@ -1076,7 +1086,7 @@ int ipmi_create_user(unsigned int if_num,
|
|
{
|
|
unsigned long flags;
|
|
struct ipmi_user *new_user;
|
|
- int rv = 0, index;
|
|
+ int rv, index;
|
|
struct ipmi_smi *intf;
|
|
|
|
/*
|
|
@@ -1094,18 +1104,9 @@ int ipmi_create_user(unsigned int if_num,
|
|
* Make sure the driver is actually initialized, this handles
|
|
* problems with initialization order.
|
|
*/
|
|
- if (!initialized) {
|
|
- rv = ipmi_init_msghandler();
|
|
- if (rv)
|
|
- return rv;
|
|
-
|
|
- /*
|
|
- * The init code doesn't return an error if it was turned
|
|
- * off, but it won't initialize. Check that.
|
|
- */
|
|
- if (!initialized)
|
|
- return -ENODEV;
|
|
- }
|
|
+ rv = ipmi_init_msghandler();
|
|
+ if (rv)
|
|
+ return rv;
|
|
|
|
new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
|
|
if (!new_user)
|
|
@@ -1183,6 +1184,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
|
|
static void free_user(struct kref *ref)
|
|
{
|
|
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
|
|
+ cleanup_srcu_struct(&user->release_barrier);
|
|
kfree(user);
|
|
}
|
|
|
|
@@ -1259,7 +1261,6 @@ int ipmi_destroy_user(struct ipmi_user *user)
|
|
{
|
|
_ipmi_destroy_user(user);
|
|
|
|
- cleanup_srcu_struct(&user->release_barrier);
|
|
kref_put(&user->refcount, free_user);
|
|
|
|
return 0;
|
|
@@ -1298,10 +1299,12 @@ int ipmi_set_my_address(struct ipmi_user *user,
|
|
if (!user)
|
|
return -ENODEV;
|
|
|
|
- if (channel >= IPMI_MAX_CHANNELS)
|
|
+ if (channel >= IPMI_MAX_CHANNELS) {
|
|
rv = -EINVAL;
|
|
- else
|
|
+ } else {
|
|
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
|
|
user->intf->addrinfo[channel].address = address;
|
|
+ }
|
|
release_ipmi_user(user, index);
|
|
|
|
return rv;
|
|
@@ -1318,10 +1321,12 @@ int ipmi_get_my_address(struct ipmi_user *user,
|
|
if (!user)
|
|
return -ENODEV;
|
|
|
|
- if (channel >= IPMI_MAX_CHANNELS)
|
|
+ if (channel >= IPMI_MAX_CHANNELS) {
|
|
rv = -EINVAL;
|
|
- else
|
|
+ } else {
|
|
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
|
|
*address = user->intf->addrinfo[channel].address;
|
|
+ }
|
|
release_ipmi_user(user, index);
|
|
|
|
return rv;
|
|
@@ -1338,10 +1343,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
|
|
if (!user)
|
|
return -ENODEV;
|
|
|
|
- if (channel >= IPMI_MAX_CHANNELS)
|
|
+ if (channel >= IPMI_MAX_CHANNELS) {
|
|
rv = -EINVAL;
|
|
- else
|
|
+ } else {
|
|
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
|
|
user->intf->addrinfo[channel].lun = LUN & 0x3;
|
|
+ }
|
|
release_ipmi_user(user, index);
|
|
|
|
return rv;
|
|
@@ -1358,10 +1365,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
|
|
if (!user)
|
|
return -ENODEV;
|
|
|
|
- if (channel >= IPMI_MAX_CHANNELS)
|
|
+ if (channel >= IPMI_MAX_CHANNELS) {
|
|
rv = -EINVAL;
|
|
- else
|
|
+ } else {
|
|
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
|
|
*address = user->intf->addrinfo[channel].lun;
|
|
+ }
|
|
release_ipmi_user(user, index);
|
|
|
|
return rv;
|
|
@@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf,
|
|
{
|
|
if (addr->channel >= IPMI_MAX_CHANNELS)
|
|
return -EINVAL;
|
|
+ addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
|
|
*lun = intf->addrinfo[addr->channel].lun;
|
|
*saddr = intf->addrinfo[addr->channel].address;
|
|
return 0;
|
|
@@ -3291,17 +3301,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
|
|
* Make sure the driver is actually initialized, this handles
|
|
* problems with initialization order.
|
|
*/
|
|
- if (!initialized) {
|
|
- rv = ipmi_init_msghandler();
|
|
- if (rv)
|
|
- return rv;
|
|
- /*
|
|
- * The init code doesn't return an error if it was turned
|
|
- * off, but it won't initialize. Check that.
|
|
- */
|
|
- if (!initialized)
|
|
- return -ENODEV;
|
|
- }
|
|
+ rv = ipmi_init_msghandler();
|
|
+ if (rv)
|
|
+ return rv;
|
|
|
|
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
|
|
if (!intf)
|
|
@@ -5017,6 +5019,22 @@ static int panic_event(struct notifier_block *this,
|
|
return NOTIFY_DONE;
|
|
}
|
|
|
|
+/* Must be called with ipmi_interfaces_mutex held. */
|
|
+static int ipmi_register_driver(void)
|
|
+{
|
|
+ int rv;
|
|
+
|
|
+ if (drvregistered)
|
|
+ return 0;
|
|
+
|
|
+ rv = driver_register(&ipmidriver.driver);
|
|
+ if (rv)
|
|
+ pr_err("Could not register IPMI driver\n");
|
|
+ else
|
|
+ drvregistered = true;
|
|
+ return rv;
|
|
+}
|
|
+
|
|
static struct notifier_block panic_block = {
|
|
.notifier_call = panic_event,
|
|
.next = NULL,
|
|
@@ -5027,66 +5045,75 @@ static int ipmi_init_msghandler(void)
|
|
{
|
|
int rv;
|
|
|
|
+ mutex_lock(&ipmi_interfaces_mutex);
|
|
+ rv = ipmi_register_driver();
|
|
+ if (rv)
|
|
+ goto out;
|
|
if (initialized)
|
|
- return 0;
|
|
-
|
|
- rv = driver_register(&ipmidriver.driver);
|
|
- if (rv) {
|
|
- pr_err("Could not register IPMI driver\n");
|
|
- return rv;
|
|
- }
|
|
+ goto out;
|
|
|
|
- pr_info("version " IPMI_DRIVER_VERSION "\n");
|
|
+ init_srcu_struct(&ipmi_interfaces_srcu);
|
|
|
|
timer_setup(&ipmi_timer, ipmi_timeout, 0);
|
|
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
|
|
|
|
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
|
|
|
|
- initialized = 1;
|
|
+ initialized = true;
|
|
|
|
- return 0;
|
|
+out:
|
|
+ mutex_unlock(&ipmi_interfaces_mutex);
|
|
+ return rv;
|
|
}
|
|
|
|
static int __init ipmi_init_msghandler_mod(void)
|
|
{
|
|
- ipmi_init_msghandler();
|
|
- return 0;
|
|
+ int rv;
|
|
+
|
|
+ pr_info("version " IPMI_DRIVER_VERSION "\n");
|
|
+
|
|
+ mutex_lock(&ipmi_interfaces_mutex);
|
|
+ rv = ipmi_register_driver();
|
|
+ mutex_unlock(&ipmi_interfaces_mutex);
|
|
+
|
|
+ return rv;
|
|
}
|
|
|
|
static void __exit cleanup_ipmi(void)
|
|
{
|
|
int count;
|
|
|
|
- if (!initialized)
|
|
- return;
|
|
-
|
|
- atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
|
|
+ if (initialized) {
|
|
+ atomic_notifier_chain_unregister(&panic_notifier_list,
|
|
+ &panic_block);
|
|
|
|
- /*
|
|
- * This can't be called if any interfaces exist, so no worry
|
|
- * about shutting down the interfaces.
|
|
- */
|
|
+ /*
|
|
+ * This can't be called if any interfaces exist, so no worry
|
|
+ * about shutting down the interfaces.
|
|
+ */
|
|
|
|
- /*
|
|
- * Tell the timer to stop, then wait for it to stop. This
|
|
- * avoids problems with race conditions removing the timer
|
|
- * here.
|
|
- */
|
|
- atomic_inc(&stop_operation);
|
|
- del_timer_sync(&ipmi_timer);
|
|
+ /*
|
|
+ * Tell the timer to stop, then wait for it to stop. This
|
|
+ * avoids problems with race conditions removing the timer
|
|
+ * here.
|
|
+ */
|
|
+ atomic_inc(&stop_operation);
|
|
+ del_timer_sync(&ipmi_timer);
|
|
|
|
- driver_unregister(&ipmidriver.driver);
|
|
+ initialized = false;
|
|
|
|
- initialized = 0;
|
|
+ /* Check for buffer leaks. */
|
|
+ count = atomic_read(&smi_msg_inuse_count);
|
|
+ if (count != 0)
|
|
+ pr_warn("SMI message count %d at exit\n", count);
|
|
+ count = atomic_read(&recv_msg_inuse_count);
|
|
+ if (count != 0)
|
|
+ pr_warn("recv message count %d at exit\n", count);
|
|
|
|
- /* Check for buffer leaks. */
|
|
- count = atomic_read(&smi_msg_inuse_count);
|
|
- if (count != 0)
|
|
- pr_warn("SMI message count %d at exit\n", count);
|
|
- count = atomic_read(&recv_msg_inuse_count);
|
|
- if (count != 0)
|
|
- pr_warn("recv message count %d at exit\n", count);
|
|
+ cleanup_srcu_struct(&ipmi_interfaces_srcu);
|
|
+ }
|
|
+ if (drvregistered)
|
|
+ driver_unregister(&ipmidriver.driver);
|
|
}
|
|
module_exit(cleanup_ipmi);
|
|
|
|
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
|
|
index ca9528c4f183e..b7a1ae2afaeac 100644
|
|
--- a/drivers/char/ipmi/ipmi_ssif.c
|
|
+++ b/drivers/char/ipmi/ipmi_ssif.c
|
|
@@ -632,8 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
|
|
|
|
/* Remove the multi-part read marker. */
|
|
len -= 2;
|
|
+ data += 2;
|
|
for (i = 0; i < len; i++)
|
|
- ssif_info->data[i] = data[i+2];
|
|
+ ssif_info->data[i] = data[i];
|
|
ssif_info->multi_len = len;
|
|
ssif_info->multi_pos = 1;
|
|
|
|
@@ -661,8 +662,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
|
|
}
|
|
|
|
blocknum = data[0];
|
|
+ len--;
|
|
+ data++;
|
|
+
|
|
+ if (blocknum != 0xff && len != 31) {
|
|
+ /* All blocks but the last must have 31 data bytes. */
|
|
+ result = -EIO;
|
|
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
|
|
+ pr_info("Received middle message <31\n");
|
|
|
|
- if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
|
|
+ goto continue_op;
|
|
+ }
|
|
+
|
|
+ if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
|
|
/* Received message too big, abort the operation. */
|
|
result = -E2BIG;
|
|
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
|
|
@@ -671,16 +683,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
|
|
goto continue_op;
|
|
}
|
|
|
|
- /* Remove the blocknum from the data. */
|
|
- len--;
|
|
for (i = 0; i < len; i++)
|
|
- ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
|
|
+ ssif_info->data[i + ssif_info->multi_len] = data[i];
|
|
ssif_info->multi_len += len;
|
|
if (blocknum == 0xff) {
|
|
/* End of read */
|
|
len = ssif_info->multi_len;
|
|
data = ssif_info->data;
|
|
- } else if (blocknum + 1 != ssif_info->multi_pos) {
|
|
+ } else if (blocknum != ssif_info->multi_pos) {
|
|
/*
|
|
* Out of sequence block, just abort. Block
|
|
* numbers start at zero for the second block,
|
|
@@ -707,6 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
|
|
}
|
|
}
|
|
|
|
+ continue_op:
|
|
if (result < 0) {
|
|
ssif_inc_stat(ssif_info, receive_errors);
|
|
} else {
|
|
@@ -714,8 +725,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
|
|
ssif_inc_stat(ssif_info, received_message_parts);
|
|
}
|
|
|
|
-
|
|
- continue_op:
|
|
if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
|
|
pr_info("DONE 1: state = %d, result=%d\n",
|
|
ssif_info->ssif_state, result);
|
|
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
|
|
index b5e3103c11755..e43c876a92232 100644
|
|
--- a/drivers/char/mwave/mwavedd.c
|
|
+++ b/drivers/char/mwave/mwavedd.c
|
|
@@ -59,6 +59,7 @@
|
|
#include <linux/mutex.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/serial_8250.h>
|
|
+#include <linux/nospec.h>
|
|
#include "smapi.h"
|
|
#include "mwavedd.h"
|
|
#include "3780i.h"
|
|
@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
|
|
ipcnum);
|
|
return -EINVAL;
|
|
}
|
|
+ ipcnum = array_index_nospec(ipcnum,
|
|
+ ARRAY_SIZE(pDrvData->IPCs));
|
|
PRINTK_3(TRACE_MWAVE,
|
|
"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
|
|
" ipcnum %x entry usIntCount %x\n",
|
|
@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
|
|
" Invalid ipcnum %x\n", ipcnum);
|
|
return -EINVAL;
|
|
}
|
|
+ ipcnum = array_index_nospec(ipcnum,
|
|
+ ARRAY_SIZE(pDrvData->IPCs));
|
|
PRINTK_3(TRACE_MWAVE,
|
|
"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
|
|
" ipcnum %x, usIntCount %x\n",
|
|
@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
|
|
ipcnum);
|
|
return -EINVAL;
|
|
}
|
|
+ ipcnum = array_index_nospec(ipcnum,
|
|
+ ARRAY_SIZE(pDrvData->IPCs));
|
|
mutex_lock(&mwave_mutex);
|
|
if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
|
|
pDrvData->IPCs[ipcnum].bIsEnabled = false;
|
|
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
|
|
index 129f640424b79..95db630dd722f 100644
|
|
--- a/drivers/char/tpm/tpm-interface.c
|
|
+++ b/drivers/char/tpm/tpm-interface.c
|
|
@@ -477,13 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
|
|
|
|
if (need_locality) {
|
|
rc = tpm_request_locality(chip, flags);
|
|
- if (rc < 0)
|
|
- goto out_no_locality;
|
|
+ if (rc < 0) {
|
|
+ need_locality = false;
|
|
+ goto out_locality;
|
|
+ }
|
|
}
|
|
|
|
rc = tpm_cmd_ready(chip, flags);
|
|
if (rc)
|
|
- goto out;
|
|
+ goto out_locality;
|
|
|
|
rc = tpm2_prepare_space(chip, space, ordinal, buf);
|
|
if (rc)
|
|
@@ -547,14 +549,13 @@ out_recv:
|
|
dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
|
|
|
|
out:
|
|
- rc = tpm_go_idle(chip, flags);
|
|
- if (rc)
|
|
- goto out;
|
|
+ /* may fail but do not override previous error value in rc */
|
|
+ tpm_go_idle(chip, flags);
|
|
|
|
+out_locality:
|
|
if (need_locality)
|
|
tpm_relinquish_locality(chip, flags);
|
|
|
|
-out_no_locality:
|
|
if (chip->ops->clk_enable != NULL)
|
|
chip->ops->clk_enable(chip, false);
|
|
|
|
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
index caa86b19c76dd..f74f451baf6ae 100644
|
|
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
@@ -369,6 +369,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
|
|
struct device *dev = chip->dev.parent;
|
|
struct i2c_client *client = to_i2c_client(dev);
|
|
u32 ordinal;
|
|
+ unsigned long duration;
|
|
size_t count = 0;
|
|
int burst_count, bytes2write, retries, rc = -EIO;
|
|
|
|
@@ -455,10 +456,12 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
|
|
return rc;
|
|
}
|
|
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
|
|
- rc = i2c_nuvoton_wait_for_data_avail(chip,
|
|
- tpm_calc_ordinal_duration(chip,
|
|
- ordinal),
|
|
- &priv->read_queue);
|
|
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
|
|
+ duration = tpm2_calc_ordinal_duration(chip, ordinal);
|
|
+ else
|
|
+ duration = tpm_calc_ordinal_duration(chip, ordinal);
|
|
+
|
|
+ rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
|
|
if (rc) {
|
|
dev_err(dev, "%s() timeout command duration\n", __func__);
|
|
i2c_nuvoton_ready(chip);
|
|
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
|
|
index 2fe225a697df8..3487e03d4bc61 100644
|
|
--- a/drivers/clk/at91/at91sam9x5.c
|
|
+++ b/drivers/clk/at91/at91sam9x5.c
|
|
@@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
|
|
return;
|
|
|
|
at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
|
|
- nck(at91sam9x5_systemck),
|
|
- nck(at91sam9x35_periphck), 0);
|
|
+ nck(at91sam9x5_systemck), 31, 0);
|
|
if (!at91sam9x5_pmc)
|
|
return;
|
|
|
|
@@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
|
|
parent_names[1] = "mainck";
|
|
parent_names[2] = "plladivck";
|
|
parent_names[3] = "utmick";
|
|
- parent_names[4] = "mck";
|
|
+ parent_names[4] = "masterck";
|
|
for (i = 0; i < 2; i++) {
|
|
char name[6];
|
|
|
|
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
|
|
index d69ad96fe988b..cd0ef7274fdbf 100644
|
|
--- a/drivers/clk/at91/sama5d2.c
|
|
+++ b/drivers/clk/at91/sama5d2.c
|
|
@@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
|
|
parent_names[1] = "mainck";
|
|
parent_names[2] = "plladivck";
|
|
parent_names[3] = "utmick";
|
|
- parent_names[4] = "mck";
|
|
+ parent_names[4] = "masterck";
|
|
for (i = 0; i < 3; i++) {
|
|
char name[6];
|
|
|
|
@@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
|
|
parent_names[1] = "mainck";
|
|
parent_names[2] = "plladivck";
|
|
parent_names[3] = "utmick";
|
|
- parent_names[4] = "mck";
|
|
+ parent_names[4] = "masterck";
|
|
parent_names[5] = "audiopll_pmcck";
|
|
for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
|
|
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
|
|
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
|
|
index e358be7f6c8d5..b645a9d59cdbd 100644
|
|
--- a/drivers/clk/at91/sama5d4.c
|
|
+++ b/drivers/clk/at91/sama5d4.c
|
|
@@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
|
|
parent_names[1] = "mainck";
|
|
parent_names[2] = "plladivck";
|
|
parent_names[3] = "utmick";
|
|
- parent_names[4] = "mck";
|
|
+ parent_names[4] = "masterck";
|
|
for (i = 0; i < 3; i++) {
|
|
char name[6];
|
|
|
|
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
|
|
index 15af423cc0c90..f5d54a64d33c5 100644
|
|
--- a/drivers/clk/imgtec/clk-boston.c
|
|
+++ b/drivers/clk/imgtec/clk-boston.c
|
|
@@ -73,27 +73,32 @@ static void __init clk_boston_setup(struct device_node *np)
|
|
hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
|
|
if (IS_ERR(hw)) {
|
|
pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
|
|
- return;
|
|
+ goto error;
|
|
}
|
|
onecell->hws[BOSTON_CLK_INPUT] = hw;
|
|
|
|
hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
|
|
if (IS_ERR(hw)) {
|
|
pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
|
|
- return;
|
|
+ goto error;
|
|
}
|
|
onecell->hws[BOSTON_CLK_SYS] = hw;
|
|
|
|
hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
|
|
if (IS_ERR(hw)) {
|
|
pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
|
|
- return;
|
|
+ goto error;
|
|
}
|
|
onecell->hws[BOSTON_CLK_CPU] = hw;
|
|
|
|
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell);
|
|
if (err)
|
|
pr_err("failed to add DT provider: %d\n", err);
|
|
+
|
|
+ return;
|
|
+
|
|
+error:
|
|
+ kfree(onecell);
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
|
|
index 99036527eb0d8..e695622c5aa56 100644
|
|
--- a/drivers/clk/imx/clk-busy.c
|
|
+++ b/drivers/clk/imx/clk-busy.c
|
|
@@ -154,7 +154,7 @@ static const struct clk_ops clk_busy_mux_ops = {
|
|
|
|
struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
|
|
u8 width, void __iomem *busy_reg, u8 busy_shift,
|
|
- const char **parent_names, int num_parents)
|
|
+ const char * const *parent_names, int num_parents)
|
|
{
|
|
struct clk_busy_mux *busy;
|
|
struct clk *clk;
|
|
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
|
|
index c9b327e0a8dd9..44817c1b0b88c 100644
|
|
--- a/drivers/clk/imx/clk-fixup-mux.c
|
|
+++ b/drivers/clk/imx/clk-fixup-mux.c
|
|
@@ -70,7 +70,7 @@ static const struct clk_ops clk_fixup_mux_ops = {
|
|
};
|
|
|
|
struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
|
|
- u8 shift, u8 width, const char **parents,
|
|
+ u8 shift, u8 width, const char * const *parents,
|
|
int num_parents, void (*fixup)(u32 *val))
|
|
{
|
|
struct clk_fixup_mux *fixup_mux;
|
|
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
|
|
index bbe0c60f4d09f..59f6a3e087dbc 100644
|
|
--- a/drivers/clk/imx/clk-imx6q.c
|
|
+++ b/drivers/clk/imx/clk-imx6q.c
|
|
@@ -508,8 +508,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
|
|
* lvds1_gate and lvds2_gate are pseudo-gates. Both can be
|
|
* independently configured as clock inputs or outputs. We treat
|
|
* the "output_enable" bit as a gate, even though it's really just
|
|
- * enabling clock output.
|
|
+ * enabling clock output. Initially the gate bits are cleared, as
|
|
+ * otherwise the exclusive configuration gets locked in the setup done
|
|
+ * by software running before the clock driver, with no way to change
|
|
+ * it.
|
|
*/
|
|
+ writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
|
|
clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
|
|
clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));
|
|
|
|
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
|
|
index 6fcfbbd907a56..e13d8814cfa4e 100644
|
|
--- a/drivers/clk/imx/clk-imx6sl.c
|
|
+++ b/drivers/clk/imx/clk-imx6sl.c
|
|
@@ -17,6 +17,8 @@
|
|
|
|
#include "clk.h"
|
|
|
|
+#define CCDR 0x4
|
|
+#define BM_CCM_CCDR_MMDC_CH0_MASK (1 << 17)
|
|
#define CCSR 0xc
|
|
#define BM_CCSR_PLL1_SW_CLK_SEL (1 << 2)
|
|
#define CACRR 0x10
|
|
@@ -411,6 +413,10 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
|
|
clks[IMX6SL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
|
|
clks[IMX6SL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
|
|
|
|
+ /* Ensure the MMDC CH0 handshake is bypassed */
|
|
+ writel_relaxed(readl_relaxed(base + CCDR) |
|
|
+ BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
|
|
+
|
|
imx_check_clocks(clks, ARRAY_SIZE(clks));
|
|
|
|
clk_data.clks = clks;
|
|
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
|
|
index 5895e2237b6c2..2c377e1882812 100644
|
|
--- a/drivers/clk/imx/clk.h
|
|
+++ b/drivers/clk/imx/clk.h
|
|
@@ -63,14 +63,14 @@ struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
|
|
|
|
struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
|
|
u8 width, void __iomem *busy_reg, u8 busy_shift,
|
|
- const char **parent_names, int num_parents);
|
|
+ const char * const *parent_names, int num_parents);
|
|
|
|
struct clk *imx_clk_fixup_divider(const char *name, const char *parent,
|
|
void __iomem *reg, u8 shift, u8 width,
|
|
void (*fixup)(u32 *val));
|
|
|
|
struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
|
|
- u8 shift, u8 width, const char **parents,
|
|
+ u8 shift, u8 width, const char * const *parents,
|
|
int num_parents, void (*fixup)(u32 *val));
|
|
|
|
static inline struct clk *imx_clk_fixed(const char *name, int rate)
|
|
@@ -79,7 +79,8 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate)
|
|
}
|
|
|
|
static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
|
|
- u8 shift, u8 width, const char **parents, int num_parents)
|
|
+ u8 shift, u8 width, const char * const *parents,
|
|
+ int num_parents)
|
|
{
|
|
return clk_register_mux(NULL, name, parents, num_parents,
|
|
CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
|
|
@@ -199,7 +200,8 @@ static inline struct clk *imx_clk_gate4(const char *name, const char *parent,
|
|
}
|
|
|
|
static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
|
|
- u8 shift, u8 width, const char **parents, int num_parents)
|
|
+ u8 shift, u8 width, const char * const *parents,
|
|
+ int num_parents)
|
|
{
|
|
return clk_register_mux(NULL, name, parents, num_parents,
|
|
CLK_SET_RATE_NO_REPARENT, reg, shift,
|
|
@@ -207,7 +209,8 @@ static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
|
|
}
|
|
|
|
static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
|
|
- u8 shift, u8 width, const char **parents, int num_parents)
|
|
+ u8 shift, u8 width, const char * const *parents,
|
|
+ int num_parents)
|
|
{
|
|
return clk_register_mux(NULL, name, parents, num_parents,
|
|
CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
|
|
@@ -215,8 +218,9 @@ static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
|
|
}
|
|
|
|
static inline struct clk *imx_clk_mux_flags(const char *name,
|
|
- void __iomem *reg, u8 shift, u8 width, const char **parents,
|
|
- int num_parents, unsigned long flags)
|
|
+ void __iomem *reg, u8 shift, u8 width,
|
|
+ const char * const *parents, int num_parents,
|
|
+ unsigned long flags)
|
|
{
|
|
return clk_register_mux(NULL, name, parents, num_parents,
|
|
flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0,
|
|
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
|
|
index 346b9e165b7a9..a3a826419c165 100644
|
|
--- a/drivers/clk/meson/meson8b.c
|
|
+++ b/drivers/clk/meson/meson8b.c
|
|
@@ -42,6 +42,11 @@ static const struct pll_params_table sys_pll_params_table[] = {
|
|
PLL_PARAMS(62, 1),
|
|
PLL_PARAMS(63, 1),
|
|
PLL_PARAMS(64, 1),
|
|
+ PLL_PARAMS(65, 1),
|
|
+ PLL_PARAMS(66, 1),
|
|
+ PLL_PARAMS(67, 1),
|
|
+ PLL_PARAMS(68, 1),
|
|
+ PLL_PARAMS(84, 1),
|
|
{ /* sentinel */ },
|
|
};
|
|
|
|
@@ -579,13 +584,14 @@ static struct clk_fixed_factor meson8b_cpu_div3 = {
|
|
};
|
|
|
|
static const struct clk_div_table cpu_scale_table[] = {
|
|
- { .val = 2, .div = 4 },
|
|
- { .val = 3, .div = 6 },
|
|
- { .val = 4, .div = 8 },
|
|
- { .val = 5, .div = 10 },
|
|
- { .val = 6, .div = 12 },
|
|
- { .val = 7, .div = 14 },
|
|
- { .val = 8, .div = 16 },
|
|
+ { .val = 1, .div = 4 },
|
|
+ { .val = 2, .div = 6 },
|
|
+ { .val = 3, .div = 8 },
|
|
+ { .val = 4, .div = 10 },
|
|
+ { .val = 5, .div = 12 },
|
|
+ { .val = 6, .div = 14 },
|
|
+ { .val = 7, .div = 16 },
|
|
+ { .val = 8, .div = 18 },
|
|
{ /* sentinel */ },
|
|
};
|
|
|
|
@@ -593,7 +599,7 @@ static struct clk_regmap meson8b_cpu_scale_div = {
|
|
.data = &(struct clk_regmap_div_data){
|
|
.offset = HHI_SYS_CPU_CLK_CNTL1,
|
|
.shift = 20,
|
|
- .width = 9,
|
|
+ .width = 10,
|
|
.table = cpu_scale_table,
|
|
.flags = CLK_DIVIDER_ALLOW_ZERO,
|
|
},
|
|
@@ -606,20 +612,27 @@ static struct clk_regmap meson8b_cpu_scale_div = {
|
|
},
|
|
};
|
|
|
|
+static u32 mux_table_cpu_scale_out_sel[] = { 0, 1, 3 };
|
|
static struct clk_regmap meson8b_cpu_scale_out_sel = {
|
|
.data = &(struct clk_regmap_mux_data){
|
|
.offset = HHI_SYS_CPU_CLK_CNTL0,
|
|
.mask = 0x3,
|
|
.shift = 2,
|
|
+ .table = mux_table_cpu_scale_out_sel,
|
|
},
|
|
.hw.init = &(struct clk_init_data){
|
|
.name = "cpu_scale_out_sel",
|
|
.ops = &clk_regmap_mux_ro_ops,
|
|
+ /*
|
|
+ * NOTE: We are skipping the parent with value 0x2 (which is
|
|
+ * "cpu_div3") because it results in a duty cycle of 33% which
|
|
+ * makes the system unstable and can result in a lockup of the
|
|
+ * whole system.
|
|
+ */
|
|
.parent_names = (const char *[]) { "cpu_in_sel",
|
|
"cpu_div2",
|
|
- "cpu_div3",
|
|
"cpu_scale_div" },
|
|
- .num_parents = 4,
|
|
+ .num_parents = 3,
|
|
.flags = CLK_SET_RATE_PARENT,
|
|
},
|
|
};
|
|
@@ -637,7 +650,8 @@ static struct clk_regmap meson8b_cpu_clk = {
|
|
"cpu_scale_out_sel" },
|
|
.num_parents = 2,
|
|
.flags = (CLK_SET_RATE_PARENT |
|
|
- CLK_SET_RATE_NO_REPARENT),
|
|
+ CLK_SET_RATE_NO_REPARENT |
|
|
+ CLK_IS_CRITICAL),
|
|
},
|
|
};
|
|
|
|
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
|
|
index 9f0ae403d5f53..cb714c3e2924e 100644
|
|
--- a/drivers/clk/qcom/gcc-msm8998.c
|
|
+++ b/drivers/clk/qcom/gcc-msm8998.c
|
|
@@ -2042,6 +2042,12 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
|
|
.hw.init = &(struct clk_init_data){
|
|
.name = "gcc_mmss_noc_cfg_ahb_clk",
|
|
.ops = &clk_branch2_ops,
|
|
+ /*
|
|
+ * Any access to mmss depends on this clock.
|
|
+ * Gating this clock has been shown to crash the system
|
|
+ * when mmssnoc_axi_rpm_clk is inited in rpmcc.
|
|
+ */
|
|
+ .flags = CLK_IS_CRITICAL,
|
|
},
|
|
},
|
|
};
|
|
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
|
|
index fa25e35ce7d5d..08b42b053fce3 100644
|
|
--- a/drivers/clk/rockchip/clk-rk3188.c
|
|
+++ b/drivers/clk/rockchip/clk-rk3188.c
|
|
@@ -382,7 +382,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
|
|
COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
|
|
RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
|
|
RK2928_CLKGATE_CON(0), 13, GFLAGS),
|
|
- COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT,
|
|
+ COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
|
|
RK2928_CLKSEL_CON(9), 0,
|
|
RK2928_CLKGATE_CON(0), 14, GFLAGS,
|
|
&common_spdif_fracmux),
|
|
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
|
|
index 2d5d8b43727e9..c4d0b6f6abf2e 100644
|
|
--- a/drivers/clk/socfpga/clk-pll-s10.c
|
|
+++ b/drivers/clk/socfpga/clk-pll-s10.c
|
|
@@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
|
|
/* Read mdiv and fdiv from the fdbck register */
|
|
reg = readl(socfpgaclk->hw.reg + 0x4);
|
|
mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
|
|
- vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
|
|
+ vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
|
|
|
|
return (unsigned long)vco_freq;
|
|
}
|
|
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
|
|
index 5b238fc314ac6..8281dfbf38c2f 100644
|
|
--- a/drivers/clk/socfpga/clk-s10.c
|
|
+++ b/drivers/clk/socfpga/clk-s10.c
|
|
@@ -12,17 +12,17 @@
|
|
|
|
#include "stratix10-clk.h"
|
|
|
|
-static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
|
|
- "f2s_free_clk",};
|
|
+static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
|
|
+ "f2s-free-clk",};
|
|
static const char * const cntr_mux[] = { "main_pll", "periph_pll",
|
|
- "osc1", "cb_intosc_hs_div2_clk",
|
|
- "f2s_free_clk"};
|
|
-static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
|
|
+ "osc1", "cb-intosc-hs-div2-clk",
|
|
+ "f2s-free-clk"};
|
|
+static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
|
|
|
|
static const char * const noc_free_mux[] = {"main_noc_base_clk",
|
|
"peri_noc_base_clk",
|
|
- "osc1", "cb_intosc_hs_div2_clk",
|
|
- "f2s_free_clk"};
|
|
+ "osc1", "cb-intosc-hs-div2-clk",
|
|
+ "f2s-free-clk"};
|
|
|
|
static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
|
|
static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
|
|
@@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"
|
|
static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
|
|
static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
|
|
|
|
-static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
|
|
+static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
|
|
static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
|
|
static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
|
|
|
|
static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
|
|
"peri_mpu_base_clk",
|
|
- "osc1", "cb_intosc_hs_div2_clk",
|
|
- "f2s_free_clk"};
|
|
+ "osc1", "cb-intosc-hs-div2-clk",
|
|
+ "f2s-free-clk"};
|
|
|
|
/* clocks in AO (always on) controller */
|
|
static const struct stratix10_pll_clock s10_pll_clks[] = {
|
|
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
|
|
index 2193e1495086e..19ff09f610e48 100644
|
|
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
|
|
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
|
|
@@ -120,6 +120,8 @@ static struct ccu_nm pll_video0_clk = {
|
|
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
|
|
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
|
|
.fixed_post_div = 4,
|
|
+ .min_rate = 288000000,
|
|
+ .max_rate = 2400000000UL,
|
|
.common = {
|
|
.reg = 0x040,
|
|
.features = CCU_FEATURE_FIXED_POSTDIV,
|
|
@@ -136,6 +138,8 @@ static struct ccu_nm pll_video1_clk = {
|
|
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
|
|
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
|
|
.fixed_post_div = 4,
|
|
+ .min_rate = 288000000,
|
|
+ .max_rate = 2400000000UL,
|
|
.common = {
|
|
.reg = 0x048,
|
|
.features = CCU_FEATURE_FIXED_POSTDIV,
|
|
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
|
|
index 13eb5b23c5e7f..c40d572a76029 100644
|
|
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
|
|
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
|
|
@@ -366,10 +366,10 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
|
|
static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
|
|
"pll-audio-2x", "pll-audio" };
|
|
static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
|
|
- 0x0b0, 16, 2, BIT(31), 0);
|
|
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
|
|
|
|
static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
|
|
- 0x0b4, 16, 2, BIT(31), 0);
|
|
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
|
|
|
|
/* TODO: the parent for most of the USB clocks is not known */
|
|
static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
|
|
@@ -446,7 +446,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
|
|
static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
|
|
0x140, BIT(31), CLK_SET_RATE_PARENT);
|
|
static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x",
|
|
- 0x140, BIT(30), 0);
|
|
+ 0x140, BIT(30), CLK_SET_RATE_PARENT);
|
|
static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
|
|
0x144, BIT(31), 0);
|
|
|
|
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
|
|
index 6fe3c14f7b2da..424d8635b0537 100644
|
|
--- a/drivers/clk/sunxi-ng/ccu_nm.c
|
|
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
|
|
@@ -19,6 +19,17 @@ struct _ccu_nm {
|
|
unsigned long m, min_m, max_m;
|
|
};
|
|
|
|
+static unsigned long ccu_nm_calc_rate(unsigned long parent,
|
|
+ unsigned long n, unsigned long m)
|
|
+{
|
|
+ u64 rate = parent;
|
|
+
|
|
+ rate *= n;
|
|
+ do_div(rate, m);
|
|
+
|
|
+ return rate;
|
|
+}
|
|
+
|
|
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
|
|
struct _ccu_nm *nm)
|
|
{
|
|
@@ -28,7 +39,8 @@ static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
|
|
|
|
for (_n = nm->min_n; _n <= nm->max_n; _n++) {
|
|
for (_m = nm->min_m; _m <= nm->max_m; _m++) {
|
|
- unsigned long tmp_rate = parent * _n / _m;
|
|
+ unsigned long tmp_rate = ccu_nm_calc_rate(parent,
|
|
+ _n, _m);
|
|
|
|
if (tmp_rate > rate)
|
|
continue;
|
|
@@ -100,7 +112,7 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
|
|
if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
|
|
rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
|
|
else
|
|
- rate = parent_rate * n / m;
|
|
+ rate = ccu_nm_calc_rate(parent_rate, n, m);
|
|
|
|
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
|
|
rate /= nm->fixed_post_div;
|
|
@@ -149,7 +161,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
|
|
_nm.max_m = nm->m.max ?: 1 << nm->m.width;
|
|
|
|
ccu_nm_find_best(*parent_rate, rate, &_nm);
|
|
- rate = *parent_rate * _nm.n / _nm.m;
|
|
+ rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
|
|
|
|
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
|
|
rate /= nm->fixed_post_div;
|
|
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
|
|
index f65cc0ff76abd..b0908ec62f73b 100644
|
|
--- a/drivers/clk/zynqmp/clkc.c
|
|
+++ b/drivers/clk/zynqmp/clkc.c
|
|
@@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np)
|
|
if (ret)
|
|
return ret;
|
|
|
|
- zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) *
|
|
- clock_max_idx, GFP_KERNEL);
|
|
+ zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx),
|
|
+ GFP_KERNEL);
|
|
if (!zynqmp_data)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
|
|
index 55c77e44bb2db..d9c8a779dd7d6 100644
|
|
--- a/drivers/clocksource/Kconfig
|
|
+++ b/drivers/clocksource/Kconfig
|
|
@@ -290,6 +290,7 @@ config CLKSRC_MPS2
|
|
|
|
config ARC_TIMERS
|
|
bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
|
|
+ depends on GENERIC_SCHED_CLOCK
|
|
select TIMER_OF
|
|
help
|
|
These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
|
|
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
|
|
index 20da9b1d7f7d0..b28970ca4a7a9 100644
|
|
--- a/drivers/clocksource/arc_timer.c
|
|
+++ b/drivers/clocksource/arc_timer.c
|
|
@@ -23,6 +23,7 @@
|
|
#include <linux/cpu.h>
|
|
#include <linux/of.h>
|
|
#include <linux/of_irq.h>
|
|
+#include <linux/sched_clock.h>
|
|
|
|
#include <soc/arc/timers.h>
|
|
#include <soc/arc/mcip.h>
|
|
@@ -88,6 +89,11 @@ static u64 arc_read_gfrc(struct clocksource *cs)
|
|
return (((u64)h) << 32) | l;
|
|
}
|
|
|
|
+static notrace u64 arc_gfrc_clock_read(void)
|
|
+{
|
|
+ return arc_read_gfrc(NULL);
|
|
+}
|
|
+
|
|
static struct clocksource arc_counter_gfrc = {
|
|
.name = "ARConnect GFRC",
|
|
.rating = 400,
|
|
@@ -111,6 +117,8 @@ static int __init arc_cs_setup_gfrc(struct device_node *node)
|
|
if (ret)
|
|
return ret;
|
|
|
|
+ sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
|
|
+
|
|
return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
|
|
}
|
|
TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
|
|
@@ -139,6 +147,11 @@ static u64 arc_read_rtc(struct clocksource *cs)
|
|
return (((u64)h) << 32) | l;
|
|
}
|
|
|
|
+static notrace u64 arc_rtc_clock_read(void)
|
|
+{
|
|
+ return arc_read_rtc(NULL);
|
|
+}
|
|
+
|
|
static struct clocksource arc_counter_rtc = {
|
|
.name = "ARCv2 RTC",
|
|
.rating = 350,
|
|
@@ -170,6 +183,8 @@ static int __init arc_cs_setup_rtc(struct device_node *node)
|
|
|
|
write_aux_reg(AUX_RTC_CTRL, 1);
|
|
|
|
+ sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);
|
|
+
|
|
return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
|
|
}
|
|
TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
|
|
@@ -185,6 +200,11 @@ static u64 arc_read_timer1(struct clocksource *cs)
|
|
return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
|
|
}
|
|
|
|
+static notrace u64 arc_timer1_clock_read(void)
|
|
+{
|
|
+ return arc_read_timer1(NULL);
|
|
+}
|
|
+
|
|
static struct clocksource arc_counter_timer1 = {
|
|
.name = "ARC Timer1",
|
|
.rating = 300,
|
|
@@ -209,6 +229,8 @@ static int __init arc_cs_setup_timer1(struct device_node *node)
|
|
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
|
|
write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
|
|
|
|
+ sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
|
|
+
|
|
return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
|
|
}
|
|
|
|
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
|
|
index 76e526f586209..19fb7de4b9288 100644
|
|
--- a/drivers/clocksource/timer-integrator-ap.c
|
|
+++ b/drivers/clocksource/timer-integrator-ap.c
|
|
@@ -181,8 +181,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
|
|
int irq;
|
|
struct clk *clk;
|
|
unsigned long rate;
|
|
- struct device_node *pri_node;
|
|
- struct device_node *sec_node;
|
|
+ struct device_node *alias_node;
|
|
|
|
base = of_io_request_and_map(node, 0, "integrator-timer");
|
|
if (IS_ERR(base))
|
|
@@ -204,7 +203,18 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
|
|
return err;
|
|
}
|
|
|
|
- pri_node = of_find_node_by_path(path);
|
|
+ alias_node = of_find_node_by_path(path);
|
|
+
|
|
+ /*
|
|
+ * The pointer is used as an identifier not as a pointer, we
|
|
+ * can drop the refcount on the of__node immediately after
|
|
+ * getting it.
|
|
+ */
|
|
+ of_node_put(alias_node);
|
|
+
|
|
+ if (node == alias_node)
|
|
+ /* The primary timer lacks IRQ, use as clocksource */
|
|
+ return integrator_clocksource_init(rate, base);
|
|
|
|
err = of_property_read_string(of_aliases,
|
|
"arm,timer-secondary", &path);
|
|
@@ -213,14 +223,11 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
|
|
return err;
|
|
}
|
|
|
|
+ alias_node = of_find_node_by_path(path);
|
|
|
|
- sec_node = of_find_node_by_path(path);
|
|
-
|
|
- if (node == pri_node)
|
|
- /* The primary timer lacks IRQ, use as clocksource */
|
|
- return integrator_clocksource_init(rate, base);
|
|
+ of_node_put(alias_node);
|
|
|
|
- if (node == sec_node) {
|
|
+ if (node == alias_node) {
|
|
/* The secondary timer will drive the clock event */
|
|
irq = irq_of_parse_and_map(node, 0);
|
|
return integrator_clockevent_init(rate, base, irq);
|
|
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
|
|
index 7aa3dcad21758..df34a12a388f3 100644
|
|
--- a/drivers/cpufreq/cpufreq.c
|
|
+++ b/drivers/cpufreq/cpufreq.c
|
|
@@ -1530,17 +1530,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
|
|
{
|
|
unsigned int ret_freq = 0;
|
|
|
|
- if (!cpufreq_driver->get)
|
|
+ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
|
|
return ret_freq;
|
|
|
|
ret_freq = cpufreq_driver->get(policy->cpu);
|
|
|
|
/*
|
|
- * Updating inactive policies is invalid, so avoid doing that. Also
|
|
- * if fast frequency switching is used with the given policy, the check
|
|
+ * If fast frequency switching is used with the given policy, the check
|
|
* against policy->cur is pointless, so skip it in that case too.
|
|
*/
|
|
- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
|
|
+ if (policy->fast_switch_enabled)
|
|
return ret_freq;
|
|
|
|
if (ret_freq && policy->cur &&
|
|
@@ -1569,10 +1568,7 @@ unsigned int cpufreq_get(unsigned int cpu)
|
|
|
|
if (policy) {
|
|
down_read(&policy->rwsem);
|
|
-
|
|
- if (!policy_is_inactive(policy))
|
|
- ret_freq = __cpufreq_get(policy);
|
|
-
|
|
+ ret_freq = __cpufreq_get(policy);
|
|
up_read(&policy->rwsem);
|
|
|
|
cpufreq_cpu_put(policy);
|
|
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
|
|
index 50b1551ba8942..9ed46d188cb5b 100644
|
|
--- a/drivers/cpufreq/scmi-cpufreq.c
|
|
+++ b/drivers/cpufreq/scmi-cpufreq.c
|
|
@@ -52,9 +52,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
|
|
int ret;
|
|
struct scmi_data *priv = policy->driver_data;
|
|
struct scmi_perf_ops *perf_ops = handle->perf_ops;
|
|
- u64 freq = policy->freq_table[index].frequency * 1000;
|
|
+ u64 freq = policy->freq_table[index].frequency;
|
|
|
|
- ret = perf_ops->freq_set(handle, priv->domain_id, freq, false);
|
|
+ ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
|
|
if (!ret)
|
|
arch_set_freq_scale(policy->related_cpus, freq,
|
|
policy->cpuinfo.max_freq);
|
|
@@ -176,7 +176,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
|
|
out_free_priv:
|
|
kfree(priv);
|
|
out_free_opp:
|
|
- dev_pm_opp_cpumask_remove_table(policy->cpus);
|
|
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
|
|
|
|
return ret;
|
|
}
|
|
@@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
|
|
|
|
cpufreq_cooling_unregister(priv->cdev);
|
|
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
|
|
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
|
|
kfree(priv);
|
|
- dev_pm_opp_cpumask_remove_table(policy->related_cpus);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
|
|
index 87a98ec77773a..99449738faa4d 100644
|
|
--- a/drivers/cpufreq/scpi-cpufreq.c
|
|
+++ b/drivers/cpufreq/scpi-cpufreq.c
|
|
@@ -177,7 +177,7 @@ out_free_cpufreq_table:
|
|
out_free_priv:
|
|
kfree(priv);
|
|
out_free_opp:
|
|
- dev_pm_opp_cpumask_remove_table(policy->cpus);
|
|
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
|
|
|
|
return ret;
|
|
}
|
|
@@ -190,7 +190,7 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
|
|
clk_put(priv->clk);
|
|
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
|
|
kfree(priv);
|
|
- dev_pm_opp_cpumask_remove_table(policy->related_cpus);
|
|
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
|
|
index db2ede565f1aa..b44476a1b7ad8 100644
|
|
--- a/drivers/cpuidle/cpuidle-big_little.c
|
|
+++ b/drivers/cpuidle/cpuidle-big_little.c
|
|
@@ -167,6 +167,7 @@ static int __init bl_idle_init(void)
|
|
{
|
|
int ret;
|
|
struct device_node *root = of_find_node_by_path("/");
|
|
+ const struct of_device_id *match_id;
|
|
|
|
if (!root)
|
|
return -ENODEV;
|
|
@@ -174,7 +175,11 @@ static int __init bl_idle_init(void)
|
|
/*
|
|
* Initialize the driver just for a compliant set of machines
|
|
*/
|
|
- if (!of_match_node(compatible_machine_match, root))
|
|
+ match_id = of_match_node(compatible_machine_match, root);
|
|
+
|
|
+ of_node_put(root);
|
|
+
|
|
+ if (!match_id)
|
|
return -ENODEV;
|
|
|
|
if (!mcpm_is_available())
|
|
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
|
|
index 9e56bc411061f..74c247972bb36 100644
|
|
--- a/drivers/cpuidle/cpuidle-pseries.c
|
|
+++ b/drivers/cpuidle/cpuidle-pseries.c
|
|
@@ -247,7 +247,13 @@ static int pseries_idle_probe(void)
|
|
return -ENODEV;
|
|
|
|
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
|
|
- if (lppaca_shared_proc(get_lppaca())) {
|
|
+ /*
|
|
+ * Use local_paca instead of get_lppaca() since
|
|
+ * preemption is not disabled, and it is not required in
|
|
+ * fact, since lppaca_ptr does not need to be the value
|
|
+ * associated to the current CPU, it can be from any CPU.
|
|
+ */
|
|
+ if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
|
|
cpuidle_state_table = shared_states;
|
|
max_idle_state = ARRAY_SIZE(shared_states);
|
|
} else {
|
|
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
|
|
index caa98a7fe3923..db330a0106b2b 100644
|
|
--- a/drivers/crypto/Kconfig
|
|
+++ b/drivers/crypto/Kconfig
|
|
@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
|
|
depends on ARCH_BCM_IPROC
|
|
depends on MAILBOX
|
|
default m
|
|
+ select CRYPTO_AUTHENC
|
|
select CRYPTO_DES
|
|
select CRYPTO_MD5
|
|
select CRYPTO_SHA1
|
|
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
|
|
index 2d1f1db9f8074..cd464637b0cb6 100644
|
|
--- a/drivers/crypto/bcm/cipher.c
|
|
+++ b/drivers/crypto/bcm/cipher.c
|
|
@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
|
|
struct spu_hw *spu = &iproc_priv.spu;
|
|
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
|
|
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
|
|
- struct rtattr *rta = (void *)key;
|
|
- struct crypto_authenc_key_param *param;
|
|
- const u8 *origkey = key;
|
|
- const unsigned int origkeylen = keylen;
|
|
-
|
|
- int ret = 0;
|
|
+ struct crypto_authenc_keys keys;
|
|
+ int ret;
|
|
|
|
flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
|
|
keylen);
|
|
flow_dump(" key: ", key, keylen);
|
|
|
|
- if (!RTA_OK(rta, keylen))
|
|
- goto badkey;
|
|
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
|
|
- goto badkey;
|
|
- if (RTA_PAYLOAD(rta) < sizeof(*param))
|
|
+ ret = crypto_authenc_extractkeys(&keys, key, keylen);
|
|
+ if (ret)
|
|
goto badkey;
|
|
|
|
- param = RTA_DATA(rta);
|
|
- ctx->enckeylen = be32_to_cpu(param->enckeylen);
|
|
-
|
|
- key += RTA_ALIGN(rta->rta_len);
|
|
- keylen -= RTA_ALIGN(rta->rta_len);
|
|
-
|
|
- if (keylen < ctx->enckeylen)
|
|
- goto badkey;
|
|
- if (ctx->enckeylen > MAX_KEY_SIZE)
|
|
+ if (keys.enckeylen > MAX_KEY_SIZE ||
|
|
+ keys.authkeylen > MAX_KEY_SIZE)
|
|
goto badkey;
|
|
|
|
- ctx->authkeylen = keylen - ctx->enckeylen;
|
|
-
|
|
- if (ctx->authkeylen > MAX_KEY_SIZE)
|
|
- goto badkey;
|
|
+ ctx->enckeylen = keys.enckeylen;
|
|
+ ctx->authkeylen = keys.authkeylen;
|
|
|
|
- memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
|
|
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
|
|
/* May end up padding auth key. So make sure it's zeroed. */
|
|
memset(ctx->authkey, 0, sizeof(ctx->authkey));
|
|
- memcpy(ctx->authkey, key, ctx->authkeylen);
|
|
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
|
|
|
|
switch (ctx->alg->cipher_info.alg) {
|
|
case CIPHER_ALG_DES:
|
|
@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
|
|
u32 tmp[DES_EXPKEY_WORDS];
|
|
u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
|
|
|
|
- if (des_ekey(tmp, key) == 0) {
|
|
+ if (des_ekey(tmp, keys.enckey) == 0) {
|
|
if (crypto_aead_get_flags(cipher) &
|
|
CRYPTO_TFM_REQ_WEAK_KEY) {
|
|
crypto_aead_set_flags(cipher, flags);
|
|
@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
|
|
break;
|
|
case CIPHER_ALG_3DES:
|
|
if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
|
|
- const u32 *K = (const u32 *)key;
|
|
+ const u32 *K = (const u32 *)keys.enckey;
|
|
u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
|
|
|
|
if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
|
|
@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
|
|
ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
|
|
ctx->fallback_cipher->base.crt_flags |=
|
|
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
|
|
- ret =
|
|
- crypto_aead_setkey(ctx->fallback_cipher, origkey,
|
|
- origkeylen);
|
|
+ ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
|
|
if (ret) {
|
|
flow_log(" fallback setkey() returned:%d\n", ret);
|
|
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
|
|
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
|
|
index 46924affa0bd6..212fd0b3b8dd7 100644
|
|
--- a/drivers/crypto/caam/caamhash.c
|
|
+++ b/drivers/crypto/caam/caamhash.c
|
|
@@ -1071,13 +1071,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
|
|
|
desc = edesc->hw_desc;
|
|
|
|
- state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
|
|
- if (dma_mapping_error(jrdev, state->buf_dma)) {
|
|
- dev_err(jrdev, "unable to map src\n");
|
|
- goto unmap;
|
|
- }
|
|
+ if (buflen) {
|
|
+ state->buf_dma = dma_map_single(jrdev, buf, buflen,
|
|
+ DMA_TO_DEVICE);
|
|
+ if (dma_mapping_error(jrdev, state->buf_dma)) {
|
|
+ dev_err(jrdev, "unable to map src\n");
|
|
+ goto unmap;
|
|
+ }
|
|
|
|
- append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
|
|
+ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
|
|
+ }
|
|
|
|
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
|
digestsize);
|
|
diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
|
|
index 2ae6124e5da67..5d54ebc20cb30 100644
|
|
--- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
|
|
+++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
|
|
@@ -73,7 +73,7 @@ static int flexi_aes_keylen(int keylen)
|
|
static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
|
|
{
|
|
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
|
|
- void *fctx;
|
|
+ struct crypto_ctx_hdr *chdr;
|
|
|
|
/* get the first device */
|
|
nctx->ndev = nitrox_get_first_device();
|
|
@@ -81,12 +81,14 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
|
|
return -ENODEV;
|
|
|
|
/* allocate nitrox crypto context */
|
|
- fctx = crypto_alloc_context(nctx->ndev);
|
|
- if (!fctx) {
|
|
+ chdr = crypto_alloc_context(nctx->ndev);
|
|
+ if (!chdr) {
|
|
nitrox_put_device(nctx->ndev);
|
|
return -ENOMEM;
|
|
}
|
|
- nctx->u.ctx_handle = (uintptr_t)fctx;
|
|
+ nctx->chdr = chdr;
|
|
+ nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
|
|
+ sizeof(struct ctx_hdr));
|
|
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
|
|
sizeof(struct nitrox_kcrypt_request));
|
|
return 0;
|
|
@@ -102,7 +104,7 @@ static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
|
|
|
|
memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
|
|
memset(&fctx->auth, 0, sizeof(struct auth_keys));
|
|
- crypto_free_context((void *)fctx);
|
|
+ crypto_free_context((void *)nctx->chdr);
|
|
}
|
|
nitrox_put_device(nctx->ndev);
|
|
|
|
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
|
|
index 2260efa423083..9138bae125212 100644
|
|
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
|
|
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
|
|
@@ -158,12 +158,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
|
|
void *crypto_alloc_context(struct nitrox_device *ndev)
|
|
{
|
|
struct ctx_hdr *ctx;
|
|
+ struct crypto_ctx_hdr *chdr;
|
|
void *vaddr;
|
|
dma_addr_t dma;
|
|
|
|
+ chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
|
|
+ if (!chdr)
|
|
+ return NULL;
|
|
+
|
|
vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
|
|
- if (!vaddr)
|
|
+ if (!vaddr) {
|
|
+ kfree(chdr);
|
|
return NULL;
|
|
+ }
|
|
|
|
/* fill meta data */
|
|
ctx = vaddr;
|
|
@@ -171,7 +178,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
|
|
ctx->dma = dma;
|
|
ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
|
|
|
|
- return ((u8 *)vaddr + sizeof(struct ctx_hdr));
|
|
+ chdr->pool = ndev->ctx_pool;
|
|
+ chdr->dma = dma;
|
|
+ chdr->vaddr = vaddr;
|
|
+
|
|
+ return chdr;
|
|
}
|
|
|
|
/**
|
|
@@ -180,13 +191,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
|
|
*/
|
|
void crypto_free_context(void *ctx)
|
|
{
|
|
- struct ctx_hdr *ctxp;
|
|
+ struct crypto_ctx_hdr *ctxp;
|
|
|
|
if (!ctx)
|
|
return;
|
|
|
|
- ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
|
|
- dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
|
|
+ ctxp = ctx;
|
|
+ dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
|
|
+ kfree(ctxp);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
|
|
index d091b6f5f5dd6..19f0a20e3bb3b 100644
|
|
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
|
|
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
|
|
@@ -181,12 +181,19 @@ struct flexi_crypto_context {
|
|
struct auth_keys auth;
|
|
};
|
|
|
|
+struct crypto_ctx_hdr {
|
|
+ struct dma_pool *pool;
|
|
+ dma_addr_t dma;
|
|
+ void *vaddr;
|
|
+};
|
|
+
|
|
struct nitrox_crypto_ctx {
|
|
struct nitrox_device *ndev;
|
|
union {
|
|
u64 ctx_handle;
|
|
struct flexi_crypto_context *fctx;
|
|
} u;
|
|
+ struct crypto_ctx_hdr *chdr;
|
|
};
|
|
|
|
struct nitrox_kcrypt_request {
|
|
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
|
|
index 01b82b82f8b87..5852d29ae2dac 100644
|
|
--- a/drivers/crypto/ccree/cc_aead.c
|
|
+++ b/drivers/crypto/ccree/cc_aead.c
|
|
@@ -540,13 +540,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
|
|
unsigned int keylen)
|
|
{
|
|
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
|
- struct rtattr *rta = (struct rtattr *)key;
|
|
struct cc_crypto_req cc_req = {};
|
|
- struct crypto_authenc_key_param *param;
|
|
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
|
|
- int rc = -EINVAL;
|
|
unsigned int seq_len = 0;
|
|
struct device *dev = drvdata_to_dev(ctx->drvdata);
|
|
+ const u8 *enckey, *authkey;
|
|
+ int rc;
|
|
|
|
dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
|
|
ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
|
|
@@ -554,35 +553,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
|
|
/* STAT_PHASE_0: Init and sanity checks */
|
|
|
|
if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
|
|
- if (!RTA_OK(rta, keylen))
|
|
- goto badkey;
|
|
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
|
|
- goto badkey;
|
|
- if (RTA_PAYLOAD(rta) < sizeof(*param))
|
|
- goto badkey;
|
|
- param = RTA_DATA(rta);
|
|
- ctx->enc_keylen = be32_to_cpu(param->enckeylen);
|
|
- key += RTA_ALIGN(rta->rta_len);
|
|
- keylen -= RTA_ALIGN(rta->rta_len);
|
|
- if (keylen < ctx->enc_keylen)
|
|
+ struct crypto_authenc_keys keys;
|
|
+
|
|
+ rc = crypto_authenc_extractkeys(&keys, key, keylen);
|
|
+ if (rc)
|
|
goto badkey;
|
|
- ctx->auth_keylen = keylen - ctx->enc_keylen;
|
|
+ enckey = keys.enckey;
|
|
+ authkey = keys.authkey;
|
|
+ ctx->enc_keylen = keys.enckeylen;
|
|
+ ctx->auth_keylen = keys.authkeylen;
|
|
|
|
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
|
|
/* the nonce is stored in bytes at end of key */
|
|
+ rc = -EINVAL;
|
|
if (ctx->enc_keylen <
|
|
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
|
|
goto badkey;
|
|
/* Copy nonce from last 4 bytes in CTR key to
|
|
* first 4 bytes in CTR IV
|
|
*/
|
|
- memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
|
|
- ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
|
|
- CTR_RFC3686_NONCE_SIZE);
|
|
+ memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
|
|
+ CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
|
|
/* Set CTR key size */
|
|
ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
|
|
}
|
|
} else { /* non-authenc - has just one key */
|
|
+ enckey = key;
|
|
+ authkey = NULL;
|
|
ctx->enc_keylen = keylen;
|
|
ctx->auth_keylen = 0;
|
|
}
|
|
@@ -594,13 +591,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
|
|
/* STAT_PHASE_1: Copy key to ctx */
|
|
|
|
/* Get key material */
|
|
- memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
|
|
+ memcpy(ctx->enckey, enckey, ctx->enc_keylen);
|
|
if (ctx->enc_keylen == 24)
|
|
memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
|
|
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
|
|
- memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
|
|
+ memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
|
|
+ ctx->auth_keylen);
|
|
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
|
|
- rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
|
|
+ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
|
|
if (rc)
|
|
goto badkey;
|
|
}
|
|
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
|
|
index 1ff229c2aeab1..186a2536fb8b9 100644
|
|
--- a/drivers/crypto/ccree/cc_driver.c
|
|
+++ b/drivers/crypto/ccree/cc_driver.c
|
|
@@ -364,7 +364,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
|
rc = cc_ivgen_init(new_drvdata);
|
|
if (rc) {
|
|
dev_err(dev, "cc_ivgen_init failed\n");
|
|
- goto post_power_mgr_err;
|
|
+ goto post_buf_mgr_err;
|
|
}
|
|
|
|
/* Allocate crypto algs */
|
|
@@ -387,6 +387,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
|
goto post_hash_err;
|
|
}
|
|
|
|
+ /* All set, we can allow autosuspend */
|
|
+ cc_pm_go(new_drvdata);
|
|
+
|
|
/* If we got here and FIPS mode is enabled
|
|
* it means all FIPS test passed, so let TEE
|
|
* know we're good.
|
|
@@ -401,8 +404,6 @@ post_cipher_err:
|
|
cc_cipher_free(new_drvdata);
|
|
post_ivgen_err:
|
|
cc_ivgen_fini(new_drvdata);
|
|
-post_power_mgr_err:
|
|
- cc_pm_fini(new_drvdata);
|
|
post_buf_mgr_err:
|
|
cc_buffer_mgr_fini(new_drvdata);
|
|
post_req_mgr_err:
|
|
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
|
|
index d990f472e89fb..6ff7e75ad90eb 100644
|
|
--- a/drivers/crypto/ccree/cc_pm.c
|
|
+++ b/drivers/crypto/ccree/cc_pm.c
|
|
@@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev)
|
|
|
|
int cc_pm_init(struct cc_drvdata *drvdata)
|
|
{
|
|
- int rc = 0;
|
|
struct device *dev = drvdata_to_dev(drvdata);
|
|
|
|
/* must be before the enabling to avoid resdundent suspending */
|
|
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
|
|
pm_runtime_use_autosuspend(dev);
|
|
/* activate the PM module */
|
|
- rc = pm_runtime_set_active(dev);
|
|
- if (rc)
|
|
- return rc;
|
|
- /* enable the PM module*/
|
|
- pm_runtime_enable(dev);
|
|
+ return pm_runtime_set_active(dev);
|
|
+}
|
|
|
|
- return rc;
|
|
+/* enable the PM module*/
|
|
+void cc_pm_go(struct cc_drvdata *drvdata)
|
|
+{
|
|
+ pm_runtime_enable(drvdata_to_dev(drvdata));
|
|
}
|
|
|
|
void cc_pm_fini(struct cc_drvdata *drvdata)
|
|
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
|
|
index 020a5403c58ba..f626243570209 100644
|
|
--- a/drivers/crypto/ccree/cc_pm.h
|
|
+++ b/drivers/crypto/ccree/cc_pm.h
|
|
@@ -16,6 +16,7 @@
|
|
extern const struct dev_pm_ops ccree_pm;
|
|
|
|
int cc_pm_init(struct cc_drvdata *drvdata);
|
|
+void cc_pm_go(struct cc_drvdata *drvdata);
|
|
void cc_pm_fini(struct cc_drvdata *drvdata);
|
|
int cc_pm_suspend(struct device *dev);
|
|
int cc_pm_resume(struct device *dev);
|
|
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
|
|
return 0;
|
|
}
|
|
|
|
+static void cc_pm_go(struct cc_drvdata *drvdata) {}
|
|
+
|
|
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
|
|
|
|
static inline int cc_pm_suspend(struct device *dev)
|
|
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
|
|
index 461b97e2f1fdc..1ff8738631a38 100644
|
|
--- a/drivers/crypto/chelsio/chcr_ipsec.c
|
|
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
|
|
@@ -303,7 +303,10 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
|
|
|
|
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
|
|
{
|
|
- int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
|
|
+ int hdrlen;
|
|
+
|
|
+ hdrlen = sizeof(struct fw_ulptx_wr) +
|
|
+ sizeof(struct chcr_ipsec_req) + kctx_len;
|
|
|
|
hdrlen += sizeof(struct cpl_tx_pkt);
|
|
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
|
|
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
|
|
index 6988012deca4c..f4f3e9a5851e9 100644
|
|
--- a/drivers/crypto/talitos.c
|
|
+++ b/drivers/crypto/talitos.c
|
|
@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
|
|
struct talitos_private *priv = dev_get_drvdata(dev);
|
|
bool is_sec1 = has_ftr_sec1(priv);
|
|
int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
|
|
- void *err;
|
|
|
|
if (cryptlen + authsize > max_len) {
|
|
dev_err(dev, "length exceeds h/w max limit\n");
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
- if (ivsize)
|
|
- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
|
|
-
|
|
if (!dst || dst == src) {
|
|
src_len = assoclen + cryptlen + authsize;
|
|
src_nents = sg_nents_for_len(src, src_len);
|
|
if (src_nents < 0) {
|
|
dev_err(dev, "Invalid number of src SG.\n");
|
|
- err = ERR_PTR(-EINVAL);
|
|
- goto error_sg;
|
|
+ return ERR_PTR(-EINVAL);
|
|
}
|
|
src_nents = (src_nents == 1) ? 0 : src_nents;
|
|
dst_nents = dst ? src_nents : 0;
|
|
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
|
|
src_nents = sg_nents_for_len(src, src_len);
|
|
if (src_nents < 0) {
|
|
dev_err(dev, "Invalid number of src SG.\n");
|
|
- err = ERR_PTR(-EINVAL);
|
|
- goto error_sg;
|
|
+ return ERR_PTR(-EINVAL);
|
|
}
|
|
src_nents = (src_nents == 1) ? 0 : src_nents;
|
|
dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
|
|
dst_nents = sg_nents_for_len(dst, dst_len);
|
|
if (dst_nents < 0) {
|
|
dev_err(dev, "Invalid number of dst SG.\n");
|
|
- err = ERR_PTR(-EINVAL);
|
|
- goto error_sg;
|
|
+ return ERR_PTR(-EINVAL);
|
|
}
|
|
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
|
|
}
|
|
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
|
|
/* if its a ahash, add space for a second desc next to the first one */
|
|
if (is_sec1 && !dst)
|
|
alloc_len += sizeof(struct talitos_desc);
|
|
+ alloc_len += ivsize;
|
|
|
|
edesc = kmalloc(alloc_len, GFP_DMA | flags);
|
|
- if (!edesc) {
|
|
- err = ERR_PTR(-ENOMEM);
|
|
- goto error_sg;
|
|
+ if (!edesc)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+ if (ivsize) {
|
|
+ iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
|
|
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
|
|
}
|
|
memset(&edesc->desc, 0, sizeof(edesc->desc));
|
|
|
|
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
|
|
DMA_BIDIRECTIONAL);
|
|
}
|
|
return edesc;
|
|
-error_sg:
|
|
- if (iv_dma)
|
|
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
|
|
- return err;
|
|
}
|
|
|
|
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
|
|
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
|
|
index d2663a4e1f5eb..a92a66b1ff46e 100644
|
|
--- a/drivers/crypto/ux500/cryp/cryp_core.c
|
|
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
|
|
@@ -556,7 +556,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
|
|
desc = dmaengine_prep_slave_sg(channel,
|
|
ctx->device->dma.sg_src,
|
|
ctx->device->dma.sg_src_len,
|
|
- direction, DMA_CTRL_ACK);
|
|
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK);
|
|
break;
|
|
|
|
case DMA_FROM_DEVICE:
|
|
@@ -580,7 +580,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
|
|
desc = dmaengine_prep_slave_sg(channel,
|
|
ctx->device->dma.sg_dst,
|
|
ctx->device->dma.sg_dst_len,
|
|
- direction,
|
|
+ DMA_DEV_TO_MEM,
|
|
DMA_CTRL_ACK |
|
|
DMA_PREP_INTERRUPT);
|
|
|
|
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
|
|
index 633321a8dd034..a0bb8a6eec3fd 100644
|
|
--- a/drivers/crypto/ux500/hash/hash_core.c
|
|
+++ b/drivers/crypto/ux500/hash/hash_core.c
|
|
@@ -166,7 +166,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
|
|
__func__);
|
|
desc = dmaengine_prep_slave_sg(channel,
|
|
ctx->device->dma.sg, ctx->device->dma.sg_len,
|
|
- direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
|
|
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
|
|
if (!desc) {
|
|
dev_err(ctx->device->dev,
|
|
"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
|
|
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
|
|
index 99e2aace8078c..2c1f459c0c63a 100644
|
|
--- a/drivers/dax/pmem.c
|
|
+++ b/drivers/dax/pmem.c
|
|
@@ -48,9 +48,8 @@ static void dax_pmem_percpu_exit(void *data)
|
|
percpu_ref_exit(ref);
|
|
}
|
|
|
|
-static void dax_pmem_percpu_kill(void *data)
|
|
+static void dax_pmem_percpu_kill(struct percpu_ref *ref)
|
|
{
|
|
- struct percpu_ref *ref = data;
|
|
struct dax_pmem *dax_pmem = to_dax_pmem(ref);
|
|
|
|
dev_dbg(dax_pmem->dev, "trace\n");
|
|
@@ -112,17 +111,10 @@ static int dax_pmem_probe(struct device *dev)
|
|
}
|
|
|
|
dax_pmem->pgmap.ref = &dax_pmem->ref;
|
|
+ dax_pmem->pgmap.kill = dax_pmem_percpu_kill;
|
|
addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
|
|
- if (IS_ERR(addr)) {
|
|
- devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
|
|
- percpu_ref_exit(&dax_pmem->ref);
|
|
+ if (IS_ERR(addr))
|
|
return PTR_ERR(addr);
|
|
- }
|
|
-
|
|
- rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
|
|
- &dax_pmem->ref);
|
|
- if (rc)
|
|
- return rc;
|
|
|
|
/* adjust the dax_region resource to the start of data */
|
|
memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
|
|
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
|
|
index cad55ab80d417..15795175160a5 100644
|
|
--- a/drivers/dma/bcm2835-dma.c
|
|
+++ b/drivers/dma/bcm2835-dma.c
|
|
@@ -415,38 +415,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
|
|
}
|
|
}
|
|
|
|
-static int bcm2835_dma_abort(void __iomem *chan_base)
|
|
+static int bcm2835_dma_abort(struct bcm2835_chan *c)
|
|
{
|
|
- unsigned long cs;
|
|
+ void __iomem *chan_base = c->chan_base;
|
|
long int timeout = 10000;
|
|
|
|
- cs = readl(chan_base + BCM2835_DMA_CS);
|
|
- if (!(cs & BCM2835_DMA_ACTIVE))
|
|
+ /*
|
|
+ * A zero control block address means the channel is idle.
|
|
+ * (The ACTIVE flag in the CS register is not a reliable indicator.)
|
|
+ */
|
|
+ if (!readl(chan_base + BCM2835_DMA_ADDR))
|
|
return 0;
|
|
|
|
/* Write 0 to the active bit - Pause the DMA */
|
|
writel(0, chan_base + BCM2835_DMA_CS);
|
|
|
|
/* Wait for any current AXI transfer to complete */
|
|
- while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
|
|
+ while ((readl(chan_base + BCM2835_DMA_CS) &
|
|
+ BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
|
|
cpu_relax();
|
|
- cs = readl(chan_base + BCM2835_DMA_CS);
|
|
- }
|
|
|
|
- /* We'll un-pause when we set of our next DMA */
|
|
+ /* Peripheral might be stuck and fail to signal AXI write responses */
|
|
if (!timeout)
|
|
- return -ETIMEDOUT;
|
|
-
|
|
- if (!(cs & BCM2835_DMA_ACTIVE))
|
|
- return 0;
|
|
-
|
|
- /* Terminate the control block chain */
|
|
- writel(0, chan_base + BCM2835_DMA_NEXTCB);
|
|
-
|
|
- /* Abort the whole DMA */
|
|
- writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
|
|
- chan_base + BCM2835_DMA_CS);
|
|
+ dev_err(c->vc.chan.device->dev,
|
|
+ "failed to complete outstanding writes\n");
|
|
|
|
+ writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
|
|
return 0;
|
|
}
|
|
|
|
@@ -485,8 +479,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
|
|
|
|
spin_lock_irqsave(&c->vc.lock, flags);
|
|
|
|
- /* Acknowledge interrupt */
|
|
- writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
|
|
+ /*
|
|
+ * Clear the INT flag to receive further interrupts. Keep the channel
|
|
+ * active in case the descriptor is cyclic or in case the client has
|
|
+ * already terminated the descriptor and issued a new one. (May happen
|
|
+ * if this IRQ handler is threaded.) If the channel is finished, it
|
|
+ * will remain idle despite the ACTIVE flag being set.
|
|
+ */
|
|
+ writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
|
|
+ c->chan_base + BCM2835_DMA_CS);
|
|
|
|
d = c->desc;
|
|
|
|
@@ -494,11 +495,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
|
|
if (d->cyclic) {
|
|
/* call the cyclic callback */
|
|
vchan_cyclic_callback(&d->vd);
|
|
-
|
|
- /* Keep the DMA engine running */
|
|
- writel(BCM2835_DMA_ACTIVE,
|
|
- c->chan_base + BCM2835_DMA_CS);
|
|
- } else {
|
|
+ } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
|
|
vchan_cookie_complete(&c->desc->vd);
|
|
bcm2835_dma_start_desc(c);
|
|
}
|
|
@@ -788,7 +785,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
|
|
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
|
|
struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
|
|
unsigned long flags;
|
|
- int timeout = 10000;
|
|
LIST_HEAD(head);
|
|
|
|
spin_lock_irqsave(&c->vc.lock, flags);
|
|
@@ -798,27 +794,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
|
|
list_del_init(&c->node);
|
|
spin_unlock(&d->lock);
|
|
|
|
- /*
|
|
- * Stop DMA activity: we assume the callback will not be called
|
|
- * after bcm_dma_abort() returns (even if it does, it will see
|
|
- * c->desc is NULL and exit.)
|
|
- */
|
|
+ /* stop DMA activity */
|
|
if (c->desc) {
|
|
vchan_terminate_vdesc(&c->desc->vd);
|
|
c->desc = NULL;
|
|
- bcm2835_dma_abort(c->chan_base);
|
|
-
|
|
- /* Wait for stopping */
|
|
- while (--timeout) {
|
|
- if (!(readl(c->chan_base + BCM2835_DMA_CS) &
|
|
- BCM2835_DMA_ACTIVE))
|
|
- break;
|
|
-
|
|
- cpu_relax();
|
|
- }
|
|
-
|
|
- if (!timeout)
|
|
- dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
|
|
+ bcm2835_dma_abort(c);
|
|
}
|
|
|
|
vchan_get_all_descriptors(&c->vc, &head);
|
|
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
|
|
index c2fff3f6c9ca5..4a09af3cd546a 100644
|
|
--- a/drivers/dma/imx-dma.c
|
|
+++ b/drivers/dma/imx-dma.c
|
|
@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data)
|
|
{
|
|
struct imxdma_channel *imxdmac = (void *)data;
|
|
struct imxdma_engine *imxdma = imxdmac->imxdma;
|
|
- struct imxdma_desc *desc;
|
|
+ struct imxdma_desc *desc, *next_desc;
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&imxdma->lock, flags);
|
|
@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data)
|
|
list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
|
|
|
|
if (!list_empty(&imxdmac->ld_queue)) {
|
|
- desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
|
|
- node);
|
|
+ next_desc = list_first_entry(&imxdmac->ld_queue,
|
|
+ struct imxdma_desc, node);
|
|
list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
|
|
- if (imxdma_xfer_desc(desc) < 0)
|
|
+ if (imxdma_xfer_desc(next_desc) < 0)
|
|
dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
|
|
__func__, imxdmac->channel);
|
|
}
|
|
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
|
|
index 38d4e4f07c66d..f7da9ab31b7cb 100644
|
|
--- a/drivers/dma/sprd-dma.c
|
|
+++ b/drivers/dma/sprd-dma.c
|
|
@@ -450,7 +450,7 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
|
|
struct sprd_dma_desc *sdesc;
|
|
enum sprd_dma_req_mode req_type;
|
|
enum sprd_dma_int_type int_type;
|
|
- bool trans_done = false;
|
|
+ bool trans_done = false, cyclic = false;
|
|
u32 i;
|
|
|
|
while (irq_status) {
|
|
@@ -465,13 +465,19 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
|
|
|
|
sdesc = schan->cur_desc;
|
|
|
|
- /* Check if the dma request descriptor is done. */
|
|
- trans_done = sprd_dma_check_trans_done(sdesc, int_type,
|
|
- req_type);
|
|
- if (trans_done == true) {
|
|
- vchan_cookie_complete(&sdesc->vd);
|
|
- schan->cur_desc = NULL;
|
|
- sprd_dma_start(schan);
|
|
+ /* cyclic mode schedule callback */
|
|
+ cyclic = schan->linklist.phy_addr ? true : false;
|
|
+ if (cyclic == true) {
|
|
+ vchan_cyclic_callback(&sdesc->vd);
|
|
+ } else {
|
|
+ /* Check if the dma request descriptor is done. */
|
|
+ trans_done = sprd_dma_check_trans_done(sdesc, int_type,
|
|
+ req_type);
|
|
+ if (trans_done == true) {
|
|
+ vchan_cookie_complete(&sdesc->vd);
|
|
+ schan->cur_desc = NULL;
|
|
+ sprd_dma_start(schan);
|
|
+ }
|
|
}
|
|
spin_unlock(&schan->vc.lock);
|
|
}
|
|
@@ -674,9 +680,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
|
|
|
|
/* link-list configuration */
|
|
if (schan->linklist.phy_addr) {
|
|
- if (sg_index == sglen - 1)
|
|
- hw->frg_len |= SPRD_DMA_LLIST_END;
|
|
-
|
|
hw->cfg |= SPRD_DMA_LINKLIST_EN;
|
|
|
|
/* link-list index */
|
|
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
|
|
index c74a88b650396..73de6a6179fcd 100644
|
|
--- a/drivers/dma/xilinx/zynqmp_dma.c
|
|
+++ b/drivers/dma/xilinx/zynqmp_dma.c
|
|
@@ -163,7 +163,7 @@ struct zynqmp_dma_desc_ll {
|
|
u32 ctrl;
|
|
u64 nxtdscraddr;
|
|
u64 rsvd;
|
|
-}; __aligned(64)
|
|
+};
|
|
|
|
/**
|
|
* struct zynqmp_dma_desc_sw - Per Transaction structure
|
|
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
|
|
index 4213cb0bb2a79..f8664bac9fa82 100644
|
|
--- a/drivers/edac/altera_edac.h
|
|
+++ b/drivers/edac/altera_edac.h
|
|
@@ -295,8 +295,8 @@ struct altr_sdram_mc_data {
|
|
#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
|
|
|
|
/* Sticky registers for Uncorrected Errors */
|
|
-#define S10_SYSMGR_UE_VAL_OFST 0x120
|
|
-#define S10_SYSMGR_UE_ADDR_OFST 0x124
|
|
+#define S10_SYSMGR_UE_VAL_OFST 0x220
|
|
+#define S10_SYSMGR_UE_ADDR_OFST 0x224
|
|
|
|
#define S10_DDR0_IRQ_MASK BIT(16)
|
|
|
|
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
|
|
index 472c88ae1c0f9..92f843eaf1e01 100644
|
|
--- a/drivers/firmware/arm_scmi/bus.c
|
|
+++ b/drivers/firmware/arm_scmi/bus.c
|
|
@@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver)
|
|
}
|
|
EXPORT_SYMBOL_GPL(scmi_driver_unregister);
|
|
|
|
+static void scmi_device_release(struct device *dev)
|
|
+{
|
|
+ kfree(to_scmi_dev(dev));
|
|
+}
|
|
+
|
|
struct scmi_device *
|
|
scmi_device_create(struct device_node *np, struct device *parent, int protocol)
|
|
{
|
|
@@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
|
|
scmi_dev->dev.parent = parent;
|
|
scmi_dev->dev.of_node = np;
|
|
scmi_dev->dev.bus = &scmi_bus_type;
|
|
+ scmi_dev->dev.release = scmi_device_release;
|
|
dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
|
|
|
|
retval = device_register(&scmi_dev->dev);
|
|
@@ -156,9 +162,8 @@ free_mem:
|
|
void scmi_device_destroy(struct scmi_device *scmi_dev)
|
|
{
|
|
scmi_handle_put(scmi_dev->handle);
|
|
- device_unregister(&scmi_dev->dev);
|
|
ida_simple_remove(&scmi_bus_id, scmi_dev->id);
|
|
- kfree(scmi_dev);
|
|
+ device_unregister(&scmi_dev->dev);
|
|
}
|
|
|
|
void scmi_set_handle(struct scmi_device *scmi_dev)
|
|
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
|
|
index 415849bab2339..bde3822cf539a 100644
|
|
--- a/drivers/firmware/efi/efi.c
|
|
+++ b/drivers/firmware/efi/efi.c
|
|
@@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
|
|
|
|
early_memunmap(tbl, sizeof(*tbl));
|
|
}
|
|
- return 0;
|
|
-}
|
|
|
|
-int __init efi_apply_persistent_mem_reservations(void)
|
|
-{
|
|
if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
|
|
unsigned long prsv = efi.mem_reserve;
|
|
|
|
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
|
|
index c51627660dbb9..d9845099635e0 100644
|
|
--- a/drivers/firmware/efi/libstub/Makefile
|
|
+++ b/drivers/firmware/efi/libstub/Makefile
|
|
@@ -9,7 +9,10 @@ cflags-$(CONFIG_X86_32) := -march=i386
|
|
cflags-$(CONFIG_X86_64) := -mcmodel=small
|
|
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
|
|
-fPIC -fno-strict-aliasing -mno-red-zone \
|
|
- -mno-mmx -mno-sse -fshort-wchar
|
|
+ -mno-mmx -mno-sse -fshort-wchar \
|
|
+ -Wno-pointer-sign \
|
|
+ $(call cc-disable-warning, address-of-packed-member) \
|
|
+ $(call cc-disable-warning, gnu)
|
|
|
|
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
|
|
# disable the stackleak plugin
|
|
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
|
|
index 3d36142cf8120..30ac0c975f8a1 100644
|
|
--- a/drivers/firmware/efi/libstub/arm-stub.c
|
|
+++ b/drivers/firmware/efi/libstub/arm-stub.c
|
|
@@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
|
|
efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
|
|
efi_status_t status;
|
|
|
|
- if (IS_ENABLED(CONFIG_ARM))
|
|
- return;
|
|
-
|
|
status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
|
|
(void **)&rsv);
|
|
if (status != EFI_SUCCESS) {
|
|
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
|
|
index 8903b9ccfc2b8..e2abfdb5cee6a 100644
|
|
--- a/drivers/firmware/efi/runtime-wrappers.c
|
|
+++ b/drivers/firmware/efi/runtime-wrappers.c
|
|
@@ -146,6 +146,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
|
|
*/
|
|
static DEFINE_SEMAPHORE(efi_runtime_lock);
|
|
|
|
+/*
|
|
+ * Expose the EFI runtime lock to the UV platform
|
|
+ */
|
|
+#ifdef CONFIG_X86_UV
|
|
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
|
|
+#endif
|
|
+
|
|
/*
|
|
* Calls the appropriate efi_runtime_service() with the appropriate
|
|
* arguments.
|
|
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
|
|
index 9336ffdf6e2c6..fceaafd67ec61 100644
|
|
--- a/drivers/firmware/efi/vars.c
|
|
+++ b/drivers/firmware/efi/vars.c
|
|
@@ -318,7 +318,12 @@ EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
|
|
static efi_status_t
|
|
check_var_size(u32 attributes, unsigned long size)
|
|
{
|
|
- const struct efivar_operations *fops = __efivars->ops;
|
|
+ const struct efivar_operations *fops;
|
|
+
|
|
+ if (!__efivars)
|
|
+ return EFI_UNSUPPORTED;
|
|
+
|
|
+ fops = __efivars->ops;
|
|
|
|
if (!fops->query_variable_store)
|
|
return EFI_UNSUPPORTED;
|
|
@@ -329,7 +334,12 @@ check_var_size(u32 attributes, unsigned long size)
|
|
static efi_status_t
|
|
check_var_size_nonblocking(u32 attributes, unsigned long size)
|
|
{
|
|
- const struct efivar_operations *fops = __efivars->ops;
|
|
+ const struct efivar_operations *fops;
|
|
+
|
|
+ if (!__efivars)
|
|
+ return EFI_UNSUPPORTED;
|
|
+
|
|
+ fops = __efivars->ops;
|
|
|
|
if (!fops->query_variable_store)
|
|
return EFI_UNSUPPORTED;
|
|
@@ -429,13 +439,18 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
|
|
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
|
|
void *data, bool duplicates, struct list_head *head)
|
|
{
|
|
- const struct efivar_operations *ops = __efivars->ops;
|
|
+ const struct efivar_operations *ops;
|
|
unsigned long variable_name_size = 1024;
|
|
efi_char16_t *variable_name;
|
|
efi_status_t status;
|
|
efi_guid_t vendor_guid;
|
|
int err = 0;
|
|
|
|
+ if (!__efivars)
|
|
+ return -EFAULT;
|
|
+
|
|
+ ops = __efivars->ops;
|
|
+
|
|
variable_name = kzalloc(variable_name_size, GFP_KERNEL);
|
|
if (!variable_name) {
|
|
printk(KERN_ERR "efivars: Memory allocation failed.\n");
|
|
@@ -583,12 +598,14 @@ static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
|
|
*/
|
|
int __efivar_entry_delete(struct efivar_entry *entry)
|
|
{
|
|
- const struct efivar_operations *ops = __efivars->ops;
|
|
efi_status_t status;
|
|
|
|
- status = ops->set_variable(entry->var.VariableName,
|
|
- &entry->var.VendorGuid,
|
|
- 0, 0, NULL);
|
|
+ if (!__efivars)
|
|
+ return -EINVAL;
|
|
+
|
|
+ status = __efivars->ops->set_variable(entry->var.VariableName,
|
|
+ &entry->var.VendorGuid,
|
|
+ 0, 0, NULL);
|
|
|
|
return efi_status_to_err(status);
|
|
}
|
|
@@ -607,12 +624,17 @@ EXPORT_SYMBOL_GPL(__efivar_entry_delete);
|
|
*/
|
|
int efivar_entry_delete(struct efivar_entry *entry)
|
|
{
|
|
- const struct efivar_operations *ops = __efivars->ops;
|
|
+ const struct efivar_operations *ops;
|
|
efi_status_t status;
|
|
|
|
if (down_interruptible(&efivars_lock))
|
|
return -EINTR;
|
|
|
|
+ if (!__efivars) {
|
|
+ up(&efivars_lock);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ ops = __efivars->ops;
|
|
status = ops->set_variable(entry->var.VariableName,
|
|
&entry->var.VendorGuid,
|
|
0, 0, NULL);
|
|
@@ -650,13 +672,19 @@ EXPORT_SYMBOL_GPL(efivar_entry_delete);
|
|
int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
|
|
unsigned long size, void *data, struct list_head *head)
|
|
{
|
|
- const struct efivar_operations *ops = __efivars->ops;
|
|
+ const struct efivar_operations *ops;
|
|
efi_status_t status;
|
|
efi_char16_t *name = entry->var.VariableName;
|
|
efi_guid_t vendor = entry->var.VendorGuid;
|
|
|
|
if (down_interruptible(&efivars_lock))
|
|
return -EINTR;
|
|
+
|
|
+ if (!__efivars) {
|
|
+ up(&efivars_lock);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ ops = __efivars->ops;
|
|
if (head && efivar_entry_find(name, vendor, head, false)) {
|
|
up(&efivars_lock);
|
|
return -EEXIST;
|
|
@@ -687,12 +715,17 @@ static int
|
|
efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
|
|
u32 attributes, unsigned long size, void *data)
|
|
{
|
|
- const struct efivar_operations *ops = __efivars->ops;
|
|
+ const struct efivar_operations *ops;
|
|
efi_status_t status;
|
|
|
|
if (down_trylock(&efivars_lock))
|
|
return -EBUSY;
|
|
|
|
+ if (!__efivars) {
|
|
+ up(&efivars_lock);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
status = check_var_size_nonblocking(attributes,
|
|
size + ucs2_strsize(name, 1024));
|
|
if (status != EFI_SUCCESS) {
|
|
@@ -700,6 +733,7 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
|
|
return -ENOSPC;
|
|
}
|
|
|
|
+ ops = __efivars->ops;
|
|
status = ops->set_variable_nonblocking(name, &vendor, attributes,
|
|
size, data);
|
|
|
|
@@ -727,9 +761,13 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
|
|
int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
|
|
bool block, unsigned long size, void *data)
|
|
{
|
|
- const struct efivar_operations *ops = __efivars->ops;
|
|
+ const struct efivar_operations *ops;
|
|
efi_status_t status;
|
|
|
|
+ if (!__efivars)
|
|
+ return -EINVAL;
|
|
+
|
|
+ ops = __efivars->ops;
|
|
if (!ops->query_variable_store)
|
|
return -ENOSYS;
|
|
|
|
@@ -829,13 +867,18 @@ EXPORT_SYMBOL_GPL(efivar_entry_find);
|
|
*/
|
|
int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
|
|
{
|
|
- const struct efivar_operations *ops = __efivars->ops;
|
|
+ const struct efivar_operations *ops;
|
|
efi_status_t status;
|
|
|
|
*size = 0;
|
|
|
|
if (down_interruptible(&efivars_lock))
|
|
return -EINTR;
|
|
+ if (!__efivars) {
|
|
+ up(&efivars_lock);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ ops = __efivars->ops;
|
|
status = ops->get_variable(entry->var.VariableName,
|
|
&entry->var.VendorGuid, NULL, size, NULL);
|
|
up(&efivars_lock);
|
|
@@ -861,12 +904,14 @@ EXPORT_SYMBOL_GPL(efivar_entry_size);
|
|
int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
|
|
unsigned long *size, void *data)
|
|
{
|
|
- const struct efivar_operations *ops = __efivars->ops;
|
|
efi_status_t status;
|
|
|
|
- status = ops->get_variable(entry->var.VariableName,
|
|
- &entry->var.VendorGuid,
|
|
- attributes, size, data);
|
|
+ if (!__efivars)
|
|
+ return -EINVAL;
|
|
+
|
|
+ status = __efivars->ops->get_variable(entry->var.VariableName,
|
|
+ &entry->var.VendorGuid,
|
|
+ attributes, size, data);
|
|
|
|
return efi_status_to_err(status);
|
|
}
|
|
@@ -882,14 +927,19 @@ EXPORT_SYMBOL_GPL(__efivar_entry_get);
|
|
int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
|
|
unsigned long *size, void *data)
|
|
{
|
|
- const struct efivar_operations *ops = __efivars->ops;
|
|
efi_status_t status;
|
|
|
|
if (down_interruptible(&efivars_lock))
|
|
return -EINTR;
|
|
- status = ops->get_variable(entry->var.VariableName,
|
|
- &entry->var.VendorGuid,
|
|
- attributes, size, data);
|
|
+
|
|
+ if (!__efivars) {
|
|
+ up(&efivars_lock);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ status = __efivars->ops->get_variable(entry->var.VariableName,
|
|
+ &entry->var.VendorGuid,
|
|
+ attributes, size, data);
|
|
up(&efivars_lock);
|
|
|
|
return efi_status_to_err(status);
|
|
@@ -921,7 +971,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_get);
|
|
int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
|
|
unsigned long *size, void *data, bool *set)
|
|
{
|
|
- const struct efivar_operations *ops = __efivars->ops;
|
|
+ const struct efivar_operations *ops;
|
|
efi_char16_t *name = entry->var.VariableName;
|
|
efi_guid_t *vendor = &entry->var.VendorGuid;
|
|
efi_status_t status;
|
|
@@ -940,6 +990,11 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
|
|
if (down_interruptible(&efivars_lock))
|
|
return -EINTR;
|
|
|
|
+ if (!__efivars) {
|
|
+ err = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
/*
|
|
* Ensure that the available space hasn't shrunk below the safe level
|
|
*/
|
|
@@ -956,6 +1011,8 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
|
|
}
|
|
}
|
|
|
|
+ ops = __efivars->ops;
|
|
+
|
|
status = ops->set_variable(name, vendor, attributes, *size, data);
|
|
if (status != EFI_SUCCESS) {
|
|
err = efi_status_to_err(status);
|
|
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
|
|
index 610a1558e0ed3..35c3aa5792e26 100644
|
|
--- a/drivers/fpga/altera-cvp.c
|
|
+++ b/drivers/fpga/altera-cvp.c
|
|
@@ -403,6 +403,7 @@ static int altera_cvp_probe(struct pci_dev *pdev,
|
|
struct altera_cvp_conf *conf;
|
|
struct fpga_manager *mgr;
|
|
u16 cmd, val;
|
|
+ u32 regval;
|
|
int ret;
|
|
|
|
/*
|
|
@@ -416,6 +417,14 @@ static int altera_cvp_probe(struct pci_dev *pdev,
|
|
return -ENODEV;
|
|
}
|
|
|
|
+ pci_read_config_dword(pdev, VSE_CVP_STATUS, ®val);
|
|
+ if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
|
|
+ dev_err(&pdev->dev,
|
|
+ "CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
|
|
+ regval);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
|
|
if (!conf)
|
|
return -ENOMEM;
|
|
@@ -466,18 +475,11 @@ static int altera_cvp_probe(struct pci_dev *pdev,
|
|
if (ret)
|
|
goto err_unmap;
|
|
|
|
- ret = driver_create_file(&altera_cvp_driver.driver,
|
|
- &driver_attr_chkcfg);
|
|
- if (ret) {
|
|
- dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
|
|
- fpga_mgr_unregister(mgr);
|
|
- goto err_unmap;
|
|
- }
|
|
-
|
|
return 0;
|
|
|
|
err_unmap:
|
|
- pci_iounmap(pdev, conf->map);
|
|
+ if (conf->map)
|
|
+ pci_iounmap(pdev, conf->map);
|
|
pci_release_region(pdev, CVP_BAR);
|
|
err_disable:
|
|
cmd &= ~PCI_COMMAND_MEMORY;
|
|
@@ -491,16 +493,39 @@ static void altera_cvp_remove(struct pci_dev *pdev)
|
|
struct altera_cvp_conf *conf = mgr->priv;
|
|
u16 cmd;
|
|
|
|
- driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
|
|
fpga_mgr_unregister(mgr);
|
|
- pci_iounmap(pdev, conf->map);
|
|
+ if (conf->map)
|
|
+ pci_iounmap(pdev, conf->map);
|
|
pci_release_region(pdev, CVP_BAR);
|
|
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
|
|
cmd &= ~PCI_COMMAND_MEMORY;
|
|
pci_write_config_word(pdev, PCI_COMMAND, cmd);
|
|
}
|
|
|
|
-module_pci_driver(altera_cvp_driver);
|
|
+static int __init altera_cvp_init(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = pci_register_driver(&altera_cvp_driver);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = driver_create_file(&altera_cvp_driver.driver,
|
|
+ &driver_attr_chkcfg);
|
|
+ if (ret)
|
|
+ pr_warn("Can't create sysfs chkcfg file\n");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void __exit altera_cvp_exit(void)
|
|
+{
|
|
+ driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
|
|
+ pci_unregister_driver(&altera_cvp_driver);
|
|
+}
|
|
+
|
|
+module_init(altera_cvp_init);
|
|
+module_exit(altera_cvp_exit);
|
|
|
|
MODULE_LICENSE("GPL v2");
|
|
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
|
|
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
|
|
index 6b11f13142484..7f9e0304b5109 100644
|
|
--- a/drivers/gpio/gpio-altera-a10sr.c
|
|
+++ b/drivers/gpio/gpio-altera-a10sr.c
|
|
@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
|
|
static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
|
|
unsigned int nr, int value)
|
|
{
|
|
- if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
|
|
+ if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
|
|
+ altr_a10sr_gpio_set(gc, nr, value);
|
|
return 0;
|
|
+ }
|
|
return -EINVAL;
|
|
}
|
|
|
|
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
|
|
index e0d6a0a7bc697..e41223c05f6e2 100644
|
|
--- a/drivers/gpio/gpio-eic-sprd.c
|
|
+++ b/drivers/gpio/gpio-eic-sprd.c
|
|
@@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)
|
|
|
|
static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
|
|
{
|
|
- return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
|
|
+ struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
|
|
+
|
|
+ switch (sprd_eic->type) {
|
|
+ case SPRD_EIC_DEBOUNCE:
|
|
+ return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
|
|
+ case SPRD_EIC_ASYNC:
|
|
+ return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
|
|
+ case SPRD_EIC_SYNC:
|
|
+ return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
|
|
+ default:
|
|
+ return -ENOTSUPP;
|
|
+ }
|
|
}
|
|
|
|
static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
|
|
@@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
|
|
irq_set_handler_locked(data, handle_edge_irq);
|
|
break;
|
|
case IRQ_TYPE_EDGE_BOTH:
|
|
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
|
|
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
|
|
irq_set_handler_locked(data, handle_edge_irq);
|
|
break;
|
|
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
|
|
index d72af6f6cdbda..74401e0adb29c 100644
|
|
--- a/drivers/gpio/gpio-mt7621.c
|
|
+++ b/drivers/gpio/gpio-mt7621.c
|
|
@@ -30,6 +30,7 @@
|
|
#define GPIO_REG_EDGE 0xA0
|
|
|
|
struct mtk_gc {
|
|
+ struct irq_chip irq_chip;
|
|
struct gpio_chip chip;
|
|
spinlock_t lock;
|
|
int bank;
|
|
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
|
|
return 0;
|
|
}
|
|
|
|
-static struct irq_chip mediatek_gpio_irq_chip = {
|
|
- .irq_unmask = mediatek_gpio_irq_unmask,
|
|
- .irq_mask = mediatek_gpio_irq_mask,
|
|
- .irq_mask_ack = mediatek_gpio_irq_mask,
|
|
- .irq_set_type = mediatek_gpio_irq_type,
|
|
-};
|
|
-
|
|
static int
|
|
mediatek_gpio_xlate(struct gpio_chip *chip,
|
|
const struct of_phandle_args *spec, u32 *flags)
|
|
@@ -244,6 +238,8 @@ mediatek_gpio_bank_probe(struct device *dev,
|
|
rg->chip.of_xlate = mediatek_gpio_xlate;
|
|
rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
|
|
dev_name(dev), bank);
|
|
+ if (!rg->chip.label)
|
|
+ return -ENOMEM;
|
|
|
|
ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
|
|
if (ret < 0) {
|
|
@@ -252,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
|
|
return ret;
|
|
}
|
|
|
|
+ rg->irq_chip.name = dev_name(dev);
|
|
+ rg->irq_chip.parent_device = dev;
|
|
+ rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
|
|
+ rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
|
|
+ rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
|
|
+ rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
|
|
+
|
|
if (mtk->gpio_irq) {
|
|
/*
|
|
* Manually request the irq here instead of passing
|
|
@@ -268,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
|
|
return ret;
|
|
}
|
|
|
|
- ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
|
|
+ ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
|
|
0, handle_simple_irq, IRQ_TYPE_NONE);
|
|
if (ret) {
|
|
dev_err(dev, "failed to add gpiochip_irqchip\n");
|
|
return ret;
|
|
}
|
|
|
|
- gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
|
|
+ gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
|
|
mtk->gpio_irq, NULL);
|
|
}
|
|
|
|
@@ -295,6 +298,7 @@ mediatek_gpio_probe(struct platform_device *pdev)
|
|
struct device_node *np = dev->of_node;
|
|
struct mtk *mtk;
|
|
int i;
|
|
+ int ret;
|
|
|
|
mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
|
|
if (!mtk)
|
|
@@ -307,10 +311,12 @@ mediatek_gpio_probe(struct platform_device *pdev)
|
|
mtk->gpio_irq = irq_of_parse_and_map(np, 0);
|
|
mtk->dev = dev;
|
|
platform_set_drvdata(pdev, mtk);
|
|
- mediatek_gpio_irq_chip.name = dev_name(dev);
|
|
|
|
- for (i = 0; i < MTK_BANK_CNT; i++)
|
|
- mediatek_gpio_bank_probe(dev, np, i);
|
|
+ for (i = 0; i < MTK_BANK_CNT; i++) {
|
|
+ ret = mediatek_gpio_bank_probe(dev, np, i);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
|
|
index 995cf0b9e0b1b..2d1dfa1e07456 100644
|
|
--- a/drivers/gpio/gpio-mxc.c
|
|
+++ b/drivers/gpio/gpio-mxc.c
|
|
@@ -17,6 +17,7 @@
|
|
#include <linux/irqchip/chained_irq.h>
|
|
#include <linux/platform_device.h>
|
|
#include <linux/slab.h>
|
|
+#include <linux/syscore_ops.h>
|
|
#include <linux/gpio/driver.h>
|
|
#include <linux/of.h>
|
|
#include <linux/of_device.h>
|
|
@@ -550,33 +551,38 @@ static void mxc_gpio_restore_regs(struct mxc_gpio_port *port)
|
|
writel(port->gpio_saved_reg.dr, port->base + GPIO_DR);
|
|
}
|
|
|
|
-static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
|
|
+static int mxc_gpio_syscore_suspend(void)
|
|
{
|
|
- struct platform_device *pdev = to_platform_device(dev);
|
|
- struct mxc_gpio_port *port = platform_get_drvdata(pdev);
|
|
+ struct mxc_gpio_port *port;
|
|
|
|
- mxc_gpio_save_regs(port);
|
|
- clk_disable_unprepare(port->clk);
|
|
+ /* walk through all ports */
|
|
+ list_for_each_entry(port, &mxc_gpio_ports, node) {
|
|
+ mxc_gpio_save_regs(port);
|
|
+ clk_disable_unprepare(port->clk);
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
|
|
-static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
|
|
+static void mxc_gpio_syscore_resume(void)
|
|
{
|
|
- struct platform_device *pdev = to_platform_device(dev);
|
|
- struct mxc_gpio_port *port = platform_get_drvdata(pdev);
|
|
+ struct mxc_gpio_port *port;
|
|
int ret;
|
|
|
|
- ret = clk_prepare_enable(port->clk);
|
|
- if (ret)
|
|
- return ret;
|
|
- mxc_gpio_restore_regs(port);
|
|
-
|
|
- return 0;
|
|
+ /* walk through all ports */
|
|
+ list_for_each_entry(port, &mxc_gpio_ports, node) {
|
|
+ ret = clk_prepare_enable(port->clk);
|
|
+ if (ret) {
|
|
+ pr_err("mxc: failed to enable gpio clock %d\n", ret);
|
|
+ return;
|
|
+ }
|
|
+ mxc_gpio_restore_regs(port);
|
|
+ }
|
|
}
|
|
|
|
-static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
|
|
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
|
|
+static struct syscore_ops mxc_gpio_syscore_ops = {
|
|
+ .suspend = mxc_gpio_syscore_suspend,
|
|
+ .resume = mxc_gpio_syscore_resume,
|
|
};
|
|
|
|
static struct platform_driver mxc_gpio_driver = {
|
|
@@ -584,7 +590,6 @@ static struct platform_driver mxc_gpio_driver = {
|
|
.name = "gpio-mxc",
|
|
.of_match_table = mxc_gpio_dt_ids,
|
|
.suppress_bind_attrs = true,
|
|
- .pm = &mxc_gpio_dev_pm_ops,
|
|
},
|
|
.probe = mxc_gpio_probe,
|
|
.id_table = mxc_gpio_devtype,
|
|
@@ -592,6 +597,8 @@ static struct platform_driver mxc_gpio_driver = {
|
|
|
|
static int __init gpio_mxc_init(void)
|
|
{
|
|
+ register_syscore_ops(&mxc_gpio_syscore_ops);
|
|
+
|
|
return platform_driver_register(&mxc_gpio_driver);
|
|
}
|
|
subsys_initcall(gpio_mxc_init);
|
|
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
|
|
index adf72dda25a2b..68a35b65925ac 100644
|
|
--- a/drivers/gpio/gpio-pcf857x.c
|
|
+++ b/drivers/gpio/gpio-pcf857x.c
|
|
@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
|
|
*/
|
|
struct pcf857x {
|
|
struct gpio_chip chip;
|
|
+ struct irq_chip irqchip;
|
|
struct i2c_client *client;
|
|
struct mutex lock; /* protect 'out' */
|
|
unsigned out; /* software latch */
|
|
@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
|
|
mutex_unlock(&gpio->lock);
|
|
}
|
|
|
|
-static struct irq_chip pcf857x_irq_chip = {
|
|
- .name = "pcf857x",
|
|
- .irq_enable = pcf857x_irq_enable,
|
|
- .irq_disable = pcf857x_irq_disable,
|
|
- .irq_ack = noop,
|
|
- .irq_mask = noop,
|
|
- .irq_unmask = noop,
|
|
- .irq_set_wake = pcf857x_irq_set_wake,
|
|
- .irq_bus_lock = pcf857x_irq_bus_lock,
|
|
- .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
|
|
-};
|
|
-
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
static int pcf857x_probe(struct i2c_client *client,
|
|
@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
|
|
|
|
/* Enable irqchip if we have an interrupt */
|
|
if (client->irq) {
|
|
+ gpio->irqchip.name = "pcf857x",
|
|
+ gpio->irqchip.irq_enable = pcf857x_irq_enable,
|
|
+ gpio->irqchip.irq_disable = pcf857x_irq_disable,
|
|
+ gpio->irqchip.irq_ack = noop,
|
|
+ gpio->irqchip.irq_mask = noop,
|
|
+ gpio->irqchip.irq_unmask = noop,
|
|
+ gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
|
|
+ gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
|
|
+ gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
|
|
status = gpiochip_irqchip_add_nested(&gpio->chip,
|
|
- &pcf857x_irq_chip,
|
|
+ &gpio->irqchip,
|
|
0, handle_level_irq,
|
|
IRQ_TYPE_NONE);
|
|
if (status) {
|
|
@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
|
|
if (status)
|
|
goto fail;
|
|
|
|
- gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
|
|
+ gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
|
|
client->irq);
|
|
gpio->irq_parent = client->irq;
|
|
}
|
|
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
|
|
index 2afd9de84a0d0..dc42571e6fdc8 100644
|
|
--- a/drivers/gpio/gpio-pl061.c
|
|
+++ b/drivers/gpio/gpio-pl061.c
|
|
@@ -54,6 +54,7 @@ struct pl061 {
|
|
|
|
void __iomem *base;
|
|
struct gpio_chip gc;
|
|
+ struct irq_chip irq_chip;
|
|
int parent_irq;
|
|
|
|
#ifdef CONFIG_PM
|
|
@@ -281,15 +282,6 @@ static int pl061_irq_set_wake(struct irq_data *d, unsigned int state)
|
|
return irq_set_irq_wake(pl061->parent_irq, state);
|
|
}
|
|
|
|
-static struct irq_chip pl061_irqchip = {
|
|
- .name = "pl061",
|
|
- .irq_ack = pl061_irq_ack,
|
|
- .irq_mask = pl061_irq_mask,
|
|
- .irq_unmask = pl061_irq_unmask,
|
|
- .irq_set_type = pl061_irq_type,
|
|
- .irq_set_wake = pl061_irq_set_wake,
|
|
-};
|
|
-
|
|
static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
|
|
{
|
|
struct device *dev = &adev->dev;
|
|
@@ -328,6 +320,13 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
|
|
/*
|
|
* irq_chip support
|
|
*/
|
|
+ pl061->irq_chip.name = dev_name(dev);
|
|
+ pl061->irq_chip.irq_ack = pl061_irq_ack;
|
|
+ pl061->irq_chip.irq_mask = pl061_irq_mask;
|
|
+ pl061->irq_chip.irq_unmask = pl061_irq_unmask;
|
|
+ pl061->irq_chip.irq_set_type = pl061_irq_type;
|
|
+ pl061->irq_chip.irq_set_wake = pl061_irq_set_wake;
|
|
+
|
|
writeb(0, pl061->base + GPIOIE); /* disable irqs */
|
|
irq = adev->irq[0];
|
|
if (irq < 0) {
|
|
@@ -336,14 +335,14 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
|
|
}
|
|
pl061->parent_irq = irq;
|
|
|
|
- ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip,
|
|
+ ret = gpiochip_irqchip_add(&pl061->gc, &pl061->irq_chip,
|
|
0, handle_bad_irq,
|
|
IRQ_TYPE_NONE);
|
|
if (ret) {
|
|
dev_info(&adev->dev, "could not add irqchip\n");
|
|
return ret;
|
|
}
|
|
- gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip,
|
|
+ gpiochip_set_chained_irqchip(&pl061->gc, &pl061->irq_chip,
|
|
irq, pl061_irq_handler);
|
|
|
|
amba_set_drvdata(adev, pl061);
|
|
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
|
|
index e9600b556f397..bcc6be4a5cb2e 100644
|
|
--- a/drivers/gpio/gpio-pxa.c
|
|
+++ b/drivers/gpio/gpio-pxa.c
|
|
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
|
|
{
|
|
switch (gpio_type) {
|
|
case PXA3XX_GPIO:
|
|
+ case MMP2_GPIO:
|
|
return false;
|
|
|
|
default:
|
|
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
|
|
index a2cbb474901c2..bd44be115cdd7 100644
|
|
--- a/drivers/gpio/gpiolib.c
|
|
+++ b/drivers/gpio/gpiolib.c
|
|
@@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
|
|
/* Do not leak kernel stack to userspace */
|
|
memset(&ge, 0, sizeof(ge));
|
|
|
|
- ge.timestamp = le->timestamp;
|
|
+ /*
|
|
+ * We may be running from a nested threaded interrupt in which case
|
|
+ * we didn't get the timestamp from lineevent_irq_handler().
|
|
+ */
|
|
+ if (!le->timestamp)
|
|
+ ge.timestamp = ktime_get_real_ns();
|
|
+ else
|
|
+ ge.timestamp = le->timestamp;
|
|
|
|
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
|
|
&& le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
|
|
@@ -2299,6 +2306,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
|
|
unsigned long flags;
|
|
unsigned offset;
|
|
|
|
+ if (label) {
|
|
+ label = kstrdup_const(label, GFP_KERNEL);
|
|
+ if (!label)
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
spin_lock_irqsave(&gpio_lock, flags);
|
|
|
|
/* NOTE: gpio_request() can be called in early boot,
|
|
@@ -2309,6 +2322,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
|
|
desc_set_label(desc, label ? : "?");
|
|
status = 0;
|
|
} else {
|
|
+ kfree_const(label);
|
|
status = -EBUSY;
|
|
goto done;
|
|
}
|
|
@@ -2325,6 +2339,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
|
|
|
|
if (status < 0) {
|
|
desc_set_label(desc, NULL);
|
|
+ kfree_const(label);
|
|
clear_bit(FLAG_REQUESTED, &desc->flags);
|
|
goto done;
|
|
}
|
|
@@ -2420,6 +2435,7 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
|
|
chip->free(chip, gpio_chip_hwgpio(desc));
|
|
spin_lock_irqsave(&gpio_lock, flags);
|
|
}
|
|
+ kfree_const(desc->label);
|
|
desc_set_label(desc, NULL);
|
|
clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
|
|
clear_bit(FLAG_REQUESTED, &desc->flags);
|
|
@@ -3375,11 +3391,19 @@ EXPORT_SYMBOL_GPL(gpiod_cansleep);
|
|
* @desc: gpio to set the consumer name on
|
|
* @name: the new consumer name
|
|
*/
|
|
-void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
|
|
+int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
|
|
{
|
|
- VALIDATE_DESC_VOID(desc);
|
|
- /* Just overwrite whatever the previous name was */
|
|
- desc->label = name;
|
|
+ VALIDATE_DESC(desc);
|
|
+ if (name) {
|
|
+ name = kstrdup_const(name, GFP_KERNEL);
|
|
+ if (!name)
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ kfree_const(desc->label);
|
|
+ desc_set_label(desc, name);
|
|
+
|
|
+ return 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
|
|
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
|
|
index a028661d9e201..92b11de195813 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
|
|
@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
|
|
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
|
|
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
|
|
{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
|
|
+ { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
|
|
{ 0, 0, 0, 0, 0 },
|
|
};
|
|
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
|
index 30bc345d6fdf0..8547fdaf82733 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
|
@@ -1684,8 +1684,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
|
|
amdgpu_xgmi_add_device(adev);
|
|
amdgpu_amdkfd_device_init(adev);
|
|
|
|
- if (amdgpu_sriov_vf(adev))
|
|
+ if (amdgpu_sriov_vf(adev)) {
|
|
+ amdgpu_virt_init_data_exchange(adev);
|
|
amdgpu_virt_release_full_gpu(adev, true);
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
@@ -2597,9 +2599,6 @@ fence_driver_init:
|
|
goto failed;
|
|
}
|
|
|
|
- if (amdgpu_sriov_vf(adev))
|
|
- amdgpu_virt_init_data_exchange(adev);
|
|
-
|
|
amdgpu_fbdev_init(adev);
|
|
|
|
r = amdgpu_pm_sysfs_init(adev);
|
|
@@ -3271,6 +3270,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
|
|
r = amdgpu_ib_ring_tests(adev);
|
|
|
|
error:
|
|
+ amdgpu_virt_init_data_exchange(adev);
|
|
amdgpu_virt_release_full_gpu(adev, true);
|
|
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
|
|
atomic_inc(&adev->vram_lost_counter);
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
|
|
index 74b611e8a1b10..c79517dc8804c 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
|
|
@@ -864,6 +864,7 @@ static const struct pci_device_id pciidlist[] = {
|
|
/* VEGAM */
|
|
{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
|
|
{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
|
|
+ {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
|
|
/* Vega 10 */
|
|
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
|
|
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
|
|
index 8f3d44e5e7878..722b1421d8f39 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
|
|
@@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
|
|
}
|
|
|
|
if (amdgpu_device_is_px(dev)) {
|
|
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
|
|
pm_runtime_use_autosuspend(dev->dev);
|
|
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
|
|
pm_runtime_set_active(dev->dev);
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
|
|
index 0877ff9a95944..62df4bd0a0fc2 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
|
|
@@ -637,12 +637,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
|
|
struct ttm_bo_global *glob = adev->mman.bdev.glob;
|
|
struct amdgpu_vm_bo_base *bo_base;
|
|
|
|
+#if 0
|
|
if (vm->bulk_moveable) {
|
|
spin_lock(&glob->lru_lock);
|
|
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
|
|
spin_unlock(&glob->lru_lock);
|
|
return;
|
|
}
|
|
+#endif
|
|
|
|
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
|
|
|
|
@@ -850,9 +852,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
|
bp->size = amdgpu_vm_bo_size(adev, level);
|
|
bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
|
|
bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
|
|
- if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
|
|
- adev->flags & AMD_IS_APU)
|
|
- bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
|
|
bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
|
|
bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
|
|
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
|
|
index 21363b2b2ee57..88ed064b35859 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
|
|
@@ -112,7 +112,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
|
|
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
|
|
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
|
|
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
|
|
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
|
|
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
|
|
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
|
|
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
|
|
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
|
|
};
|
|
|
|
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
|
|
@@ -134,10 +137,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
|
|
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
|
|
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
|
|
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
|
|
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
|
|
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
|
|
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
|
|
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
|
|
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
|
|
};
|
|
|
|
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
|
|
index 8cbb4655896a3..b11a1c17a7f27 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
|
|
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
|
|
return r;
|
|
}
|
|
/* Retrieve checksum from mailbox2 */
|
|
- if (req == IDH_REQ_GPU_INIT_ACCESS) {
|
|
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
|
|
adev->virt.fw_reserve.checksum_key =
|
|
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
|
|
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
|
|
index 7a8c9172d30a9..86d5dc5f88870 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
|
|
@@ -73,7 +73,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
|
|
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
|
|
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
|
|
@@ -91,6 +90,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
|
|
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
|
|
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
|
|
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
|
|
};
|
|
@@ -98,6 +98,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
|
|
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
|
|
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
|
|
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
|
|
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
|
|
};
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
|
|
index 1fc17bf39fed7..44ca418371879 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
|
|
@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
|
|
if (r)
|
|
return r;
|
|
|
|
- r = amdgpu_uvd_resume(adev);
|
|
- if (r)
|
|
- return r;
|
|
-
|
|
ring = &adev->uvd.inst->ring;
|
|
sprintf(ring->name, "uvd");
|
|
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
|
|
if (r)
|
|
return r;
|
|
|
|
+ r = amdgpu_uvd_resume(adev);
|
|
+ if (r)
|
|
+ return r;
|
|
+
|
|
r = amdgpu_uvd_entity_init(adev);
|
|
|
|
return r;
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
|
|
index fde6ad5ac9ab3..6bb05ae232b20 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
|
|
@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
|
|
if (r)
|
|
return r;
|
|
|
|
- r = amdgpu_uvd_resume(adev);
|
|
- if (r)
|
|
- return r;
|
|
-
|
|
ring = &adev->uvd.inst->ring;
|
|
sprintf(ring->name, "uvd");
|
|
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
|
|
if (r)
|
|
return r;
|
|
|
|
+ r = amdgpu_uvd_resume(adev);
|
|
+ if (r)
|
|
+ return r;
|
|
+
|
|
r = amdgpu_uvd_entity_init(adev);
|
|
|
|
return r;
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
|
|
index 7a5b40275e8e7..07fd96df4321a 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
|
|
@@ -416,16 +416,16 @@ static int uvd_v6_0_sw_init(void *handle)
|
|
DRM_INFO("UVD ENC is disabled\n");
|
|
}
|
|
|
|
- r = amdgpu_uvd_resume(adev);
|
|
- if (r)
|
|
- return r;
|
|
-
|
|
ring = &adev->uvd.inst->ring;
|
|
sprintf(ring->name, "uvd");
|
|
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
|
|
if (r)
|
|
return r;
|
|
|
|
+ r = amdgpu_uvd_resume(adev);
|
|
+ if (r)
|
|
+ return r;
|
|
+
|
|
if (uvd_v6_0_enc_support(adev)) {
|
|
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
|
|
ring = &adev->uvd.inst->ring_enc[i];
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
|
|
index 58b39afcfb864..1ef023a7b8ec8 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
|
|
@@ -447,10 +447,6 @@ static int uvd_v7_0_sw_init(void *handle)
|
|
DRM_INFO("PSP loading UVD firmware\n");
|
|
}
|
|
|
|
- r = amdgpu_uvd_resume(adev);
|
|
- if (r)
|
|
- return r;
|
|
-
|
|
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
|
|
if (adev->uvd.harvest_config & (1 << j))
|
|
continue;
|
|
@@ -482,6 +478,10 @@ static int uvd_v7_0_sw_init(void *handle)
|
|
}
|
|
}
|
|
|
|
+ r = amdgpu_uvd_resume(adev);
|
|
+ if (r)
|
|
+ return r;
|
|
+
|
|
r = amdgpu_uvd_entity_init(adev);
|
|
if (r)
|
|
return r;
|
|
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
|
|
index e4ded890b1cbc..6edaf11d69aac 100644
|
|
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
|
|
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
|
|
@@ -688,6 +688,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
|
|
{
|
|
uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
|
|
bool is_patched = false;
|
|
+ unsigned long flags;
|
|
|
|
if (!kfd->init_complete)
|
|
return;
|
|
@@ -697,7 +698,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
|
|
return;
|
|
}
|
|
|
|
- spin_lock(&kfd->interrupt_lock);
|
|
+ spin_lock_irqsave(&kfd->interrupt_lock, flags);
|
|
|
|
if (kfd->interrupts_active
|
|
&& interrupt_is_wanted(kfd, ih_ring_entry,
|
|
@@ -706,7 +707,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
|
|
is_patched ? patched_ihre : ih_ring_entry))
|
|
queue_work(kfd->ih_wq, &kfd->interrupt_work);
|
|
|
|
- spin_unlock(&kfd->interrupt_lock);
|
|
+ spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
|
|
}
|
|
|
|
int kgd2kfd_quiesce_mm(struct mm_struct *mm)
|
|
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
|
|
index e3843c5929edf..fffece5e42c56 100644
|
|
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
|
|
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
|
|
@@ -1074,8 +1074,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
|
|
* the GPU device is not already present in the topology device
|
|
* list then return NULL. This means a new topology device has to
|
|
* be created for this GPU.
|
|
- * TODO: Rather than assiging @gpu to first topology device withtout
|
|
- * gpu attached, it will better to have more stringent check.
|
|
*/
|
|
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
|
|
{
|
|
@@ -1083,12 +1081,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
|
|
struct kfd_topology_device *out_dev = NULL;
|
|
|
|
down_write(&topology_lock);
|
|
- list_for_each_entry(dev, &topology_device_list, list)
|
|
+ list_for_each_entry(dev, &topology_device_list, list) {
|
|
+ /* Discrete GPUs need their own topology device list
|
|
+ * entries. Don't assign them to CPU/APU nodes.
|
|
+ */
|
|
+ if (!gpu->device_info->needs_iommu_device &&
|
|
+ dev->node_props.cpu_cores_count)
|
|
+ continue;
|
|
+
|
|
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
|
|
dev->gpu = gpu;
|
|
out_dev = dev;
|
|
break;
|
|
}
|
|
+ }
|
|
up_write(&topology_lock);
|
|
return out_dev;
|
|
}
|
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
index 5a6edf65c9eae..d92120b62e89f 100644
|
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
@@ -645,22 +645,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
|
|
{
|
|
struct amdgpu_dm_connector *aconnector;
|
|
struct drm_connector *connector;
|
|
+ struct drm_dp_mst_topology_mgr *mgr;
|
|
+ int ret;
|
|
+ bool need_hotplug = false;
|
|
|
|
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
|
|
|
|
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
|
- aconnector = to_amdgpu_dm_connector(connector);
|
|
- if (aconnector->dc_link->type == dc_connection_mst_branch &&
|
|
- !aconnector->mst_port) {
|
|
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
|
|
+ head) {
|
|
+ aconnector = to_amdgpu_dm_connector(connector);
|
|
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
|
|
+ aconnector->mst_port)
|
|
+ continue;
|
|
+
|
|
+ mgr = &aconnector->mst_mgr;
|
|
|
|
- if (suspend)
|
|
- drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
|
|
- else
|
|
- drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
|
|
- }
|
|
+ if (suspend) {
|
|
+ drm_dp_mst_topology_mgr_suspend(mgr);
|
|
+ } else {
|
|
+ ret = drm_dp_mst_topology_mgr_resume(mgr);
|
|
+ if (ret < 0) {
|
|
+ drm_dp_mst_topology_mgr_set_mst(mgr, false);
|
|
+ need_hotplug = true;
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
drm_modeset_unlock(&dev->mode_config.connection_mutex);
|
|
+
|
|
+ if (need_hotplug)
|
|
+ drm_kms_helper_hotplug_event(dev);
|
|
}
|
|
|
|
static int dm_hw_init(void *handle)
|
|
@@ -690,12 +704,13 @@ static int dm_suspend(void *handle)
|
|
struct amdgpu_display_manager *dm = &adev->dm;
|
|
int ret = 0;
|
|
|
|
+ WARN_ON(adev->dm.cached_state);
|
|
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
|
|
+
|
|
s3_handle_mst(adev->ddev, true);
|
|
|
|
amdgpu_dm_irq_suspend(adev);
|
|
|
|
- WARN_ON(adev->dm.cached_state);
|
|
- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
|
|
|
|
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
|
|
|
|
@@ -816,7 +831,6 @@ static int dm_resume(void *handle)
|
|
struct drm_plane_state *new_plane_state;
|
|
struct dm_plane_state *dm_new_plane_state;
|
|
enum dc_connection_type new_connection_type = dc_connection_none;
|
|
- int ret;
|
|
int i;
|
|
|
|
/* power on hardware */
|
|
@@ -889,13 +903,13 @@ static int dm_resume(void *handle)
|
|
}
|
|
}
|
|
|
|
- ret = drm_atomic_helper_resume(ddev, dm->cached_state);
|
|
+ drm_atomic_helper_resume(ddev, dm->cached_state);
|
|
|
|
dm->cached_state = NULL;
|
|
|
|
amdgpu_dm_irq_resume_late(adev);
|
|
|
|
- return ret;
|
|
+ return 0;
|
|
}
|
|
|
|
static const struct amd_ip_funcs amdgpu_dm_funcs = {
|
|
@@ -5320,6 +5334,12 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
|
|
struct dc_stream_update stream_update;
|
|
enum surface_update_type update_type = UPDATE_TYPE_FAST;
|
|
|
|
+ if (!updates || !surface) {
|
|
+ DRM_ERROR("Plane or surface update failed to allocate");
|
|
+ /* Set type to FULL to avoid crashing in DC*/
|
|
+ update_type = UPDATE_TYPE_FULL;
|
|
+ goto ret;
|
|
+ }
|
|
|
|
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
|
new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
|
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
|
|
index 01fc5717b657f..f088ac5859780 100644
|
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
|
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
|
|
@@ -75,6 +75,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (!stream_state) {
|
|
+ DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
/* When enabling CRC, we should also disable dithering. */
|
|
if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
|
|
if (dc_stream_configure_crc(stream_state->ctx->dc,
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
|
|
index 5da2186b3615f..8b69913b6943a 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
|
|
@@ -208,6 +208,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
|
|
return true;
|
|
}
|
|
|
|
+ if (link->connector_signal == SIGNAL_TYPE_EDP)
|
|
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
|
|
+
|
|
/* todo: may need to lock gpio access */
|
|
hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
|
|
if (hpd_pin == NULL)
|
|
@@ -332,7 +335,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
|
|
{
|
|
enum gpio_result gpio_result;
|
|
uint32_t clock_pin = 0;
|
|
-
|
|
+ uint8_t retry = 0;
|
|
struct ddc *ddc;
|
|
|
|
enum connector_id connector_id =
|
|
@@ -361,11 +364,22 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
|
|
return present;
|
|
}
|
|
|
|
- /* Read GPIO: DP sink is present if both clock and data pins are zero */
|
|
- /* [anaumov] in DAL2, there was no check for GPIO failure */
|
|
-
|
|
- gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
|
|
- ASSERT(gpio_result == GPIO_RESULT_OK);
|
|
+ /*
|
|
+ * Read GPIO: DP sink is present if both clock and data pins are zero
|
|
+ *
|
|
+ * [W/A] plug-unplug DP cable, sometimes customer board has
|
|
+ * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI
|
|
+ * then monitor can't br light up. Add retry 3 times
|
|
+ * But in real passive dongle, it need additional 3ms to detect
|
|
+ */
|
|
+ do {
|
|
+ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
|
|
+ ASSERT(gpio_result == GPIO_RESULT_OK);
|
|
+ if (clock_pin)
|
|
+ udelay(1000);
|
|
+ else
|
|
+ break;
|
|
+ } while (retry++ < 3);
|
|
|
|
present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
|
|
|
|
@@ -2617,11 +2631,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
|
|
{
|
|
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
|
|
|
|
+ core_dc->hwss.blank_stream(pipe_ctx);
|
|
+
|
|
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
|
|
deallocate_mst_payload(pipe_ctx);
|
|
|
|
- core_dc->hwss.blank_stream(pipe_ctx);
|
|
-
|
|
core_dc->hwss.disable_stream(pipe_ctx, option);
|
|
|
|
disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
|
|
index d91df5ef0cb34..d33a5ebe990b4 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
|
|
@@ -2240,7 +2240,8 @@ static void get_active_converter_info(
|
|
translate_dpcd_max_bpc(
|
|
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
|
|
|
|
- link->dpcd_caps.dongle_caps.extendedCapValid = true;
|
|
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
|
|
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
|
|
}
|
|
|
|
break;
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
|
|
index b6fe29b9fb657..8bd8f34b979c5 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
|
|
@@ -2002,6 +2002,8 @@ enum dc_status resource_map_pool_resources(
|
|
}
|
|
*/
|
|
|
|
+ calculate_phy_pix_clks(stream);
|
|
+
|
|
/* acquire new resources */
|
|
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
|
|
index a6bcb90e8419a..e84275f15e7ad 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
|
|
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
|
|
|
|
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
|
|
|
|
- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
|
|
+ if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
|
|
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
|
|
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
|
|
/* un-mute audio */
|
|
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
|
|
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
|
|
pipe_ctx->stream_res.stream_enc, true);
|
|
if (pipe_ctx->stream_res.audio) {
|
|
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
|
|
+
|
|
if (option != KEEP_ACQUIRED_RESOURCE ||
|
|
!dc->debug.az_endpoint_mute_only) {
|
|
/*only disalbe az_endpoint if power down or free*/
|
|
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
|
|
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
|
|
pipe_ctx->stream_res.audio = NULL;
|
|
}
|
|
+ if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
|
|
+ /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
|
|
+ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
|
|
|
|
/* TODO: notify audio driver for if audio modes list changed
|
|
* add audio mode list change flag */
|
|
@@ -1268,10 +1273,19 @@ static void program_scaler(const struct dc *dc,
|
|
pipe_ctx->plane_res.scl_data.lb_params.depth,
|
|
&pipe_ctx->stream->bit_depth_params);
|
|
|
|
- if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
|
|
+ if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
|
|
+ /*
|
|
+ * The way 420 is packed, 2 channels carry Y component, 1 channel
|
|
+ * alternate between Cb and Cr, so both channels need the pixel
|
|
+ * value for Y
|
|
+ */
|
|
+ if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
|
|
+ color.color_r_cr = color.color_g_y;
|
|
+
|
|
pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
|
|
pipe_ctx->stream_res.tg,
|
|
&color);
|
|
+ }
|
|
|
|
pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
|
|
&pipe_ctx->plane_res.scl_data);
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
|
|
index dcb3c55302365..cd1ebe57ed594 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
|
|
@@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
|
|
if (src_y_offset >= (int)param->viewport.height)
|
|
cur_en = 0; /* not visible beyond bottom edge*/
|
|
|
|
- if (src_y_offset < 0)
|
|
+ if (src_y_offset + (int)height <= 0)
|
|
cur_en = 0; /* not visible beyond top edge*/
|
|
|
|
REG_UPDATE(CURSOR0_CONTROL,
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
|
|
index 74132a1f3046b..a34f0fdf7be2e 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
|
|
@@ -1134,7 +1134,7 @@ void hubp1_cursor_set_position(
|
|
if (src_y_offset >= (int)param->viewport.height)
|
|
cur_en = 0; /* not visible beyond bottom edge*/
|
|
|
|
- if (src_y_offset < 0) //+ (int)hubp->curs_attr.height
|
|
+ if (src_y_offset + (int)hubp->curs_attr.height <= 0)
|
|
cur_en = 0; /* not visible beyond top edge*/
|
|
|
|
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
index 193184affefbe..220ba828748de 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
@@ -1226,7 +1226,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
|
|
tf = plane_state->in_transfer_func;
|
|
|
|
if (plane_state->gamma_correction &&
|
|
- !plane_state->gamma_correction->is_identity
|
|
+ !dpp_base->ctx->dc->debug.always_use_regamma
|
|
+ && !plane_state->gamma_correction->is_identity
|
|
&& dce_use_lut(plane_state->format))
|
|
dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
|
|
|
|
@@ -2164,6 +2165,15 @@ static void dcn10_blank_pixel_data(
|
|
color_space = stream->output_color_space;
|
|
color_space_to_black_color(dc, color_space, &black_color);
|
|
|
|
+ /*
|
|
+ * The way 420 is packed, 2 channels carry Y component, 1 channel
|
|
+ * alternate between Cb and Cr, so both channels need the pixel
|
|
+ * value for Y
|
|
+ */
|
|
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
|
|
+ black_color.color_r_cr = black_color.color_g_y;
|
|
+
|
|
+
|
|
if (stream_res->tg->funcs->set_blank_color)
|
|
stream_res->tg->funcs->set_blank_color(
|
|
stream_res->tg,
|
|
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
|
|
index dd18cb710391a..0b945d0fd7322 100644
|
|
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
|
|
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
|
|
@@ -1005,6 +1005,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
|
|
break;
|
|
case amd_pp_dpp_clock:
|
|
pclk_vol_table = pinfo->vdd_dep_on_dppclk;
|
|
+ break;
|
|
default:
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
|
|
index 3b7fce5d7258e..b9e19b0eb905a 100644
|
|
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
|
|
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
|
|
@@ -2244,6 +2244,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
|
|
soft_min_level = mask ? (ffs(mask) - 1) : 0;
|
|
soft_max_level = mask ? (fls(mask) - 1) : 0;
|
|
|
|
+ if (soft_max_level >= data->dpm_table.gfx_table.count) {
|
|
+ pr_err("Clock level specified %d is over max allowed %d\n",
|
|
+ soft_max_level,
|
|
+ data->dpm_table.gfx_table.count - 1);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
data->dpm_table.gfx_table.dpm_state.soft_min_level =
|
|
data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
|
|
data->dpm_table.gfx_table.dpm_state.soft_max_level =
|
|
@@ -2264,6 +2271,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
|
|
soft_min_level = mask ? (ffs(mask) - 1) : 0;
|
|
soft_max_level = mask ? (fls(mask) - 1) : 0;
|
|
|
|
+ if (soft_max_level >= data->dpm_table.mem_table.count) {
|
|
+ pr_err("Clock level specified %d is over max allowed %d\n",
|
|
+ soft_max_level,
|
|
+ data->dpm_table.mem_table.count - 1);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
data->dpm_table.mem_table.dpm_state.soft_min_level =
|
|
data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
|
|
data->dpm_table.mem_table.dpm_state.soft_max_level =
|
|
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
|
|
index a1e0ac9ae2482..90c1215c6f5e0 100644
|
|
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
|
|
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
|
|
@@ -1529,8 +1529,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
|
|
efuse = efuse >> 24;
|
|
|
|
if (hwmgr->chip_id == CHIP_POLARIS10) {
|
|
- min = 1000;
|
|
- max = 2300;
|
|
+ if (hwmgr->is_kicker) {
|
|
+ min = 1200;
|
|
+ max = 2500;
|
|
+ } else {
|
|
+ min = 1000;
|
|
+ max = 2300;
|
|
+ }
|
|
+ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
|
|
+ if (hwmgr->is_kicker) {
|
|
+ min = 900;
|
|
+ max = 2100;
|
|
+ } else {
|
|
+ min = 1100;
|
|
+ max = 2100;
|
|
+ }
|
|
} else {
|
|
min = 1100;
|
|
max = 2100;
|
|
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
|
|
index 8e28e738cb52d..3915473587569 100644
|
|
--- a/drivers/gpu/drm/bridge/tc358767.c
|
|
+++ b/drivers/gpu/drm/bridge/tc358767.c
|
|
@@ -98,6 +98,8 @@
|
|
#define DP0_STARTVAL 0x064c
|
|
#define DP0_ACTIVEVAL 0x0650
|
|
#define DP0_SYNCVAL 0x0654
|
|
+#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
|
|
+#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
|
|
#define DP0_MISC 0x0658
|
|
#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
|
|
#define BPC_6 (0 << 5)
|
|
@@ -142,6 +144,8 @@
|
|
#define DP0_LTLOOPCTRL 0x06d8
|
|
#define DP0_SNKLTCTRL 0x06e4
|
|
|
|
+#define DP1_SRCCTRL 0x07a0
|
|
+
|
|
/* PHY */
|
|
#define DP_PHY_CTRL 0x0800
|
|
#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
|
|
@@ -150,6 +154,7 @@
|
|
#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
|
|
#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
|
|
#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
|
|
+#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
|
|
#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
|
|
#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
|
|
|
|
@@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
|
|
unsigned long rate;
|
|
u32 value;
|
|
int ret;
|
|
+ u32 dp_phy_ctrl;
|
|
|
|
rate = clk_get_rate(tc->refclk);
|
|
switch (rate) {
|
|
@@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
|
|
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
|
|
tc_write(SYS_PLLPARAM, value);
|
|
|
|
- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
|
|
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
|
|
+ if (tc->link.base.num_lanes == 2)
|
|
+ dp_phy_ctrl |= PHY_2LANE;
|
|
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
|
|
|
|
/*
|
|
* Initially PLLs are in bypass. Force PLL parameter update,
|
|
@@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
|
|
|
|
tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
|
|
|
|
- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
|
|
+ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
|
|
+ ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
|
|
+ ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
|
|
|
|
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
|
|
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
|
|
@@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
|
|
if (!tc->mode)
|
|
return -EINVAL;
|
|
|
|
- /* from excel file - DP0_SrcCtrl */
|
|
- tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
|
|
- DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
|
|
- DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
|
|
- /* from excel file - DP1_SrcCtrl */
|
|
- tc_write(0x07a0, 0x00003083);
|
|
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
|
|
+ /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
|
|
+ tc_write(DP1_SRCCTRL,
|
|
+ (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
|
|
+ ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
|
|
|
|
rate = clk_get_rate(tc->refclk);
|
|
switch (rate) {
|
|
@@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
|
|
}
|
|
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
|
|
tc_write(SYS_PLLPARAM, value);
|
|
+
|
|
/* Setup Main Link */
|
|
- dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
|
|
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
|
|
+ if (tc->link.base.num_lanes == 2)
|
|
+ dp_phy_ctrl |= PHY_2LANE;
|
|
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
|
|
msleep(100);
|
|
|
|
@@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
|
|
static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
|
|
struct drm_display_mode *mode)
|
|
{
|
|
+ struct tc_data *tc = connector_to_tc(connector);
|
|
+ u32 req, avail;
|
|
+ u32 bits_per_pixel = 24;
|
|
+
|
|
/* DPI interface clock limitation: upto 154 MHz */
|
|
if (mode->clock > 154000)
|
|
return MODE_CLOCK_HIGH;
|
|
|
|
+ req = mode->clock * bits_per_pixel / 8;
|
|
+ avail = tc->link.base.num_lanes * tc->link.base.rate;
|
|
+
|
|
+ if (req > avail)
|
|
+ return MODE_BAD;
|
|
+
|
|
return MODE_OK;
|
|
}
|
|
|
|
@@ -1195,6 +1218,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
|
|
|
|
drm_display_info_set_bus_formats(&tc->connector.display_info,
|
|
&bus_format, 1);
|
|
+ tc->connector.display_info.bus_flags =
|
|
+ DRM_BUS_FLAG_DE_HIGH |
|
|
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE |
|
|
+ DRM_BUS_FLAG_SYNC_NEGEDGE;
|
|
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
|
|
|
|
return 0;
|
|
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
|
|
index d8b526b7932c3..df86c2ebfc129 100644
|
|
--- a/drivers/gpu/drm/drm_atomic_helper.c
|
|
+++ b/drivers/gpu/drm/drm_atomic_helper.c
|
|
@@ -1445,6 +1445,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
|
|
DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
|
|
crtc->base.id, crtc->name);
|
|
}
|
|
+
|
|
+ if (old_state->fake_commit)
|
|
+ complete_all(&old_state->fake_commit->flip_done);
|
|
}
|
|
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
|
|
|
|
@@ -3209,7 +3212,7 @@ EXPORT_SYMBOL(drm_atomic_helper_suspend);
|
|
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
|
|
struct drm_modeset_acquire_ctx *ctx)
|
|
{
|
|
- int i;
|
|
+ int i, ret;
|
|
struct drm_plane *plane;
|
|
struct drm_plane_state *new_plane_state;
|
|
struct drm_connector *connector;
|
|
@@ -3228,7 +3231,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
|
|
for_each_new_connector_in_state(state, connector, new_conn_state, i)
|
|
state->connectors[i].old_state = connector->state;
|
|
|
|
- return drm_atomic_commit(state);
|
|
+ ret = drm_atomic_commit(state);
|
|
+
|
|
+ state->acquire_ctx = NULL;
|
|
+
|
|
+ return ret;
|
|
}
|
|
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
|
|
|
|
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
|
|
index d5b7f315098c2..087470ad6436d 100644
|
|
--- a/drivers/gpu/drm/drm_atomic_uapi.c
|
|
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
|
|
@@ -1275,12 +1275,11 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
|
|
(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
|
|
return -EINVAL;
|
|
|
|
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
|
|
-
|
|
state = drm_atomic_state_alloc(dev);
|
|
if (!state)
|
|
return -ENOMEM;
|
|
|
|
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
|
|
state->acquire_ctx = &ctx;
|
|
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
|
|
|
|
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
|
|
index 7412acaf3cde5..d7d10cabb9bbf 100644
|
|
--- a/drivers/gpu/drm/drm_bufs.c
|
|
+++ b/drivers/gpu/drm/drm_bufs.c
|
|
@@ -36,6 +36,8 @@
|
|
#include <drm/drmP.h>
|
|
#include "drm_legacy.h"
|
|
|
|
+#include <linux/nospec.h>
|
|
+
|
|
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
|
|
struct drm_local_map *map)
|
|
{
|
|
@@ -1417,6 +1419,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
|
|
idx, dma->buf_count - 1);
|
|
return -EINVAL;
|
|
}
|
|
+ idx = array_index_nospec(idx, dma->buf_count);
|
|
buf = dma->buflist[idx];
|
|
if (buf->file_priv != file_priv) {
|
|
DRM_ERROR("Process %d freeing buffer not owned\n",
|
|
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
|
|
index 9d64f874f965b..6950e365135cf 100644
|
|
--- a/drivers/gpu/drm/drm_fb_helper.c
|
|
+++ b/drivers/gpu/drm/drm_fb_helper.c
|
|
@@ -1621,6 +1621,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
|
|
var_1->transp.msb_right == var_2->transp.msb_right;
|
|
}
|
|
|
|
+static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
|
|
+ u8 depth)
|
|
+{
|
|
+ switch (depth) {
|
|
+ case 8:
|
|
+ var->red.offset = 0;
|
|
+ var->green.offset = 0;
|
|
+ var->blue.offset = 0;
|
|
+ var->red.length = 8; /* 8bit DAC */
|
|
+ var->green.length = 8;
|
|
+ var->blue.length = 8;
|
|
+ var->transp.offset = 0;
|
|
+ var->transp.length = 0;
|
|
+ break;
|
|
+ case 15:
|
|
+ var->red.offset = 10;
|
|
+ var->green.offset = 5;
|
|
+ var->blue.offset = 0;
|
|
+ var->red.length = 5;
|
|
+ var->green.length = 5;
|
|
+ var->blue.length = 5;
|
|
+ var->transp.offset = 15;
|
|
+ var->transp.length = 1;
|
|
+ break;
|
|
+ case 16:
|
|
+ var->red.offset = 11;
|
|
+ var->green.offset = 5;
|
|
+ var->blue.offset = 0;
|
|
+ var->red.length = 5;
|
|
+ var->green.length = 6;
|
|
+ var->blue.length = 5;
|
|
+ var->transp.offset = 0;
|
|
+ break;
|
|
+ case 24:
|
|
+ var->red.offset = 16;
|
|
+ var->green.offset = 8;
|
|
+ var->blue.offset = 0;
|
|
+ var->red.length = 8;
|
|
+ var->green.length = 8;
|
|
+ var->blue.length = 8;
|
|
+ var->transp.offset = 0;
|
|
+ var->transp.length = 0;
|
|
+ break;
|
|
+ case 32:
|
|
+ var->red.offset = 16;
|
|
+ var->green.offset = 8;
|
|
+ var->blue.offset = 0;
|
|
+ var->red.length = 8;
|
|
+ var->green.length = 8;
|
|
+ var->blue.length = 8;
|
|
+ var->transp.offset = 24;
|
|
+ var->transp.length = 8;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
/**
|
|
* drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
|
|
* @var: screeninfo to check
|
|
@@ -1632,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
|
|
struct drm_fb_helper *fb_helper = info->par;
|
|
struct drm_framebuffer *fb = fb_helper->fb;
|
|
|
|
- if (var->pixclock != 0 || in_dbg_master())
|
|
+ if (in_dbg_master())
|
|
return -EINVAL;
|
|
|
|
+ if (var->pixclock != 0) {
|
|
+ DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
|
|
+ var->pixclock = 0;
|
|
+ }
|
|
+
|
|
/*
|
|
* Changes struct fb_var_screeninfo are currently not pushed back
|
|
* to KMS, hence fail if different settings are requested.
|
|
@@ -1650,6 +1713,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ /*
|
|
+ * Workaround for SDL 1.2, which is known to be setting all pixel format
|
|
+ * fields values to zero in some cases. We treat this situation as a
|
|
+ * kind of "use some reasonable autodetected values".
|
|
+ */
|
|
+ if (!var->red.offset && !var->green.offset &&
|
|
+ !var->blue.offset && !var->transp.offset &&
|
|
+ !var->red.length && !var->green.length &&
|
|
+ !var->blue.length && !var->transp.length &&
|
|
+ !var->red.msb_right && !var->green.msb_right &&
|
|
+ !var->blue.msb_right && !var->transp.msb_right) {
|
|
+ drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
|
|
+ }
|
|
+
|
|
/*
|
|
* drm fbdev emulation doesn't support changing the pixel format at all,
|
|
* so reject all pixel format changing requests.
|
|
@@ -1961,59 +2038,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
|
|
info->var.yoffset = 0;
|
|
info->var.activate = FB_ACTIVATE_NOW;
|
|
|
|
- switch (fb->format->depth) {
|
|
- case 8:
|
|
- info->var.red.offset = 0;
|
|
- info->var.green.offset = 0;
|
|
- info->var.blue.offset = 0;
|
|
- info->var.red.length = 8; /* 8bit DAC */
|
|
- info->var.green.length = 8;
|
|
- info->var.blue.length = 8;
|
|
- info->var.transp.offset = 0;
|
|
- info->var.transp.length = 0;
|
|
- break;
|
|
- case 15:
|
|
- info->var.red.offset = 10;
|
|
- info->var.green.offset = 5;
|
|
- info->var.blue.offset = 0;
|
|
- info->var.red.length = 5;
|
|
- info->var.green.length = 5;
|
|
- info->var.blue.length = 5;
|
|
- info->var.transp.offset = 15;
|
|
- info->var.transp.length = 1;
|
|
- break;
|
|
- case 16:
|
|
- info->var.red.offset = 11;
|
|
- info->var.green.offset = 5;
|
|
- info->var.blue.offset = 0;
|
|
- info->var.red.length = 5;
|
|
- info->var.green.length = 6;
|
|
- info->var.blue.length = 5;
|
|
- info->var.transp.offset = 0;
|
|
- break;
|
|
- case 24:
|
|
- info->var.red.offset = 16;
|
|
- info->var.green.offset = 8;
|
|
- info->var.blue.offset = 0;
|
|
- info->var.red.length = 8;
|
|
- info->var.green.length = 8;
|
|
- info->var.blue.length = 8;
|
|
- info->var.transp.offset = 0;
|
|
- info->var.transp.length = 0;
|
|
- break;
|
|
- case 32:
|
|
- info->var.red.offset = 16;
|
|
- info->var.green.offset = 8;
|
|
- info->var.blue.offset = 0;
|
|
- info->var.red.length = 8;
|
|
- info->var.green.length = 8;
|
|
- info->var.blue.length = 8;
|
|
- info->var.transp.offset = 24;
|
|
- info->var.transp.length = 8;
|
|
- break;
|
|
- default:
|
|
- break;
|
|
- }
|
|
+ drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
|
|
|
|
info->var.xres = fb_width;
|
|
info->var.yres = fb_height;
|
|
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
|
|
index c61680ad962d9..6e59789e33163 100644
|
|
--- a/drivers/gpu/drm/drm_lease.c
|
|
+++ b/drivers/gpu/drm/drm_lease.c
|
|
@@ -521,7 +521,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
|
|
|
|
object_count = cl->object_count;
|
|
|
|
- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
|
|
+ object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
|
|
+ array_size(object_count, sizeof(__u32)));
|
|
if (IS_ERR(object_ids))
|
|
return PTR_ERR(object_ids);
|
|
|
|
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
|
|
index be8b754eaf60c..9bc3654c1c7f5 100644
|
|
--- a/drivers/gpu/drm/drm_mode_object.c
|
|
+++ b/drivers/gpu/drm/drm_mode_object.c
|
|
@@ -458,11 +458,11 @@ static int set_property_atomic(struct drm_mode_object *obj,
|
|
struct drm_modeset_acquire_ctx ctx;
|
|
int ret;
|
|
|
|
- drm_modeset_acquire_init(&ctx, 0);
|
|
-
|
|
state = drm_atomic_state_alloc(dev);
|
|
if (!state)
|
|
return -ENOMEM;
|
|
+
|
|
+ drm_modeset_acquire_init(&ctx, 0);
|
|
state->acquire_ctx = &ctx;
|
|
retry:
|
|
if (prop == state->dev->mode_config.dpms_property) {
|
|
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
|
|
index 02db9ac82d7a9..a3104d79b48f0 100644
|
|
--- a/drivers/gpu/drm/drm_modes.c
|
|
+++ b/drivers/gpu/drm/drm_modes.c
|
|
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
|
|
if (mode->hsync)
|
|
return mode->hsync;
|
|
|
|
- if (mode->htotal < 0)
|
|
+ if (mode->htotal <= 0)
|
|
return 0;
|
|
|
|
calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
|
|
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
|
|
index c1072143da1dc..e70c450427dcd 100644
|
|
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
|
|
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
|
|
@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
|
|
{
|
|
unsigned int index;
|
|
u64 virtaddr;
|
|
- unsigned long req_size, pgoff = 0;
|
|
+ unsigned long req_size, pgoff, req_start;
|
|
pgprot_t pg_prot;
|
|
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
|
|
|
|
@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
|
|
pg_prot = vma->vm_page_prot;
|
|
virtaddr = vma->vm_start;
|
|
req_size = vma->vm_end - vma->vm_start;
|
|
- pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
|
|
+ pgoff = vma->vm_pgoff &
|
|
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
|
|
+ req_start = pgoff << PAGE_SHIFT;
|
|
+
|
|
+ if (!intel_vgpu_in_aperture(vgpu, req_start))
|
|
+ return -EINVAL;
|
|
+ if (req_start + req_size >
|
|
+ vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
|
|
+ return -EINVAL;
|
|
+
|
|
+ pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
|
|
|
|
return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
|
|
}
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
|
|
index 6ae9a6080cc88..296f9c0fe19bc 100644
|
|
--- a/drivers/gpu/drm/i915/i915_gem.c
|
|
+++ b/drivers/gpu/drm/i915/i915_gem.c
|
|
@@ -1826,6 +1826,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
|
|
return 0;
|
|
}
|
|
|
|
+static inline bool
|
|
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
|
|
+ unsigned long addr, unsigned long size)
|
|
+{
|
|
+ if (vma->vm_file != filp)
|
|
+ return false;
|
|
+
|
|
+ return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
|
|
+}
|
|
+
|
|
/**
|
|
* i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
|
|
* it is mapped to.
|
|
@@ -1884,7 +1894,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
|
|
return -EINTR;
|
|
}
|
|
vma = find_vma(mm, addr);
|
|
- if (vma)
|
|
+ if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
|
|
vma->vm_page_prot =
|
|
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
|
|
else
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
|
|
index 1aaccbe7e1deb..c45711fd78e92 100644
|
|
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
|
|
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
|
|
@@ -1605,6 +1605,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
|
|
(char __user *)urelocs + copied,
|
|
len)) {
|
|
end_user:
|
|
+ user_access_end();
|
|
kvfree(relocs);
|
|
err = -EFAULT;
|
|
goto err;
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
|
|
index 07999fe09ad23..4fa1d2b146b15 100644
|
|
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
|
|
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
|
|
@@ -2117,6 +2117,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
|
|
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
|
|
{
|
|
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
|
|
+ int err;
|
|
|
|
/*
|
|
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
|
|
@@ -2132,9 +2133,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
|
|
* allocator works in address space sizes, so it's multiplied by page
|
|
* size. We allocate at the top of the GTT to avoid fragmentation.
|
|
*/
|
|
- return i915_vma_pin(ppgtt->vma,
|
|
- 0, GEN6_PD_ALIGN,
|
|
- PIN_GLOBAL | PIN_HIGH);
|
|
+ err = i915_vma_pin(ppgtt->vma,
|
|
+ 0, GEN6_PD_ALIGN,
|
|
+ PIN_GLOBAL | PIN_HIGH);
|
|
+ if (err)
|
|
+ goto unpin;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+unpin:
|
|
+ ppgtt->pin_count = 0;
|
|
+ return err;
|
|
}
|
|
|
|
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
|
|
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
|
|
index 5186cd7075f91..372f30d286e3a 100644
|
|
--- a/drivers/gpu/drm/i915/intel_ddi.c
|
|
+++ b/drivers/gpu/drm/i915/intel_ddi.c
|
|
@@ -1085,7 +1085,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
|
|
return DDI_CLK_SEL_TBT_810;
|
|
default:
|
|
MISSING_CASE(clock);
|
|
- break;
|
|
+ return DDI_CLK_SEL_NONE;
|
|
}
|
|
case DPLL_ID_ICL_MGPLL1:
|
|
case DPLL_ID_ICL_MGPLL2:
|
|
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
|
|
index c9878dd1f7cd0..a8293a7bab8f4 100644
|
|
--- a/drivers/gpu/drm/i915/intel_display.c
|
|
+++ b/drivers/gpu/drm/i915/intel_display.c
|
|
@@ -15684,15 +15684,44 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
|
|
}
|
|
}
|
|
|
|
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
|
|
+{
|
|
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
|
|
+
|
|
+ /*
|
|
+ * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
|
|
+ * the hardware when a high res displays plugged in. DPLL P
|
|
+ * divider is zero, and the pipe timings are bonkers. We'll
|
|
+ * try to disable everything in that case.
|
|
+ *
|
|
+ * FIXME would be nice to be able to sanitize this state
|
|
+ * without several WARNs, but for now let's take the easy
|
|
+ * road.
|
|
+ */
|
|
+ return IS_GEN6(dev_priv) &&
|
|
+ crtc_state->base.active &&
|
|
+ crtc_state->shared_dpll &&
|
|
+ crtc_state->port_clock == 0;
|
|
+}
|
|
+
|
|
static void intel_sanitize_encoder(struct intel_encoder *encoder)
|
|
{
|
|
struct intel_connector *connector;
|
|
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
|
+ struct intel_crtc_state *crtc_state = crtc ?
|
|
+ to_intel_crtc_state(crtc->base.state) : NULL;
|
|
|
|
/* We need to check both for a crtc link (meaning that the
|
|
* encoder is active and trying to read from a pipe) and the
|
|
* pipe itself being active. */
|
|
- bool has_active_crtc = encoder->base.crtc &&
|
|
- to_intel_crtc(encoder->base.crtc)->active;
|
|
+ bool has_active_crtc = crtc_state &&
|
|
+ crtc_state->base.active;
|
|
+
|
|
+ if (crtc_state && has_bogus_dpll_config(crtc_state)) {
|
|
+ DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
|
|
+ pipe_name(crtc->pipe));
|
|
+ has_active_crtc = false;
|
|
+ }
|
|
|
|
connector = intel_encoder_find_connector(encoder);
|
|
if (connector && !has_active_crtc) {
|
|
@@ -15703,15 +15732,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
|
|
/* Connector is active, but has no active pipe. This is
|
|
* fallout from our resume register restoring. Disable
|
|
* the encoder manually again. */
|
|
- if (encoder->base.crtc) {
|
|
- struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
|
|
+ if (crtc_state) {
|
|
+ struct drm_encoder *best_encoder;
|
|
|
|
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
|
|
encoder->base.base.id,
|
|
encoder->base.name);
|
|
- encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
|
|
+
|
|
+ /* avoid oopsing in case the hooks consult best_encoder */
|
|
+ best_encoder = connector->base.state->best_encoder;
|
|
+ connector->base.state->best_encoder = &encoder->base;
|
|
+
|
|
+ if (encoder->disable)
|
|
+ encoder->disable(encoder, crtc_state,
|
|
+ connector->base.state);
|
|
if (encoder->post_disable)
|
|
- encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
|
|
+ encoder->post_disable(encoder, crtc_state,
|
|
+ connector->base.state);
|
|
+
|
|
+ connector->base.state->best_encoder = best_encoder;
|
|
}
|
|
encoder->base.crtc = NULL;
|
|
|
|
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
|
|
index db6fa1d0cbdae..f35139b3ebc57 100644
|
|
--- a/drivers/gpu/drm/i915/intel_drv.h
|
|
+++ b/drivers/gpu/drm/i915/intel_drv.h
|
|
@@ -209,6 +209,16 @@ struct intel_fbdev {
|
|
unsigned long vma_flags;
|
|
async_cookie_t cookie;
|
|
int preferred_bpp;
|
|
+
|
|
+ /* Whether or not fbdev hpd processing is temporarily suspended */
|
|
+ bool hpd_suspended : 1;
|
|
+ /* Set when a hotplug was received while HPD processing was
|
|
+ * suspended
|
|
+ */
|
|
+ bool hpd_waiting : 1;
|
|
+
|
|
+ /* Protects hpd_suspended */
|
|
+ struct mutex hpd_lock;
|
|
};
|
|
|
|
struct intel_encoder {
|
|
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
|
|
index f99332972b7ab..e0c02a9889b2c 100644
|
|
--- a/drivers/gpu/drm/i915/intel_fbdev.c
|
|
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
|
|
@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
|
|
bool *enabled, int width, int height)
|
|
{
|
|
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
|
|
- unsigned long conn_configured, conn_seq, mask;
|
|
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
|
|
+ unsigned long conn_configured, conn_seq;
|
|
int i, j;
|
|
bool *save_enabled;
|
|
bool fallback = true, ret = true;
|
|
@@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
|
|
drm_modeset_backoff(&ctx);
|
|
|
|
memcpy(save_enabled, enabled, count);
|
|
- mask = GENMASK(count - 1, 0);
|
|
+ conn_seq = GENMASK(count - 1, 0);
|
|
conn_configured = 0;
|
|
retry:
|
|
- conn_seq = conn_configured;
|
|
for (i = 0; i < count; i++) {
|
|
struct drm_fb_helper_connector *fb_conn;
|
|
struct drm_connector *connector;
|
|
@@ -371,7 +370,8 @@ retry:
|
|
if (conn_configured & BIT(i))
|
|
continue;
|
|
|
|
- if (conn_seq == 0 && !connector->has_tile)
|
|
+ /* First pass, only consider tiled connectors */
|
|
+ if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
|
|
continue;
|
|
|
|
if (connector->status == connector_status_connected)
|
|
@@ -475,8 +475,10 @@ retry:
|
|
conn_configured |= BIT(i);
|
|
}
|
|
|
|
- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
|
|
+ if (conn_configured != conn_seq) { /* repeat until no more are found */
|
|
+ conn_seq = conn_configured;
|
|
goto retry;
|
|
+ }
|
|
|
|
/*
|
|
* If the BIOS didn't enable everything it could, fall back to have the
|
|
@@ -679,6 +681,7 @@ int intel_fbdev_init(struct drm_device *dev)
|
|
if (ifbdev == NULL)
|
|
return -ENOMEM;
|
|
|
|
+ mutex_init(&ifbdev->hpd_lock);
|
|
drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
|
|
|
|
if (!intel_fbdev_init_bios(dev, ifbdev))
|
|
@@ -752,6 +755,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
|
|
intel_fbdev_destroy(ifbdev);
|
|
}
|
|
|
|
+/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
|
|
+ * processing, fbdev will perform a full connector reprobe if a hotplug event
|
|
+ * was received while HPD was suspended.
|
|
+ */
|
|
+static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
|
|
+{
|
|
+ bool send_hpd = false;
|
|
+
|
|
+ mutex_lock(&ifbdev->hpd_lock);
|
|
+ ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
|
|
+ send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
|
|
+ ifbdev->hpd_waiting = false;
|
|
+ mutex_unlock(&ifbdev->hpd_lock);
|
|
+
|
|
+ if (send_hpd) {
|
|
+ DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
|
|
+ drm_fb_helper_hotplug_event(&ifbdev->helper);
|
|
+ }
|
|
+}
|
|
+
|
|
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
|
|
{
|
|
struct drm_i915_private *dev_priv = to_i915(dev);
|
|
@@ -773,6 +796,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
|
|
*/
|
|
if (state != FBINFO_STATE_RUNNING)
|
|
flush_work(&dev_priv->fbdev_suspend_work);
|
|
+
|
|
console_lock();
|
|
} else {
|
|
/*
|
|
@@ -800,17 +824,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
|
|
|
|
drm_fb_helper_set_suspend(&ifbdev->helper, state);
|
|
console_unlock();
|
|
+
|
|
+ intel_fbdev_hpd_set_suspend(ifbdev, state);
|
|
}
|
|
|
|
void intel_fbdev_output_poll_changed(struct drm_device *dev)
|
|
{
|
|
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
|
|
+ bool send_hpd;
|
|
|
|
if (!ifbdev)
|
|
return;
|
|
|
|
intel_fbdev_sync(ifbdev);
|
|
- if (ifbdev->vma || ifbdev->helper.deferred_setup)
|
|
+
|
|
+ mutex_lock(&ifbdev->hpd_lock);
|
|
+ send_hpd = !ifbdev->hpd_suspended;
|
|
+ ifbdev->hpd_waiting = true;
|
|
+ mutex_unlock(&ifbdev->hpd_lock);
|
|
+
|
|
+ if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
|
|
drm_fb_helper_hotplug_event(&ifbdev->helper);
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
|
|
index 191b314f9e9e5..709475d5cc30e 100644
|
|
--- a/drivers/gpu/drm/meson/meson_crtc.c
|
|
+++ b/drivers/gpu/drm/meson/meson_crtc.c
|
|
@@ -45,7 +45,6 @@ struct meson_crtc {
|
|
struct drm_crtc base;
|
|
struct drm_pending_vblank_event *event;
|
|
struct meson_drm *priv;
|
|
- bool enabled;
|
|
};
|
|
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
|
|
|
|
@@ -81,7 +80,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
|
|
|
|
};
|
|
|
|
-static void meson_crtc_enable(struct drm_crtc *crtc)
|
|
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
|
|
+ struct drm_crtc_state *old_state)
|
|
{
|
|
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
|
|
struct drm_crtc_state *crtc_state = crtc->state;
|
|
@@ -103,20 +103,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
|
|
|
|
drm_crtc_vblank_on(crtc);
|
|
|
|
- meson_crtc->enabled = true;
|
|
-}
|
|
-
|
|
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
|
|
- struct drm_crtc_state *old_state)
|
|
-{
|
|
- struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
|
|
- struct meson_drm *priv = meson_crtc->priv;
|
|
-
|
|
- DRM_DEBUG_DRIVER("\n");
|
|
-
|
|
- if (!meson_crtc->enabled)
|
|
- meson_crtc_enable(crtc);
|
|
-
|
|
priv->viu.osd1_enabled = true;
|
|
}
|
|
|
|
@@ -142,8 +128,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
|
|
|
|
crtc->state->event = NULL;
|
|
}
|
|
-
|
|
- meson_crtc->enabled = false;
|
|
}
|
|
|
|
static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
|
|
@@ -152,9 +136,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
|
|
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
|
|
unsigned long flags;
|
|
|
|
- if (crtc->state->enable && !meson_crtc->enabled)
|
|
- meson_crtc_enable(crtc);
|
|
-
|
|
if (crtc->state->event) {
|
|
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
|
|
|
|
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
|
|
index d3443125e6616..611ac340fb289 100644
|
|
--- a/drivers/gpu/drm/meson/meson_drv.c
|
|
+++ b/drivers/gpu/drm/meson/meson_drv.c
|
|
@@ -82,6 +82,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
|
|
.fb_create = drm_gem_fb_create,
|
|
};
|
|
|
|
+static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
|
|
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
|
|
+};
|
|
+
|
|
static irqreturn_t meson_irq(int irq, void *arg)
|
|
{
|
|
struct drm_device *dev = arg;
|
|
@@ -246,6 +250,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
|
|
drm->mode_config.max_width = 3840;
|
|
drm->mode_config.max_height = 2160;
|
|
drm->mode_config.funcs = &meson_mode_config_funcs;
|
|
+ drm->mode_config.helper_private = &meson_mode_config_helpers;
|
|
|
|
/* Hardware Initialization */
|
|
|
|
@@ -363,8 +368,10 @@ static int meson_probe_remote(struct platform_device *pdev,
|
|
remote_node = of_graph_get_remote_port_parent(ep);
|
|
if (!remote_node ||
|
|
remote_node == parent || /* Ignore parent endpoint */
|
|
- !of_device_is_available(remote_node))
|
|
+ !of_device_is_available(remote_node)) {
|
|
+ of_node_put(remote_node);
|
|
continue;
|
|
+ }
|
|
|
|
count += meson_probe_remote(pdev, match, remote, remote_node);
|
|
|
|
@@ -383,10 +390,13 @@ static int meson_drv_probe(struct platform_device *pdev)
|
|
|
|
for_each_endpoint_of_node(np, ep) {
|
|
remote = of_graph_get_remote_port_parent(ep);
|
|
- if (!remote || !of_device_is_available(remote))
|
|
+ if (!remote || !of_device_is_available(remote)) {
|
|
+ of_node_put(remote);
|
|
continue;
|
|
+ }
|
|
|
|
count += meson_probe_remote(pdev, &match, np, remote);
|
|
+ of_node_put(remote);
|
|
}
|
|
|
|
if (count && !match)
|
|
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
|
|
index 84de385a9f622..60f146f02b772 100644
|
|
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
|
|
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
|
|
@@ -331,7 +331,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
|
|
if (hw_ctl && hw_ctl->ops.get_flush_register)
|
|
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
|
|
|
|
- if (flush_register == 0)
|
|
+ if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
|
|
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
|
|
-1, 0);
|
|
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
|
|
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
|
|
index 41bec570c5184..31205625c7346 100644
|
|
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
|
|
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
|
|
@@ -17,7 +17,7 @@
|
|
* | |
|
|
* | |
|
|
* +---------+ | +----------+ | +----+
|
|
- * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
|
|
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
|
|
* +---------+ | +----------+ | +----+
|
|
* | |
|
|
* | | dsi0_pll_by_2_bit_clk
|
|
@@ -25,7 +25,7 @@
|
|
* | | +----+ | |\ dsi0_pclk_mux
|
|
* | |--| /2 |--o--| \ |
|
|
* | | +----+ | \ | +---------+
|
|
- * | --------------| |--o--| div_7_4 |-- dsi0pll
|
|
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
|
|
* |------------------------------| / +---------+
|
|
* | +-----+ | /
|
|
* -----------| /4? |--o----------|/
|
|
@@ -690,7 +690,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
|
|
|
|
hws[num++] = hw;
|
|
|
|
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
|
|
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
|
|
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
|
|
|
|
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
|
|
@@ -739,7 +739,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
|
|
|
|
hws[num++] = hw;
|
|
|
|
- snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
|
|
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
|
|
snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
|
|
|
|
/* PIX CLK DIV : DIV_CTRL_7_4*/
|
|
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
|
|
index f82bac0866664..5d500e58d5cee 100644
|
|
--- a/drivers/gpu/drm/msm/msm_gpu.h
|
|
+++ b/drivers/gpu/drm/msm/msm_gpu.h
|
|
@@ -63,7 +63,7 @@ struct msm_gpu_funcs {
|
|
struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
|
|
void (*recover)(struct msm_gpu *gpu);
|
|
void (*destroy)(struct msm_gpu *gpu);
|
|
-#ifdef CONFIG_DEBUG_FS
|
|
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
|
|
/* show GPU status in debugfs: */
|
|
void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
|
|
struct drm_printer *p);
|
|
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
|
|
index db1bf7f88c1f5..e0e6d66de7459 100644
|
|
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
|
|
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
|
|
@@ -1262,8 +1262,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
|
|
static void
|
|
nv50_mstm_init(struct nv50_mstm *mstm)
|
|
{
|
|
- if (mstm && mstm->mgr.mst_state)
|
|
- drm_dp_mst_topology_mgr_resume(&mstm->mgr);
|
|
+ int ret;
|
|
+
|
|
+ if (!mstm || !mstm->mgr.mst_state)
|
|
+ return;
|
|
+
|
|
+ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
|
|
+ if (ret == -1) {
|
|
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
|
|
+ drm_kms_helper_hotplug_event(mstm->mgr.dev);
|
|
+ }
|
|
}
|
|
|
|
static void
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
|
|
index 816ccaedfc732..8675613e142b6 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
|
|
@@ -22,6 +22,7 @@
|
|
#include <engine/falcon.h>
|
|
|
|
#include <core/gpuobj.h>
|
|
+#include <subdev/mc.h>
|
|
#include <subdev/timer.h>
|
|
#include <engine/fifo.h>
|
|
|
|
@@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
|
|
}
|
|
}
|
|
|
|
- nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
|
|
- nvkm_wr32(device, base + 0x014, 0xffffffff);
|
|
+ if (nvkm_mc_enabled(device, engine->subdev.index)) {
|
|
+ nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
|
|
+ nvkm_wr32(device, base + 0x014, 0xffffffff);
|
|
+ }
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
|
|
index 3695cde669f88..07914e36939e3 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
|
|
@@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
|
|
duty = nvkm_therm_update_linear(therm);
|
|
break;
|
|
case NVBIOS_THERM_FAN_OTHER:
|
|
- if (therm->cstate)
|
|
+ if (therm->cstate) {
|
|
duty = therm->cstate;
|
|
- else
|
|
+ poll = false;
|
|
+ } else {
|
|
duty = nvkm_therm_update_linear_fallback(therm);
|
|
- poll = false;
|
|
+ }
|
|
break;
|
|
}
|
|
immd = false;
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
|
|
index dec1e081f5295..6a8fb6fd183c3 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_kms.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
|
|
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
|
|
}
|
|
|
|
if (radeon_is_px(dev)) {
|
|
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
|
|
pm_runtime_use_autosuspend(dev->dev);
|
|
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
|
|
pm_runtime_set_active(dev->dev);
|
|
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
|
|
index 17741843cf519..40a9afe818a86 100644
|
|
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
|
|
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
|
|
@@ -226,9 +226,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
|
|
* system clock, and have no internal clock divider.
|
|
*/
|
|
|
|
- if (WARN_ON(!rcrtc->extclock))
|
|
- return;
|
|
-
|
|
/*
|
|
* The H3 ES1.x exhibits dot clock duty cycle stability issues.
|
|
* We can work around them by configuring the DPLL to twice the
|
|
@@ -1113,9 +1110,16 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
|
|
clk = devm_clk_get(rcdu->dev, clk_name);
|
|
if (!IS_ERR(clk)) {
|
|
rcrtc->extclock = clk;
|
|
- } else if (PTR_ERR(rcrtc->clock) == -EPROBE_DEFER) {
|
|
- dev_info(rcdu->dev, "can't get external clock %u\n", hwindex);
|
|
+ } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
|
|
return -EPROBE_DEFER;
|
|
+ } else if (rcdu->info->dpll_mask & BIT(hwindex)) {
|
|
+ /*
|
|
+ * DU channels that have a display PLL can't use the internal
|
|
+ * system clock and thus require an external clock.
|
|
+ */
|
|
+ ret = PTR_ERR(clk);
|
|
+ dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
|
|
+ return ret;
|
|
}
|
|
|
|
init_waitqueue_head(&rcrtc->flip_wait);
|
|
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
|
|
index 3105965fc2603..5a485489a1e23 100644
|
|
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
|
|
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
|
|
@@ -147,7 +147,7 @@ static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
|
|
}
|
|
|
|
static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
|
|
- u8 *buff, u8 buff_size)
|
|
+ u8 *buff, u16 buff_size)
|
|
{
|
|
u32 i;
|
|
int ret;
|
|
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
|
|
index 79d00d861a31f..01ff3c8588750 100644
|
|
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
|
|
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
|
|
@@ -189,12 +189,14 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
|
|
int rockchip_drm_psr_register(struct drm_encoder *encoder,
|
|
int (*psr_set)(struct drm_encoder *, bool enable))
|
|
{
|
|
- struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
|
|
+ struct rockchip_drm_private *drm_drv;
|
|
struct psr_drv *psr;
|
|
|
|
if (!encoder || !psr_set)
|
|
return -EINVAL;
|
|
|
|
+ drm_drv = encoder->dev->dev_private;
|
|
+
|
|
psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
|
|
if (!psr)
|
|
return -ENOMEM;
|
|
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
|
|
index 96ac1458a59c9..c0351abf83a35 100644
|
|
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
|
|
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
|
|
@@ -1,17 +1,8 @@
|
|
-//SPDX-License-Identifier: GPL-2.0+
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
|
|
* Author:
|
|
* Sandy Huang <hjc@rock-chips.com>
|
|
- *
|
|
- * This software is licensed under the terms of the GNU General Public
|
|
- * License version 2, as published by the Free Software Foundation, and
|
|
- * may be copied, distributed, and modified under those terms.
|
|
- *
|
|
- * This program is distributed in the hope that it will be useful,
|
|
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
- * GNU General Public License for more details.
|
|
*/
|
|
|
|
#include <drm/drmP.h>
|
|
@@ -113,8 +104,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
|
|
child_count++;
|
|
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
|
|
&panel, &bridge);
|
|
- if (!ret)
|
|
+ if (!ret) {
|
|
+ of_node_put(endpoint);
|
|
break;
|
|
+ }
|
|
}
|
|
|
|
of_node_put(port);
|
|
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
|
|
index 38b52e63b2b04..27b9635124bc1 100644
|
|
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
|
|
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
|
|
@@ -1,17 +1,8 @@
|
|
-//SPDX-License-Identifier: GPL-2.0+
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
|
|
* Author:
|
|
* Sandy Huang <hjc@rock-chips.com>
|
|
- *
|
|
- * This software is licensed under the terms of the GNU General Public
|
|
- * License version 2, as published by the Free Software Foundation, and
|
|
- * may be copied, distributed, and modified under those terms.
|
|
- *
|
|
- * This program is distributed in the hope that it will be useful,
|
|
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
- * GNU General Public License for more details.
|
|
*/
|
|
|
|
#ifdef CONFIG_ROCKCHIP_RGB
|
|
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
|
|
index 3e22a54a99c25..2c02f5b03db8a 100644
|
|
--- a/drivers/gpu/drm/scheduler/sched_entity.c
|
|
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
|
|
@@ -434,13 +434,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
|
|
|
|
while ((entity->dependency =
|
|
sched->ops->dependency(sched_job, entity))) {
|
|
+ trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
|
|
|
|
- if (drm_sched_entity_add_dependency_cb(entity)) {
|
|
-
|
|
- trace_drm_sched_job_wait_dep(sched_job,
|
|
- entity->dependency);
|
|
+ if (drm_sched_entity_add_dependency_cb(entity))
|
|
return NULL;
|
|
- }
|
|
}
|
|
|
|
/* skip jobs from entity that marked guilty */
|
|
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
|
|
index 44fe587aaef97..c5bbbd7cb2de9 100644
|
|
--- a/drivers/gpu/drm/scheduler/sched_main.c
|
|
+++ b/drivers/gpu/drm/scheduler/sched_main.c
|
|
@@ -60,6 +60,8 @@
|
|
|
|
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
|
|
|
|
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
|
|
+
|
|
/**
|
|
* drm_sched_rq_init - initialize a given run queue struct
|
|
*
|
|
@@ -215,7 +217,7 @@ static void drm_sched_job_finish(struct work_struct *work)
|
|
|
|
spin_lock(&sched->job_list_lock);
|
|
/* remove job from ring_mirror_list */
|
|
- list_del(&s_job->node);
|
|
+ list_del_init(&s_job->node);
|
|
/* queue TDR for next job */
|
|
drm_sched_start_timeout(sched);
|
|
spin_unlock(&sched->job_list_lock);
|
|
@@ -378,6 +380,8 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
|
|
r);
|
|
dma_fence_put(fence);
|
|
} else {
|
|
+ if (s_fence->finished.error < 0)
|
|
+ drm_sched_expel_job_unlocked(s_job);
|
|
drm_sched_process_job(NULL, &s_fence->cb);
|
|
}
|
|
spin_lock(&sched->job_list_lock);
|
|
@@ -567,6 +571,8 @@ static int drm_sched_main(void *param)
|
|
r);
|
|
dma_fence_put(fence);
|
|
} else {
|
|
+ if (s_fence->finished.error < 0)
|
|
+ drm_sched_expel_job_unlocked(sched_job);
|
|
drm_sched_process_job(NULL, &s_fence->cb);
|
|
}
|
|
|
|
@@ -575,6 +581,15 @@ static int drm_sched_main(void *param)
|
|
return 0;
|
|
}
|
|
|
|
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
|
|
+{
|
|
+ struct drm_gpu_scheduler *sched = s_job->sched;
|
|
+
|
|
+ spin_lock(&sched->job_list_lock);
|
|
+ list_del_init(&s_job->node);
|
|
+ spin_unlock(&sched->job_list_lock);
|
|
+}
|
|
+
|
|
/**
|
|
* drm_sched_init - Init a gpu scheduler instance
|
|
*
|
|
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
|
|
index bf49c55b0f2c7..9f27d5464804b 100644
|
|
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
|
|
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
|
|
@@ -704,17 +704,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
|
|
remote = of_graph_get_remote_port_parent(ep);
|
|
if (!remote)
|
|
continue;
|
|
+ of_node_put(remote);
|
|
|
|
/* does this node match any registered engines? */
|
|
list_for_each_entry(frontend, &drv->frontend_list, list) {
|
|
if (remote == frontend->node) {
|
|
- of_node_put(remote);
|
|
of_node_put(port);
|
|
+ of_node_put(ep);
|
|
return frontend;
|
|
}
|
|
}
|
|
}
|
|
-
|
|
+ of_node_put(port);
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
|
|
index 3040a79f298ff..37158548b4476 100644
|
|
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
|
|
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
|
|
@@ -167,6 +167,13 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
|
|
goto err_assert_reset;
|
|
}
|
|
|
|
+ /*
|
|
+ * At least on H6, some registers have some bits set by default
|
|
+ * which may cause issues. Clear them here.
|
|
+ */
|
|
+ writel(0, regs + TCON_TOP_PORT_SEL_REG);
|
|
+ writel(0, regs + TCON_TOP_GATE_SRC_REG);
|
|
+
|
|
/*
|
|
* TCON TOP has two muxes, which select parent clock for each TCON TV
|
|
* channel clock. Parent could be either TCON TV or TVE clock. For now
|
|
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
|
|
index f455f095a1468..1b014d92855b9 100644
|
|
--- a/drivers/gpu/drm/udl/udl_main.c
|
|
+++ b/drivers/gpu/drm/udl/udl_main.c
|
|
@@ -350,15 +350,10 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
|
|
if (ret)
|
|
goto err;
|
|
|
|
- ret = drm_vblank_init(dev, 1);
|
|
- if (ret)
|
|
- goto err_fb;
|
|
-
|
|
drm_kms_helper_poll_init(dev);
|
|
|
|
return 0;
|
|
-err_fb:
|
|
- udl_fbdev_cleanup(dev);
|
|
+
|
|
err:
|
|
if (udl->urbs.count)
|
|
udl_free_urb_list(dev);
|
|
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
|
|
index 54d96518a1316..a08766d39eab5 100644
|
|
--- a/drivers/gpu/drm/v3d/v3d_bo.c
|
|
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
|
|
@@ -293,6 +293,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
|
|
bo->resv = attach->dmabuf->resv;
|
|
|
|
bo->sgt = sgt;
|
|
+ obj->import_attach = attach;
|
|
v3d_bo_get_pages(bo);
|
|
|
|
v3d_mmu_insert_ptes(bo);
|
|
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
|
|
index 4db62c5457482..26470c77eb6e5 100644
|
|
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
|
|
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
|
|
@@ -71,10 +71,13 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
|
|
V3D_READ(v3d_hub_reg_defs[i].reg));
|
|
}
|
|
|
|
- for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
|
|
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
|
|
- v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
|
|
- V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
|
|
+ if (v3d->ver < 41) {
|
|
+ for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
|
|
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
|
|
+ v3d_gca_reg_defs[i].name,
|
|
+ v3d_gca_reg_defs[i].reg,
|
|
+ V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
|
|
+ }
|
|
}
|
|
|
|
for (core = 0; core < v3d->cores; core++) {
|
|
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
|
|
index e6fed696ad869..cbe5be0c47ebf 100644
|
|
--- a/drivers/gpu/drm/v3d/v3d_drv.h
|
|
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
|
|
@@ -198,6 +198,11 @@ struct v3d_exec_info {
|
|
*/
|
|
struct dma_fence *bin_done_fence;
|
|
|
|
+ /* Fence for when the scheduler considers the render to be
|
|
+ * done, for when the BOs reservations should be complete.
|
|
+ */
|
|
+ struct dma_fence *render_done_fence;
|
|
+
|
|
struct kref refcount;
|
|
|
|
/* This is the array of BOs that were looked up at the start of exec. */
|
|
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
|
|
index 70c54774400b3..2814c72cb0902 100644
|
|
--- a/drivers/gpu/drm/v3d/v3d_gem.c
|
|
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
|
|
@@ -209,7 +209,7 @@ v3d_flush_caches(struct v3d_dev *v3d)
|
|
static void
|
|
v3d_attach_object_fences(struct v3d_exec_info *exec)
|
|
{
|
|
- struct dma_fence *out_fence = &exec->render.base.s_fence->finished;
|
|
+ struct dma_fence *out_fence = exec->render_done_fence;
|
|
struct v3d_bo *bo;
|
|
int i;
|
|
|
|
@@ -409,6 +409,7 @@ v3d_exec_cleanup(struct kref *ref)
|
|
dma_fence_put(exec->render.done_fence);
|
|
|
|
dma_fence_put(exec->bin_done_fence);
|
|
+ dma_fence_put(exec->render_done_fence);
|
|
|
|
for (i = 0; i < exec->bo_count; i++)
|
|
drm_gem_object_put_unlocked(&exec->bo[i]->base);
|
|
@@ -572,6 +573,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
|
|
if (ret)
|
|
goto fail_unreserve;
|
|
|
|
+ exec->render_done_fence =
|
|
+ dma_fence_get(&exec->render.base.s_fence->finished);
|
|
+
|
|
kref_get(&exec->refcount); /* put by scheduler job completion */
|
|
drm_sched_entity_push_job(&exec->render.base,
|
|
&v3d_priv->sched_entity[V3D_RENDER]);
|
|
@@ -585,7 +589,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
|
|
sync_out = drm_syncobj_find(file_priv, args->out_sync);
|
|
if (sync_out) {
|
|
drm_syncobj_replace_fence(sync_out, 0,
|
|
- &exec->render.base.s_fence->finished);
|
|
+ exec->render_done_fence);
|
|
drm_syncobj_put(sync_out);
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
|
|
index c6635f23918a8..ed8b0fc170ee3 100644
|
|
--- a/drivers/gpu/drm/vc4/vc4_plane.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
|
|
@@ -314,13 +314,16 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
|
|
vc4_get_scaling_mode(vc4_state->src_h[1],
|
|
vc4_state->crtc_h);
|
|
|
|
- /* YUV conversion requires that horizontal scaling be enabled,
|
|
- * even on a plane that's otherwise 1:1. Looks like only PPF
|
|
- * works in that case, so let's pick that one.
|
|
+ /* YUV conversion requires that horizontal scaling be enabled
|
|
+ * on the UV plane even if vc4_get_scaling_mode() returned
|
|
+ * VC4_SCALING_NONE (which can happen when the down-scaling
|
|
+ * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
|
|
+ * case.
|
|
*/
|
|
- if (vc4_state->is_unity)
|
|
- vc4_state->x_scaling[0] = VC4_SCALING_PPF;
|
|
+ if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
|
|
+ vc4_state->x_scaling[1] = VC4_SCALING_PPF;
|
|
} else {
|
|
+ vc4_state->is_yuv = false;
|
|
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
|
|
vc4_state->y_scaling[1] = VC4_SCALING_NONE;
|
|
}
|
|
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
|
|
index ec6af8b920daa..f1f7ab9dcdbfc 100644
|
|
--- a/drivers/gpu/drm/vgem/vgem_drv.c
|
|
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
|
|
@@ -471,31 +471,31 @@ static int __init vgem_init(void)
|
|
if (!vgem_device)
|
|
return -ENOMEM;
|
|
|
|
- ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
|
|
- if (ret)
|
|
- goto out_free;
|
|
-
|
|
vgem_device->platform =
|
|
platform_device_register_simple("vgem", -1, NULL, 0);
|
|
if (IS_ERR(vgem_device->platform)) {
|
|
ret = PTR_ERR(vgem_device->platform);
|
|
- goto out_fini;
|
|
+ goto out_free;
|
|
}
|
|
|
|
dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
|
|
DMA_BIT_MASK(64));
|
|
+ ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
|
|
+ &vgem_device->platform->dev);
|
|
+ if (ret)
|
|
+ goto out_unregister;
|
|
|
|
/* Final step: expose the device/driver to userspace */
|
|
ret = drm_dev_register(&vgem_device->drm, 0);
|
|
if (ret)
|
|
- goto out_unregister;
|
|
+ goto out_fini;
|
|
|
|
return 0;
|
|
|
|
-out_unregister:
|
|
- platform_device_unregister(vgem_device->platform);
|
|
out_fini:
|
|
drm_dev_fini(&vgem_device->drm);
|
|
+out_unregister:
|
|
+ platform_device_unregister(vgem_device->platform);
|
|
out_free:
|
|
kfree(vgem_device);
|
|
return ret;
|
|
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
|
|
index 9d9e8146db90c..d7b409a3c0f8c 100644
|
|
--- a/drivers/gpu/drm/vkms/vkms_crc.c
|
|
+++ b/drivers/gpu/drm/vkms/vkms_crc.c
|
|
@@ -1,4 +1,5 @@
|
|
-// SPDX-License-Identifier: GPL-2.0
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
#include "vkms_drv.h"
|
|
#include <linux/crc32.h>
|
|
#include <drm/drm_atomic.h>
|
|
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
|
|
index 177bbcb383063..eb56ee893761f 100644
|
|
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
|
|
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
|
|
@@ -1,10 +1,4 @@
|
|
-// SPDX-License-Identifier: GPL-2.0
|
|
-/*
|
|
- * This program is free software; you can redistribute it and/or modify
|
|
- * it under the terms of the GNU General Public License as published by
|
|
- * the Free Software Foundation; either version 2 of the License, or
|
|
- * (at your option) any later version.
|
|
- */
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
|
|
#include "vkms_drv.h"
|
|
#include <drm/drm_atomic_helper.h>
|
|
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
|
|
index 07cfde1b4132b..8048b2486b0e2 100644
|
|
--- a/drivers/gpu/drm/vkms/vkms_drv.c
|
|
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
|
|
@@ -1,9 +1,4 @@
|
|
-/*
|
|
- * This program is free software; you can redistribute it and/or modify
|
|
- * it under the terms of the GNU General Public License as published by
|
|
- * the Free Software Foundation; either version 2 of the License, or
|
|
- * (at your option) any later version.
|
|
- */
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
|
|
/**
|
|
* DOC: vkms (Virtual Kernel Modesetting)
|
|
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
|
|
index 1c93990693e3d..5adbc6fca41b5 100644
|
|
--- a/drivers/gpu/drm/vkms/vkms_drv.h
|
|
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
|
|
@@ -1,3 +1,5 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+
|
|
#ifndef _VKMS_DRV_H_
|
|
#define _VKMS_DRV_H_
|
|
|
|
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
|
|
index d04e988b4cbef..8310b96d4a9ce 100644
|
|
--- a/drivers/gpu/drm/vkms/vkms_gem.c
|
|
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
|
|
@@ -1,10 +1,4 @@
|
|
-// SPDX-License-Identifier: GPL-2.0
|
|
-/*
|
|
- * This program is free software; you can redistribute it and/or modify
|
|
- * it under the terms of the GNU General Public License as published by
|
|
- * the Free Software Foundation; either version 2 of the License, or
|
|
- * (at your option) any later version.
|
|
- */
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
|
|
#include <linux/shmem_fs.h>
|
|
|
|
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
|
|
index 271a0eb9042c3..4173e4f483341 100644
|
|
--- a/drivers/gpu/drm/vkms/vkms_output.c
|
|
+++ b/drivers/gpu/drm/vkms/vkms_output.c
|
|
@@ -1,10 +1,4 @@
|
|
-// SPDX-License-Identifier: GPL-2.0
|
|
-/*
|
|
- * This program is free software; you can redistribute it and/or modify
|
|
- * it under the terms of the GNU General Public License as published by
|
|
- * the Free Software Foundation; either version 2 of the License, or
|
|
- * (at your option) any later version.
|
|
- */
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
|
|
#include "vkms_drv.h"
|
|
#include <drm/drm_crtc_helper.h>
|
|
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
|
|
index 7041007396ae8..8ffc1dad64855 100644
|
|
--- a/drivers/gpu/drm/vkms/vkms_plane.c
|
|
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
|
|
@@ -1,10 +1,4 @@
|
|
-// SPDX-License-Identifier: GPL-2.0
|
|
-/*
|
|
- * This program is free software; you can redistribute it and/or modify
|
|
- * it under the terms of the GNU General Public License as published by
|
|
- * the Free Software Foundation; either version 2 of the License, or
|
|
- * (at your option) any later version.
|
|
- */
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
|
|
#include "vkms_drv.h"
|
|
#include <drm/drm_plane_helper.h>
|
|
@@ -23,8 +17,11 @@ vkms_plane_duplicate_state(struct drm_plane *plane)
|
|
return NULL;
|
|
|
|
crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
|
|
- if (WARN_ON(!crc_data))
|
|
- DRM_INFO("Couldn't allocate crc_data");
|
|
+ if (!crc_data) {
|
|
+ DRM_DEBUG_KMS("Couldn't allocate crc_data\n");
|
|
+ kfree(vkms_state);
|
|
+ return NULL;
|
|
+ }
|
|
|
|
vkms_state->crc_data = crc_data;
|
|
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
|
|
index d7a2dfb8ee9b1..ddf80935c4b9d 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
|
|
@@ -629,13 +629,16 @@ out_fixup:
|
|
static int vmw_dma_masks(struct vmw_private *dev_priv)
|
|
{
|
|
struct drm_device *dev = dev_priv->dev;
|
|
+ int ret = 0;
|
|
|
|
- if (intel_iommu_enabled &&
|
|
+ ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
|
|
+ if (dev_priv->map_mode != vmw_dma_phys &&
|
|
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
|
|
DRM_INFO("Restricting DMA addresses to 44 bits.\n");
|
|
- return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
|
|
+ return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
|
|
}
|
|
- return 0;
|
|
+
|
|
+ return ret;
|
|
}
|
|
#else
|
|
static int vmw_dma_masks(struct vmw_private *dev_priv)
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
|
|
index f2d13a72c05d3..88b8178d46871 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
|
|
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
|
|
*p_fence = NULL;
|
|
}
|
|
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
|
|
index dca04d4246ea8..d59125c55dc29 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
|
|
@@ -2592,8 +2592,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
|
|
user_fence_rep)
|
|
{
|
|
struct vmw_fence_obj *fence = NULL;
|
|
- uint32_t handle;
|
|
- int ret;
|
|
+ uint32_t handle = 0;
|
|
+ int ret = 0;
|
|
|
|
if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
|
|
out_fence)
|
|
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
|
|
index f4081962784cc..91653adc41cc4 100644
|
|
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
|
|
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
|
|
@@ -1524,7 +1524,7 @@ unlock:
|
|
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
|
|
|
|
/* Abort any active or pending conversions for this context */
|
|
-void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
|
|
+static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
|
|
{
|
|
struct ipu_image_convert_chan *chan = ctx->chan;
|
|
struct ipu_image_convert_priv *priv = chan->priv;
|
|
@@ -1551,7 +1551,7 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
|
|
|
|
need_abort = (run_count || active_run);
|
|
|
|
- ctx->aborting = need_abort;
|
|
+ ctx->aborting = true;
|
|
|
|
spin_unlock_irqrestore(&chan->irqlock, flags);
|
|
|
|
@@ -1572,7 +1572,11 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
|
|
dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
|
|
force_abort(ctx);
|
|
}
|
|
+}
|
|
|
|
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
|
|
+{
|
|
+ __ipu_image_convert_abort(ctx);
|
|
ctx->aborting = false;
|
|
}
|
|
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
|
|
@@ -1586,7 +1590,7 @@ void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
|
|
bool put_res;
|
|
|
|
/* make sure no runs are hanging around */
|
|
- ipu_image_convert_abort(ctx);
|
|
+ __ipu_image_convert_abort(ctx);
|
|
|
|
dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
|
|
chan->ic_task, ctx);
|
|
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
|
|
index b48100236df89..ebc9ffde41e9d 100644
|
|
--- a/drivers/hid/hid-debug.c
|
|
+++ b/drivers/hid/hid-debug.c
|
|
@@ -30,6 +30,7 @@
|
|
|
|
#include <linux/debugfs.h>
|
|
#include <linux/seq_file.h>
|
|
+#include <linux/kfifo.h>
|
|
#include <linux/sched/signal.h>
|
|
#include <linux/export.h>
|
|
#include <linux/slab.h>
|
|
@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
|
|
/* enqueue string to 'events' ring buffer */
|
|
void hid_debug_event(struct hid_device *hdev, char *buf)
|
|
{
|
|
- unsigned i;
|
|
struct hid_debug_list *list;
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&hdev->debug_list_lock, flags);
|
|
- list_for_each_entry(list, &hdev->debug_list, node) {
|
|
- for (i = 0; buf[i]; i++)
|
|
- list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
|
|
- buf[i];
|
|
- list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
|
|
- }
|
|
+ list_for_each_entry(list, &hdev->debug_list, node)
|
|
+ kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
|
|
spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
|
|
|
|
wake_up_interruptible(&hdev->debug_wait);
|
|
@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
|
|
hid_debug_event(hdev, buf);
|
|
|
|
kfree(buf);
|
|
- wake_up_interruptible(&hdev->debug_wait);
|
|
-
|
|
+ wake_up_interruptible(&hdev->debug_wait);
|
|
}
|
|
EXPORT_SYMBOL_GPL(hid_dump_input);
|
|
|
|
@@ -1088,8 +1083,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
|
|
goto out;
|
|
}
|
|
|
|
- if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
|
|
- err = -ENOMEM;
|
|
+ err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
|
|
+ if (err) {
|
|
kfree(list);
|
|
goto out;
|
|
}
|
|
@@ -1109,77 +1104,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
|
|
size_t count, loff_t *ppos)
|
|
{
|
|
struct hid_debug_list *list = file->private_data;
|
|
- int ret = 0, len;
|
|
+ int ret = 0, copied;
|
|
DECLARE_WAITQUEUE(wait, current);
|
|
|
|
mutex_lock(&list->read_mutex);
|
|
- while (ret == 0) {
|
|
- if (list->head == list->tail) {
|
|
- add_wait_queue(&list->hdev->debug_wait, &wait);
|
|
- set_current_state(TASK_INTERRUPTIBLE);
|
|
-
|
|
- while (list->head == list->tail) {
|
|
- if (file->f_flags & O_NONBLOCK) {
|
|
- ret = -EAGAIN;
|
|
- break;
|
|
- }
|
|
- if (signal_pending(current)) {
|
|
- ret = -ERESTARTSYS;
|
|
- break;
|
|
- }
|
|
+ if (kfifo_is_empty(&list->hid_debug_fifo)) {
|
|
+ add_wait_queue(&list->hdev->debug_wait, &wait);
|
|
+ set_current_state(TASK_INTERRUPTIBLE);
|
|
+
|
|
+ while (kfifo_is_empty(&list->hid_debug_fifo)) {
|
|
+ if (file->f_flags & O_NONBLOCK) {
|
|
+ ret = -EAGAIN;
|
|
+ break;
|
|
+ }
|
|
|
|
- if (!list->hdev || !list->hdev->debug) {
|
|
- ret = -EIO;
|
|
- set_current_state(TASK_RUNNING);
|
|
- goto out;
|
|
- }
|
|
+ if (signal_pending(current)) {
|
|
+ ret = -ERESTARTSYS;
|
|
+ break;
|
|
+ }
|
|
|
|
- /* allow O_NONBLOCK from other threads */
|
|
- mutex_unlock(&list->read_mutex);
|
|
- schedule();
|
|
- mutex_lock(&list->read_mutex);
|
|
- set_current_state(TASK_INTERRUPTIBLE);
|
|
+ /* if list->hdev is NULL we cannot remove_wait_queue().
|
|
+ * if list->hdev->debug is 0 then hid_debug_unregister()
|
|
+ * was already called and list->hdev is being destroyed.
|
|
+ * if we add remove_wait_queue() here we can hit a race.
|
|
+ */
|
|
+ if (!list->hdev || !list->hdev->debug) {
|
|
+ ret = -EIO;
|
|
+ set_current_state(TASK_RUNNING);
|
|
+ goto out;
|
|
}
|
|
|
|
- set_current_state(TASK_RUNNING);
|
|
- remove_wait_queue(&list->hdev->debug_wait, &wait);
|
|
+ /* allow O_NONBLOCK from other threads */
|
|
+ mutex_unlock(&list->read_mutex);
|
|
+ schedule();
|
|
+ mutex_lock(&list->read_mutex);
|
|
+ set_current_state(TASK_INTERRUPTIBLE);
|
|
}
|
|
|
|
- if (ret)
|
|
- goto out;
|
|
+ __set_current_state(TASK_RUNNING);
|
|
+ remove_wait_queue(&list->hdev->debug_wait, &wait);
|
|
|
|
- /* pass the ringbuffer contents to userspace */
|
|
-copy_rest:
|
|
- if (list->tail == list->head)
|
|
+ if (ret)
|
|
goto out;
|
|
- if (list->tail > list->head) {
|
|
- len = list->tail - list->head;
|
|
- if (len > count)
|
|
- len = count;
|
|
-
|
|
- if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
|
|
- ret = -EFAULT;
|
|
- goto out;
|
|
- }
|
|
- ret += len;
|
|
- list->head += len;
|
|
- } else {
|
|
- len = HID_DEBUG_BUFSIZE - list->head;
|
|
- if (len > count)
|
|
- len = count;
|
|
-
|
|
- if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
|
|
- ret = -EFAULT;
|
|
- goto out;
|
|
- }
|
|
- list->head = 0;
|
|
- ret += len;
|
|
- count -= len;
|
|
- if (count > 0)
|
|
- goto copy_rest;
|
|
- }
|
|
-
|
|
}
|
|
+
|
|
+ /* pass the fifo content to userspace, locking is not needed with only
|
|
+ * one concurrent reader and one concurrent writer
|
|
+ */
|
|
+ ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+ ret = copied;
|
|
out:
|
|
mutex_unlock(&list->read_mutex);
|
|
return ret;
|
|
@@ -1190,7 +1165,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
|
|
struct hid_debug_list *list = file->private_data;
|
|
|
|
poll_wait(file, &list->hdev->debug_wait, wait);
|
|
- if (list->head != list->tail)
|
|
+ if (!kfifo_is_empty(&list->hid_debug_fifo))
|
|
return EPOLLIN | EPOLLRDNORM;
|
|
if (!list->hdev->debug)
|
|
return EPOLLERR | EPOLLHUP;
|
|
@@ -1205,7 +1180,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
|
|
spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
|
|
list_del(&list->node);
|
|
spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
|
|
- kfree(list->hid_debug_buf);
|
|
+ kfifo_free(&list->hid_debug_fifo);
|
|
kfree(list);
|
|
|
|
return 0;
|
|
@@ -1256,4 +1231,3 @@ void hid_debug_exit(void)
|
|
{
|
|
debugfs_remove_recursive(hid_debug_root);
|
|
}
|
|
-
|
|
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
|
|
index 643b6eb54442e..eacc76d2ab960 100644
|
|
--- a/drivers/hid/hid-lenovo.c
|
|
+++ b/drivers/hid/hid-lenovo.c
|
|
@@ -743,7 +743,9 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
|
|
data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd;
|
|
data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd;
|
|
data_pointer->led_mute.dev = dev;
|
|
- led_classdev_register(dev, &data_pointer->led_mute);
|
|
+ ret = led_classdev_register(dev, &data_pointer->led_mute);
|
|
+ if (ret < 0)
|
|
+ goto err;
|
|
|
|
data_pointer->led_micmute.name = name_micmute;
|
|
data_pointer->led_micmute.brightness_get =
|
|
@@ -751,7 +753,11 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
|
|
data_pointer->led_micmute.brightness_set =
|
|
lenovo_led_brightness_set_tpkbd;
|
|
data_pointer->led_micmute.dev = dev;
|
|
- led_classdev_register(dev, &data_pointer->led_micmute);
|
|
+ ret = led_classdev_register(dev, &data_pointer->led_micmute);
|
|
+ if (ret < 0) {
|
|
+ led_classdev_unregister(&data_pointer->led_mute);
|
|
+ goto err;
|
|
+ }
|
|
|
|
lenovo_features_set_tpkbd(hdev);
|
|
|
|
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
|
|
index fe00b12e44178..bea4c9850247b 100644
|
|
--- a/drivers/hv/channel.c
|
|
+++ b/drivers/hv/channel.c
|
|
@@ -701,20 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
|
|
int vmbus_disconnect_ring(struct vmbus_channel *channel)
|
|
{
|
|
struct vmbus_channel *cur_channel, *tmp;
|
|
- unsigned long flags;
|
|
- LIST_HEAD(list);
|
|
int ret;
|
|
|
|
if (channel->primary_channel != NULL)
|
|
return -EINVAL;
|
|
|
|
- /* Snapshot the list of subchannels */
|
|
- spin_lock_irqsave(&channel->lock, flags);
|
|
- list_splice_init(&channel->sc_list, &list);
|
|
- channel->num_sc = 0;
|
|
- spin_unlock_irqrestore(&channel->lock, flags);
|
|
-
|
|
- list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
|
|
+ list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
|
|
if (cur_channel->rescind)
|
|
wait_for_completion(&cur_channel->rescind_event);
|
|
|
|
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
|
|
index edd34c167a9bd..d01689079e9b9 100644
|
|
--- a/drivers/hv/channel_mgmt.c
|
|
+++ b/drivers/hv/channel_mgmt.c
|
|
@@ -405,7 +405,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
|
|
primary_channel = channel->primary_channel;
|
|
spin_lock_irqsave(&primary_channel->lock, flags);
|
|
list_del(&channel->sc_list);
|
|
- primary_channel->num_sc--;
|
|
spin_unlock_irqrestore(&primary_channel->lock, flags);
|
|
}
|
|
|
|
@@ -1302,49 +1301,6 @@ cleanup:
|
|
return ret;
|
|
}
|
|
|
|
-/*
|
|
- * Retrieve the (sub) channel on which to send an outgoing request.
|
|
- * When a primary channel has multiple sub-channels, we try to
|
|
- * distribute the load equally amongst all available channels.
|
|
- */
|
|
-struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
|
|
-{
|
|
- struct list_head *cur, *tmp;
|
|
- int cur_cpu;
|
|
- struct vmbus_channel *cur_channel;
|
|
- struct vmbus_channel *outgoing_channel = primary;
|
|
- int next_channel;
|
|
- int i = 1;
|
|
-
|
|
- if (list_empty(&primary->sc_list))
|
|
- return outgoing_channel;
|
|
-
|
|
- next_channel = primary->next_oc++;
|
|
-
|
|
- if (next_channel > (primary->num_sc)) {
|
|
- primary->next_oc = 0;
|
|
- return outgoing_channel;
|
|
- }
|
|
-
|
|
- cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());
|
|
- list_for_each_safe(cur, tmp, &primary->sc_list) {
|
|
- cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
|
|
- if (cur_channel->state != CHANNEL_OPENED_STATE)
|
|
- continue;
|
|
-
|
|
- if (cur_channel->target_vp == cur_cpu)
|
|
- return cur_channel;
|
|
-
|
|
- if (i == next_channel)
|
|
- return cur_channel;
|
|
-
|
|
- i++;
|
|
- }
|
|
-
|
|
- return outgoing_channel;
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
|
|
-
|
|
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
|
|
{
|
|
struct list_head *cur, *tmp;
|
|
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
|
|
index 41631512ae97e..7b9fbd84d6df5 100644
|
|
--- a/drivers/hv/hv_balloon.c
|
|
+++ b/drivers/hv/hv_balloon.c
|
|
@@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
|
|
pfn_cnt -= pgs_ol;
|
|
/*
|
|
* Check if the corresponding memory block is already
|
|
- * online by checking its last previously backed page.
|
|
- * In case it is we need to bring rest (which was not
|
|
- * backed previously) online too.
|
|
+ * online. It is possible to observe struct pages still
|
|
+ * being uninitialized here so check section instead.
|
|
+ * In case the section is online we need to bring the
|
|
+ * rest of pfns (which were not backed previously)
|
|
+ * online too.
|
|
*/
|
|
if (start_pfn > has->start_pfn &&
|
|
- !PageReserved(pfn_to_page(start_pfn - 1)))
|
|
+ online_section_nr(pfn_to_section_nr(start_pfn)))
|
|
hv_bring_pgs_online(has, start_pfn, pgs_ol);
|
|
|
|
}
|
|
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
|
|
index 64d0c85d51611..1f1a55e077338 100644
|
|
--- a/drivers/hv/ring_buffer.c
|
|
+++ b/drivers/hv/ring_buffer.c
|
|
@@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
|
|
}
|
|
|
|
/* Get various debug metrics for the specified ring buffer. */
|
|
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
|
- struct hv_ring_buffer_debug_info *debug_info)
|
|
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
|
+ struct hv_ring_buffer_debug_info *debug_info)
|
|
{
|
|
u32 bytes_avail_towrite;
|
|
u32 bytes_avail_toread;
|
|
|
|
- if (ring_info->ring_buffer) {
|
|
- hv_get_ringbuffer_availbytes(ring_info,
|
|
- &bytes_avail_toread,
|
|
- &bytes_avail_towrite);
|
|
-
|
|
- debug_info->bytes_avail_toread = bytes_avail_toread;
|
|
- debug_info->bytes_avail_towrite = bytes_avail_towrite;
|
|
- debug_info->current_read_index =
|
|
- ring_info->ring_buffer->read_index;
|
|
- debug_info->current_write_index =
|
|
- ring_info->ring_buffer->write_index;
|
|
- debug_info->current_interrupt_mask =
|
|
- ring_info->ring_buffer->interrupt_mask;
|
|
- }
|
|
+ if (!ring_info->ring_buffer)
|
|
+ return -EINVAL;
|
|
+
|
|
+ hv_get_ringbuffer_availbytes(ring_info,
|
|
+ &bytes_avail_toread,
|
|
+ &bytes_avail_towrite);
|
|
+ debug_info->bytes_avail_toread = bytes_avail_toread;
|
|
+ debug_info->bytes_avail_towrite = bytes_avail_towrite;
|
|
+ debug_info->current_read_index = ring_info->ring_buffer->read_index;
|
|
+ debug_info->current_write_index = ring_info->ring_buffer->write_index;
|
|
+ debug_info->current_interrupt_mask
|
|
+ = ring_info->ring_buffer->interrupt_mask;
|
|
+ return 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
|
|
|
|
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
|
|
index d0ff65675292b..403fee01572c5 100644
|
|
--- a/drivers/hv/vmbus_drv.c
|
|
+++ b/drivers/hv/vmbus_drv.c
|
|
@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
|
|
{
|
|
struct hv_device *hv_dev = device_to_hv_device(dev);
|
|
struct hv_ring_buffer_debug_info outbound;
|
|
+ int ret;
|
|
|
|
if (!hv_dev->channel)
|
|
return -ENODEV;
|
|
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
|
- return -EINVAL;
|
|
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
|
|
+
|
|
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
|
|
+ &outbound);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
|
|
}
|
|
static DEVICE_ATTR_RO(out_intr_mask);
|
|
@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
|
|
{
|
|
struct hv_device *hv_dev = device_to_hv_device(dev);
|
|
struct hv_ring_buffer_debug_info outbound;
|
|
+ int ret;
|
|
|
|
if (!hv_dev->channel)
|
|
return -ENODEV;
|
|
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
|
- return -EINVAL;
|
|
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
|
|
+
|
|
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
|
|
+ &outbound);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
return sprintf(buf, "%d\n", outbound.current_read_index);
|
|
}
|
|
static DEVICE_ATTR_RO(out_read_index);
|
|
@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
|
|
{
|
|
struct hv_device *hv_dev = device_to_hv_device(dev);
|
|
struct hv_ring_buffer_debug_info outbound;
|
|
+ int ret;
|
|
|
|
if (!hv_dev->channel)
|
|
return -ENODEV;
|
|
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
|
- return -EINVAL;
|
|
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
|
|
+
|
|
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
|
|
+ &outbound);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
return sprintf(buf, "%d\n", outbound.current_write_index);
|
|
}
|
|
static DEVICE_ATTR_RO(out_write_index);
|
|
@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
|
|
{
|
|
struct hv_device *hv_dev = device_to_hv_device(dev);
|
|
struct hv_ring_buffer_debug_info outbound;
|
|
+ int ret;
|
|
|
|
if (!hv_dev->channel)
|
|
return -ENODEV;
|
|
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
|
- return -EINVAL;
|
|
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
|
|
+
|
|
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
|
|
+ &outbound);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
|
|
}
|
|
static DEVICE_ATTR_RO(out_read_bytes_avail);
|
|
@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
|
|
{
|
|
struct hv_device *hv_dev = device_to_hv_device(dev);
|
|
struct hv_ring_buffer_debug_info outbound;
|
|
+ int ret;
|
|
|
|
if (!hv_dev->channel)
|
|
return -ENODEV;
|
|
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
|
- return -EINVAL;
|
|
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
|
|
+
|
|
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
|
|
+ &outbound);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
|
|
}
|
|
static DEVICE_ATTR_RO(out_write_bytes_avail);
|
|
@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
|
|
{
|
|
struct hv_device *hv_dev = device_to_hv_device(dev);
|
|
struct hv_ring_buffer_debug_info inbound;
|
|
+ int ret;
|
|
|
|
if (!hv_dev->channel)
|
|
return -ENODEV;
|
|
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
|
- return -EINVAL;
|
|
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
|
+
|
|
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
|
|
}
|
|
static DEVICE_ATTR_RO(in_intr_mask);
|
|
@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
|
|
{
|
|
struct hv_device *hv_dev = device_to_hv_device(dev);
|
|
struct hv_ring_buffer_debug_info inbound;
|
|
+ int ret;
|
|
|
|
if (!hv_dev->channel)
|
|
return -ENODEV;
|
|
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
|
- return -EINVAL;
|
|
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
|
+
|
|
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
return sprintf(buf, "%d\n", inbound.current_read_index);
|
|
}
|
|
static DEVICE_ATTR_RO(in_read_index);
|
|
@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
|
|
{
|
|
struct hv_device *hv_dev = device_to_hv_device(dev);
|
|
struct hv_ring_buffer_debug_info inbound;
|
|
+ int ret;
|
|
|
|
if (!hv_dev->channel)
|
|
return -ENODEV;
|
|
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
|
- return -EINVAL;
|
|
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
|
+
|
|
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
return sprintf(buf, "%d\n", inbound.current_write_index);
|
|
}
|
|
static DEVICE_ATTR_RO(in_write_index);
|
|
@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
|
|
{
|
|
struct hv_device *hv_dev = device_to_hv_device(dev);
|
|
struct hv_ring_buffer_debug_info inbound;
|
|
+ int ret;
|
|
|
|
if (!hv_dev->channel)
|
|
return -ENODEV;
|
|
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
|
- return -EINVAL;
|
|
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
|
+
|
|
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
|
|
}
|
|
static DEVICE_ATTR_RO(in_read_bytes_avail);
|
|
@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
|
|
{
|
|
struct hv_device *hv_dev = device_to_hv_device(dev);
|
|
struct hv_ring_buffer_debug_info inbound;
|
|
+ int ret;
|
|
|
|
if (!hv_dev->channel)
|
|
return -ENODEV;
|
|
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
|
- return -EINVAL;
|
|
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
|
+
|
|
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
|
|
}
|
|
static DEVICE_ATTR_RO(in_write_bytes_avail);
|
|
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
|
|
index 08e3945a6fbfd..f9b8e3e23a8e8 100644
|
|
--- a/drivers/hwmon/lm80.c
|
|
+++ b/drivers/hwmon/lm80.c
|
|
@@ -360,9 +360,11 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
|
|
struct i2c_client *client = data->client;
|
|
unsigned long min, val;
|
|
u8 reg;
|
|
- int err = kstrtoul(buf, 10, &val);
|
|
- if (err < 0)
|
|
- return err;
|
|
+ int rv;
|
|
+
|
|
+ rv = kstrtoul(buf, 10, &val);
|
|
+ if (rv < 0)
|
|
+ return rv;
|
|
|
|
/* Save fan_min */
|
|
mutex_lock(&data->update_lock);
|
|
@@ -390,8 +392,13 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- reg = (lm80_read_value(client, LM80_REG_FANDIV) &
|
|
- ~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1)));
|
|
+ rv = lm80_read_value(client, LM80_REG_FANDIV);
|
|
+ if (rv < 0) {
|
|
+ mutex_unlock(&data->update_lock);
|
|
+ return rv;
|
|
+ }
|
|
+ reg = (rv & ~(3 << (2 * (nr + 1))))
|
|
+ | (data->fan_div[nr] << (2 * (nr + 1)));
|
|
lm80_write_value(client, LM80_REG_FANDIV, reg);
|
|
|
|
/* Restore fan_min */
|
|
@@ -623,6 +630,7 @@ static int lm80_probe(struct i2c_client *client,
|
|
struct device *dev = &client->dev;
|
|
struct device *hwmon_dev;
|
|
struct lm80_data *data;
|
|
+ int rv;
|
|
|
|
data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
|
|
if (!data)
|
|
@@ -635,8 +643,14 @@ static int lm80_probe(struct i2c_client *client,
|
|
lm80_init_client(client);
|
|
|
|
/* A few vars need to be filled upon startup */
|
|
- data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
|
|
- data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
|
|
+ rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
|
|
+ if (rv < 0)
|
|
+ return rv;
|
|
+ data->fan[f_min][0] = rv;
|
|
+ rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
|
|
+ if (rv < 0)
|
|
+ return rv;
|
|
+ data->fan[f_min][1] = rv;
|
|
|
|
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
|
|
data, lm80_groups);
|
|
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
|
|
index c3040079b1cb6..4adec4ab7d066 100644
|
|
--- a/drivers/hwmon/nct6775.c
|
|
+++ b/drivers/hwmon/nct6775.c
|
|
@@ -44,8 +44,8 @@
|
|
* nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3
|
|
* nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3
|
|
* (0xd451)
|
|
- * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3
|
|
- * (0xd459)
|
|
+ * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3
|
|
+ * (0xd429)
|
|
*
|
|
* #temp lists the number of monitored temperature sources (first value) plus
|
|
* the number of directly connectable temperature sensors (second value).
|
|
@@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
|
|
#define SIO_NCT6795_ID 0xd350
|
|
#define SIO_NCT6796_ID 0xd420
|
|
#define SIO_NCT6797_ID 0xd450
|
|
-#define SIO_NCT6798_ID 0xd458
|
|
+#define SIO_NCT6798_ID 0xd428
|
|
#define SIO_ID_MASK 0xFFF8
|
|
|
|
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
|
|
@@ -4508,7 +4508,8 @@ static int __maybe_unused nct6775_resume(struct device *dev)
|
|
|
|
if (data->kind == nct6791 || data->kind == nct6792 ||
|
|
data->kind == nct6793 || data->kind == nct6795 ||
|
|
- data->kind == nct6796)
|
|
+ data->kind == nct6796 || data->kind == nct6797 ||
|
|
+ data->kind == nct6798)
|
|
nct6791_enable_io_mapping(sioreg);
|
|
|
|
superio_exit(sioreg);
|
|
@@ -4644,7 +4645,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
|
|
|
|
if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
|
|
sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
|
|
- sio_data->kind == nct6796)
|
|
+ sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
|
|
+ sio_data->kind == nct6798)
|
|
nct6791_enable_io_mapping(sioaddr);
|
|
|
|
superio_exit(sioaddr);
|
|
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
|
|
index 8844c9565d2a4..7053be59ad2e4 100644
|
|
--- a/drivers/hwmon/tmp421.c
|
|
+++ b/drivers/hwmon/tmp421.c
|
|
@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
|
|
.data = (void *)2
|
|
},
|
|
{
|
|
- .compatible = "ti,tmp422",
|
|
+ .compatible = "ti,tmp442",
|
|
.data = (void *)3
|
|
},
|
|
{ },
|
|
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
|
|
index 53fc83b72a492..5864ac55e275b 100644
|
|
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
|
|
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
|
|
@@ -86,7 +86,7 @@ static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
|
|
|
|
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
|
|
{
|
|
- coresight_disclaim_device(drvdata);
|
|
+ coresight_disclaim_device(drvdata->base);
|
|
__tmc_etb_disable_hw(drvdata);
|
|
}
|
|
|
|
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
|
|
index d293e55553bd6..ba7aaf421f36c 100644
|
|
--- a/drivers/hwtracing/intel_th/msu.c
|
|
+++ b/drivers/hwtracing/intel_th/msu.c
|
|
@@ -1423,7 +1423,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr,
|
|
if (!end)
|
|
break;
|
|
|
|
- len -= end - p;
|
|
+ /* consume the number and the following comma, hence +1 */
|
|
+ len -= end - p + 1;
|
|
p = end + 1;
|
|
} while (len);
|
|
|
|
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
|
|
index 0910ec8071874..4b9e44b227d80 100644
|
|
--- a/drivers/hwtracing/stm/policy.c
|
|
+++ b/drivers/hwtracing/stm/policy.c
|
|
@@ -440,10 +440,8 @@ stp_policy_make(struct config_group *group, const char *name)
|
|
|
|
stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL);
|
|
if (!stm->policy) {
|
|
- mutex_unlock(&stm->policy_mutex);
|
|
- stm_put_protocol(pdrv);
|
|
- stm_put_device(stm);
|
|
- return ERR_PTR(-ENOMEM);
|
|
+ ret = ERR_PTR(-ENOMEM);
|
|
+ goto unlock_policy;
|
|
}
|
|
|
|
config_group_init_type_name(&stm->policy->group, name,
|
|
@@ -458,7 +456,11 @@ unlock_policy:
|
|
mutex_unlock(&stm->policy_mutex);
|
|
|
|
if (IS_ERR(ret)) {
|
|
- stm_put_protocol(stm->pdrv);
|
|
+ /*
|
|
+ * pdrv and stm->pdrv at this point can be quite different,
|
|
+ * and only one of them needs to be 'put'
|
|
+ */
|
|
+ stm_put_protocol(pdrv);
|
|
stm_put_device(stm);
|
|
}
|
|
|
|
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
|
|
index 51d34959709ba..fb5bac079e83f 100644
|
|
--- a/drivers/i2c/busses/i2c-axxia.c
|
|
+++ b/drivers/i2c/busses/i2c-axxia.c
|
|
@@ -296,22 +296,7 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
|
|
i2c_int_disable(idev, MST_STATUS_TFL);
|
|
}
|
|
|
|
- if (status & MST_STATUS_SCC) {
|
|
- /* Stop completed */
|
|
- i2c_int_disable(idev, ~MST_STATUS_TSS);
|
|
- complete(&idev->msg_complete);
|
|
- } else if (status & MST_STATUS_SNS) {
|
|
- /* Transfer done */
|
|
- i2c_int_disable(idev, ~MST_STATUS_TSS);
|
|
- if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
|
|
- axxia_i2c_empty_rx_fifo(idev);
|
|
- complete(&idev->msg_complete);
|
|
- } else if (status & MST_STATUS_TSS) {
|
|
- /* Transfer timeout */
|
|
- idev->msg_err = -ETIMEDOUT;
|
|
- i2c_int_disable(idev, ~MST_STATUS_TSS);
|
|
- complete(&idev->msg_complete);
|
|
- } else if (unlikely(status & MST_STATUS_ERR)) {
|
|
+ if (unlikely(status & MST_STATUS_ERR)) {
|
|
/* Transfer error */
|
|
i2c_int_disable(idev, ~0);
|
|
if (status & MST_STATUS_AL)
|
|
@@ -328,6 +313,21 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
|
|
readl(idev->base + MST_TX_BYTES_XFRD),
|
|
readl(idev->base + MST_TX_XFER));
|
|
complete(&idev->msg_complete);
|
|
+ } else if (status & MST_STATUS_SCC) {
|
|
+ /* Stop completed */
|
|
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
|
|
+ complete(&idev->msg_complete);
|
|
+ } else if (status & MST_STATUS_SNS) {
|
|
+ /* Transfer done */
|
|
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
|
|
+ if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
|
|
+ axxia_i2c_empty_rx_fifo(idev);
|
|
+ complete(&idev->msg_complete);
|
|
+ } else if (status & MST_STATUS_TSS) {
|
|
+ /* Transfer timeout */
|
|
+ idev->msg_err = -ETIMEDOUT;
|
|
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
|
|
+ complete(&idev->msg_complete);
|
|
}
|
|
|
|
out:
|
|
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
|
|
index a7a7a9c3bc7c4..a64f2ff3cb49c 100644
|
|
--- a/drivers/i2c/busses/i2c-sh_mobile.c
|
|
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
|
|
@@ -800,6 +800,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
|
|
static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
|
|
{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
|
|
{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
|
|
+ { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
|
|
{ .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
|
|
{ .compatible = "renesas,iic-r8a7791", .data = &fast_clock_dt_config },
|
|
{ .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config },
|
|
@@ -808,6 +809,7 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
|
|
{ .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config },
|
|
{ .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config },
|
|
{ .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config },
|
|
+ { .compatible = "renesas,iic-r8a77990", .data = &fast_clock_dt_config },
|
|
{ .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config },
|
|
{ .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
|
|
{},
|
|
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
|
|
index 1aca742fde4ae..ccd76c71af098 100644
|
|
--- a/drivers/i2c/i2c-dev.c
|
|
+++ b/drivers/i2c/i2c-dev.c
|
|
@@ -470,9 +470,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
data_arg.data);
|
|
}
|
|
case I2C_RETRIES:
|
|
+ if (arg > INT_MAX)
|
|
+ return -EINVAL;
|
|
+
|
|
client->adapter->retries = arg;
|
|
break;
|
|
case I2C_TIMEOUT:
|
|
+ if (arg > INT_MAX)
|
|
+ return -EINVAL;
|
|
+
|
|
/* For historical reasons, user-space sets the timeout
|
|
* value in units of 10 ms.
|
|
*/
|
|
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
|
|
index 4c8c7a620d08d..a5dc13576394f 100644
|
|
--- a/drivers/ide/ide-proc.c
|
|
+++ b/drivers/ide/ide-proc.c
|
|
@@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
|
|
drive->proc = proc_mkdir(drive->name, parent);
|
|
if (drive->proc) {
|
|
ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
|
|
- proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
|
|
+ proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
|
|
drive->proc, &ide_settings_proc_fops,
|
|
drive);
|
|
}
|
|
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
|
|
index af53a1084ee53..471caa5323e41 100644
|
|
--- a/drivers/iio/accel/kxcjk-1013.c
|
|
+++ b/drivers/iio/accel/kxcjk-1013.c
|
|
@@ -1490,6 +1490,7 @@ static const struct acpi_device_id kx_acpi_match[] = {
|
|
{"KXCJ1008", KXCJ91008},
|
|
{"KXCJ9000", KXCJ91008},
|
|
{"KIOX000A", KXCJ91008},
|
|
+ {"KIOX010A", KXCJ91008}, /* KXCJ91008 inside the display of a 2-in-1 */
|
|
{"KXTJ1009", KXTJ21009},
|
|
{"SMO8500", KXCJ91008},
|
|
{ },
|
|
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
|
|
index 031d568b4972f..4e339cfd0c546 100644
|
|
--- a/drivers/iio/adc/axp288_adc.c
|
|
+++ b/drivers/iio/adc/axp288_adc.c
|
|
@@ -27,9 +27,18 @@
|
|
#include <linux/iio/machine.h>
|
|
#include <linux/iio/driver.h>
|
|
|
|
-#define AXP288_ADC_EN_MASK 0xF1
|
|
-#define AXP288_ADC_TS_PIN_GPADC 0xF2
|
|
-#define AXP288_ADC_TS_PIN_ON 0xF3
|
|
+/*
|
|
+ * This mask enables all ADCs except for the battery temp-sensor (TS), that is
|
|
+ * left as-is to avoid breaking charging on devices without a temp-sensor.
|
|
+ */
|
|
+#define AXP288_ADC_EN_MASK 0xF0
|
|
+#define AXP288_ADC_TS_ENABLE 0x01
|
|
+
|
|
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
|
|
+#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
|
|
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
|
|
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
|
|
+#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
|
|
|
|
enum axp288_adc_id {
|
|
AXP288_ADC_TS,
|
|
@@ -44,6 +53,7 @@ enum axp288_adc_id {
|
|
struct axp288_adc_info {
|
|
int irq;
|
|
struct regmap *regmap;
|
|
+ bool ts_enabled;
|
|
};
|
|
|
|
static const struct iio_chan_spec axp288_adc_channels[] = {
|
|
@@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
|
|
return IIO_VAL_INT;
|
|
}
|
|
|
|
-static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
|
|
- unsigned long address)
|
|
+/*
|
|
+ * The current-source used for the battery temp-sensor (TS) is shared
|
|
+ * with the GPADC. For proper fuel-gauge and charger operation the TS
|
|
+ * current-source needs to be permanently on. But to read the GPADC we
|
|
+ * need to temporary switch the TS current-source to ondemand, so that
|
|
+ * the GPADC can use it, otherwise we will always read an all 0 value.
|
|
+ */
|
|
+static int axp288_adc_set_ts(struct axp288_adc_info *info,
|
|
+ unsigned int mode, unsigned long address)
|
|
{
|
|
int ret;
|
|
|
|
- /* channels other than GPADC do not need to switch TS pin */
|
|
+ /* No need to switch the current-source if the TS pin is disabled */
|
|
+ if (!info->ts_enabled)
|
|
+ return 0;
|
|
+
|
|
+ /* Channels other than GPADC do not need the current source */
|
|
if (address != AXP288_GP_ADC_H)
|
|
return 0;
|
|
|
|
- ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
|
|
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
|
|
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
|
|
if (ret)
|
|
return ret;
|
|
|
|
/* When switching to the GPADC pin give things some time to settle */
|
|
- if (mode == AXP288_ADC_TS_PIN_GPADC)
|
|
+ if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
|
|
usleep_range(6000, 10000);
|
|
|
|
return 0;
|
|
@@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
|
|
mutex_lock(&indio_dev->mlock);
|
|
switch (mask) {
|
|
case IIO_CHAN_INFO_RAW:
|
|
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
|
|
+ if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
|
|
chan->address)) {
|
|
dev_err(&indio_dev->dev, "GPADC mode\n");
|
|
ret = -EINVAL;
|
|
break;
|
|
}
|
|
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
|
|
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
|
|
+ if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
|
|
chan->address))
|
|
dev_err(&indio_dev->dev, "TS pin restore\n");
|
|
break;
|
|
@@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
|
|
return ret;
|
|
}
|
|
|
|
-static int axp288_adc_set_state(struct regmap *regmap)
|
|
+static int axp288_adc_initialize(struct axp288_adc_info *info)
|
|
{
|
|
- /* ADC should be always enabled for internal FG to function */
|
|
- if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
|
|
- return -EIO;
|
|
+ int ret, adc_enable_val;
|
|
+
|
|
+ /*
|
|
+ * Determine if the TS pin is enabled and set the TS current-source
|
|
+ * accordingly.
|
|
+ */
|
|
+ ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
|
|
+ info->ts_enabled = true;
|
|
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
|
|
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
|
|
+ AXP288_ADC_TS_CURRENT_ON);
|
|
+ } else {
|
|
+ info->ts_enabled = false;
|
|
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
|
|
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
|
|
+ AXP288_ADC_TS_CURRENT_OFF);
|
|
+ }
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
- return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
|
|
+ /* Turn on the ADC for all channels except TS, leave TS as is */
|
|
+ return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
|
|
+ AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
|
|
}
|
|
|
|
static const struct iio_info axp288_adc_iio_info = {
|
|
@@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
|
|
* Set ADC to enabled state at all time, including system suspend.
|
|
* otherwise internal fuel gauge functionality may be affected.
|
|
*/
|
|
- ret = axp288_adc_set_state(axp20x->regmap);
|
|
+ ret = axp288_adc_initialize(info);
|
|
if (ret) {
|
|
dev_err(&pdev->dev, "unable to enable ADC device\n");
|
|
return ret;
|
|
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
|
|
index 028ccd218f82a..ba4b42429a2a2 100644
|
|
--- a/drivers/iio/adc/meson_saradc.c
|
|
+++ b/drivers/iio/adc/meson_saradc.c
|
|
@@ -587,8 +587,11 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
|
|
struct clk_init_data init;
|
|
const char *clk_parents[1];
|
|
|
|
- init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_div",
|
|
- indio_dev->dev.of_node);
|
|
+ init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_div",
|
|
+ dev_name(indio_dev->dev.parent));
|
|
+ if (!init.name)
|
|
+ return -ENOMEM;
|
|
+
|
|
init.flags = 0;
|
|
init.ops = &clk_divider_ops;
|
|
clk_parents[0] = __clk_get_name(priv->clkin);
|
|
@@ -606,8 +609,11 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
|
|
if (WARN_ON(IS_ERR(priv->adc_div_clk)))
|
|
return PTR_ERR(priv->adc_div_clk);
|
|
|
|
- init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_en",
|
|
- indio_dev->dev.of_node);
|
|
+ init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_en",
|
|
+ dev_name(indio_dev->dev.parent));
|
|
+ if (!init.name)
|
|
+ return -ENOMEM;
|
|
+
|
|
init.flags = CLK_SET_RATE_PARENT;
|
|
init.ops = &clk_gate_ops;
|
|
clk_parents[0] = __clk_get_name(priv->adc_div_clk);
|
|
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
|
|
index f9af6b082916b..6a866cc187f7d 100644
|
|
--- a/drivers/iio/adc/qcom-spmi-adc5.c
|
|
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
|
|
@@ -423,6 +423,7 @@ struct adc5_channels {
|
|
enum vadc_scale_fn_type scale_fn_type;
|
|
};
|
|
|
|
+/* In these definitions, _pre refers to an index into adc5_prescale_ratios. */
|
|
#define ADC5_CHAN(_dname, _type, _mask, _pre, _scale) \
|
|
{ \
|
|
.datasheet_name = _dname, \
|
|
@@ -443,63 +444,63 @@ struct adc5_channels {
|
|
_pre, _scale) \
|
|
|
|
static const struct adc5_channels adc5_chans_pmic[ADC5_MAX_CHANNEL] = {
|
|
- [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 1,
|
|
+ [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 0,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 1,
|
|
+ [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 0,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 3,
|
|
+ [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 1,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 3,
|
|
+ [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 1,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 1,
|
|
+ [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 0,
|
|
SCALE_HW_CALIB_PMIC_THERM)
|
|
- [ADC5_USB_IN_I] = ADC5_CHAN_VOLT("usb_in_i_uv", 1,
|
|
+ [ADC5_USB_IN_I] = ADC5_CHAN_VOLT("usb_in_i_uv", 0,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_USB_IN_V_16] = ADC5_CHAN_VOLT("usb_in_v_div_16", 16,
|
|
+ [ADC5_USB_IN_V_16] = ADC5_CHAN_VOLT("usb_in_v_div_16", 8,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_CHG_TEMP] = ADC5_CHAN_TEMP("chg_temp", 1,
|
|
+ [ADC5_CHG_TEMP] = ADC5_CHAN_TEMP("chg_temp", 0,
|
|
SCALE_HW_CALIB_PM5_CHG_TEMP)
|
|
/* Charger prescales SBUx and MID_CHG to fit within 1.8V upper unit */
|
|
- [ADC5_SBUx] = ADC5_CHAN_VOLT("chg_sbux", 3,
|
|
+ [ADC5_SBUx] = ADC5_CHAN_VOLT("chg_sbux", 1,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_MID_CHG_DIV6] = ADC5_CHAN_VOLT("chg_mid_chg", 6,
|
|
+ [ADC5_MID_CHG_DIV6] = ADC5_CHAN_VOLT("chg_mid_chg", 3,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm", 1,
|
|
+ [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm", 0,
|
|
SCALE_HW_CALIB_XOTHERM)
|
|
- [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 1,
|
|
+ [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 0,
|
|
SCALE_HW_CALIB_THERM_100K_PULLUP)
|
|
- [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 1,
|
|
+ [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 0,
|
|
SCALE_HW_CALIB_THERM_100K_PULLUP)
|
|
- [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 1,
|
|
+ [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 0,
|
|
SCALE_HW_CALIB_THERM_100K_PULLUP)
|
|
- [ADC5_AMUX_THM2] = ADC5_CHAN_TEMP("amux_thm2", 1,
|
|
+ [ADC5_AMUX_THM2] = ADC5_CHAN_TEMP("amux_thm2", 0,
|
|
SCALE_HW_CALIB_PM5_SMB_TEMP)
|
|
};
|
|
|
|
static const struct adc5_channels adc5_chans_rev2[ADC5_MAX_CHANNEL] = {
|
|
- [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 1,
|
|
+ [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 0,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 1,
|
|
+ [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 0,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 3,
|
|
+ [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 1,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 3,
|
|
+ [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 1,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_VCOIN] = ADC5_CHAN_VOLT("vcoin", 3,
|
|
+ [ADC5_VCOIN] = ADC5_CHAN_VOLT("vcoin", 1,
|
|
SCALE_HW_CALIB_DEFAULT)
|
|
- [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 1,
|
|
+ [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 0,
|
|
SCALE_HW_CALIB_PMIC_THERM)
|
|
- [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 1,
|
|
+ [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 0,
|
|
SCALE_HW_CALIB_THERM_100K_PULLUP)
|
|
- [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 1,
|
|
+ [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 0,
|
|
SCALE_HW_CALIB_THERM_100K_PULLUP)
|
|
- [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 1,
|
|
+ [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 0,
|
|
SCALE_HW_CALIB_THERM_100K_PULLUP)
|
|
- [ADC5_AMUX_THM4_100K_PU] = ADC5_CHAN_TEMP("amux_thm4_100k_pu", 1,
|
|
+ [ADC5_AMUX_THM4_100K_PU] = ADC5_CHAN_TEMP("amux_thm4_100k_pu", 0,
|
|
SCALE_HW_CALIB_THERM_100K_PULLUP)
|
|
- [ADC5_AMUX_THM5_100K_PU] = ADC5_CHAN_TEMP("amux_thm5_100k_pu", 1,
|
|
+ [ADC5_AMUX_THM5_100K_PU] = ADC5_CHAN_TEMP("amux_thm5_100k_pu", 0,
|
|
SCALE_HW_CALIB_THERM_100K_PULLUP)
|
|
- [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm_100k_pu", 1,
|
|
+ [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm_100k_pu", 0,
|
|
SCALE_HW_CALIB_THERM_100K_PULLUP)
|
|
};
|
|
|
|
@@ -558,6 +559,9 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
|
|
return ret;
|
|
}
|
|
prop->prescale = ret;
|
|
+ } else {
|
|
+ prop->prescale =
|
|
+ adc->data->adc_chans[prop->channel].prescale_index;
|
|
}
|
|
|
|
ret = of_property_read_u32(node, "qcom,hw-settle-time", &value);
|
|
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
|
|
index 184d686ebd995..8b4568edd5cb6 100644
|
|
--- a/drivers/iio/adc/ti-ads8688.c
|
|
+++ b/drivers/iio/adc/ti-ads8688.c
|
|
@@ -41,6 +41,7 @@
|
|
|
|
#define ADS8688_VREF_MV 4096
|
|
#define ADS8688_REALBITS 16
|
|
+#define ADS8688_MAX_CHANNELS 8
|
|
|
|
/*
|
|
* enum ads8688_range - ADS8688 reference voltage range
|
|
@@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
|
|
{
|
|
struct iio_poll_func *pf = p;
|
|
struct iio_dev *indio_dev = pf->indio_dev;
|
|
- u16 buffer[8];
|
|
+ u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
|
|
int i, j = 0;
|
|
|
|
for (i = 0; i < indio_dev->masklength; i++) {
|
|
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
|
|
index a406ad31b096f..3a20cb5d9bffc 100644
|
|
--- a/drivers/iio/chemical/atlas-ph-sensor.c
|
|
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
|
|
@@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
|
|
case IIO_CHAN_INFO_SCALE:
|
|
switch (chan->type) {
|
|
case IIO_TEMP:
|
|
- *val = 1; /* 0.01 */
|
|
- *val2 = 100;
|
|
- break;
|
|
+ *val = 10;
|
|
+ return IIO_VAL_INT;
|
|
case IIO_PH:
|
|
*val = 1; /* 0.001 */
|
|
*val2 = 1000;
|
|
@@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
|
|
int val, int val2, long mask)
|
|
{
|
|
struct atlas_data *data = iio_priv(indio_dev);
|
|
- __be32 reg = cpu_to_be32(val);
|
|
+ __be32 reg = cpu_to_be32(val / 10);
|
|
|
|
if (val2 != 0 || val < 0 || val > 20000)
|
|
return -EINVAL;
|
|
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
|
|
index 0e134b13967a5..eae740fceed90 100644
|
|
--- a/drivers/iio/dac/ad5686.c
|
|
+++ b/drivers/iio/dac/ad5686.c
|
|
@@ -124,7 +124,8 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
|
|
mutex_unlock(&indio_dev->mlock);
|
|
if (ret < 0)
|
|
return ret;
|
|
- *val = ret;
|
|
+ *val = (ret >> chan->scan_type.shift) &
|
|
+ GENMASK(chan->scan_type.realbits - 1, 0);
|
|
return IIO_VAL_INT;
|
|
case IIO_CHAN_INFO_SCALE:
|
|
*val = st->vref_mv;
|
|
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
|
|
index bb9007a0cca75..d97d39a7537c4 100644
|
|
--- a/drivers/infiniband/core/core_priv.h
|
|
+++ b/drivers/infiniband/core/core_priv.h
|
|
@@ -296,6 +296,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
|
|
#endif
|
|
|
|
struct ib_device *ib_device_get_by_index(u32 ifindex);
|
|
+void ib_device_put(struct ib_device *device);
|
|
/* RDMA device netlink */
|
|
void nldev_init(void);
|
|
void nldev_exit(void);
|
|
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
|
|
index 87eb4f2cdd7d4..0027b0d79b09e 100644
|
|
--- a/drivers/infiniband/core/device.c
|
|
+++ b/drivers/infiniband/core/device.c
|
|
@@ -145,7 +145,8 @@ static struct ib_device *__ib_device_get_by_index(u32 index)
|
|
}
|
|
|
|
/*
|
|
- * Caller is responsible to return refrerence count by calling put_device()
|
|
+ * Caller must perform ib_device_put() to return the device reference count
|
|
+ * when ib_device_get_by_index() returns valid device pointer.
|
|
*/
|
|
struct ib_device *ib_device_get_by_index(u32 index)
|
|
{
|
|
@@ -153,13 +154,21 @@ struct ib_device *ib_device_get_by_index(u32 index)
|
|
|
|
down_read(&lists_rwsem);
|
|
device = __ib_device_get_by_index(index);
|
|
- if (device)
|
|
- get_device(&device->dev);
|
|
-
|
|
+ if (device) {
|
|
+ /* Do not return a device if unregistration has started. */
|
|
+ if (!refcount_inc_not_zero(&device->refcount))
|
|
+ device = NULL;
|
|
+ }
|
|
up_read(&lists_rwsem);
|
|
return device;
|
|
}
|
|
|
|
+void ib_device_put(struct ib_device *device)
|
|
+{
|
|
+ if (refcount_dec_and_test(&device->refcount))
|
|
+ complete(&device->unreg_completion);
|
|
+}
|
|
+
|
|
static struct ib_device *__ib_device_get_by_name(const char *name)
|
|
{
|
|
struct ib_device *device;
|
|
@@ -293,6 +302,8 @@ struct ib_device *ib_alloc_device(size_t size)
|
|
rwlock_init(&device->client_data_lock);
|
|
INIT_LIST_HEAD(&device->client_data_list);
|
|
INIT_LIST_HEAD(&device->port_list);
|
|
+ refcount_set(&device->refcount, 1);
|
|
+ init_completion(&device->unreg_completion);
|
|
|
|
return device;
|
|
}
|
|
@@ -641,6 +652,13 @@ void ib_unregister_device(struct ib_device *device)
|
|
struct ib_client_data *context, *tmp;
|
|
unsigned long flags;
|
|
|
|
+ /*
|
|
+ * Wait for all netlink command callers to finish working on the
|
|
+ * device.
|
|
+ */
|
|
+ ib_device_put(device);
|
|
+ wait_for_completion(&device->unreg_completion);
|
|
+
|
|
mutex_lock(&device_mutex);
|
|
|
|
down_write(&lists_rwsem);
|
|
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
|
|
index ba668d49c751d..476abc74178e3 100644
|
|
--- a/drivers/infiniband/core/iwcm.c
|
|
+++ b/drivers/infiniband/core/iwcm.c
|
|
@@ -502,17 +502,21 @@ static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
|
|
*/
|
|
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
|
|
{
|
|
+ const char *devname = dev_name(&cm_id->device->dev);
|
|
+ const char *ifname = cm_id->device->iwcm->ifname;
|
|
struct iwpm_dev_data pm_reg_msg;
|
|
struct iwpm_sa_data pm_msg;
|
|
int status;
|
|
|
|
+ if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) ||
|
|
+ strlen(ifname) >= sizeof(pm_reg_msg.if_name))
|
|
+ return -EINVAL;
|
|
+
|
|
cm_id->m_local_addr = cm_id->local_addr;
|
|
cm_id->m_remote_addr = cm_id->remote_addr;
|
|
|
|
- memcpy(pm_reg_msg.dev_name, dev_name(&cm_id->device->dev),
|
|
- sizeof(pm_reg_msg.dev_name));
|
|
- memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
|
|
- sizeof(pm_reg_msg.if_name));
|
|
+ strncpy(pm_reg_msg.dev_name, devname, sizeof(pm_reg_msg.dev_name));
|
|
+ strncpy(pm_reg_msg.if_name, ifname, sizeof(pm_reg_msg.if_name));
|
|
|
|
if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
|
|
!iwpm_valid_pid())
|
|
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
|
|
index 573399e3ccc13..77a0f1e1576fc 100644
|
|
--- a/drivers/infiniband/core/nldev.c
|
|
+++ b/drivers/infiniband/core/nldev.c
|
|
@@ -580,10 +580,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
|
|
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
|
|
atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
|
|
goto err;
|
|
- if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
|
|
- nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
|
|
- pd->unsafe_global_rkey))
|
|
- goto err;
|
|
|
|
if (fill_res_name_pid(msg, res))
|
|
goto err;
|
|
@@ -636,13 +632,13 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
|
|
nlmsg_end(msg, nlh);
|
|
|
|
- put_device(&device->dev);
|
|
+ ib_device_put(device);
|
|
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
|
|
|
|
err_free:
|
|
nlmsg_free(msg);
|
|
err:
|
|
- put_device(&device->dev);
|
|
+ ib_device_put(device);
|
|
return err;
|
|
}
|
|
|
|
@@ -672,7 +668,7 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
err = ib_device_rename(device, name);
|
|
}
|
|
|
|
- put_device(&device->dev);
|
|
+ ib_device_put(device);
|
|
return err;
|
|
}
|
|
|
|
@@ -756,14 +752,14 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
goto err_free;
|
|
|
|
nlmsg_end(msg, nlh);
|
|
- put_device(&device->dev);
|
|
+ ib_device_put(device);
|
|
|
|
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
|
|
|
|
err_free:
|
|
nlmsg_free(msg);
|
|
err:
|
|
- put_device(&device->dev);
|
|
+ ib_device_put(device);
|
|
return err;
|
|
}
|
|
|
|
@@ -820,7 +816,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
|
|
}
|
|
|
|
out:
|
|
- put_device(&device->dev);
|
|
+ ib_device_put(device);
|
|
cb->args[0] = idx;
|
|
return skb->len;
|
|
}
|
|
@@ -859,13 +855,13 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
goto err_free;
|
|
|
|
nlmsg_end(msg, nlh);
|
|
- put_device(&device->dev);
|
|
+ ib_device_put(device);
|
|
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
|
|
|
|
err_free:
|
|
nlmsg_free(msg);
|
|
err:
|
|
- put_device(&device->dev);
|
|
+ ib_device_put(device);
|
|
return ret;
|
|
}
|
|
|
|
@@ -1058,7 +1054,7 @@ next: idx++;
|
|
if (!filled)
|
|
goto err;
|
|
|
|
- put_device(&device->dev);
|
|
+ ib_device_put(device);
|
|
return skb->len;
|
|
|
|
res_err:
|
|
@@ -1069,7 +1065,7 @@ err:
|
|
nlmsg_cancel(skb, nlh);
|
|
|
|
err_index:
|
|
- put_device(&device->dev);
|
|
+ ib_device_put(device);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
|
|
index 6d373f5515b7e..50a97471a2be6 100644
|
|
--- a/drivers/infiniband/core/uverbs_main.c
|
|
+++ b/drivers/infiniband/core/uverbs_main.c
|
|
@@ -262,6 +262,9 @@ void ib_uverbs_release_file(struct kref *ref)
|
|
if (atomic_dec_and_test(&file->device->refcount))
|
|
ib_uverbs_comp_dev(file->device);
|
|
|
|
+ if (file->async_file)
|
|
+ kref_put(&file->async_file->ref,
|
|
+ ib_uverbs_release_async_event_file);
|
|
put_device(&file->device->dev);
|
|
kfree(file);
|
|
}
|
|
@@ -997,11 +1000,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
|
|
|
|
/* Get an arbitrary mm pointer that hasn't been cleaned yet */
|
|
mutex_lock(&ufile->umap_lock);
|
|
- if (!list_empty(&ufile->umaps)) {
|
|
- mm = list_first_entry(&ufile->umaps,
|
|
- struct rdma_umap_priv, list)
|
|
- ->vma->vm_mm;
|
|
- mmget(mm);
|
|
+ while (!list_empty(&ufile->umaps)) {
|
|
+ int ret;
|
|
+
|
|
+ priv = list_first_entry(&ufile->umaps,
|
|
+ struct rdma_umap_priv, list);
|
|
+ mm = priv->vma->vm_mm;
|
|
+ ret = mmget_not_zero(mm);
|
|
+ if (!ret) {
|
|
+ list_del_init(&priv->list);
|
|
+ mm = NULL;
|
|
+ continue;
|
|
+ }
|
|
+ break;
|
|
}
|
|
mutex_unlock(&ufile->umap_lock);
|
|
if (!mm)
|
|
@@ -1132,10 +1143,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
|
|
list_del_init(&file->list);
|
|
mutex_unlock(&file->device->lists_mutex);
|
|
|
|
- if (file->async_file)
|
|
- kref_put(&file->async_file->ref,
|
|
- ib_uverbs_release_async_event_file);
|
|
-
|
|
kref_put(&file->ref, ib_uverbs_release_file);
|
|
|
|
return 0;
|
|
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
|
|
index c22ebc774a6a4..f9a7e9d29c8ba 100644
|
|
--- a/drivers/infiniband/hw/hfi1/file_ops.c
|
|
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
|
|
@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
|
|
vmf = 1;
|
|
break;
|
|
case STATUS:
|
|
- if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
|
|
+ if (flags & VM_WRITE) {
|
|
ret = -EPERM;
|
|
goto done;
|
|
}
|
|
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
|
|
index 188aa4f686a07..ea3aac264df9e 100644
|
|
--- a/drivers/infiniband/hw/hfi1/rc.c
|
|
+++ b/drivers/infiniband/hw/hfi1/rc.c
|
|
@@ -1157,6 +1157,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
|
|
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
|
|
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
|
|
break;
|
|
+ rvt_qp_wqe_unreserve(qp, wqe);
|
|
s_last = qp->s_last;
|
|
trace_hfi1_qp_send_completion(qp, wqe, s_last);
|
|
if (++s_last >= qp->s_size)
|
|
@@ -1209,6 +1210,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
|
|
u32 s_last;
|
|
|
|
rvt_put_swqe(wqe);
|
|
+ rvt_qp_wqe_unreserve(qp, wqe);
|
|
s_last = qp->s_last;
|
|
trace_hfi1_qp_send_completion(qp, wqe, s_last);
|
|
if (++s_last >= qp->s_size)
|
|
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
|
|
index a365089a9305b..b7257d7dd925b 100644
|
|
--- a/drivers/infiniband/hw/hfi1/verbs.c
|
|
+++ b/drivers/infiniband/hw/hfi1/verbs.c
|
|
@@ -919,6 +919,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
|
|
|
|
if (slen > len)
|
|
slen = len;
|
|
+ if (slen > ss->sge.sge_length)
|
|
+ slen = ss->sge.sge_length;
|
|
rvt_update_sge(ss, slen, false);
|
|
seg_pio_copy_mid(pbuf, addr, slen);
|
|
len -= slen;
|
|
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
|
|
index 691c6f0489386..2428c7d89c6be 100644
|
|
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
|
|
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
|
|
@@ -533,7 +533,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
|
|
{
|
|
struct mthca_ucontext *context;
|
|
|
|
- qp = kmalloc(sizeof *qp, GFP_KERNEL);
|
|
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
|
|
if (!qp)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
@@ -599,7 +599,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
|
|
if (pd->uobject)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
|
|
+ qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
|
|
if (!qp)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
|
|
index 0b91ff36768a3..598e23cf01fcd 100644
|
|
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
|
|
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
|
|
@@ -336,13 +336,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
|
|
|
|
usnic_dbg("\n");
|
|
|
|
- mutex_lock(&us_ibdev->usdev_lock);
|
|
if (ib_get_eth_speed(ibdev, port, &props->active_speed,
|
|
- &props->active_width)) {
|
|
- mutex_unlock(&us_ibdev->usdev_lock);
|
|
+ &props->active_width))
|
|
return -EINVAL;
|
|
- }
|
|
|
|
+ /*
|
|
+ * usdev_lock is acquired after (and not before) ib_get_eth_speed call
|
|
+ * because acquiring rtnl_lock in ib_get_eth_speed, while holding
|
|
+ * usdev_lock could lead to a deadlock.
|
|
+ */
|
|
+ mutex_lock(&us_ibdev->usdev_lock);
|
|
/* props being zeroed by the caller, avoid zeroing it here */
|
|
|
|
props->lid = 0;
|
|
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
|
|
index 42b8685c997eb..3c633ab580528 100644
|
|
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
|
|
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
|
|
@@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
|
|
|
|
static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
|
|
{
|
|
- return (enum pvrdma_wr_opcode)op;
|
|
+ switch (op) {
|
|
+ case IB_WR_RDMA_WRITE:
|
|
+ return PVRDMA_WR_RDMA_WRITE;
|
|
+ case IB_WR_RDMA_WRITE_WITH_IMM:
|
|
+ return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
|
|
+ case IB_WR_SEND:
|
|
+ return PVRDMA_WR_SEND;
|
|
+ case IB_WR_SEND_WITH_IMM:
|
|
+ return PVRDMA_WR_SEND_WITH_IMM;
|
|
+ case IB_WR_RDMA_READ:
|
|
+ return PVRDMA_WR_RDMA_READ;
|
|
+ case IB_WR_ATOMIC_CMP_AND_SWP:
|
|
+ return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
|
|
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
|
|
+ return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
|
|
+ case IB_WR_LSO:
|
|
+ return PVRDMA_WR_LSO;
|
|
+ case IB_WR_SEND_WITH_INV:
|
|
+ return PVRDMA_WR_SEND_WITH_INV;
|
|
+ case IB_WR_RDMA_READ_WITH_INV:
|
|
+ return PVRDMA_WR_RDMA_READ_WITH_INV;
|
|
+ case IB_WR_LOCAL_INV:
|
|
+ return PVRDMA_WR_LOCAL_INV;
|
|
+ case IB_WR_REG_MR:
|
|
+ return PVRDMA_WR_FAST_REG_MR;
|
|
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
|
|
+ return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
|
|
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
|
|
+ return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
|
|
+ case IB_WR_REG_SIG_MR:
|
|
+ return PVRDMA_WR_REG_SIG_MR;
|
|
+ default:
|
|
+ return PVRDMA_WR_ERROR;
|
|
+ }
|
|
}
|
|
|
|
static inline enum ib_wc_status pvrdma_wc_status_to_ib(
|
|
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
|
|
index cf22f57a9f0dd..418d9ab4ea7f2 100644
|
|
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
|
|
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
|
|
@@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
|
|
wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
|
|
wqe_hdr->ex.imm_data = wr->ex.imm_data;
|
|
|
|
+ if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
|
|
+ *bad_wr = wr;
|
|
+ ret = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
switch (qp->ibqp.qp_type) {
|
|
case IB_QPT_GSI:
|
|
case IB_QPT_UD:
|
|
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
|
|
index 1735deb1a9d4e..79ac9c3c0e5aa 100644
|
|
--- a/drivers/infiniband/sw/rdmavt/qp.c
|
|
+++ b/drivers/infiniband/sw/rdmavt/qp.c
|
|
@@ -2903,6 +2903,8 @@ send:
|
|
goto op_err;
|
|
if (!ret)
|
|
goto rnr_nak;
|
|
+ if (wqe->length > qp->r_len)
|
|
+ goto inv_err;
|
|
break;
|
|
|
|
case IB_WR_RDMA_WRITE_WITH_IMM:
|
|
@@ -3071,7 +3073,10 @@ op_err:
|
|
goto err;
|
|
|
|
inv_err:
|
|
- send_status = IB_WC_REM_INV_REQ_ERR;
|
|
+ send_status =
|
|
+ sqp->ibqp.qp_type == IB_QPT_RC ?
|
|
+ IB_WC_REM_INV_REQ_ERR :
|
|
+ IB_WC_SUCCESS;
|
|
wc.status = IB_WC_LOC_QP_OP_ERR;
|
|
goto err;
|
|
|
|
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
|
|
index 6c361d70d7cdd..46f62f71cd286 100644
|
|
--- a/drivers/infiniband/sw/rxe/rxe_req.c
|
|
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
|
|
@@ -643,6 +643,7 @@ next_wqe:
|
|
rmr->access = wqe->wr.wr.reg.access;
|
|
rmr->lkey = wqe->wr.wr.reg.key;
|
|
rmr->rkey = wqe->wr.wr.reg.key;
|
|
+ rmr->iova = wqe->wr.wr.reg.mr->iova;
|
|
wqe->state = wqe_state_done;
|
|
wqe->status = IB_WC_SUCCESS;
|
|
} else {
|
|
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
|
|
index c962160292f49..f0438bc6df885 100644
|
|
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
|
|
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
|
|
@@ -844,11 +844,16 @@ static enum resp_states do_complete(struct rxe_qp *qp,
|
|
|
|
memset(&cqe, 0, sizeof(cqe));
|
|
|
|
- wc->wr_id = wqe->wr_id;
|
|
- wc->status = qp->resp.status;
|
|
- wc->qp = &qp->ibqp;
|
|
+ if (qp->rcq->is_user) {
|
|
+ uwc->status = qp->resp.status;
|
|
+ uwc->qp_num = qp->ibqp.qp_num;
|
|
+ uwc->wr_id = wqe->wr_id;
|
|
+ } else {
|
|
+ wc->status = qp->resp.status;
|
|
+ wc->qp = &qp->ibqp;
|
|
+ wc->wr_id = wqe->wr_id;
|
|
+ }
|
|
|
|
- /* fields after status are not required for errors */
|
|
if (wc->status == IB_WC_SUCCESS) {
|
|
wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
|
|
pkt->mask & RXE_WRITE_MASK) ?
|
|
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
|
|
index eed0eb3bb04c6..0466f2ac9ad08 100644
|
|
--- a/drivers/infiniband/ulp/srp/ib_srp.c
|
|
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
|
|
@@ -2942,7 +2942,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
|
|
{
|
|
struct srp_target_port *target = host_to_target(scmnd->device->host);
|
|
struct srp_rdma_ch *ch;
|
|
- int i, j;
|
|
u8 status;
|
|
|
|
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
|
|
@@ -2954,15 +2953,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
|
|
if (status)
|
|
return FAILED;
|
|
|
|
- for (i = 0; i < target->ch_count; i++) {
|
|
- ch = &target->ch[i];
|
|
- for (j = 0; j < target->req_ring_size; ++j) {
|
|
- struct srp_request *req = &ch->req_ring[j];
|
|
-
|
|
- srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
|
|
- }
|
|
- }
|
|
-
|
|
return SUCCESS;
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
|
|
index 2357aa727dcf5..96c7673245757 100644
|
|
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
|
|
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
|
|
@@ -2010,6 +2010,14 @@ static void srpt_free_ch(struct kref *kref)
|
|
kfree_rcu(ch, rcu);
|
|
}
|
|
|
|
+/*
|
|
+ * Shut down the SCSI target session, tell the connection manager to
|
|
+ * disconnect the associated RDMA channel, transition the QP to the error
|
|
+ * state and remove the channel from the channel list. This function is
|
|
+ * typically called from inside srpt_zerolength_write_done(). Concurrent
|
|
+ * srpt_zerolength_write() calls from inside srpt_close_ch() are possible
|
|
+ * as long as the channel is on sport->nexus_list.
|
|
+ */
|
|
static void srpt_release_channel_work(struct work_struct *w)
|
|
{
|
|
struct srpt_rdma_ch *ch;
|
|
@@ -2037,6 +2045,11 @@ static void srpt_release_channel_work(struct work_struct *w)
|
|
else
|
|
ib_destroy_cm_id(ch->ib_cm.cm_id);
|
|
|
|
+ sport = ch->sport;
|
|
+ mutex_lock(&sport->mutex);
|
|
+ list_del_rcu(&ch->list);
|
|
+ mutex_unlock(&sport->mutex);
|
|
+
|
|
srpt_destroy_ch_ib(ch);
|
|
|
|
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
|
|
@@ -2047,11 +2060,6 @@ static void srpt_release_channel_work(struct work_struct *w)
|
|
sdev, ch->rq_size,
|
|
srp_max_req_size, DMA_FROM_DEVICE);
|
|
|
|
- sport = ch->sport;
|
|
- mutex_lock(&sport->mutex);
|
|
- list_del_rcu(&ch->list);
|
|
- mutex_unlock(&sport->mutex);
|
|
-
|
|
wake_up(&sport->ch_releaseQ);
|
|
|
|
kref_put(&ch->kref, srpt_free_ch);
|
|
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
|
|
index cfc8b94527b97..aa4e431cbcd35 100644
|
|
--- a/drivers/input/joystick/xpad.c
|
|
+++ b/drivers/input/joystick/xpad.c
|
|
@@ -252,6 +252,8 @@ static const struct xpad_device {
|
|
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
|
|
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
|
|
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
|
|
+ { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
|
|
+ { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
|
|
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
|
|
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
|
|
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
|
|
@@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = {
|
|
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
|
|
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
|
|
XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
|
|
+ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
|
|
XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
|
|
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
|
|
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
|
|
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
|
|
index 1efcfdf9f8a84..dd9dd4e408271 100644
|
|
--- a/drivers/input/misc/bma150.c
|
|
+++ b/drivers/input/misc/bma150.c
|
|
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
|
|
idev->close = bma150_irq_close;
|
|
input_set_drvdata(idev, bma150);
|
|
|
|
+ bma150->input = idev;
|
|
+
|
|
error = input_register_device(idev);
|
|
if (error) {
|
|
input_free_device(idev);
|
|
return error;
|
|
}
|
|
|
|
- bma150->input = idev;
|
|
return 0;
|
|
}
|
|
|
|
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
|
|
|
|
bma150_init_input_device(bma150, ipoll_dev->input);
|
|
|
|
+ bma150->input_polled = ipoll_dev;
|
|
+ bma150->input = ipoll_dev->input;
|
|
+
|
|
error = input_register_polled_device(ipoll_dev);
|
|
if (error) {
|
|
input_free_polled_device(ipoll_dev);
|
|
return error;
|
|
}
|
|
|
|
- bma150->input_polled = ipoll_dev;
|
|
- bma150->input = ipoll_dev->input;
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
|
|
index 8ec483e8688be..26ec603fe2208 100644
|
|
--- a/drivers/input/misc/uinput.c
|
|
+++ b/drivers/input/misc/uinput.c
|
|
@@ -39,6 +39,7 @@
|
|
#include <linux/init.h>
|
|
#include <linux/fs.h>
|
|
#include <linux/miscdevice.h>
|
|
+#include <linux/overflow.h>
|
|
#include <linux/input/mt.h>
|
|
#include "../input-compat.h"
|
|
|
|
@@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file)
|
|
static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
|
|
const struct input_absinfo *abs)
|
|
{
|
|
- int min, max;
|
|
+ int min, max, range;
|
|
|
|
min = abs->minimum;
|
|
max = abs->maximum;
|
|
@@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (abs->flat > max - min) {
|
|
+ if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
|
|
printk(KERN_DEBUG
|
|
"%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
|
|
UINPUT_NAME, code, abs->flat, min, max);
|
|
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
|
|
index a94b6494e71a5..225ae6980182f 100644
|
|
--- a/drivers/input/mouse/elan_i2c_core.c
|
|
+++ b/drivers/input/mouse/elan_i2c_core.c
|
|
@@ -1345,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
|
|
{ "ELAN060C", 0 },
|
|
{ "ELAN0611", 0 },
|
|
{ "ELAN0612", 0 },
|
|
+ { "ELAN0617", 0 },
|
|
{ "ELAN0618", 0 },
|
|
{ "ELAN061C", 0 },
|
|
{ "ELAN061D", 0 },
|
|
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
|
|
index 9fe075c137dc4..a7f8b16145595 100644
|
|
--- a/drivers/input/mouse/elantech.c
|
|
+++ b/drivers/input/mouse/elantech.c
|
|
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
|
|
* Asus UX31 0x361f00 20, 15, 0e clickpad
|
|
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
|
|
* Avatar AVIU-145A2 0x361f00 ? clickpad
|
|
+ * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
|
|
+ * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
|
|
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
|
|
* Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
|
|
* Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
|
|
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
|
|
},
|
|
},
|
|
+ {
|
|
+ /* Fujitsu H780 also has a middle button */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
|
|
+ },
|
|
+ },
|
|
#endif
|
|
{ }
|
|
};
|
|
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
|
|
index d3aacd534e9ca..5c63d25ce84eb 100644
|
|
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
|
|
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
|
|
@@ -1585,10 +1585,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
|
|
/* T7 config may have changed */
|
|
mxt_init_t7_power_cfg(data);
|
|
|
|
-release_raw:
|
|
- kfree(cfg.raw);
|
|
release_mem:
|
|
kfree(cfg.mem);
|
|
+release_raw:
|
|
+ kfree(cfg.raw);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
|
|
index 1167ff0416cf7..325f3bad118b4 100644
|
|
--- a/drivers/iommu/amd_iommu.c
|
|
+++ b/drivers/iommu/amd_iommu.c
|
|
@@ -438,7 +438,14 @@ static int iommu_init_device(struct device *dev)
|
|
|
|
dev_data->alias = get_alias(dev);
|
|
|
|
- if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
|
|
+ /*
|
|
+ * By default we use passthrough mode for IOMMUv2 capable device.
|
|
+ * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
|
|
+ * invalid address), we ignore the capability for the device so
|
|
+ * it'll be forced to go into translation mode.
|
|
+ */
|
|
+ if ((iommu_pass_through || !amd_iommu_force_isolation) &&
|
|
+ dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
|
|
struct amd_iommu *iommu;
|
|
|
|
iommu = amd_iommu_rlookup_table[dev_data->devid];
|
|
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
|
|
index 6947ccf26512e..11f528e727a16 100644
|
|
--- a/drivers/iommu/arm-smmu-v3.c
|
|
+++ b/drivers/iommu/arm-smmu-v3.c
|
|
@@ -576,7 +576,11 @@ struct arm_smmu_device {
|
|
|
|
struct arm_smmu_strtab_cfg strtab_cfg;
|
|
|
|
- u32 sync_count;
|
|
+ /* Hi16xx adds an extra 32 bits of goodness to its MSI payload */
|
|
+ union {
|
|
+ u32 sync_count;
|
|
+ u64 padding;
|
|
+ };
|
|
|
|
/* IOMMU core code handle */
|
|
struct iommu_device iommu;
|
|
@@ -675,7 +679,13 @@ static void queue_inc_cons(struct arm_smmu_queue *q)
|
|
u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
|
|
|
|
q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
|
|
- writel(q->cons, q->cons_reg);
|
|
+
|
|
+ /*
|
|
+ * Ensure that all CPU accesses (reads and writes) to the queue
|
|
+ * are complete before we update the cons pointer.
|
|
+ */
|
|
+ mb();
|
|
+ writel_relaxed(q->cons, q->cons_reg);
|
|
}
|
|
|
|
static int queue_sync_prod(struct arm_smmu_queue *q)
|
|
@@ -828,7 +838,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
|
|
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
|
|
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
|
|
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
|
|
- cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata);
|
|
+ /*
|
|
+ * Commands are written little-endian, but we want the SMMU to
|
|
+ * receive MSIData, and thus write it back to memory, in CPU
|
|
+ * byte order, so big-endian needs an extra byteswap here.
|
|
+ */
|
|
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
|
|
+ cpu_to_le32(ent->sync.msidata));
|
|
cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
|
|
break;
|
|
default:
|
|
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
|
|
index 5a28ae892504a..e23aa7f6c4ad3 100644
|
|
--- a/drivers/iommu/arm-smmu.c
|
|
+++ b/drivers/iommu/arm-smmu.c
|
|
@@ -119,6 +119,7 @@ enum arm_smmu_implementation {
|
|
GENERIC_SMMU,
|
|
ARM_MMU500,
|
|
CAVIUM_SMMUV2,
|
|
+ QCOM_SMMUV2,
|
|
};
|
|
|
|
struct arm_smmu_s2cr {
|
|
@@ -1954,6 +1955,7 @@ ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
|
|
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
|
|
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
|
|
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
|
|
+ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
|
|
|
|
static const struct of_device_id arm_smmu_of_match[] = {
|
|
{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
|
|
@@ -1962,6 +1964,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
|
|
{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
|
|
{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
|
|
{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
|
|
+ { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
|
|
{ },
|
|
};
|
|
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
|
|
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
|
|
index 41a4b8808802b..7ec9c67391709 100644
|
|
--- a/drivers/iommu/intel-iommu.c
|
|
+++ b/drivers/iommu/intel-iommu.c
|
|
@@ -2044,7 +2044,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
|
|
* than default. Unnecessary for PT mode.
|
|
*/
|
|
if (translation != CONTEXT_TT_PASS_THROUGH) {
|
|
- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
|
|
+ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
|
|
ret = -ENOMEM;
|
|
pgd = phys_to_virt(dma_pte_addr(pgd));
|
|
if (!dma_pte_present(pgd))
|
|
@@ -2058,7 +2058,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
|
|
translation = CONTEXT_TT_MULTI_LEVEL;
|
|
|
|
context_set_address_root(context, virt_to_phys(pgd));
|
|
- context_set_address_width(context, iommu->agaw);
|
|
+ context_set_address_width(context, agaw);
|
|
} else {
|
|
/*
|
|
* In pass through mode, AW must be programmed to
|
|
@@ -5204,7 +5204,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
|
|
struct iommu_resv_region *entry, *next;
|
|
|
|
list_for_each_entry_safe(entry, next, head, list) {
|
|
- if (entry->type == IOMMU_RESV_RESERVED)
|
|
+ if (entry->type == IOMMU_RESV_MSI)
|
|
kfree(entry);
|
|
}
|
|
}
|
|
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
|
|
index 2543baba8b1fe..5a2ec43b7ddd4 100644
|
|
--- a/drivers/irqchip/irq-csky-apb-intc.c
|
|
+++ b/drivers/irqchip/irq-csky-apb-intc.c
|
|
@@ -95,7 +95,7 @@ static inline void setup_irq_channel(u32 magic, void __iomem *reg_addr)
|
|
|
|
/* Setup 64 channel slots */
|
|
for (i = 0; i < INTC_IRQS; i += 4)
|
|
- writel_relaxed(build_channel_val(i, magic), reg_addr + i);
|
|
+ writel(build_channel_val(i, magic), reg_addr + i);
|
|
}
|
|
|
|
static int __init
|
|
@@ -135,16 +135,10 @@ ck_intc_init_comm(struct device_node *node, struct device_node *parent)
|
|
static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
|
|
u32 irq_base)
|
|
{
|
|
- u32 irq;
|
|
-
|
|
if (hwirq == 0)
|
|
return 0;
|
|
|
|
- while (hwirq) {
|
|
- irq = __ffs(hwirq);
|
|
- hwirq &= ~BIT(irq);
|
|
- handle_domain_irq(root_domain, irq_base + irq, regs);
|
|
- }
|
|
+ handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs);
|
|
|
|
return 1;
|
|
}
|
|
@@ -154,12 +148,16 @@ static void gx_irq_handler(struct pt_regs *regs)
|
|
{
|
|
bool ret;
|
|
|
|
- do {
|
|
- ret = handle_irq_perbit(regs,
|
|
- readl_relaxed(reg_base + GX_INTC_PEN31_00), 0);
|
|
- ret |= handle_irq_perbit(regs,
|
|
- readl_relaxed(reg_base + GX_INTC_PEN63_32), 32);
|
|
- } while (ret);
|
|
+retry:
|
|
+ ret = handle_irq_perbit(regs,
|
|
+ readl(reg_base + GX_INTC_PEN63_32), 32);
|
|
+ if (ret)
|
|
+ goto retry;
|
|
+
|
|
+ ret = handle_irq_perbit(regs,
|
|
+ readl(reg_base + GX_INTC_PEN31_00), 0);
|
|
+ if (ret)
|
|
+ goto retry;
|
|
}
|
|
|
|
static int __init
|
|
@@ -174,14 +172,14 @@ gx_intc_init(struct device_node *node, struct device_node *parent)
|
|
/*
|
|
* Initial enable reg to disable all interrupts
|
|
*/
|
|
- writel_relaxed(0x0, reg_base + GX_INTC_NEN31_00);
|
|
- writel_relaxed(0x0, reg_base + GX_INTC_NEN63_32);
|
|
+ writel(0x0, reg_base + GX_INTC_NEN31_00);
|
|
+ writel(0x0, reg_base + GX_INTC_NEN63_32);
|
|
|
|
/*
|
|
* Initial mask reg with all unmasked, because we only use enalbe reg
|
|
*/
|
|
- writel_relaxed(0x0, reg_base + GX_INTC_NMASK31_00);
|
|
- writel_relaxed(0x0, reg_base + GX_INTC_NMASK63_32);
|
|
+ writel(0x0, reg_base + GX_INTC_NMASK31_00);
|
|
+ writel(0x0, reg_base + GX_INTC_NMASK63_32);
|
|
|
|
setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE);
|
|
|
|
@@ -204,20 +202,29 @@ static void ck_irq_handler(struct pt_regs *regs)
|
|
void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00;
|
|
void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32;
|
|
|
|
- do {
|
|
- /* handle 0 - 31 irqs */
|
|
- ret = handle_irq_perbit(regs, readl_relaxed(reg_pen_lo), 0);
|
|
- ret |= handle_irq_perbit(regs, readl_relaxed(reg_pen_hi), 32);
|
|
+retry:
|
|
+ /* handle 0 - 63 irqs */
|
|
+ ret = handle_irq_perbit(regs, readl(reg_pen_hi), 32);
|
|
+ if (ret)
|
|
+ goto retry;
|
|
|
|
- if (nr_irq == INTC_IRQS)
|
|
- continue;
|
|
+ ret = handle_irq_perbit(regs, readl(reg_pen_lo), 0);
|
|
+ if (ret)
|
|
+ goto retry;
|
|
+
|
|
+ if (nr_irq == INTC_IRQS)
|
|
+ return;
|
|
|
|
- /* handle 64 - 127 irqs */
|
|
- ret |= handle_irq_perbit(regs,
|
|
- readl_relaxed(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
|
|
- ret |= handle_irq_perbit(regs,
|
|
- readl_relaxed(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
|
|
- } while (ret);
|
|
+ /* handle 64 - 127 irqs */
|
|
+ ret = handle_irq_perbit(regs,
|
|
+ readl(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
|
|
+ if (ret)
|
|
+ goto retry;
|
|
+
|
|
+ ret = handle_irq_perbit(regs,
|
|
+ readl(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
|
|
+ if (ret)
|
|
+ goto retry;
|
|
}
|
|
|
|
static int __init
|
|
@@ -230,11 +237,11 @@ ck_intc_init(struct device_node *node, struct device_node *parent)
|
|
return ret;
|
|
|
|
/* Initial enable reg to disable all interrupts */
|
|
- writel_relaxed(0, reg_base + CK_INTC_NEN31_00);
|
|
- writel_relaxed(0, reg_base + CK_INTC_NEN63_32);
|
|
+ writel(0, reg_base + CK_INTC_NEN31_00);
|
|
+ writel(0, reg_base + CK_INTC_NEN63_32);
|
|
|
|
/* Enable irq intc */
|
|
- writel_relaxed(BIT(31), reg_base + CK_INTC_ICR);
|
|
+ writel(BIT(31), reg_base + CK_INTC_ICR);
|
|
|
|
ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0);
|
|
ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32);
|
|
@@ -260,8 +267,8 @@ ck_dual_intc_init(struct device_node *node, struct device_node *parent)
|
|
return ret;
|
|
|
|
/* Initial enable reg to disable all interrupts */
|
|
- writel_relaxed(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
|
|
- writel_relaxed(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
|
|
+ writel(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
|
|
+ writel(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
|
|
|
|
ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64);
|
|
ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96);
|
|
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
|
|
index db20e992a40f2..350f999d205be 100644
|
|
--- a/drivers/irqchip/irq-gic-v3-its.c
|
|
+++ b/drivers/irqchip/irq-gic-v3-its.c
|
|
@@ -97,9 +97,14 @@ struct its_device;
|
|
* The ITS structure - contains most of the infrastructure, with the
|
|
* top-level MSI domain, the command queue, the collections, and the
|
|
* list of devices writing to it.
|
|
+ *
|
|
+ * dev_alloc_lock has to be taken for device allocations, while the
|
|
+ * spinlock must be taken to parse data structures such as the device
|
|
+ * list.
|
|
*/
|
|
struct its_node {
|
|
raw_spinlock_t lock;
|
|
+ struct mutex dev_alloc_lock;
|
|
struct list_head entry;
|
|
void __iomem *base;
|
|
phys_addr_t phys_base;
|
|
@@ -156,6 +161,7 @@ struct its_device {
|
|
void *itt;
|
|
u32 nr_ites;
|
|
u32 device_id;
|
|
+ bool shared;
|
|
};
|
|
|
|
static struct {
|
|
@@ -2399,13 +2405,14 @@ static void its_free_device(struct its_device *its_dev)
|
|
kfree(its_dev);
|
|
}
|
|
|
|
-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
|
|
+static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
|
|
{
|
|
int idx;
|
|
|
|
- idx = find_first_zero_bit(dev->event_map.lpi_map,
|
|
- dev->event_map.nr_lpis);
|
|
- if (idx == dev->event_map.nr_lpis)
|
|
+ idx = bitmap_find_free_region(dev->event_map.lpi_map,
|
|
+ dev->event_map.nr_lpis,
|
|
+ get_count_order(nvecs));
|
|
+ if (idx < 0)
|
|
return -ENOSPC;
|
|
|
|
*hwirq = dev->event_map.lpi_base + idx;
|
|
@@ -2421,6 +2428,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
|
|
struct its_device *its_dev;
|
|
struct msi_domain_info *msi_info;
|
|
u32 dev_id;
|
|
+ int err = 0;
|
|
|
|
/*
|
|
* We ignore "dev" entierely, and rely on the dev_id that has
|
|
@@ -2443,6 +2451,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ mutex_lock(&its->dev_alloc_lock);
|
|
its_dev = its_find_device(its, dev_id);
|
|
if (its_dev) {
|
|
/*
|
|
@@ -2450,18 +2459,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
|
|
* another alias (PCI bridge of some sort). No need to
|
|
* create the device.
|
|
*/
|
|
+ its_dev->shared = true;
|
|
pr_debug("Reusing ITT for devID %x\n", dev_id);
|
|
goto out;
|
|
}
|
|
|
|
its_dev = its_create_device(its, dev_id, nvec, true);
|
|
- if (!its_dev)
|
|
- return -ENOMEM;
|
|
+ if (!its_dev) {
|
|
+ err = -ENOMEM;
|
|
+ goto out;
|
|
+ }
|
|
|
|
pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
|
|
out:
|
|
+ mutex_unlock(&its->dev_alloc_lock);
|
|
info->scratchpad[0].ptr = its_dev;
|
|
- return 0;
|
|
+ return err;
|
|
}
|
|
|
|
static struct msi_domain_ops its_msi_domain_ops = {
|
|
@@ -2501,21 +2514,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
|
|
int err;
|
|
int i;
|
|
|
|
- for (i = 0; i < nr_irqs; i++) {
|
|
- err = its_alloc_device_irq(its_dev, &hwirq);
|
|
- if (err)
|
|
- return err;
|
|
+ err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
|
|
+ if (err)
|
|
+ return err;
|
|
|
|
- err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
|
|
+ for (i = 0; i < nr_irqs; i++) {
|
|
+ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
|
|
if (err)
|
|
return err;
|
|
|
|
irq_domain_set_hwirq_and_chip(domain, virq + i,
|
|
- hwirq, &its_irq_chip, its_dev);
|
|
+ hwirq + i, &its_irq_chip, its_dev);
|
|
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
|
|
pr_debug("ID:%d pID:%d vID:%d\n",
|
|
- (int)(hwirq - its_dev->event_map.lpi_base),
|
|
- (int) hwirq, virq + i);
|
|
+ (int)(hwirq + i - its_dev->event_map.lpi_base),
|
|
+ (int)(hwirq + i), virq + i);
|
|
}
|
|
|
|
return 0;
|
|
@@ -2565,6 +2578,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
|
|
{
|
|
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
|
|
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
|
|
+ struct its_node *its = its_dev->its;
|
|
int i;
|
|
|
|
for (i = 0; i < nr_irqs; i++) {
|
|
@@ -2579,8 +2593,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
|
|
irq_domain_reset_irq_data(data);
|
|
}
|
|
|
|
- /* If all interrupts have been freed, start mopping the floor */
|
|
- if (bitmap_empty(its_dev->event_map.lpi_map,
|
|
+ mutex_lock(&its->dev_alloc_lock);
|
|
+
|
|
+ /*
|
|
+ * If all interrupts have been freed, start mopping the
|
|
+ * floor. This is conditionned on the device not being shared.
|
|
+ */
|
|
+ if (!its_dev->shared &&
|
|
+ bitmap_empty(its_dev->event_map.lpi_map,
|
|
its_dev->event_map.nr_lpis)) {
|
|
its_lpi_free(its_dev->event_map.lpi_map,
|
|
its_dev->event_map.lpi_base,
|
|
@@ -2592,6 +2612,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
|
|
its_free_device(its_dev);
|
|
}
|
|
|
|
+ mutex_unlock(&its->dev_alloc_lock);
|
|
+
|
|
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
|
|
}
|
|
|
|
@@ -3516,6 +3538,7 @@ static int __init its_probe_one(struct resource *res,
|
|
}
|
|
|
|
raw_spin_lock_init(&its->lock);
|
|
+ mutex_init(&its->dev_alloc_lock);
|
|
INIT_LIST_HEAD(&its->entry);
|
|
INIT_LIST_HEAD(&its->its_device_list);
|
|
typer = gic_read_typer(its_base + GITS_TYPER);
|
|
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
|
|
index 0ff517d3c98f9..a4ceb61c5b603 100644
|
|
--- a/drivers/isdn/capi/kcapi.c
|
|
+++ b/drivers/isdn/capi/kcapi.c
|
|
@@ -852,7 +852,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
|
|
u16 ret;
|
|
|
|
if (contr == 0) {
|
|
- strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
|
|
+ strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
|
|
return CAPI_NOERROR;
|
|
}
|
|
|
|
@@ -860,7 +860,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
|
|
|
|
ctr = get_capi_ctr_by_nr(contr);
|
|
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
|
|
- strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
|
|
+ strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
|
|
ret = CAPI_NOERROR;
|
|
} else
|
|
ret = CAPI_REGNOTINSTALLED;
|
|
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
|
|
index 4ac378e489023..40ca1e8fa09fc 100644
|
|
--- a/drivers/isdn/hardware/avm/b1.c
|
|
+++ b/drivers/isdn/hardware/avm/b1.c
|
|
@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
|
|
int i, j;
|
|
|
|
for (j = 0; j < AVM_MAXVERSION; j++)
|
|
- cinfo->version[j] = "\0\0" + 1;
|
|
+ cinfo->version[j] = "";
|
|
for (i = 0, j = 0;
|
|
j < AVM_MAXVERSION && i < cinfo->versionlen;
|
|
j++, i += cinfo->versionbuf[i] + 1)
|
|
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
|
|
index ea0e4c6de3fb2..0109e0e8bcb67 100644
|
|
--- a/drivers/isdn/hisax/hfc_pci.c
|
|
+++ b/drivers/isdn/hisax/hfc_pci.c
|
|
@@ -1170,11 +1170,13 @@ HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
|
|
if (cs->debug & L1_DEB_LAPD)
|
|
debugl1(cs, "-> PH_REQUEST_PULL");
|
|
#endif
|
|
+ spin_lock_irqsave(&cs->lock, flags);
|
|
if (!cs->tx_skb) {
|
|
test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
|
|
st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
|
|
} else
|
|
test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
|
|
+ spin_unlock_irqrestore(&cs->lock, flags);
|
|
break;
|
|
case (HW_RESET | REQUEST):
|
|
spin_lock_irqsave(&cs->lock, flags);
|
|
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
|
|
index 1b2239c1d5694..dc1cded716c1a 100644
|
|
--- a/drivers/isdn/i4l/isdn_tty.c
|
|
+++ b/drivers/isdn/i4l/isdn_tty.c
|
|
@@ -1437,15 +1437,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
|
|
{
|
|
modem_info *info = (modem_info *) tty->driver_data;
|
|
|
|
+ mutex_lock(&modem_info_mutex);
|
|
if (!old_termios)
|
|
isdn_tty_change_speed(info);
|
|
else {
|
|
if (tty->termios.c_cflag == old_termios->c_cflag &&
|
|
tty->termios.c_ispeed == old_termios->c_ispeed &&
|
|
- tty->termios.c_ospeed == old_termios->c_ospeed)
|
|
+ tty->termios.c_ospeed == old_termios->c_ospeed) {
|
|
+ mutex_unlock(&modem_info_mutex);
|
|
return;
|
|
+ }
|
|
isdn_tty_change_speed(info);
|
|
}
|
|
+ mutex_unlock(&modem_info_mutex);
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
|
|
index 211ed6cffd10e..5789787118870 100644
|
|
--- a/drivers/isdn/mISDN/timerdev.c
|
|
+++ b/drivers/isdn/mISDN/timerdev.c
|
|
@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t)
|
|
spin_lock_irqsave(&timer->dev->lock, flags);
|
|
if (timer->id >= 0)
|
|
list_move_tail(&timer->list, &timer->dev->expired);
|
|
- spin_unlock_irqrestore(&timer->dev->lock, flags);
|
|
wake_up_interruptible(&timer->dev->wait);
|
|
+ spin_unlock_irqrestore(&timer->dev->lock, flags);
|
|
}
|
|
|
|
static int
|
|
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
|
|
index a2e74feee2b2f..fd64df5a57a5e 100644
|
|
--- a/drivers/leds/leds-lp5523.c
|
|
+++ b/drivers/leds/leds-lp5523.c
|
|
@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
|
|
|
|
/* Let the programs run for couple of ms and check the engine status */
|
|
usleep_range(3000, 6000);
|
|
- lp55xx_read(chip, LP5523_REG_STATUS, &status);
|
|
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
|
|
+ if (ret)
|
|
+ return ret;
|
|
status &= LP5523_ENG_STATUS_MASK;
|
|
|
|
if (status != LP5523_ENG_STATUS_MASK) {
|
|
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
|
|
index efb976a863d22..73ab3cf268680 100644
|
|
--- a/drivers/lightnvm/core.c
|
|
+++ b/drivers/lightnvm/core.c
|
|
@@ -974,7 +974,7 @@ static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
|
|
struct ppa_addr ppa;
|
|
u8 *blks;
|
|
int ch, lun, nr_blks;
|
|
- int ret;
|
|
+ int ret = 0;
|
|
|
|
ppa.ppa = slba;
|
|
ppa = dev_to_generic_addr(dev, ppa);
|
|
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
|
|
index 6944aac43b015..247112e1db94f 100644
|
|
--- a/drivers/lightnvm/pblk-core.c
|
|
+++ b/drivers/lightnvm/pblk-core.c
|
|
@@ -1295,15 +1295,22 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
|
|
|
|
ret = pblk_line_alloc_bitmaps(pblk, line);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto fail;
|
|
|
|
if (!pblk_line_init_bb(pblk, line, 0)) {
|
|
- list_add(&line->list, &l_mg->free_list);
|
|
- return -EINTR;
|
|
+ ret = -EINTR;
|
|
+ goto fail;
|
|
}
|
|
|
|
pblk_rl_free_lines_dec(&pblk->rl, line, true);
|
|
return 0;
|
|
+
|
|
+fail:
|
|
+ spin_lock(&l_mg->free_lock);
|
|
+ list_add(&line->list, &l_mg->free_list);
|
|
+ spin_unlock(&l_mg->free_lock);
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
|
|
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
|
|
index fa8726493b39f..3ddd16f471066 100644
|
|
--- a/drivers/lightnvm/pblk-write.c
|
|
+++ b/drivers/lightnvm/pblk-write.c
|
|
@@ -148,9 +148,11 @@ static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
|
|
w_ctx = &entry->w_ctx;
|
|
|
|
/* Check if the lba has been overwritten */
|
|
- ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
|
|
- if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
|
|
- w_ctx->lba = ADDR_EMPTY;
|
|
+ if (w_ctx->lba != ADDR_EMPTY) {
|
|
+ ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
|
|
+ if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
|
|
+ w_ctx->lba = ADDR_EMPTY;
|
|
+ }
|
|
|
|
/* Mark up the entry as submittable again */
|
|
flags = READ_ONCE(w_ctx->flags);
|
|
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
|
|
index b8eec515a003c..9fc5423f83c13 100644
|
|
--- a/drivers/md/dm-crypt.c
|
|
+++ b/drivers/md/dm-crypt.c
|
|
@@ -49,7 +49,7 @@ struct convert_context {
|
|
struct bio *bio_out;
|
|
struct bvec_iter iter_in;
|
|
struct bvec_iter iter_out;
|
|
- sector_t cc_sector;
|
|
+ u64 cc_sector;
|
|
atomic_t cc_pending;
|
|
union {
|
|
struct skcipher_request *req;
|
|
@@ -81,7 +81,7 @@ struct dm_crypt_request {
|
|
struct convert_context *ctx;
|
|
struct scatterlist sg_in[4];
|
|
struct scatterlist sg_out[4];
|
|
- sector_t iv_sector;
|
|
+ u64 iv_sector;
|
|
};
|
|
|
|
struct crypt_config;
|
|
@@ -160,7 +160,7 @@ struct crypt_config {
|
|
struct iv_lmk_private lmk;
|
|
struct iv_tcw_private tcw;
|
|
} iv_gen_private;
|
|
- sector_t iv_offset;
|
|
+ u64 iv_offset;
|
|
unsigned int iv_size;
|
|
unsigned short int sector_size;
|
|
unsigned char sector_shift;
|
|
@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
|
|
if (IS_ERR(bip))
|
|
return PTR_ERR(bip);
|
|
|
|
- tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
|
|
+ tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
|
|
|
|
bip->bip_iter.bi_size = tag_len;
|
|
bip->bip_iter.bi_sector = io->cc->start + io->sector;
|
|
@@ -2405,9 +2405,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
|
|
* capi:cipher_api_spec-iv:ivopts
|
|
*/
|
|
tmp = &cipher_in[strlen("capi:")];
|
|
- cipher_api = strsep(&tmp, "-");
|
|
- *ivmode = strsep(&tmp, ":");
|
|
- *ivopts = tmp;
|
|
+
|
|
+ /* Separate IV options if present, it can contain another '-' in hash name */
|
|
+ *ivopts = strrchr(tmp, ':');
|
|
+ if (*ivopts) {
|
|
+ **ivopts = '\0';
|
|
+ (*ivopts)++;
|
|
+ }
|
|
+ /* Parse IV mode */
|
|
+ *ivmode = strrchr(tmp, '-');
|
|
+ if (*ivmode) {
|
|
+ **ivmode = '\0';
|
|
+ (*ivmode)++;
|
|
+ }
|
|
+ /* The rest is crypto API spec */
|
|
+ cipher_api = tmp;
|
|
|
|
if (*ivmode && !strcmp(*ivmode, "lmk"))
|
|
cc->tfms_count = 64;
|
|
@@ -2477,11 +2489,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
|
|
goto bad_mem;
|
|
|
|
chainmode = strsep(&tmp, "-");
|
|
- *ivopts = strsep(&tmp, "-");
|
|
- *ivmode = strsep(&*ivopts, ":");
|
|
-
|
|
- if (tmp)
|
|
- DMWARN("Ignoring unexpected additional cipher options");
|
|
+ *ivmode = strsep(&tmp, ":");
|
|
+ *ivopts = tmp;
|
|
|
|
/*
|
|
* For compatibility with the original dm-crypt mapping format, if
|
|
@@ -2781,7 +2790,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|
}
|
|
|
|
ret = -EINVAL;
|
|
- if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
|
|
+ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
|
|
ti->error = "Invalid device sector";
|
|
goto bad;
|
|
}
|
|
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
|
|
index 2fb7bb4304ad7..fddffe251bf6b 100644
|
|
--- a/drivers/md/dm-delay.c
|
|
+++ b/drivers/md/dm-delay.c
|
|
@@ -141,7 +141,7 @@ static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **a
|
|
unsigned long long tmpll;
|
|
char dummy;
|
|
|
|
- if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
|
|
+ if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
|
|
ti->error = "Invalid device sector";
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
|
|
index 3cb97fa4c11da..8261aa8c7fe11 100644
|
|
--- a/drivers/md/dm-flakey.c
|
|
+++ b/drivers/md/dm-flakey.c
|
|
@@ -213,7 +213,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|
devname = dm_shift_arg(&as);
|
|
|
|
r = -EINVAL;
|
|
- if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
|
|
+ if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
|
|
ti->error = "Invalid device sector";
|
|
goto bad;
|
|
}
|
|
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
|
|
index 2fc4213e02b5f..671c24332802e 100644
|
|
--- a/drivers/md/dm-kcopyd.c
|
|
+++ b/drivers/md/dm-kcopyd.c
|
|
@@ -56,15 +56,17 @@ struct dm_kcopyd_client {
|
|
atomic_t nr_jobs;
|
|
|
|
/*
|
|
- * We maintain three lists of jobs:
|
|
+ * We maintain four lists of jobs:
|
|
*
|
|
* i) jobs waiting for pages
|
|
* ii) jobs that have pages, and are waiting for the io to be issued.
|
|
- * iii) jobs that have completed.
|
|
+ * iii) jobs that don't need to do any IO and just run a callback
|
|
+ * iv) jobs that have completed.
|
|
*
|
|
- * All three of these are protected by job_lock.
|
|
+ * All four of these are protected by job_lock.
|
|
*/
|
|
spinlock_t job_lock;
|
|
+ struct list_head callback_jobs;
|
|
struct list_head complete_jobs;
|
|
struct list_head io_jobs;
|
|
struct list_head pages_jobs;
|
|
@@ -625,6 +627,7 @@ static void do_work(struct work_struct *work)
|
|
struct dm_kcopyd_client *kc = container_of(work,
|
|
struct dm_kcopyd_client, kcopyd_work);
|
|
struct blk_plug plug;
|
|
+ unsigned long flags;
|
|
|
|
/*
|
|
* The order that these are called is *very* important.
|
|
@@ -633,6 +636,10 @@ static void do_work(struct work_struct *work)
|
|
* list. io jobs call wake when they complete and it all
|
|
* starts again.
|
|
*/
|
|
+ spin_lock_irqsave(&kc->job_lock, flags);
|
|
+ list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
|
|
+ spin_unlock_irqrestore(&kc->job_lock, flags);
|
|
+
|
|
blk_start_plug(&plug);
|
|
process_jobs(&kc->complete_jobs, kc, run_complete_job);
|
|
process_jobs(&kc->pages_jobs, kc, run_pages_job);
|
|
@@ -650,7 +657,7 @@ static void dispatch_job(struct kcopyd_job *job)
|
|
struct dm_kcopyd_client *kc = job->kc;
|
|
atomic_inc(&kc->nr_jobs);
|
|
if (unlikely(!job->source.count))
|
|
- push(&kc->complete_jobs, job);
|
|
+ push(&kc->callback_jobs, job);
|
|
else if (job->pages == &zero_page_list)
|
|
push(&kc->io_jobs, job);
|
|
else
|
|
@@ -858,7 +865,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
|
|
job->read_err = read_err;
|
|
job->write_err = write_err;
|
|
|
|
- push(&kc->complete_jobs, job);
|
|
+ push(&kc->callback_jobs, job);
|
|
wake(kc);
|
|
}
|
|
EXPORT_SYMBOL(dm_kcopyd_do_callback);
|
|
@@ -888,6 +895,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
spin_lock_init(&kc->job_lock);
|
|
+ INIT_LIST_HEAD(&kc->callback_jobs);
|
|
INIT_LIST_HEAD(&kc->complete_jobs);
|
|
INIT_LIST_HEAD(&kc->io_jobs);
|
|
INIT_LIST_HEAD(&kc->pages_jobs);
|
|
@@ -939,6 +947,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
|
|
/* Wait for completion of all jobs submitted by this client. */
|
|
wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
|
|
|
|
+ BUG_ON(!list_empty(&kc->callback_jobs));
|
|
BUG_ON(!list_empty(&kc->complete_jobs));
|
|
BUG_ON(!list_empty(&kc->io_jobs));
|
|
BUG_ON(!list_empty(&kc->pages_jobs));
|
|
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
|
|
index 8d7ddee6ac4d3..ad980a38fb1e8 100644
|
|
--- a/drivers/md/dm-linear.c
|
|
+++ b/drivers/md/dm-linear.c
|
|
@@ -45,7 +45,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|
}
|
|
|
|
ret = -EINVAL;
|
|
- if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
|
|
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
|
|
ti->error = "Invalid device sector";
|
|
goto bad;
|
|
}
|
|
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
|
|
index 79eab1071ec22..5a51151f680d6 100644
|
|
--- a/drivers/md/dm-raid1.c
|
|
+++ b/drivers/md/dm-raid1.c
|
|
@@ -943,7 +943,8 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
|
|
char dummy;
|
|
int ret;
|
|
|
|
- if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
|
|
+ if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
|
|
+ offset != (sector_t)offset) {
|
|
ti->error = "Invalid offset";
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
|
|
index ae4b33d109246..36805b12661e1 100644
|
|
--- a/drivers/md/dm-snap.c
|
|
+++ b/drivers/md/dm-snap.c
|
|
@@ -19,6 +19,7 @@
|
|
#include <linux/vmalloc.h>
|
|
#include <linux/log2.h>
|
|
#include <linux/dm-kcopyd.h>
|
|
+#include <linux/semaphore.h>
|
|
|
|
#include "dm.h"
|
|
|
|
@@ -105,6 +106,9 @@ struct dm_snapshot {
|
|
/* The on disk metadata handler */
|
|
struct dm_exception_store *store;
|
|
|
|
+ /* Maximum number of in-flight COW jobs. */
|
|
+ struct semaphore cow_count;
|
|
+
|
|
struct dm_kcopyd_client *kcopyd_client;
|
|
|
|
/* Wait for events based on state_bits */
|
|
@@ -145,6 +149,19 @@ struct dm_snapshot {
|
|
#define RUNNING_MERGE 0
|
|
#define SHUTDOWN_MERGE 1
|
|
|
|
+/*
|
|
+ * Maximum number of chunks being copied on write.
|
|
+ *
|
|
+ * The value was decided experimentally as a trade-off between memory
|
|
+ * consumption, stalling the kernel's workqueues and maintaining a high enough
|
|
+ * throughput.
|
|
+ */
|
|
+#define DEFAULT_COW_THRESHOLD 2048
|
|
+
|
|
+static int cow_threshold = DEFAULT_COW_THRESHOLD;
|
|
+module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
|
|
+MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
|
|
+
|
|
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
|
|
"A percentage of time allocated for copy on write");
|
|
|
|
@@ -1190,6 +1207,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|
goto bad_hash_tables;
|
|
}
|
|
|
|
+ sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
|
|
+
|
|
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
|
|
if (IS_ERR(s->kcopyd_client)) {
|
|
r = PTR_ERR(s->kcopyd_client);
|
|
@@ -1575,6 +1594,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
|
|
rb_link_node(&pe->out_of_order_node, parent, p);
|
|
rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
|
|
}
|
|
+ up(&s->cow_count);
|
|
}
|
|
|
|
/*
|
|
@@ -1598,6 +1618,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
|
|
dest.count = src.count;
|
|
|
|
/* Hand over to kcopyd */
|
|
+ down(&s->cow_count);
|
|
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
|
|
}
|
|
|
|
@@ -1617,6 +1638,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
|
|
pe->full_bio = bio;
|
|
pe->full_bio_end_io = bio->bi_end_io;
|
|
|
|
+ down(&s->cow_count);
|
|
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
|
|
copy_callback, pe);
|
|
|
|
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
|
|
index 9038c302d5c29..44f180e47622f 100644
|
|
--- a/drivers/md/dm-table.c
|
|
+++ b/drivers/md/dm-table.c
|
|
@@ -1927,6 +1927,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
|
|
*/
|
|
if (blk_queue_is_zoned(q))
|
|
blk_revalidate_disk_zones(t->md->disk);
|
|
+
|
|
+ /* Allow reads to exceed readahead limits */
|
|
+ q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
|
|
}
|
|
|
|
unsigned int dm_table_get_num_targets(struct dm_table *t)
|
|
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
|
|
index 20b0776e39ef3..ed3caceaed07c 100644
|
|
--- a/drivers/md/dm-thin-metadata.c
|
|
+++ b/drivers/md/dm-thin-metadata.c
|
|
@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
|
|
return r;
|
|
}
|
|
|
|
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
|
|
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
|
|
{
|
|
int r;
|
|
uint32_t ref_count;
|
|
@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
|
|
down_read(&pmd->root_lock);
|
|
r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
|
|
if (!r)
|
|
- *result = (ref_count != 0);
|
|
+ *result = (ref_count > 1);
|
|
up_read(&pmd->root_lock);
|
|
|
|
return r;
|
|
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
|
|
index 35e954ea20a9b..f6be0d733c202 100644
|
|
--- a/drivers/md/dm-thin-metadata.h
|
|
+++ b/drivers/md/dm-thin-metadata.h
|
|
@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
|
|
|
|
int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
|
|
|
|
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
|
|
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
|
|
|
|
int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
|
|
int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
|
|
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
|
|
index dadd9696340c0..e83b63608262a 100644
|
|
--- a/drivers/md/dm-thin.c
|
|
+++ b/drivers/md/dm-thin.c
|
|
@@ -257,6 +257,7 @@ struct pool {
|
|
|
|
spinlock_t lock;
|
|
struct bio_list deferred_flush_bios;
|
|
+ struct bio_list deferred_flush_completions;
|
|
struct list_head prepared_mappings;
|
|
struct list_head prepared_discards;
|
|
struct list_head prepared_discards_pt2;
|
|
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
|
|
mempool_free(m, &m->tc->pool->mapping_pool);
|
|
}
|
|
|
|
+static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
|
|
+{
|
|
+ struct pool *pool = tc->pool;
|
|
+ unsigned long flags;
|
|
+
|
|
+ /*
|
|
+ * If the bio has the REQ_FUA flag set we must commit the metadata
|
|
+ * before signaling its completion.
|
|
+ */
|
|
+ if (!bio_triggers_commit(tc, bio)) {
|
|
+ bio_endio(bio);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Complete bio with an error if earlier I/O caused changes to the
|
|
+ * metadata that can't be committed, e.g, due to I/O errors on the
|
|
+ * metadata device.
|
|
+ */
|
|
+ if (dm_thin_aborted_changes(tc->td)) {
|
|
+ bio_io_error(bio);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Batch together any bios that trigger commits and then issue a
|
|
+ * single commit for them in process_deferred_bios().
|
|
+ */
|
|
+ spin_lock_irqsave(&pool->lock, flags);
|
|
+ bio_list_add(&pool->deferred_flush_completions, bio);
|
|
+ spin_unlock_irqrestore(&pool->lock, flags);
|
|
+}
|
|
+
|
|
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
|
|
{
|
|
struct thin_c *tc = m->tc;
|
|
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
|
|
*/
|
|
if (bio) {
|
|
inc_remap_and_issue_cell(tc, m->cell, m->data_block);
|
|
- bio_endio(bio);
|
|
+ complete_overwrite_bio(tc, bio);
|
|
} else {
|
|
inc_all_io_entry(tc->pool, m->cell->holder);
|
|
remap_and_issue(tc, m->cell->holder, m->data_block);
|
|
@@ -1048,7 +1082,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
|
|
* passdown we have to check that these blocks are now unused.
|
|
*/
|
|
int r = 0;
|
|
- bool used = true;
|
|
+ bool shared = true;
|
|
struct thin_c *tc = m->tc;
|
|
struct pool *pool = tc->pool;
|
|
dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
|
|
@@ -1058,11 +1092,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
|
|
while (b != end) {
|
|
/* find start of unmapped run */
|
|
for (; b < end; b++) {
|
|
- r = dm_pool_block_is_used(pool->pmd, b, &used);
|
|
+ r = dm_pool_block_is_shared(pool->pmd, b, &shared);
|
|
if (r)
|
|
goto out;
|
|
|
|
- if (!used)
|
|
+ if (!shared)
|
|
break;
|
|
}
|
|
|
|
@@ -1071,11 +1105,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
|
|
|
|
/* find end of run */
|
|
for (e = b + 1; e != end; e++) {
|
|
- r = dm_pool_block_is_used(pool->pmd, e, &used);
|
|
+ r = dm_pool_block_is_shared(pool->pmd, e, &shared);
|
|
if (r)
|
|
goto out;
|
|
|
|
- if (used)
|
|
+ if (shared)
|
|
break;
|
|
}
|
|
|
|
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
|
|
{
|
|
unsigned long flags;
|
|
struct bio *bio;
|
|
- struct bio_list bios;
|
|
+ struct bio_list bios, bio_completions;
|
|
struct thin_c *tc;
|
|
|
|
tc = get_first_thin(pool);
|
|
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
|
|
}
|
|
|
|
/*
|
|
- * If there are any deferred flush bios, we must commit
|
|
- * the metadata before issuing them.
|
|
+ * If there are any deferred flush bios, we must commit the metadata
|
|
+ * before issuing them or signaling their completion.
|
|
*/
|
|
bio_list_init(&bios);
|
|
+ bio_list_init(&bio_completions);
|
|
+
|
|
spin_lock_irqsave(&pool->lock, flags);
|
|
bio_list_merge(&bios, &pool->deferred_flush_bios);
|
|
bio_list_init(&pool->deferred_flush_bios);
|
|
+
|
|
+ bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
|
|
+ bio_list_init(&pool->deferred_flush_completions);
|
|
spin_unlock_irqrestore(&pool->lock, flags);
|
|
|
|
- if (bio_list_empty(&bios) &&
|
|
+ if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
|
|
!(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
|
|
return;
|
|
|
|
if (commit(pool)) {
|
|
+ bio_list_merge(&bios, &bio_completions);
|
|
+
|
|
while ((bio = bio_list_pop(&bios)))
|
|
bio_io_error(bio);
|
|
return;
|
|
}
|
|
pool->last_commit_jiffies = jiffies;
|
|
|
|
+ while ((bio = bio_list_pop(&bio_completions)))
|
|
+ bio_endio(bio);
|
|
+
|
|
while ((bio = bio_list_pop(&bios)))
|
|
generic_make_request(bio);
|
|
}
|
|
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
|
|
INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
|
|
spin_lock_init(&pool->lock);
|
|
bio_list_init(&pool->deferred_flush_bios);
|
|
+ bio_list_init(&pool->deferred_flush_completions);
|
|
INIT_LIST_HEAD(&pool->prepared_mappings);
|
|
INIT_LIST_HEAD(&pool->prepared_discards);
|
|
INIT_LIST_HEAD(&pool->prepared_discards_pt2);
|
|
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
|
|
index 954b7ab4e684d..e673dacf64181 100644
|
|
--- a/drivers/md/dm-unstripe.c
|
|
+++ b/drivers/md/dm-unstripe.c
|
|
@@ -78,7 +78,7 @@ static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|
goto err;
|
|
}
|
|
|
|
- if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) {
|
|
+ if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
|
|
ti->error = "Invalid striped device offset";
|
|
goto err;
|
|
}
|
|
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
|
|
index 1d54109071cc8..fa47249fa3e42 100644
|
|
--- a/drivers/md/raid1.c
|
|
+++ b/drivers/md/raid1.c
|
|
@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
|
|
reschedule_retry(r1_bio);
|
|
}
|
|
|
|
+static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
|
|
+{
|
|
+ sector_t sync_blocks = 0;
|
|
+ sector_t s = r1_bio->sector;
|
|
+ long sectors_to_go = r1_bio->sectors;
|
|
+
|
|
+ /* make sure these bits don't get cleared. */
|
|
+ do {
|
|
+ md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
|
|
+ s += sync_blocks;
|
|
+ sectors_to_go -= sync_blocks;
|
|
+ } while (sectors_to_go > 0);
|
|
+}
|
|
+
|
|
static void end_sync_write(struct bio *bio)
|
|
{
|
|
int uptodate = !bio->bi_status;
|
|
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
|
|
struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
|
|
|
|
if (!uptodate) {
|
|
- sector_t sync_blocks = 0;
|
|
- sector_t s = r1_bio->sector;
|
|
- long sectors_to_go = r1_bio->sectors;
|
|
- /* make sure these bits doesn't get cleared. */
|
|
- do {
|
|
- md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
|
|
- s += sync_blocks;
|
|
- sectors_to_go -= sync_blocks;
|
|
- } while (sectors_to_go > 0);
|
|
+ abort_sync_write(mddev, r1_bio);
|
|
set_bit(WriteErrorSeen, &rdev->flags);
|
|
if (!test_and_set_bit(WantReplacement, &rdev->flags))
|
|
set_bit(MD_RECOVERY_NEEDED, &
|
|
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
|
|
(i == r1_bio->read_disk ||
|
|
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
|
|
continue;
|
|
- if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
|
|
+ if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
|
|
+ abort_sync_write(mddev, r1_bio);
|
|
continue;
|
|
+ }
|
|
|
|
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
|
|
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
|
|
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
|
|
index b98e746e7fc4f..12cf8a04e839b 100644
|
|
--- a/drivers/md/raid10.c
|
|
+++ b/drivers/md/raid10.c
|
|
@@ -1209,7 +1209,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
|
|
struct bio *split = bio_split(bio, max_sectors,
|
|
gfp, &conf->bio_split);
|
|
bio_chain(split, bio);
|
|
+ allow_barrier(conf);
|
|
generic_make_request(bio);
|
|
+ wait_barrier(conf);
|
|
bio = split;
|
|
r10_bio->master_bio = bio;
|
|
r10_bio->sectors = max_sectors;
|
|
@@ -1514,7 +1516,9 @@ retry_write:
|
|
struct bio *split = bio_split(bio, r10_bio->sectors,
|
|
GFP_NOIO, &conf->bio_split);
|
|
bio_chain(split, bio);
|
|
+ allow_barrier(conf);
|
|
generic_make_request(bio);
|
|
+ wait_barrier(conf);
|
|
bio = split;
|
|
r10_bio->master_bio = bio;
|
|
}
|
|
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
|
|
index ec3a5ef7fee0b..cbbe6b6535be8 100644
|
|
--- a/drivers/md/raid5-cache.c
|
|
+++ b/drivers/md/raid5-cache.c
|
|
@@ -1935,12 +1935,14 @@ out:
|
|
}
|
|
|
|
static struct stripe_head *
|
|
-r5c_recovery_alloc_stripe(struct r5conf *conf,
|
|
- sector_t stripe_sect)
|
|
+r5c_recovery_alloc_stripe(
|
|
+ struct r5conf *conf,
|
|
+ sector_t stripe_sect,
|
|
+ int noblock)
|
|
{
|
|
struct stripe_head *sh;
|
|
|
|
- sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
|
|
+ sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
|
|
if (!sh)
|
|
return NULL; /* no more stripe available */
|
|
|
|
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
|
|
stripe_sect);
|
|
|
|
if (!sh) {
|
|
- sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
|
|
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
|
|
/*
|
|
* cannot get stripe from raid5_get_active_stripe
|
|
* try replay some stripes
|
|
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
|
|
r5c_recovery_replay_stripes(
|
|
cached_stripe_list, ctx);
|
|
sh = r5c_recovery_alloc_stripe(
|
|
- conf, stripe_sect);
|
|
+ conf, stripe_sect, 1);
|
|
}
|
|
if (!sh) {
|
|
+ int new_size = conf->min_nr_stripes * 2;
|
|
pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
|
|
mdname(mddev),
|
|
- conf->min_nr_stripes * 2);
|
|
- raid5_set_cache_size(mddev,
|
|
- conf->min_nr_stripes * 2);
|
|
- sh = r5c_recovery_alloc_stripe(conf,
|
|
- stripe_sect);
|
|
+ new_size);
|
|
+ ret = raid5_set_cache_size(mddev, new_size);
|
|
+ if (conf->min_nr_stripes <= new_size / 2) {
|
|
+ pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
|
|
+ mdname(mddev),
|
|
+ ret,
|
|
+ new_size,
|
|
+ conf->min_nr_stripes,
|
|
+ conf->max_nr_stripes);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ sh = r5c_recovery_alloc_stripe(
|
|
+ conf, stripe_sect, 0);
|
|
}
|
|
if (!sh) {
|
|
pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
|
|
- mdname(mddev));
|
|
+ mdname(mddev));
|
|
return -ENOMEM;
|
|
}
|
|
list_add_tail(&sh->lru, cached_stripe_list);
|
|
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
|
|
index 4990f0319f6cf..cecea901ab8c5 100644
|
|
--- a/drivers/md/raid5.c
|
|
+++ b/drivers/md/raid5.c
|
|
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
|
|
int
|
|
raid5_set_cache_size(struct mddev *mddev, int size)
|
|
{
|
|
+ int result = 0;
|
|
struct r5conf *conf = mddev->private;
|
|
|
|
if (size <= 16 || size > 32768)
|
|
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
|
|
|
|
mutex_lock(&conf->cache_size_mutex);
|
|
while (size > conf->max_nr_stripes)
|
|
- if (!grow_one_stripe(conf, GFP_KERNEL))
|
|
+ if (!grow_one_stripe(conf, GFP_KERNEL)) {
|
|
+ conf->min_nr_stripes = conf->max_nr_stripes;
|
|
+ result = -ENOMEM;
|
|
break;
|
|
+ }
|
|
mutex_unlock(&conf->cache_size_mutex);
|
|
|
|
- return 0;
|
|
+ return result;
|
|
}
|
|
EXPORT_SYMBOL(raid5_set_cache_size);
|
|
|
|
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
|
|
index 65a933a21e685..9a5334b726d66 100644
|
|
--- a/drivers/media/cec/cec-adap.c
|
|
+++ b/drivers/media/cec/cec-adap.c
|
|
@@ -455,7 +455,7 @@ int cec_thread_func(void *_adap)
|
|
(adap->needs_hpd &&
|
|
(!adap->is_configured && !adap->is_configuring)) ||
|
|
kthread_should_stop() ||
|
|
- (!adap->transmitting &&
|
|
+ (!adap->transmit_in_progress &&
|
|
!list_empty(&adap->transmit_queue)),
|
|
msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
|
|
timeout = err == 0;
|
|
@@ -463,7 +463,7 @@ int cec_thread_func(void *_adap)
|
|
/* Otherwise we just wait for something to happen. */
|
|
wait_event_interruptible(adap->kthread_waitq,
|
|
kthread_should_stop() ||
|
|
- (!adap->transmitting &&
|
|
+ (!adap->transmit_in_progress &&
|
|
!list_empty(&adap->transmit_queue)));
|
|
}
|
|
|
|
@@ -488,6 +488,7 @@ int cec_thread_func(void *_adap)
|
|
pr_warn("cec-%s: message %*ph timed out\n", adap->name,
|
|
adap->transmitting->msg.len,
|
|
adap->transmitting->msg.msg);
|
|
+ adap->transmit_in_progress = false;
|
|
adap->tx_timeouts++;
|
|
/* Just give up on this. */
|
|
cec_data_cancel(adap->transmitting,
|
|
@@ -499,7 +500,7 @@ int cec_thread_func(void *_adap)
|
|
* If we are still transmitting, or there is nothing new to
|
|
* transmit, then just continue waiting.
|
|
*/
|
|
- if (adap->transmitting || list_empty(&adap->transmit_queue))
|
|
+ if (adap->transmit_in_progress || list_empty(&adap->transmit_queue))
|
|
goto unlock;
|
|
|
|
/* Get a new message to transmit */
|
|
@@ -545,6 +546,8 @@ int cec_thread_func(void *_adap)
|
|
if (adap->ops->adap_transmit(adap, data->attempts,
|
|
signal_free_time, &data->msg))
|
|
cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
|
|
+ else
|
|
+ adap->transmit_in_progress = true;
|
|
|
|
unlock:
|
|
mutex_unlock(&adap->lock);
|
|
@@ -575,14 +578,17 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
|
|
data = adap->transmitting;
|
|
if (!data) {
|
|
/*
|
|
- * This can happen if a transmit was issued and the cable is
|
|
+ * This might happen if a transmit was issued and the cable is
|
|
* unplugged while the transmit is ongoing. Ignore this
|
|
* transmit in that case.
|
|
*/
|
|
- dprintk(1, "%s was called without an ongoing transmit!\n",
|
|
- __func__);
|
|
- goto unlock;
|
|
+ if (!adap->transmit_in_progress)
|
|
+ dprintk(1, "%s was called without an ongoing transmit!\n",
|
|
+ __func__);
|
|
+ adap->transmit_in_progress = false;
|
|
+ goto wake_thread;
|
|
}
|
|
+ adap->transmit_in_progress = false;
|
|
|
|
msg = &data->msg;
|
|
|
|
@@ -648,7 +654,6 @@ wake_thread:
|
|
* for transmitting or to retry the current message.
|
|
*/
|
|
wake_up_interruptible(&adap->kthread_waitq);
|
|
-unlock:
|
|
mutex_unlock(&adap->lock);
|
|
}
|
|
EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
|
|
@@ -1496,8 +1501,11 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
|
|
if (adap->monitor_all_cnt)
|
|
WARN_ON(call_op(adap, adap_monitor_all_enable, false));
|
|
mutex_lock(&adap->devnode.lock);
|
|
- if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
|
|
+ if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) {
|
|
WARN_ON(adap->ops->adap_enable(adap, false));
|
|
+ adap->transmit_in_progress = false;
|
|
+ wake_up_interruptible(&adap->kthread_waitq);
|
|
+ }
|
|
mutex_unlock(&adap->devnode.lock);
|
|
if (phys_addr == CEC_PHYS_ADDR_INVALID)
|
|
return;
|
|
@@ -1505,6 +1513,7 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
|
|
|
|
mutex_lock(&adap->devnode.lock);
|
|
adap->last_initiator = 0xff;
|
|
+ adap->transmit_in_progress = false;
|
|
|
|
if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
|
|
adap->ops->adap_enable(adap, true)) {
|
|
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
|
|
index 635db8e70ead5..8f987bc0dd883 100644
|
|
--- a/drivers/media/cec/cec-pin.c
|
|
+++ b/drivers/media/cec/cec-pin.c
|
|
@@ -601,8 +601,9 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
|
|
break;
|
|
/* Was the message ACKed? */
|
|
ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v;
|
|
- if (!ack && !pin->tx_ignore_nack_until_eom &&
|
|
- pin->tx_bit / 10 < pin->tx_msg.len && !pin->tx_post_eom) {
|
|
+ if (!ack && (!pin->tx_ignore_nack_until_eom ||
|
|
+ pin->tx_bit / 10 == pin->tx_msg.len - 1) &&
|
|
+ !pin->tx_post_eom) {
|
|
/*
|
|
* Note: the CEC spec is ambiguous regarding
|
|
* what action to take when a NACK appears
|
|
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
|
|
index fa483b95bc5a9..d9a590ae7545c 100644
|
|
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
|
|
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
|
|
@@ -1769,7 +1769,7 @@ typedef struct { u16 __; u8 _; } __packed x24;
|
|
unsigned s; \
|
|
\
|
|
for (s = 0; s < len; s++) { \
|
|
- u8 chr = font8x16[text[s] * 16 + line]; \
|
|
+ u8 chr = font8x16[(u8)text[s] * 16 + line]; \
|
|
\
|
|
if (hdiv == 2 && tpg->hflip) { \
|
|
pos[3] = (chr & (0x01 << 6) ? fg : bg); \
|
|
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
|
|
index 8ff8722cb6b16..fa77e2ae4ec4b 100644
|
|
--- a/drivers/media/common/videobuf2/videobuf2-core.c
|
|
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
|
|
@@ -812,6 +812,9 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
|
|
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
|
|
q->memory = memory;
|
|
q->waiting_for_buffers = !q->is_output;
|
|
+ } else if (q->memory != memory) {
|
|
+ dprintk(1, "memory model mismatch\n");
|
|
+ return -EINVAL;
|
|
}
|
|
|
|
num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
|
|
@@ -2143,9 +2146,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
|
|
return -EINVAL;
|
|
}
|
|
}
|
|
+
|
|
+ mutex_lock(&q->mmap_lock);
|
|
+
|
|
if (vb2_fileio_is_active(q)) {
|
|
dprintk(1, "mmap: file io in progress\n");
|
|
- return -EBUSY;
|
|
+ ret = -EBUSY;
|
|
+ goto unlock;
|
|
}
|
|
|
|
/*
|
|
@@ -2153,7 +2160,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
|
|
*/
|
|
ret = __find_plane_by_offset(q, off, &buffer, &plane);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto unlock;
|
|
|
|
vb = q->bufs[buffer];
|
|
|
|
@@ -2166,11 +2173,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
|
|
if (length < (vma->vm_end - vma->vm_start)) {
|
|
dprintk(1,
|
|
"MMAP invalid, as it would overflow buffer length\n");
|
|
- return -EINVAL;
|
|
+ ret = -EINVAL;
|
|
+ goto unlock;
|
|
}
|
|
|
|
- mutex_lock(&q->mmap_lock);
|
|
ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
|
|
+
|
|
+unlock:
|
|
mutex_unlock(&q->mmap_lock);
|
|
if (ret)
|
|
return ret;
|
|
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
|
|
index 1c933b2cf7603..3ef5df1648d77 100644
|
|
--- a/drivers/media/firewire/firedtv-avc.c
|
|
+++ b/drivers/media/firewire/firedtv-avc.c
|
|
@@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r)
|
|
return r->operand[7];
|
|
}
|
|
|
|
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
|
|
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
|
|
+ unsigned int *len)
|
|
{
|
|
struct avc_command_frame *c = (void *)fdtv->avc_data;
|
|
struct avc_response_frame *r = (void *)fdtv->avc_data;
|
|
@@ -1009,7 +1010,8 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
|
|
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
|
|
+ unsigned int *len)
|
|
{
|
|
struct avc_command_frame *c = (void *)fdtv->avc_data;
|
|
struct avc_response_frame *r = (void *)fdtv->avc_data;
|
|
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
|
|
index 876cdec8329be..009905a199472 100644
|
|
--- a/drivers/media/firewire/firedtv.h
|
|
+++ b/drivers/media/firewire/firedtv.h
|
|
@@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
|
|
struct dvb_diseqc_master_cmd *diseqcmd);
|
|
void avc_remote_ctrl_work(struct work_struct *work);
|
|
int avc_register_remote_control(struct firedtv *fdtv);
|
|
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
|
|
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
|
|
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
|
|
+ unsigned int *len);
|
|
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
|
|
+ unsigned int *len);
|
|
int avc_ca_reset(struct firedtv *fdtv);
|
|
int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
|
|
int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
|
|
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
|
|
index 704af210e2708..421e2fd2481d3 100644
|
|
--- a/drivers/media/i2c/Kconfig
|
|
+++ b/drivers/media/i2c/Kconfig
|
|
@@ -61,6 +61,7 @@ config VIDEO_TDA1997X
|
|
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
|
|
depends on SND_SOC
|
|
select SND_PCM
|
|
+ select HDMI
|
|
---help---
|
|
V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
|
|
|
|
@@ -610,6 +611,7 @@ config VIDEO_IMX274
|
|
tristate "Sony IMX274 sensor support"
|
|
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
|
|
depends on MEDIA_CAMERA_SUPPORT
|
|
+ select REGMAP_I2C
|
|
---help---
|
|
This is a V4L2 sensor driver for the Sony IMX274
|
|
CMOS image sensor.
|
|
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
|
|
index 5b008b0002c02..aa8b04cfed0f6 100644
|
|
--- a/drivers/media/i2c/ad9389b.c
|
|
+++ b/drivers/media/i2c/ad9389b.c
|
|
@@ -578,7 +578,7 @@ static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
|
|
.type = V4L2_DV_BT_656_1120,
|
|
/* keep this initialization for compatibility with GCC < 4.4.6 */
|
|
.reserved = { 0 },
|
|
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
|
|
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
|
|
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
|
|
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
|
|
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
|
|
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
|
|
index f3899cc84e27f..88349b5053cce 100644
|
|
--- a/drivers/media/i2c/adv7511.c
|
|
+++ b/drivers/media/i2c/adv7511.c
|
|
@@ -130,7 +130,7 @@ static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
|
|
.type = V4L2_DV_BT_656_1120,
|
|
/* keep this initialization for compatibility with GCC < 4.4.6 */
|
|
.reserved = { 0 },
|
|
- V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
|
|
+ V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
|
|
ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
|
|
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
|
|
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
|
|
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
|
|
index 9eb7c70a77125..ff28f5692986d 100644
|
|
--- a/drivers/media/i2c/adv7604.c
|
|
+++ b/drivers/media/i2c/adv7604.c
|
|
@@ -766,7 +766,7 @@ static const struct v4l2_dv_timings_cap adv7604_timings_cap_analog = {
|
|
.type = V4L2_DV_BT_656_1120,
|
|
/* keep this initialization for compatibility with GCC < 4.4.6 */
|
|
.reserved = { 0 },
|
|
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
|
|
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
|
|
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
|
|
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
|
|
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
|
|
@@ -777,7 +777,7 @@ static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = {
|
|
.type = V4L2_DV_BT_656_1120,
|
|
/* keep this initialization for compatibility with GCC < 4.4.6 */
|
|
.reserved = { 0 },
|
|
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
|
|
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000,
|
|
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
|
|
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
|
|
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
|
|
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
|
|
index 4721d49dcf0fe..5305c3ad80e64 100644
|
|
--- a/drivers/media/i2c/adv7842.c
|
|
+++ b/drivers/media/i2c/adv7842.c
|
|
@@ -663,7 +663,7 @@ static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
|
|
.type = V4L2_DV_BT_656_1120,
|
|
/* keep this initialization for compatibility with GCC < 4.4.6 */
|
|
.reserved = { 0 },
|
|
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
|
|
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
|
|
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
|
|
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
|
|
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
|
|
@@ -674,7 +674,7 @@ static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
|
|
.type = V4L2_DV_BT_656_1120,
|
|
/* keep this initialization for compatibility with GCC < 4.4.6 */
|
|
.reserved = { 0 },
|
|
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
|
|
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000,
|
|
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
|
|
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
|
|
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
|
|
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
|
|
index 11c69281692eb..95a0e7d9851ae 100644
|
|
--- a/drivers/media/i2c/imx274.c
|
|
+++ b/drivers/media/i2c/imx274.c
|
|
@@ -619,16 +619,19 @@ static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
|
|
|
|
static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
|
|
{
|
|
+ unsigned int uint_val;
|
|
int err;
|
|
|
|
- err = regmap_read(priv->regmap, addr, (unsigned int *)val);
|
|
+ err = regmap_read(priv->regmap, addr, &uint_val);
|
|
if (err)
|
|
dev_err(&priv->client->dev,
|
|
"%s : i2c read failed, addr = %x\n", __func__, addr);
|
|
else
|
|
dev_dbg(&priv->client->dev,
|
|
"%s : addr 0x%x, val=0x%x\n", __func__,
|
|
- addr, *val);
|
|
+ addr, uint_val);
|
|
+
|
|
+ *val = uint_val;
|
|
return err;
|
|
}
|
|
|
|
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
|
|
index eaefdb58653b7..703d29abb3635 100644
|
|
--- a/drivers/media/i2c/ov5640.c
|
|
+++ b/drivers/media/i2c/ov5640.c
|
|
@@ -2020,6 +2020,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
|
|
struct ov5640_dev *sensor = to_ov5640_dev(sd);
|
|
const struct ov5640_mode_info *new_mode;
|
|
struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
|
|
+ struct v4l2_mbus_framefmt *fmt;
|
|
int ret;
|
|
|
|
if (format->pad != 0)
|
|
@@ -2037,22 +2038,20 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
|
|
if (ret)
|
|
goto out;
|
|
|
|
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
|
|
- struct v4l2_mbus_framefmt *fmt =
|
|
- v4l2_subdev_get_try_format(sd, cfg, 0);
|
|
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
|
|
+ fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
|
|
+ else
|
|
+ fmt = &sensor->fmt;
|
|
|
|
- *fmt = *mbus_fmt;
|
|
- goto out;
|
|
- }
|
|
+ *fmt = *mbus_fmt;
|
|
|
|
if (new_mode != sensor->current_mode) {
|
|
sensor->current_mode = new_mode;
|
|
sensor->pending_mode_change = true;
|
|
}
|
|
- if (mbus_fmt->code != sensor->fmt.code) {
|
|
- sensor->fmt = *mbus_fmt;
|
|
+ if (mbus_fmt->code != sensor->fmt.code)
|
|
sensor->pending_fmt_change = true;
|
|
- }
|
|
+
|
|
out:
|
|
mutex_unlock(&sensor->lock);
|
|
return ret;
|
|
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
|
|
index 41d470d9ca943..00dc930e049f9 100644
|
|
--- a/drivers/media/i2c/tc358743.c
|
|
+++ b/drivers/media/i2c/tc358743.c
|
|
@@ -59,7 +59,7 @@ static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
|
|
/* keep this initialization for compatibility with GCC < 4.4.6 */
|
|
.reserved = { 0 },
|
|
/* Pixel clock from REF_01 p. 20. Min/max height/width are unknown */
|
|
- V4L2_INIT_BT_TIMINGS(1, 10000, 1, 10000, 0, 165000000,
|
|
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 13000000, 165000000,
|
|
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
|
|
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
|
|
V4L2_DV_BT_CAP_PROGRESSIVE |
|
|
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
|
|
index 498ad2368cbcf..f5ee28058ea2a 100644
|
|
--- a/drivers/media/i2c/ths8200.c
|
|
+++ b/drivers/media/i2c/ths8200.c
|
|
@@ -49,7 +49,7 @@ static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
|
|
.type = V4L2_DV_BT_656_1120,
|
|
/* keep this initialization for compatibility with GCC < 4.4.6 */
|
|
.reserved = { 0 },
|
|
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
|
|
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1080, 25000000, 148500000,
|
|
V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
|
|
};
|
|
|
|
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
|
|
index 4d49af86c15ee..ec0758dca2fcd 100644
|
|
--- a/drivers/media/i2c/video-i2c.c
|
|
+++ b/drivers/media/i2c/video-i2c.c
|
|
@@ -510,7 +510,12 @@ static const struct v4l2_ioctl_ops video_i2c_ioctl_ops = {
|
|
|
|
static void video_i2c_release(struct video_device *vdev)
|
|
{
|
|
- kfree(video_get_drvdata(vdev));
|
|
+ struct video_i2c_data *data = video_get_drvdata(vdev);
|
|
+
|
|
+ v4l2_device_unregister(&data->v4l2_dev);
|
|
+ mutex_destroy(&data->lock);
|
|
+ mutex_destroy(&data->queue_lock);
|
|
+ kfree(data);
|
|
}
|
|
|
|
static int video_i2c_probe(struct i2c_client *client,
|
|
@@ -608,10 +613,6 @@ static int video_i2c_remove(struct i2c_client *client)
|
|
struct video_i2c_data *data = i2c_get_clientdata(client);
|
|
|
|
video_unregister_device(&data->vdev);
|
|
- v4l2_device_unregister(&data->v4l2_dev);
|
|
-
|
|
- mutex_destroy(&data->lock);
|
|
- mutex_destroy(&data->queue_lock);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
|
|
index 39804d830305c..fd5c52b21436b 100644
|
|
--- a/drivers/media/pci/cx23885/cx23885-core.c
|
|
+++ b/drivers/media/pci/cx23885/cx23885-core.c
|
|
@@ -23,6 +23,7 @@
|
|
#include <linux/moduleparam.h>
|
|
#include <linux/kmod.h>
|
|
#include <linux/kernel.h>
|
|
+#include <linux/pci.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/delay.h>
|
|
@@ -41,6 +42,18 @@ MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_VERSION(CX23885_VERSION);
|
|
|
|
+/*
|
|
+ * Some platforms have been found to require periodic resetting of the DMA
|
|
+ * engine. Ryzen and XEON platforms are known to be affected. The symptom
|
|
+ * encountered is "mpeg risc op code error". Only Ryzen platforms employ
|
|
+ * this workaround if the option equals 1. The workaround can be explicitly
|
|
+ * disabled for all platforms by setting to 0, the workaround can be forced
|
|
+ * on for any platform by setting to 2.
|
|
+ */
|
|
+static unsigned int dma_reset_workaround = 1;
|
|
+module_param(dma_reset_workaround, int, 0644);
|
|
+MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
|
|
+
|
|
static unsigned int debug;
|
|
module_param(debug, int, 0644);
|
|
MODULE_PARM_DESC(debug, "enable debug messages");
|
|
@@ -603,8 +616,13 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port,
|
|
|
|
static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
|
|
{
|
|
- uint32_t reg1_val = cx_read(TC_REQ); /* read-only */
|
|
- uint32_t reg2_val = cx_read(TC_REQ_SET);
|
|
+ uint32_t reg1_val, reg2_val;
|
|
+
|
|
+ if (!dev->need_dma_reset)
|
|
+ return;
|
|
+
|
|
+ reg1_val = cx_read(TC_REQ); /* read-only */
|
|
+ reg2_val = cx_read(TC_REQ_SET);
|
|
|
|
if (reg1_val && reg2_val) {
|
|
cx_write(TC_REQ, reg1_val);
|
|
@@ -2058,6 +2076,37 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
|
|
/* TODO: 23-19 */
|
|
}
|
|
|
|
+static struct {
|
|
+ int vendor, dev;
|
|
+} const broken_dev_id[] = {
|
|
+ /* According with
|
|
+ * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
|
|
+ * 0x1451 is PCI ID for the IOMMU found on Ryzen
|
|
+ */
|
|
+ { PCI_VENDOR_ID_AMD, 0x1451 },
|
|
+};
|
|
+
|
|
+static bool cx23885_does_need_dma_reset(void)
|
|
+{
|
|
+ int i;
|
|
+ struct pci_dev *pdev = NULL;
|
|
+
|
|
+ if (dma_reset_workaround == 0)
|
|
+ return false;
|
|
+ else if (dma_reset_workaround == 2)
|
|
+ return true;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
|
|
+ pdev = pci_get_device(broken_dev_id[i].vendor,
|
|
+ broken_dev_id[i].dev, NULL);
|
|
+ if (pdev) {
|
|
+ pci_dev_put(pdev);
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+}
|
|
+
|
|
static int cx23885_initdev(struct pci_dev *pci_dev,
|
|
const struct pci_device_id *pci_id)
|
|
{
|
|
@@ -2069,6 +2118,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
|
|
if (NULL == dev)
|
|
return -ENOMEM;
|
|
|
|
+ dev->need_dma_reset = cx23885_does_need_dma_reset();
|
|
+
|
|
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
|
|
if (err < 0)
|
|
goto fail_free;
|
|
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
|
|
index d54c7ee1ab218..cf965efabe666 100644
|
|
--- a/drivers/media/pci/cx23885/cx23885.h
|
|
+++ b/drivers/media/pci/cx23885/cx23885.h
|
|
@@ -451,6 +451,8 @@ struct cx23885_dev {
|
|
/* Analog raw audio */
|
|
struct cx23885_audio_dev *audio_dev;
|
|
|
|
+ /* Does the system require periodic DMA resets? */
|
|
+ unsigned int need_dma_reset:1;
|
|
};
|
|
|
|
static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
|
|
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
|
|
index d26c2d85a0094..d20d3df5778bc 100644
|
|
--- a/drivers/media/platform/coda/coda-bit.c
|
|
+++ b/drivers/media/platform/coda/coda-bit.c
|
|
@@ -991,16 +991,15 @@ static int coda_start_encoding(struct coda_ctx *ctx)
|
|
else
|
|
coda_write(dev, CODA_STD_H264,
|
|
CODA_CMD_ENC_SEQ_COD_STD);
|
|
- if (ctx->params.h264_deblk_enabled) {
|
|
- value = ((ctx->params.h264_deblk_alpha &
|
|
- CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
|
|
- CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
|
|
- ((ctx->params.h264_deblk_beta &
|
|
- CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
|
|
- CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
|
|
- } else {
|
|
- value = 1 << CODA_264PARAM_DISABLEDEBLK_OFFSET;
|
|
- }
|
|
+ value = ((ctx->params.h264_disable_deblocking_filter_idc &
|
|
+ CODA_264PARAM_DISABLEDEBLK_MASK) <<
|
|
+ CODA_264PARAM_DISABLEDEBLK_OFFSET) |
|
|
+ ((ctx->params.h264_slice_alpha_c0_offset_div2 &
|
|
+ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
|
|
+ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
|
|
+ ((ctx->params.h264_slice_beta_offset_div2 &
|
|
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
|
|
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
|
|
coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
|
|
break;
|
|
case V4L2_PIX_FMT_JPEG:
|
|
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
|
|
index 2848ea5f464d9..d0b36d6eb86ef 100644
|
|
--- a/drivers/media/platform/coda/coda-common.c
|
|
+++ b/drivers/media/platform/coda/coda-common.c
|
|
@@ -1792,14 +1792,13 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
|
|
ctx->params.h264_max_qp = ctrl->val;
|
|
break;
|
|
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
|
|
- ctx->params.h264_deblk_alpha = ctrl->val;
|
|
+ ctx->params.h264_slice_alpha_c0_offset_div2 = ctrl->val;
|
|
break;
|
|
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
|
|
- ctx->params.h264_deblk_beta = ctrl->val;
|
|
+ ctx->params.h264_slice_beta_offset_div2 = ctrl->val;
|
|
break;
|
|
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
|
|
- ctx->params.h264_deblk_enabled = (ctrl->val ==
|
|
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
|
|
+ ctx->params.h264_disable_deblocking_filter_idc = ctrl->val;
|
|
break;
|
|
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
|
|
/* TODO: switch between baseline and constrained baseline */
|
|
@@ -1881,13 +1880,13 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
|
|
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
|
|
V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51);
|
|
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
|
|
- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, 0, 15, 1, 0);
|
|
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0);
|
|
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
|
|
- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, 0, 15, 1, 0);
|
|
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0);
|
|
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
|
|
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
|
|
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0,
|
|
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
|
|
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
|
|
+ 0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
|
|
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
|
|
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
|
|
V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
|
|
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
|
|
index 19ac0b9dc6eba..2469ca1dc5985 100644
|
|
--- a/drivers/media/platform/coda/coda.h
|
|
+++ b/drivers/media/platform/coda/coda.h
|
|
@@ -115,9 +115,9 @@ struct coda_params {
|
|
u8 h264_inter_qp;
|
|
u8 h264_min_qp;
|
|
u8 h264_max_qp;
|
|
- u8 h264_deblk_enabled;
|
|
- u8 h264_deblk_alpha;
|
|
- u8 h264_deblk_beta;
|
|
+ u8 h264_disable_deblocking_filter_idc;
|
|
+ s8 h264_slice_alpha_c0_offset_div2;
|
|
+ s8 h264_slice_beta_offset_div2;
|
|
u8 h264_profile_idc;
|
|
u8 h264_level_idc;
|
|
u8 mpeg4_intra_qp;
|
|
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
|
|
index 5e7b00a97671e..e675e38f3475e 100644
|
|
--- a/drivers/media/platform/coda/coda_regs.h
|
|
+++ b/drivers/media/platform/coda/coda_regs.h
|
|
@@ -292,7 +292,7 @@
|
|
#define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET 8
|
|
#define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK 0x0f
|
|
#define CODA_264PARAM_DISABLEDEBLK_OFFSET 6
|
|
-#define CODA_264PARAM_DISABLEDEBLK_MASK 0x01
|
|
+#define CODA_264PARAM_DISABLEDEBLK_MASK 0x03
|
|
#define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET 5
|
|
#define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_MASK 0x01
|
|
#define CODA_264PARAM_CHROMAQPOFFSET_OFFSET 0
|
|
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
|
|
index 18c035ef84cfa..df1ae6b5c8545 100644
|
|
--- a/drivers/media/platform/davinci/vpbe.c
|
|
+++ b/drivers/media/platform/davinci/vpbe.c
|
|
@@ -740,7 +740,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
|
|
if (ret) {
|
|
v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
|
|
def_output);
|
|
- return ret;
|
|
+ goto fail_kfree_amp;
|
|
}
|
|
|
|
printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
|
|
@@ -748,12 +748,15 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
|
|
if (ret) {
|
|
v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
|
|
def_mode);
|
|
- return ret;
|
|
+ goto fail_kfree_amp;
|
|
}
|
|
vpbe_dev->initialized = 1;
|
|
/* TBD handling of bootargs for default output and mode */
|
|
return 0;
|
|
|
|
+fail_kfree_amp:
|
|
+ mutex_lock(&vpbe_dev->lock);
|
|
+ kfree(vpbe_dev->amp);
|
|
fail_kfree_encoders:
|
|
kfree(vpbe_dev->encoders);
|
|
fail_dev_unregister:
|
|
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
|
|
index 3e73e9db781f4..7c025045ea904 100644
|
|
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
|
|
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
|
|
@@ -41,25 +41,27 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
|
|
node = of_parse_phandle(dev->of_node, "mediatek,larb", 0);
|
|
if (!node) {
|
|
mtk_v4l2_err("no mediatek,larb found");
|
|
- return -1;
|
|
+ return -ENODEV;
|
|
}
|
|
pdev = of_find_device_by_node(node);
|
|
+ of_node_put(node);
|
|
if (!pdev) {
|
|
mtk_v4l2_err("no mediatek,larb device found");
|
|
- return -1;
|
|
+ return -ENODEV;
|
|
}
|
|
pm->larbvenc = &pdev->dev;
|
|
|
|
node = of_parse_phandle(dev->of_node, "mediatek,larb", 1);
|
|
if (!node) {
|
|
mtk_v4l2_err("no mediatek,larb found");
|
|
- return -1;
|
|
+ return -ENODEV;
|
|
}
|
|
|
|
pdev = of_find_device_by_node(node);
|
|
+ of_node_put(node);
|
|
if (!pdev) {
|
|
mtk_v4l2_err("no mediatek,larb device found");
|
|
- return -1;
|
|
+ return -ENODEV;
|
|
}
|
|
|
|
pm->larbvenclt = &pdev->dev;
|
|
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
|
|
index bb6add9d340e2..5b8350e87e750 100644
|
|
--- a/drivers/media/platform/qcom/venus/core.c
|
|
+++ b/drivers/media/platform/qcom/venus/core.c
|
|
@@ -264,6 +264,14 @@ static int venus_probe(struct platform_device *pdev)
|
|
if (ret)
|
|
return ret;
|
|
|
|
+ if (!dev->dma_parms) {
|
|
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
|
|
+ GFP_KERNEL);
|
|
+ if (!dev->dma_parms)
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
|
|
+
|
|
INIT_LIST_HEAD(&core->instances);
|
|
mutex_init(&core->lock);
|
|
INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
|
|
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
|
|
index d82db738f174e..2d1ae83e2fde6 100644
|
|
--- a/drivers/media/platform/vim2m.c
|
|
+++ b/drivers/media/platform/vim2m.c
|
|
@@ -805,10 +805,13 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
|
|
static void vim2m_stop_streaming(struct vb2_queue *q)
|
|
{
|
|
struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
|
|
+ struct vim2m_dev *dev = ctx->dev;
|
|
struct vb2_v4l2_buffer *vbuf;
|
|
unsigned long flags;
|
|
|
|
- flush_scheduled_work();
|
|
+ if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
|
|
+ cancel_delayed_work_sync(&dev->work_run);
|
|
+
|
|
for (;;) {
|
|
if (V4L2_TYPE_IS_OUTPUT(q->type))
|
|
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
|
|
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
|
|
index 626e2b24a4033..ec1b1a8ea7754 100644
|
|
--- a/drivers/media/platform/vivid/vivid-core.c
|
|
+++ b/drivers/media/platform/vivid/vivid-core.c
|
|
@@ -669,6 +669,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
|
|
|
|
/* Initialize media device */
|
|
strlcpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model));
|
|
+ snprintf(dev->mdev.bus_info, sizeof(dev->mdev.bus_info),
|
|
+ "platform:%s-%03d", VIVID_MODULE_NAME, inst);
|
|
dev->mdev.dev = &pdev->dev;
|
|
media_device_init(&dev->mdev);
|
|
dev->mdev.ops = &vivid_media_ops;
|
|
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
|
|
index eebfff2126be2..46e46e34a9e5f 100644
|
|
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
|
|
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
|
|
@@ -873,8 +873,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
|
|
"%s-vid-cap", dev->v4l2_dev.name);
|
|
|
|
if (IS_ERR(dev->kthread_vid_cap)) {
|
|
+ int err = PTR_ERR(dev->kthread_vid_cap);
|
|
+
|
|
+ dev->kthread_vid_cap = NULL;
|
|
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
|
|
- return PTR_ERR(dev->kthread_vid_cap);
|
|
+ return err;
|
|
}
|
|
*pstreaming = true;
|
|
vivid_grab_controls(dev, true);
|
|
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
|
|
index 5a14810eeb691..ce5bcda2348c9 100644
|
|
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
|
|
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
|
|
@@ -244,8 +244,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
|
|
"%s-vid-out", dev->v4l2_dev.name);
|
|
|
|
if (IS_ERR(dev->kthread_vid_out)) {
|
|
+ int err = PTR_ERR(dev->kthread_vid_out);
|
|
+
|
|
+ dev->kthread_vid_out = NULL;
|
|
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
|
|
- return PTR_ERR(dev->kthread_vid_out);
|
|
+ return err;
|
|
}
|
|
*pstreaming = true;
|
|
vivid_grab_controls(dev, true);
|
|
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
|
|
index 673772cd17d61..a88637a42f44f 100644
|
|
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
|
|
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
|
|
@@ -449,6 +449,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
|
|
tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
|
|
break;
|
|
}
|
|
+ vfree(dev->bitmap_cap);
|
|
+ dev->bitmap_cap = NULL;
|
|
vivid_update_quality(dev);
|
|
tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
|
|
dev->crop_cap = dev->src_rect;
|
|
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
|
|
index 9645a91b87825..661f4015fba1a 100644
|
|
--- a/drivers/media/platform/vivid/vivid-vid-common.c
|
|
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
|
|
@@ -21,7 +21,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
|
|
.type = V4L2_DV_BT_656_1120,
|
|
/* keep this initialization for compatibility with GCC < 4.4.6 */
|
|
.reserved = { 0 },
|
|
- V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
|
|
+ V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
|
|
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
|
|
V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
|
|
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
|
|
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
|
|
index 552bbe82a160a..66a174979b3c9 100644
|
|
--- a/drivers/media/rc/rc-main.c
|
|
+++ b/drivers/media/rc/rc-main.c
|
|
@@ -695,7 +695,8 @@ void rc_repeat(struct rc_dev *dev)
|
|
(dev->last_toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0)
|
|
};
|
|
|
|
- ir_lirc_scancode_event(dev, &sc);
|
|
+ if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
|
|
+ ir_lirc_scancode_event(dev, &sc);
|
|
|
|
spin_lock_irqsave(&dev->keylock, flags);
|
|
|
|
@@ -735,7 +736,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
|
|
.keycode = keycode
|
|
};
|
|
|
|
- ir_lirc_scancode_event(dev, &sc);
|
|
+ if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
|
|
+ ir_lirc_scancode_event(dev, &sc);
|
|
|
|
if (new_event && dev->keypressed)
|
|
ir_do_keyup(dev, false);
|
|
@@ -1950,6 +1952,8 @@ void rc_unregister_device(struct rc_dev *dev)
|
|
rc_free_rx_device(dev);
|
|
|
|
mutex_lock(&dev->lock);
|
|
+ if (dev->users && dev->close)
|
|
+ dev->close(dev);
|
|
dev->registered = false;
|
|
mutex_unlock(&dev->lock);
|
|
|
|
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
|
|
index 024c751eb1659..2ad2ddeaff513 100644
|
|
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
|
|
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
|
|
@@ -155,7 +155,6 @@ static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream)
|
|
stream->props.u.bulk.buffersize,
|
|
usb_urb_complete, stream);
|
|
|
|
- stream->urb_list[i]->transfer_flags = URB_FREE_BUFFER;
|
|
stream->urbs_initialized++;
|
|
}
|
|
return 0;
|
|
@@ -186,7 +185,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
|
|
urb->complete = usb_urb_complete;
|
|
urb->pipe = usb_rcvisocpipe(stream->udev,
|
|
stream->props.endpoint);
|
|
- urb->transfer_flags = URB_ISO_ASAP | URB_FREE_BUFFER;
|
|
+ urb->transfer_flags = URB_ISO_ASAP;
|
|
urb->interval = stream->props.u.isoc.interval;
|
|
urb->number_of_packets = stream->props.u.isoc.framesperurb;
|
|
urb->transfer_buffer_length = stream->props.u.isoc.framesize *
|
|
@@ -210,7 +209,7 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream)
|
|
if (stream->state & USB_STATE_URB_BUF) {
|
|
while (stream->buf_num) {
|
|
stream->buf_num--;
|
|
- stream->buf_list[stream->buf_num] = NULL;
|
|
+ kfree(stream->buf_list[stream->buf_num]);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
|
|
index bc369a0934a36..76dc3ee8ca212 100644
|
|
--- a/drivers/media/usb/uvc/uvc_driver.c
|
|
+++ b/drivers/media/usb/uvc/uvc_driver.c
|
|
@@ -1824,11 +1824,7 @@ static void uvc_delete(struct kref *kref)
|
|
usb_put_intf(dev->intf);
|
|
usb_put_dev(dev->udev);
|
|
|
|
- if (dev->vdev.dev)
|
|
- v4l2_device_unregister(&dev->vdev);
|
|
#ifdef CONFIG_MEDIA_CONTROLLER
|
|
- if (media_devnode_is_registered(dev->mdev.devnode))
|
|
- media_device_unregister(&dev->mdev);
|
|
media_device_cleanup(&dev->mdev);
|
|
#endif
|
|
|
|
@@ -1885,6 +1881,15 @@ static void uvc_unregister_video(struct uvc_device *dev)
|
|
|
|
uvc_debugfs_cleanup_stream(stream);
|
|
}
|
|
+
|
|
+ uvc_status_unregister(dev);
|
|
+
|
|
+ if (dev->vdev.dev)
|
|
+ v4l2_device_unregister(&dev->vdev);
|
|
+#ifdef CONFIG_MEDIA_CONTROLLER
|
|
+ if (media_devnode_is_registered(dev->mdev.devnode))
|
|
+ media_device_unregister(&dev->mdev);
|
|
+#endif
|
|
}
|
|
|
|
int uvc_register_video_device(struct uvc_device *dev,
|
|
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
|
|
index 0722dc684378f..883e4cab45e79 100644
|
|
--- a/drivers/media/usb/uvc/uvc_status.c
|
|
+++ b/drivers/media/usb/uvc/uvc_status.c
|
|
@@ -54,7 +54,7 @@ error:
|
|
return ret;
|
|
}
|
|
|
|
-static void uvc_input_cleanup(struct uvc_device *dev)
|
|
+static void uvc_input_unregister(struct uvc_device *dev)
|
|
{
|
|
if (dev->input)
|
|
input_unregister_device(dev->input);
|
|
@@ -71,7 +71,7 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
|
|
|
|
#else
|
|
#define uvc_input_init(dev)
|
|
-#define uvc_input_cleanup(dev)
|
|
+#define uvc_input_unregister(dev)
|
|
#define uvc_input_report_key(dev, code, value)
|
|
#endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */
|
|
|
|
@@ -292,12 +292,16 @@ int uvc_status_init(struct uvc_device *dev)
|
|
return 0;
|
|
}
|
|
|
|
-void uvc_status_cleanup(struct uvc_device *dev)
|
|
+void uvc_status_unregister(struct uvc_device *dev)
|
|
{
|
|
usb_kill_urb(dev->int_urb);
|
|
+ uvc_input_unregister(dev);
|
|
+}
|
|
+
|
|
+void uvc_status_cleanup(struct uvc_device *dev)
|
|
+{
|
|
usb_free_urb(dev->int_urb);
|
|
kfree(dev->status);
|
|
- uvc_input_cleanup(dev);
|
|
}
|
|
|
|
int uvc_status_start(struct uvc_device *dev, gfp_t flags)
|
|
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
|
|
index c0cbd833d0a4c..1db6634b24552 100644
|
|
--- a/drivers/media/usb/uvc/uvcvideo.h
|
|
+++ b/drivers/media/usb/uvc/uvcvideo.h
|
|
@@ -757,6 +757,7 @@ int uvc_register_video_device(struct uvc_device *dev,
|
|
|
|
/* Status */
|
|
int uvc_status_init(struct uvc_device *dev);
|
|
+void uvc_status_unregister(struct uvc_device *dev);
|
|
void uvc_status_cleanup(struct uvc_device *dev);
|
|
int uvc_status_start(struct uvc_device *dev, gfp_t flags);
|
|
void uvc_status_stop(struct uvc_device *dev);
|
|
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
|
|
index df0ac38c40500..e0ddb9a52bd1f 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-device.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-device.c
|
|
@@ -247,6 +247,7 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
|
|
|
|
video_set_drvdata(vdev, sd);
|
|
strscpy(vdev->name, sd->name, sizeof(vdev->name));
|
|
+ vdev->dev_parent = sd->dev;
|
|
vdev->v4l2_dev = v4l2_dev;
|
|
vdev->fops = &v4l2_subdev_fops;
|
|
vdev->release = v4l2_device_release_subdev_node;
|
|
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
|
|
index 218f0da0ce769..edd34cf09cf8a 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
|
|
@@ -310,8 +310,8 @@ v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode,
|
|
}
|
|
|
|
if (!fwnode_property_read_u32(fwnode, "data-active", &v)) {
|
|
- flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING |
|
|
- V4L2_MBUS_PCLK_SAMPLE_FALLING);
|
|
+ flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH |
|
|
+ V4L2_MBUS_DATA_ACTIVE_LOW);
|
|
flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
|
|
V4L2_MBUS_DATA_ACTIVE_LOW;
|
|
pr_debug("data-active %s\n", v ? "high" : "low");
|
|
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
|
|
index c63746968fa3d..3cdd09e4dd6b5 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
|
|
@@ -286,6 +286,7 @@ static void v4l_print_format(const void *arg, bool write_only)
|
|
const struct v4l2_window *win;
|
|
const struct v4l2_sdr_format *sdr;
|
|
const struct v4l2_meta_format *meta;
|
|
+ u32 planes;
|
|
unsigned i;
|
|
|
|
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
|
|
@@ -316,7 +317,8 @@ static void v4l_print_format(const void *arg, bool write_only)
|
|
prt_names(mp->field, v4l2_field_names),
|
|
mp->colorspace, mp->num_planes, mp->flags,
|
|
mp->ycbcr_enc, mp->quantization, mp->xfer_func);
|
|
- for (i = 0; i < mp->num_planes; i++)
|
|
+ planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
|
|
+ for (i = 0; i < planes; i++)
|
|
printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
|
|
mp->plane_fmt[i].bytesperline,
|
|
mp->plane_fmt[i].sizeimage);
|
|
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
|
|
index 76382c858c354..1246d69ba1874 100644
|
|
--- a/drivers/memstick/core/memstick.c
|
|
+++ b/drivers/memstick/core/memstick.c
|
|
@@ -18,6 +18,7 @@
|
|
#include <linux/delay.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/module.h>
|
|
+#include <linux/pm_runtime.h>
|
|
|
|
#define DRIVER_NAME "memstick"
|
|
|
|
@@ -436,6 +437,7 @@ static void memstick_check(struct work_struct *work)
|
|
struct memstick_dev *card;
|
|
|
|
dev_dbg(&host->dev, "memstick_check started\n");
|
|
+ pm_runtime_get_noresume(host->dev.parent);
|
|
mutex_lock(&host->lock);
|
|
if (!host->card) {
|
|
if (memstick_power_on(host))
|
|
@@ -479,6 +481,7 @@ out_power_off:
|
|
host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
|
|
|
|
mutex_unlock(&host->lock);
|
|
+ pm_runtime_put(host->dev.parent);
|
|
dev_dbg(&host->dev, "memstick_check finished\n");
|
|
}
|
|
|
|
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
|
|
index 8c5dfdce4326c..f461460a2aeb9 100644
|
|
--- a/drivers/mfd/Kconfig
|
|
+++ b/drivers/mfd/Kconfig
|
|
@@ -102,6 +102,7 @@ config MFD_AAT2870_CORE
|
|
config MFD_AT91_USART
|
|
tristate "AT91 USART Driver"
|
|
select MFD_CORE
|
|
+ depends on ARCH_AT91 || COMPILE_TEST
|
|
help
|
|
Select this to get support for AT91 USART IP. This is a wrapper
|
|
over at91-usart-serial driver and usart-spi-driver. Only one function
|
|
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
|
|
index 30d09d1771717..11ab17f64c649 100644
|
|
--- a/drivers/mfd/ab8500-core.c
|
|
+++ b/drivers/mfd/ab8500-core.c
|
|
@@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
|
|
mutex_unlock(&ab8500->lock);
|
|
dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
|
|
|
|
- return ret;
|
|
+ return (ret < 0) ? ret : 0;
|
|
}
|
|
|
|
static int ab8500_get_register(struct device *dev, u8 bank,
|
|
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
|
|
index 0be511dd93d01..f8e0fa97bb31e 100644
|
|
--- a/drivers/mfd/axp20x.c
|
|
+++ b/drivers/mfd/axp20x.c
|
|
@@ -640,9 +640,9 @@ static const struct mfd_cell axp221_cells[] = {
|
|
|
|
static const struct mfd_cell axp223_cells[] = {
|
|
{
|
|
- .name = "axp221-pek",
|
|
- .num_resources = ARRAY_SIZE(axp22x_pek_resources),
|
|
- .resources = axp22x_pek_resources,
|
|
+ .name = "axp221-pek",
|
|
+ .num_resources = ARRAY_SIZE(axp22x_pek_resources),
|
|
+ .resources = axp22x_pek_resources,
|
|
}, {
|
|
.name = "axp22x-adc",
|
|
.of_compatible = "x-powers,axp221-adc",
|
|
@@ -650,7 +650,7 @@ static const struct mfd_cell axp223_cells[] = {
|
|
.name = "axp20x-battery-power-supply",
|
|
.of_compatible = "x-powers,axp221-battery-power-supply",
|
|
}, {
|
|
- .name = "axp20x-regulator",
|
|
+ .name = "axp20x-regulator",
|
|
}, {
|
|
.name = "axp20x-ac-power-supply",
|
|
.of_compatible = "x-powers,axp221-ac-power-supply",
|
|
@@ -666,9 +666,9 @@ static const struct mfd_cell axp223_cells[] = {
|
|
|
|
static const struct mfd_cell axp152_cells[] = {
|
|
{
|
|
- .name = "axp20x-pek",
|
|
- .num_resources = ARRAY_SIZE(axp152_pek_resources),
|
|
- .resources = axp152_pek_resources,
|
|
+ .name = "axp20x-pek",
|
|
+ .num_resources = ARRAY_SIZE(axp152_pek_resources),
|
|
+ .resources = axp152_pek_resources,
|
|
},
|
|
};
|
|
|
|
@@ -697,87 +697,101 @@ static const struct resource axp288_charger_resources[] = {
|
|
|
|
static const struct mfd_cell axp288_cells[] = {
|
|
{
|
|
- .name = "axp288_adc",
|
|
- .num_resources = ARRAY_SIZE(axp288_adc_resources),
|
|
- .resources = axp288_adc_resources,
|
|
- },
|
|
- {
|
|
- .name = "axp288_extcon",
|
|
- .num_resources = ARRAY_SIZE(axp288_extcon_resources),
|
|
- .resources = axp288_extcon_resources,
|
|
- },
|
|
- {
|
|
- .name = "axp288_charger",
|
|
- .num_resources = ARRAY_SIZE(axp288_charger_resources),
|
|
- .resources = axp288_charger_resources,
|
|
- },
|
|
- {
|
|
- .name = "axp288_fuel_gauge",
|
|
- .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
|
|
- .resources = axp288_fuel_gauge_resources,
|
|
- },
|
|
- {
|
|
- .name = "axp221-pek",
|
|
- .num_resources = ARRAY_SIZE(axp288_power_button_resources),
|
|
- .resources = axp288_power_button_resources,
|
|
- },
|
|
- {
|
|
- .name = "axp288_pmic_acpi",
|
|
+ .name = "axp288_adc",
|
|
+ .num_resources = ARRAY_SIZE(axp288_adc_resources),
|
|
+ .resources = axp288_adc_resources,
|
|
+ }, {
|
|
+ .name = "axp288_extcon",
|
|
+ .num_resources = ARRAY_SIZE(axp288_extcon_resources),
|
|
+ .resources = axp288_extcon_resources,
|
|
+ }, {
|
|
+ .name = "axp288_charger",
|
|
+ .num_resources = ARRAY_SIZE(axp288_charger_resources),
|
|
+ .resources = axp288_charger_resources,
|
|
+ }, {
|
|
+ .name = "axp288_fuel_gauge",
|
|
+ .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
|
|
+ .resources = axp288_fuel_gauge_resources,
|
|
+ }, {
|
|
+ .name = "axp221-pek",
|
|
+ .num_resources = ARRAY_SIZE(axp288_power_button_resources),
|
|
+ .resources = axp288_power_button_resources,
|
|
+ }, {
|
|
+ .name = "axp288_pmic_acpi",
|
|
},
|
|
};
|
|
|
|
static const struct mfd_cell axp803_cells[] = {
|
|
{
|
|
- .name = "axp221-pek",
|
|
- .num_resources = ARRAY_SIZE(axp803_pek_resources),
|
|
- .resources = axp803_pek_resources,
|
|
+ .name = "axp221-pek",
|
|
+ .num_resources = ARRAY_SIZE(axp803_pek_resources),
|
|
+ .resources = axp803_pek_resources,
|
|
+ }, {
|
|
+ .name = "axp20x-gpio",
|
|
+ .of_compatible = "x-powers,axp813-gpio",
|
|
+ }, {
|
|
+ .name = "axp813-adc",
|
|
+ .of_compatible = "x-powers,axp813-adc",
|
|
+ }, {
|
|
+ .name = "axp20x-battery-power-supply",
|
|
+ .of_compatible = "x-powers,axp813-battery-power-supply",
|
|
+ }, {
|
|
+ .name = "axp20x-ac-power-supply",
|
|
+ .of_compatible = "x-powers,axp813-ac-power-supply",
|
|
+ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
|
|
+ .resources = axp20x_ac_power_supply_resources,
|
|
},
|
|
- { .name = "axp20x-regulator" },
|
|
+ { .name = "axp20x-regulator" },
|
|
};
|
|
|
|
static const struct mfd_cell axp806_self_working_cells[] = {
|
|
{
|
|
- .name = "axp221-pek",
|
|
- .num_resources = ARRAY_SIZE(axp806_pek_resources),
|
|
- .resources = axp806_pek_resources,
|
|
+ .name = "axp221-pek",
|
|
+ .num_resources = ARRAY_SIZE(axp806_pek_resources),
|
|
+ .resources = axp806_pek_resources,
|
|
},
|
|
- { .name = "axp20x-regulator" },
|
|
+ { .name = "axp20x-regulator" },
|
|
};
|
|
|
|
static const struct mfd_cell axp806_cells[] = {
|
|
{
|
|
- .id = 2,
|
|
- .name = "axp20x-regulator",
|
|
+ .id = 2,
|
|
+ .name = "axp20x-regulator",
|
|
},
|
|
};
|
|
|
|
static const struct mfd_cell axp809_cells[] = {
|
|
{
|
|
- .name = "axp221-pek",
|
|
- .num_resources = ARRAY_SIZE(axp809_pek_resources),
|
|
- .resources = axp809_pek_resources,
|
|
+ .name = "axp221-pek",
|
|
+ .num_resources = ARRAY_SIZE(axp809_pek_resources),
|
|
+ .resources = axp809_pek_resources,
|
|
}, {
|
|
- .id = 1,
|
|
- .name = "axp20x-regulator",
|
|
+ .id = 1,
|
|
+ .name = "axp20x-regulator",
|
|
},
|
|
};
|
|
|
|
static const struct mfd_cell axp813_cells[] = {
|
|
{
|
|
- .name = "axp221-pek",
|
|
- .num_resources = ARRAY_SIZE(axp803_pek_resources),
|
|
- .resources = axp803_pek_resources,
|
|
+ .name = "axp221-pek",
|
|
+ .num_resources = ARRAY_SIZE(axp803_pek_resources),
|
|
+ .resources = axp803_pek_resources,
|
|
}, {
|
|
- .name = "axp20x-regulator",
|
|
+ .name = "axp20x-regulator",
|
|
}, {
|
|
- .name = "axp20x-gpio",
|
|
- .of_compatible = "x-powers,axp813-gpio",
|
|
+ .name = "axp20x-gpio",
|
|
+ .of_compatible = "x-powers,axp813-gpio",
|
|
}, {
|
|
- .name = "axp813-adc",
|
|
- .of_compatible = "x-powers,axp813-adc",
|
|
+ .name = "axp813-adc",
|
|
+ .of_compatible = "x-powers,axp813-adc",
|
|
}, {
|
|
.name = "axp20x-battery-power-supply",
|
|
.of_compatible = "x-powers,axp813-battery-power-supply",
|
|
+ }, {
|
|
+ .name = "axp20x-ac-power-supply",
|
|
+ .of_compatible = "x-powers,axp813-ac-power-supply",
|
|
+ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
|
|
+ .resources = axp20x_ac_power_supply_resources,
|
|
},
|
|
};
|
|
|
|
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
|
|
index 503979c81dae1..fab3cdc27ed64 100644
|
|
--- a/drivers/mfd/bd9571mwv.c
|
|
+++ b/drivers/mfd/bd9571mwv.c
|
|
@@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
|
|
};
|
|
|
|
static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
|
|
+ regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
|
|
regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
|
|
regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
|
|
regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
|
|
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
|
|
index b99a194ce5a4a..2d0fee488c5aa 100644
|
|
--- a/drivers/mfd/cros_ec_dev.c
|
|
+++ b/drivers/mfd/cros_ec_dev.c
|
|
@@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
|
|
|
|
cros_ec_debugfs_remove(ec);
|
|
|
|
+ mfd_remove_devices(ec->dev);
|
|
cdev_del(&ec->cdev);
|
|
device_unregister(&ec->class_dev);
|
|
return 0;
|
|
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
|
|
index 5970b8def5487..aec20e1c7d3d5 100644
|
|
--- a/drivers/mfd/db8500-prcmu.c
|
|
+++ b/drivers/mfd/db8500-prcmu.c
|
|
@@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
|
|
.irq_unmask = prcmu_irq_unmask,
|
|
};
|
|
|
|
-static __init char *fw_project_name(u32 project)
|
|
+static char *fw_project_name(u32 project)
|
|
{
|
|
switch (project) {
|
|
case PRCMU_FW_PROJECT_U8500:
|
|
@@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
|
|
INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
|
|
}
|
|
|
|
-static void __init init_prcm_registers(void)
|
|
+static void init_prcm_registers(void)
|
|
{
|
|
u32 val;
|
|
|
|
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
|
|
index f475e848252fa..d0bf50e3568d7 100644
|
|
--- a/drivers/mfd/mc13xxx-core.c
|
|
+++ b/drivers/mfd/mc13xxx-core.c
|
|
@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
|
|
|
|
mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
|
|
|
|
- mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
|
|
+ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
|
|
+ if (ret)
|
|
+ goto out;
|
|
|
|
adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
|
|
MC13XXX_ADC0_CHRGRAWDIV;
|
|
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
|
|
index 77b64bd64df36..ab24e176ef448 100644
|
|
--- a/drivers/mfd/mt6397-core.c
|
|
+++ b/drivers/mfd/mt6397-core.c
|
|
@@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
|
|
|
|
default:
|
|
dev_err(&pdev->dev, "unsupported chip: %d\n", id);
|
|
- ret = -ENODEV;
|
|
- break;
|
|
+ return -ENODEV;
|
|
}
|
|
|
|
if (ret) {
|
|
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
|
|
index 52fafea06067e..8d420c37b2a61 100644
|
|
--- a/drivers/mfd/qcom_rpm.c
|
|
+++ b/drivers/mfd/qcom_rpm.c
|
|
@@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
|
|
return -EFAULT;
|
|
}
|
|
|
|
+ writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
|
|
+ writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
|
|
+ writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
|
|
+
|
|
dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
|
|
fw_version[1],
|
|
fw_version[2]);
|
|
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
|
|
index c2d47d78705b8..fd111296b9592 100644
|
|
--- a/drivers/mfd/ti_am335x_tscadc.c
|
|
+++ b/drivers/mfd/ti_am335x_tscadc.c
|
|
@@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
|
|
cell->pdata_size = sizeof(tscadc);
|
|
}
|
|
|
|
- err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
|
|
- tscadc->used_cells, NULL, 0, NULL);
|
|
+ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
|
|
+ tscadc->cells, tscadc->used_cells, NULL,
|
|
+ 0, NULL);
|
|
if (err < 0)
|
|
goto err_disable_clk;
|
|
|
|
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
|
|
index 910f569ff77c1..8bcdecf494d05 100644
|
|
--- a/drivers/mfd/tps65218.c
|
|
+++ b/drivers/mfd/tps65218.c
|
|
@@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
|
|
|
|
mutex_init(&tps->tps_lock);
|
|
|
|
- ret = regmap_add_irq_chip(tps->regmap, tps->irq,
|
|
- IRQF_ONESHOT, 0, &tps65218_irq_chip,
|
|
- &tps->irq_data);
|
|
+ ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
|
|
+ IRQF_ONESHOT, 0, &tps65218_irq_chip,
|
|
+ &tps->irq_data);
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
@@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
|
|
ARRAY_SIZE(tps65218_cells), NULL, 0,
|
|
regmap_irq_get_domain(tps->irq_data));
|
|
|
|
- if (ret < 0)
|
|
- goto err_irq;
|
|
-
|
|
- return 0;
|
|
-
|
|
-err_irq:
|
|
- regmap_del_irq_chip(tps->irq, tps->irq_data);
|
|
-
|
|
return ret;
|
|
}
|
|
|
|
-static int tps65218_remove(struct i2c_client *client)
|
|
-{
|
|
- struct tps65218 *tps = i2c_get_clientdata(client);
|
|
-
|
|
- regmap_del_irq_chip(tps->irq, tps->irq_data);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
static const struct i2c_device_id tps65218_id_table[] = {
|
|
{ "tps65218", TPS65218 },
|
|
{ },
|
|
@@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
|
|
.of_match_table = of_tps65218_match_table,
|
|
},
|
|
.probe = tps65218_probe,
|
|
- .remove = tps65218_remove,
|
|
.id_table = tps65218_id_table,
|
|
};
|
|
|
|
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
|
|
index b893797827410..9c7925ca13cf0 100644
|
|
--- a/drivers/mfd/tps6586x.c
|
|
+++ b/drivers/mfd/tps6586x.c
|
|
@@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
|
|
return 0;
|
|
}
|
|
|
|
+static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
|
|
+{
|
|
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
|
|
+
|
|
+ if (tps6586x->client->irq)
|
|
+ disable_irq(tps6586x->client->irq);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
|
|
+{
|
|
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
|
|
+
|
|
+ if (tps6586x->client->irq)
|
|
+ enable_irq(tps6586x->client->irq);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
|
|
+ tps6586x_i2c_resume);
|
|
+
|
|
static const struct i2c_device_id tps6586x_id_table[] = {
|
|
{ "tps6586x", 0 },
|
|
{ },
|
|
@@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = {
|
|
.driver = {
|
|
.name = "tps6586x",
|
|
.of_match_table = of_match_ptr(tps6586x_of_match),
|
|
+ .pm = &tps6586x_pm_ops,
|
|
},
|
|
.probe = tps6586x_i2c_probe,
|
|
.remove = tps6586x_i2c_remove,
|
|
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
|
|
index 4be3d239da9ec..299016bc46d90 100644
|
|
--- a/drivers/mfd/twl-core.c
|
|
+++ b/drivers/mfd/twl-core.c
|
|
@@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
|
|
* letting it generate the right frequencies for USB, MADC, and
|
|
* other purposes.
|
|
*/
|
|
-static inline int __init protect_pm_master(void)
|
|
+static inline int protect_pm_master(void)
|
|
{
|
|
int e = 0;
|
|
|
|
@@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
|
|
return e;
|
|
}
|
|
|
|
-static inline int __init unprotect_pm_master(void)
|
|
+static inline int unprotect_pm_master(void)
|
|
{
|
|
int e = 0;
|
|
|
|
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
|
|
index 1ee68bd440fbc..16c6e2accfaa5 100644
|
|
--- a/drivers/mfd/wm5110-tables.c
|
|
+++ b/drivers/mfd/wm5110-tables.c
|
|
@@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
|
|
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
|
|
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
|
|
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
|
|
+ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
|
|
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
|
|
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
|
|
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
|
|
@@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
|
|
case ARIZONA_ASRC_ENABLE:
|
|
case ARIZONA_ASRC_STATUS:
|
|
case ARIZONA_ASRC_RATE1:
|
|
+ case ARIZONA_ASRC_RATE2:
|
|
case ARIZONA_ISRC_1_CTRL_1:
|
|
case ARIZONA_ISRC_1_CTRL_2:
|
|
case ARIZONA_ISRC_1_CTRL_3:
|
|
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
|
|
index fe7a1d27a017b..a846faefa2103 100644
|
|
--- a/drivers/misc/eeprom/Kconfig
|
|
+++ b/drivers/misc/eeprom/Kconfig
|
|
@@ -13,7 +13,7 @@ config EEPROM_AT24
|
|
ones like at24c64, 24lc02 or fm24c04:
|
|
|
|
24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
|
|
- 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
|
|
+ 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
|
|
|
|
Unless you like data loss puzzles, always be sure that any chip
|
|
you configure as a 24c32 (32 kbit) or larger is NOT really a
|
|
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
|
|
index 636ed7149793b..ddfcf4ade7bf3 100644
|
|
--- a/drivers/misc/eeprom/at24.c
|
|
+++ b/drivers/misc/eeprom/at24.c
|
|
@@ -156,6 +156,7 @@ AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16);
|
|
AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16);
|
|
AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16);
|
|
AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16);
|
|
+AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16);
|
|
/* identical to 24c08 ? */
|
|
AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0);
|
|
|
|
@@ -182,6 +183,7 @@ static const struct i2c_device_id at24_ids[] = {
|
|
{ "24c256", (kernel_ulong_t)&at24_data_24c256 },
|
|
{ "24c512", (kernel_ulong_t)&at24_data_24c512 },
|
|
{ "24c1024", (kernel_ulong_t)&at24_data_24c1024 },
|
|
+ { "24c2048", (kernel_ulong_t)&at24_data_24c2048 },
|
|
{ "at24", 0 },
|
|
{ /* END OF LIST */ }
|
|
};
|
|
@@ -210,6 +212,7 @@ static const struct of_device_id at24_of_match[] = {
|
|
{ .compatible = "atmel,24c256", .data = &at24_data_24c256 },
|
|
{ .compatible = "atmel,24c512", .data = &at24_data_24c512 },
|
|
{ .compatible = "atmel,24c1024", .data = &at24_data_24c1024 },
|
|
+ { .compatible = "atmel,24c2048", .data = &at24_data_24c2048 },
|
|
{ /* END OF LIST */ },
|
|
};
|
|
MODULE_DEVICE_TABLE(of, at24_of_match);
|
|
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
|
|
index 3fcb9a2fe1c9d..efe2fb72d54be 100644
|
|
--- a/drivers/misc/genwqe/card_utils.c
|
|
+++ b/drivers/misc/genwqe/card_utils.c
|
|
@@ -215,7 +215,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
|
|
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
|
|
dma_addr_t *dma_handle)
|
|
{
|
|
- if (get_order(size) > MAX_ORDER)
|
|
+ if (get_order(size) >= MAX_ORDER)
|
|
return NULL;
|
|
|
|
return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
|
|
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
|
|
index b8aaa684c397b..2ed23c99f59fd 100644
|
|
--- a/drivers/misc/ibmvmc.c
|
|
+++ b/drivers/misc/ibmvmc.c
|
|
@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
|
|
*
|
|
* Return:
|
|
* 0 - Success
|
|
+ * Non-zero - Failure
|
|
*/
|
|
static int ibmvmc_open(struct inode *inode, struct file *file)
|
|
{
|
|
struct ibmvmc_file_session *session;
|
|
- int rc = 0;
|
|
|
|
pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
|
|
(unsigned long)inode, (unsigned long)file,
|
|
ibmvmc.state);
|
|
|
|
session = kzalloc(sizeof(*session), GFP_KERNEL);
|
|
+ if (!session)
|
|
+ return -ENOMEM;
|
|
+
|
|
session->file = file;
|
|
file->private_data = session;
|
|
|
|
- return rc;
|
|
+ return 0;
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
|
|
index e4b10b2d1a083..bb1ee9834a029 100644
|
|
--- a/drivers/misc/mei/hw-me-regs.h
|
|
+++ b/drivers/misc/mei/hw-me-regs.h
|
|
@@ -127,6 +127,8 @@
|
|
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
|
|
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
|
|
|
|
+#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */
|
|
+
|
|
#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
|
|
|
|
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
|
|
@@ -137,6 +139,8 @@
|
|
#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
|
|
#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
|
|
|
|
+#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
|
|
+
|
|
/*
|
|
* MEI HW Section
|
|
*/
|
|
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
|
|
index ea4e152270a3b..4299658d48d63 100644
|
|
--- a/drivers/misc/mei/pci-me.c
|
|
+++ b/drivers/misc/mei/pci-me.c
|
|
@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
|
|
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
|
|
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
|
|
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
|
|
- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
|
|
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
|
|
|
|
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
|
|
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
|
|
|
|
+ {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
|
|
+
|
|
{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
|
|
|
|
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
|
|
@@ -103,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
|
|
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
|
|
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
|
|
|
|
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
|
|
+
|
|
/* required last entry */
|
|
{0, }
|
|
};
|
|
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
|
|
index 3633202e18f4f..17b6398cf66c3 100644
|
|
--- a/drivers/misc/mic/vop/vop_main.c
|
|
+++ b/drivers/misc/mic/vop/vop_main.c
|
|
@@ -381,16 +381,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
|
|
struct _vop_vdev *vdev = to_vopvdev(dev);
|
|
struct vop_device *vpdev = vdev->vpdev;
|
|
struct mic_device_ctrl __iomem *dc = vdev->dc;
|
|
- int i, err, retry;
|
|
+ int i, err, retry, queue_idx = 0;
|
|
|
|
/* We must have this many virtqueues. */
|
|
if (nvqs > ioread8(&vdev->desc->num_vq))
|
|
return -ENOENT;
|
|
|
|
for (i = 0; i < nvqs; ++i) {
|
|
+ if (!names[i]) {
|
|
+ vqs[i] = NULL;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
|
|
__func__, i, names[i]);
|
|
- vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i],
|
|
+ vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
|
|
ctx ? ctx[i] : false);
|
|
if (IS_ERR(vqs[i])) {
|
|
err = PTR_ERR(vqs[i]);
|
|
@@ -563,6 +568,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
|
|
int ret = -1;
|
|
|
|
if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
|
|
+ struct device *dev = get_device(&vdev->vdev.dev);
|
|
+
|
|
dev_dbg(&vpdev->dev,
|
|
"%s %d config_change %d type %d vdev %p\n",
|
|
__func__, __LINE__,
|
|
@@ -574,7 +581,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
|
|
iowrite8(-1, &dc->h2c_vdev_db);
|
|
if (status & VIRTIO_CONFIG_S_DRIVER_OK)
|
|
wait_for_completion(&vdev->reset_done);
|
|
- put_device(&vdev->vdev.dev);
|
|
+ put_device(dev);
|
|
iowrite8(1, &dc->guest_ack);
|
|
dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
|
|
__func__, __LINE__, ioread8(&dc->guest_ack));
|
|
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
|
|
index 57a6bb1fd3c95..8f2c5d8bd2eee 100644
|
|
--- a/drivers/misc/ocxl/config.c
|
|
+++ b/drivers/misc/ocxl/config.c
|
|
@@ -318,7 +318,7 @@ static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn,
|
|
if (rc)
|
|
return rc;
|
|
ptr = (u32 *) &afu->name[i];
|
|
- *ptr = val;
|
|
+ *ptr = le32_to_cpu((__force __le32) val);
|
|
}
|
|
afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */
|
|
return 0;
|
|
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
|
|
index 31695a078485a..646d16450066f 100644
|
|
--- a/drivers/misc/ocxl/link.c
|
|
+++ b/drivers/misc/ocxl/link.c
|
|
@@ -566,7 +566,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
|
|
|
|
mutex_lock(&spa->spa_lock);
|
|
|
|
- pe->tid = tid;
|
|
+ pe->tid = cpu_to_be32(tid);
|
|
|
|
/*
|
|
* The barrier makes sure the PE is updated
|
|
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
|
|
index 6c3591cdf8555..a3c6c773d9dc8 100644
|
|
--- a/drivers/misc/vexpress-syscfg.c
|
|
+++ b/drivers/misc/vexpress-syscfg.c
|
|
@@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
|
|
int tries;
|
|
long timeout;
|
|
|
|
- if (WARN_ON(index > func->num_templates))
|
|
+ if (WARN_ON(index >= func->num_templates))
|
|
return -EINVAL;
|
|
|
|
command = readl(syscfg->base + SYS_CFGCTRL);
|
|
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
|
|
index 111934838da2c..4f1912a1e071e 100644
|
|
--- a/drivers/mmc/core/block.c
|
|
+++ b/drivers/mmc/core/block.c
|
|
@@ -2114,7 +2114,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
|
|
if (waiting)
|
|
wake_up(&mq->wait);
|
|
else
|
|
- kblockd_schedule_work(&mq->complete_work);
|
|
+ queue_work(mq->card->complete_wq, &mq->complete_work);
|
|
|
|
return;
|
|
}
|
|
@@ -2928,6 +2928,13 @@ static int mmc_blk_probe(struct mmc_card *card)
|
|
|
|
mmc_fixup_device(card, mmc_blk_fixups);
|
|
|
|
+ card->complete_wq = alloc_workqueue("mmc_complete",
|
|
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
|
|
+ if (unlikely(!card->complete_wq)) {
|
|
+ pr_err("Failed to create mmc completion workqueue");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
md = mmc_blk_alloc(card);
|
|
if (IS_ERR(md))
|
|
return PTR_ERR(md);
|
|
@@ -2991,6 +2998,7 @@ static void mmc_blk_remove(struct mmc_card *card)
|
|
pm_runtime_put_noidle(&card->dev);
|
|
mmc_blk_remove_req(md);
|
|
dev_set_drvdata(&card->dev, NULL);
|
|
+ destroy_workqueue(card->complete_wq);
|
|
}
|
|
|
|
static int _mmc_blk_suspend(struct mmc_card *card)
|
|
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
|
|
index be53044086c76..fbc56ee996827 100644
|
|
--- a/drivers/mmc/host/atmel-mci.c
|
|
+++ b/drivers/mmc/host/atmel-mci.c
|
|
@@ -1954,13 +1954,14 @@ static void atmci_tasklet_func(unsigned long priv)
|
|
}
|
|
|
|
atmci_request_end(host, host->mrq);
|
|
- state = STATE_IDLE;
|
|
+ goto unlock; /* atmci_request_end() sets host->state */
|
|
break;
|
|
}
|
|
} while (state != prev_state);
|
|
|
|
host->state = state;
|
|
|
|
+unlock:
|
|
spin_unlock(&host->lock);
|
|
}
|
|
|
|
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
|
|
index 768972af8b853..5301302fb5310 100644
|
|
--- a/drivers/mmc/host/bcm2835.c
|
|
+++ b/drivers/mmc/host/bcm2835.c
|
|
@@ -286,6 +286,7 @@ static void bcm2835_reset(struct mmc_host *mmc)
|
|
|
|
if (host->dma_chan)
|
|
dmaengine_terminate_sync(host->dma_chan);
|
|
+ host->dma_chan = NULL;
|
|
bcm2835_reset_internal(host);
|
|
}
|
|
|
|
@@ -772,6 +773,8 @@ static void bcm2835_finish_command(struct bcm2835_host *host)
|
|
|
|
if (!(sdhsts & SDHSTS_CRC7_ERROR) ||
|
|
(host->cmd->opcode != MMC_SEND_OP_COND)) {
|
|
+ u32 edm, fsm;
|
|
+
|
|
if (sdhsts & SDHSTS_CMD_TIME_OUT) {
|
|
host->cmd->error = -ETIMEDOUT;
|
|
} else {
|
|
@@ -780,6 +783,13 @@ static void bcm2835_finish_command(struct bcm2835_host *host)
|
|
bcm2835_dumpregs(host);
|
|
host->cmd->error = -EILSEQ;
|
|
}
|
|
+ edm = readl(host->ioaddr + SDEDM);
|
|
+ fsm = edm & SDEDM_FSM_MASK;
|
|
+ if (fsm == SDEDM_FSM_READWAIT ||
|
|
+ fsm == SDEDM_FSM_WRITESTART1)
|
|
+ /* Kick the FSM out of its wait */
|
|
+ writel(edm | SDEDM_FORCE_DATA_MODE,
|
|
+ host->ioaddr + SDEDM);
|
|
bcm2835_finish_request(host);
|
|
return;
|
|
}
|
|
@@ -837,6 +847,8 @@ static void bcm2835_timeout(struct work_struct *work)
|
|
dev_err(dev, "timeout waiting for hardware interrupt.\n");
|
|
bcm2835_dumpregs(host);
|
|
|
|
+ bcm2835_reset(host->mmc);
|
|
+
|
|
if (host->data) {
|
|
host->data->error = -ETIMEDOUT;
|
|
bcm2835_finish_data(host);
|
|
@@ -1427,6 +1439,8 @@ static int bcm2835_probe(struct platform_device *pdev)
|
|
|
|
err:
|
|
dev_dbg(dev, "%s -> err %d\n", __func__, ret);
|
|
+ if (host->dma_chan_rxtx)
|
|
+ dma_release_channel(host->dma_chan_rxtx);
|
|
mmc_free_host(mmc);
|
|
|
|
return ret;
|
|
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
|
|
index 54c3fbb4a3918..db56d4f58aaab 100644
|
|
--- a/drivers/mmc/host/dw_mmc-bluefield.c
|
|
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
|
|
@@ -1,11 +1,6 @@
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* Copyright (C) 2018 Mellanox Technologies.
|
|
- *
|
|
- * This program is free software; you can redistribute it and/or modify
|
|
- * it under the terms of the GNU General Public License as published by
|
|
- * the Free Software Foundation; either version 2 of the License, or
|
|
- * (at your option) any later version.
|
|
*/
|
|
|
|
#include <linux/bitfield.h>
|
|
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
|
|
index 0c1efd5100b77..44ea452add8e9 100644
|
|
--- a/drivers/mmc/host/jz4740_mmc.c
|
|
+++ b/drivers/mmc/host/jz4740_mmc.c
|
|
@@ -983,17 +983,17 @@ static int jz4740_mmc_request_gpios(struct mmc_host *mmc,
|
|
if (!pdata->read_only_active_low)
|
|
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
|
|
|
|
- if (gpio_is_valid(pdata->gpio_card_detect)) {
|
|
- ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
+ /*
|
|
+ * Get optional card detect and write protect GPIOs,
|
|
+ * only back out on probe deferral.
|
|
+ */
|
|
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
|
|
+ if (ret == -EPROBE_DEFER)
|
|
+ return ret;
|
|
|
|
- if (gpio_is_valid(pdata->gpio_read_only)) {
|
|
- ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
|
|
+ if (ret == -EPROBE_DEFER)
|
|
+ return ret;
|
|
|
|
return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power,
|
|
"MMC read only", true, pdata->power_active_low);
|
|
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
|
|
index c201c378537e4..ddd98cdd33bcd 100644
|
|
--- a/drivers/mmc/host/meson-gx-mmc.c
|
|
+++ b/drivers/mmc/host/meson-gx-mmc.c
|
|
@@ -174,6 +174,8 @@ struct meson_host {
|
|
struct sd_emmc_desc *descs;
|
|
dma_addr_t descs_dma_addr;
|
|
|
|
+ int irq;
|
|
+
|
|
bool vqmmc_enabled;
|
|
};
|
|
|
|
@@ -1181,7 +1183,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
|
|
struct resource *res;
|
|
struct meson_host *host;
|
|
struct mmc_host *mmc;
|
|
- int ret, irq;
|
|
+ int ret;
|
|
|
|
mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
|
|
if (!mmc)
|
|
@@ -1228,8 +1230,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
|
|
goto free_host;
|
|
}
|
|
|
|
- irq = platform_get_irq(pdev, 0);
|
|
- if (irq <= 0) {
|
|
+ host->irq = platform_get_irq(pdev, 0);
|
|
+ if (host->irq <= 0) {
|
|
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
|
|
ret = -EINVAL;
|
|
goto free_host;
|
|
@@ -1283,9 +1285,9 @@ static int meson_mmc_probe(struct platform_device *pdev)
|
|
writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
|
|
host->regs + SD_EMMC_IRQ_EN);
|
|
|
|
- ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
|
|
- meson_mmc_irq_thread, IRQF_SHARED,
|
|
- NULL, host);
|
|
+ ret = request_threaded_irq(host->irq, meson_mmc_irq,
|
|
+ meson_mmc_irq_thread, IRQF_SHARED,
|
|
+ dev_name(&pdev->dev), host);
|
|
if (ret)
|
|
goto err_init_clk;
|
|
|
|
@@ -1303,7 +1305,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
|
|
if (host->bounce_buf == NULL) {
|
|
dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
|
|
ret = -ENOMEM;
|
|
- goto err_init_clk;
|
|
+ goto err_free_irq;
|
|
}
|
|
|
|
host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
|
|
@@ -1322,6 +1324,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
|
|
err_bounce_buf:
|
|
dma_free_coherent(host->dev, host->bounce_buf_size,
|
|
host->bounce_buf, host->bounce_dma_addr);
|
|
+err_free_irq:
|
|
+ free_irq(host->irq, host);
|
|
err_init_clk:
|
|
clk_disable_unprepare(host->mmc_clk);
|
|
err_core_clk:
|
|
@@ -1339,6 +1343,7 @@ static int meson_mmc_remove(struct platform_device *pdev)
|
|
|
|
/* disable interrupts */
|
|
writel(0, host->regs + SD_EMMC_IRQ_EN);
|
|
+ free_irq(host->irq, host);
|
|
|
|
dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
|
|
host->descs, host->descs_dma_addr);
|
|
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
|
|
index abe253c262a2e..ec980bda071c3 100644
|
|
--- a/drivers/mmc/host/meson-mx-sdio.c
|
|
+++ b/drivers/mmc/host/meson-mx-sdio.c
|
|
@@ -596,6 +596,9 @@ static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
|
|
init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
|
|
"%s#fixed_factor",
|
|
dev_name(host->controller_dev));
|
|
+ if (!init.name)
|
|
+ return -ENOMEM;
|
|
+
|
|
init.ops = &clk_fixed_factor_ops;
|
|
init.flags = 0;
|
|
init.parent_names = &clk_fixed_factor_parent;
|
|
@@ -612,6 +615,9 @@ static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
|
|
clk_div_parent = __clk_get_name(host->fixed_factor_clk);
|
|
init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
|
|
"%s#div", dev_name(host->controller_dev));
|
|
+ if (!init.name)
|
|
+ return -ENOMEM;
|
|
+
|
|
init.ops = &clk_divider_ops;
|
|
init.flags = CLK_SET_RATE_PARENT;
|
|
init.parent_names = &clk_div_parent;
|
|
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
|
|
index 6334cc752d8b7..4ff7646d0d294 100644
|
|
--- a/drivers/mmc/host/mtk-sd.c
|
|
+++ b/drivers/mmc/host/mtk-sd.c
|
|
@@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
|
|
|
|
if (timing == MMC_TIMING_MMC_HS400 &&
|
|
host->dev_comp->hs400_tune)
|
|
- sdr_set_field(host->base + PAD_CMD_TUNE,
|
|
+ sdr_set_field(host->base + tune_reg,
|
|
MSDC_PAD_TUNE_CMDRRDLY,
|
|
host->hs400_cmd_int_delay);
|
|
dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
|
|
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
|
|
index 0db99057c44f7..9d12c06c7fd68 100644
|
|
--- a/drivers/mmc/host/sdhci-iproc.c
|
|
+++ b/drivers/mmc/host/sdhci-iproc.c
|
|
@@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
|
|
|
|
iproc_host->data = iproc_data;
|
|
|
|
- mmc_of_parse(host->mmc);
|
|
+ ret = mmc_of_parse(host->mmc);
|
|
+ if (ret)
|
|
+ goto err;
|
|
+
|
|
sdhci_get_property(pdev);
|
|
|
|
host->mmc->caps |= iproc_host->data->mmc_caps;
|
|
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
|
|
index 3cc8bfee6c18f..8594659cb5923 100644
|
|
--- a/drivers/mmc/host/sdhci-msm.c
|
|
+++ b/drivers/mmc/host/sdhci-msm.c
|
|
@@ -258,6 +258,8 @@ struct sdhci_msm_host {
|
|
bool mci_removed;
|
|
const struct sdhci_msm_variant_ops *var_ops;
|
|
const struct sdhci_msm_offset *offset;
|
|
+ bool use_cdr;
|
|
+ u32 transfer_mode;
|
|
};
|
|
|
|
static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
|
|
@@ -1025,6 +1027,26 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
+static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
|
|
+{
|
|
+ const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
|
|
+ u32 config, oldconfig = readl_relaxed(host->ioaddr +
|
|
+ msm_offset->core_dll_config);
|
|
+
|
|
+ config = oldconfig;
|
|
+ if (enable) {
|
|
+ config |= CORE_CDR_EN;
|
|
+ config &= ~CORE_CDR_EXT_EN;
|
|
+ } else {
|
|
+ config &= ~CORE_CDR_EN;
|
|
+ config |= CORE_CDR_EXT_EN;
|
|
+ }
|
|
+
|
|
+ if (config != oldconfig)
|
|
+ writel_relaxed(config, host->ioaddr +
|
|
+ msm_offset->core_dll_config);
|
|
+}
|
|
+
|
|
static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
|
{
|
|
struct sdhci_host *host = mmc_priv(mmc);
|
|
@@ -1042,8 +1064,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
|
if (host->clock <= CORE_FREQ_100MHZ ||
|
|
!(ios.timing == MMC_TIMING_MMC_HS400 ||
|
|
ios.timing == MMC_TIMING_MMC_HS200 ||
|
|
- ios.timing == MMC_TIMING_UHS_SDR104))
|
|
+ ios.timing == MMC_TIMING_UHS_SDR104)) {
|
|
+ msm_host->use_cdr = false;
|
|
+ sdhci_msm_set_cdr(host, false);
|
|
return 0;
|
|
+ }
|
|
+
|
|
+ /* Clock-Data-Recovery used to dynamically adjust RX sampling point */
|
|
+ msm_host->use_cdr = true;
|
|
|
|
/*
|
|
* For HS400 tuning in HS200 timing requires:
|
|
@@ -1525,6 +1553,19 @@ static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
|
|
case SDHCI_POWER_CONTROL:
|
|
req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
|
|
break;
|
|
+ case SDHCI_TRANSFER_MODE:
|
|
+ msm_host->transfer_mode = val;
|
|
+ break;
|
|
+ case SDHCI_COMMAND:
|
|
+ if (!msm_host->use_cdr)
|
|
+ break;
|
|
+ if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
|
|
+ SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
|
|
+ SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
|
|
+ sdhci_msm_set_cdr(host, true);
|
|
+ else
|
|
+ sdhci_msm_set_cdr(host, false);
|
|
+ break;
|
|
}
|
|
|
|
if (req_type) {
|
|
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
|
|
index 86fc9f0220020..d111bf62acd9c 100644
|
|
--- a/drivers/mmc/host/sdhci-of-esdhc.c
|
|
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
|
|
@@ -528,8 +528,12 @@ static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
|
|
/* Wait max 20 ms */
|
|
timeout = ktime_add_ms(ktime_get(), 20);
|
|
val = ESDHC_CLOCK_STABLE;
|
|
- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
|
|
- if (ktime_after(ktime_get(), timeout)) {
|
|
+ while (1) {
|
|
+ bool timedout = ktime_after(ktime_get(), timeout);
|
|
+
|
|
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
|
|
+ break;
|
|
+ if (timedout) {
|
|
pr_err("%s: Internal clock never stabilised.\n",
|
|
mmc_hostname(host->mmc));
|
|
break;
|
|
@@ -594,8 +598,12 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
|
|
|
|
/* Wait max 20 ms */
|
|
timeout = ktime_add_ms(ktime_get(), 20);
|
|
- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) {
|
|
- if (ktime_after(ktime_get(), timeout)) {
|
|
+ while (1) {
|
|
+ bool timedout = ktime_after(ktime_get(), timeout);
|
|
+
|
|
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
|
|
+ break;
|
|
+ if (timedout) {
|
|
pr_err("%s: Internal clock never stabilised.\n",
|
|
mmc_hostname(host->mmc));
|
|
return;
|
|
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
|
|
index d264391616f93..d02f5cf76b3d1 100644
|
|
--- a/drivers/mmc/host/sdhci-omap.c
|
|
+++ b/drivers/mmc/host/sdhci-omap.c
|
|
@@ -220,8 +220,12 @@ static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
|
|
|
|
/* wait 1ms */
|
|
timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
|
|
- while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) {
|
|
- if (WARN_ON(ktime_after(ktime_get(), timeout)))
|
|
+ while (1) {
|
|
+ bool timedout = ktime_after(ktime_get(), timeout);
|
|
+
|
|
+ if (sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)
|
|
+ break;
|
|
+ if (WARN_ON(timedout))
|
|
return;
|
|
usleep_range(5, 10);
|
|
}
|
|
@@ -653,8 +657,12 @@ static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode)
|
|
|
|
/* wait 1ms */
|
|
timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
|
|
- while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) {
|
|
- if (WARN_ON(ktime_after(ktime_get(), timeout)))
|
|
+ while (1) {
|
|
+ bool timedout = ktime_after(ktime_get(), timeout);
|
|
+
|
|
+ if (sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)
|
|
+ break;
|
|
+ if (WARN_ON(timedout))
|
|
return;
|
|
usleep_range(5, 10);
|
|
}
|
|
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
|
|
index 5956e90380e8b..5b5eb53a63d28 100644
|
|
--- a/drivers/mmc/host/sdhci-xenon-phy.c
|
|
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
|
|
@@ -357,9 +357,13 @@ static int xenon_emmc_phy_enable_dll(struct sdhci_host *host)
|
|
|
|
/* Wait max 32 ms */
|
|
timeout = ktime_add_ms(ktime_get(), 32);
|
|
- while (!(sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) &
|
|
- XENON_DLL_LOCK_STATE)) {
|
|
- if (ktime_after(ktime_get(), timeout)) {
|
|
+ while (1) {
|
|
+ bool timedout = ktime_after(ktime_get(), timeout);
|
|
+
|
|
+ if (sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) &
|
|
+ XENON_DLL_LOCK_STATE)
|
|
+ break;
|
|
+ if (timedout) {
|
|
dev_err(mmc_dev(host->mmc), "Wait for DLL Lock time-out\n");
|
|
return -ETIMEDOUT;
|
|
}
|
|
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
|
|
index 4d0791f6ec236..a0b5089b32748 100644
|
|
--- a/drivers/mmc/host/sdhci-xenon.c
|
|
+++ b/drivers/mmc/host/sdhci-xenon.c
|
|
@@ -34,9 +34,13 @@ static int xenon_enable_internal_clk(struct sdhci_host *host)
|
|
sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL);
|
|
/* Wait max 20 ms */
|
|
timeout = ktime_add_ms(ktime_get(), 20);
|
|
- while (!((reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
|
|
- & SDHCI_CLOCK_INT_STABLE)) {
|
|
- if (ktime_after(ktime_get(), timeout)) {
|
|
+ while (1) {
|
|
+ bool timedout = ktime_after(ktime_get(), timeout);
|
|
+
|
|
+ reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
|
|
+ if (reg & SDHCI_CLOCK_INT_STABLE)
|
|
+ break;
|
|
+ if (timedout) {
|
|
dev_err(mmc_dev(host->mmc), "Internal clock never stabilised.\n");
|
|
return -ETIMEDOUT;
|
|
}
|
|
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
|
|
index 279e326e397e1..70fadc9767955 100644
|
|
--- a/drivers/mmc/host/sunxi-mmc.c
|
|
+++ b/drivers/mmc/host/sunxi-mmc.c
|
|
@@ -1399,13 +1399,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
|
|
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
|
|
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
|
|
|
|
- if (host->cfg->clk_delays || host->use_new_timings)
|
|
+ /*
|
|
+ * Some H5 devices do not have signal traces precise enough to
|
|
+ * use HS DDR mode for their eMMC chips.
|
|
+ *
|
|
+ * We still enable HS DDR modes for all the other controller
|
|
+ * variants that support them.
|
|
+ */
|
|
+ if ((host->cfg->clk_delays || host->use_new_timings) &&
|
|
+ !of_device_is_compatible(pdev->dev.of_node,
|
|
+ "allwinner,sun50i-h5-emmc"))
|
|
mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
|
|
|
|
ret = mmc_of_parse(mmc);
|
|
if (ret)
|
|
goto error_free_dma;
|
|
|
|
+ /*
|
|
+ * If we don't support delay chains in the SoC, we can't use any
|
|
+ * of the higher speed modes. Mask them out in case the device
|
|
+ * tree specifies the properties for them, which gets added to
|
|
+ * the caps by mmc_of_parse() above.
|
|
+ */
|
|
+ if (!(host->cfg->clk_delays || host->use_new_timings)) {
|
|
+ mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
|
|
+ MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
|
|
+ mmc->caps2 &= ~MMC_CAP2_HS200;
|
|
+ }
|
|
+
|
|
+ /* TODO: This driver doesn't support HS400 mode yet */
|
|
+ mmc->caps2 &= ~MMC_CAP2_HS400;
|
|
+
|
|
ret = sunxi_mmc_init_host(host);
|
|
if (ret)
|
|
goto error_free_dma;
|
|
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
|
|
index 99c460facd5e9..0bbb23b014f1b 100644
|
|
--- a/drivers/mtd/mtdpart.c
|
|
+++ b/drivers/mtd/mtdpart.c
|
|
@@ -470,6 +470,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
|
|
/* let's register it anyway to preserve ordering */
|
|
slave->offset = 0;
|
|
slave->mtd.size = 0;
|
|
+
|
|
+ /* Initialize ->erasesize to make add_mtd_device() happy. */
|
|
+ slave->mtd.erasesize = parent->erasesize;
|
|
+
|
|
printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
|
|
part->name);
|
|
goto out_register;
|
|
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
|
|
index bd4cfac6b5aa6..a4768df5083f9 100644
|
|
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
|
|
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
|
|
@@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this)
|
|
|
|
/*
|
|
* Reset BCH here, too. We got failures otherwise :(
|
|
- * See later BCH reset for explanation of MX23 handling
|
|
+ * See later BCH reset for explanation of MX23 and MX28 handling
|
|
*/
|
|
- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
|
|
+ ret = gpmi_reset_block(r->bch_regs,
|
|
+ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
|
|
if (ret)
|
|
goto err_out;
|
|
|
|
@@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this)
|
|
/*
|
|
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
|
|
* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
|
|
- * On the other hand, the MX28 needs the reset, because one case has been
|
|
- * seen where the BCH produced ECC errors constantly after 10000
|
|
- * consecutive reboots. The latter case has not been seen on the MX23
|
|
- * yet, still we don't know if it could happen there as well.
|
|
+ * and MX28.
|
|
*/
|
|
- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
|
|
+ ret = gpmi_reset_block(r->bch_regs,
|
|
+ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
|
|
if (ret)
|
|
goto err_out;
|
|
|
|
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
|
|
index 650f2b490a054..9dc16a23429ad 100644
|
|
--- a/drivers/mtd/nand/raw/marvell_nand.c
|
|
+++ b/drivers/mtd/nand/raw/marvell_nand.c
|
|
@@ -514,9 +514,14 @@ static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
|
|
writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
|
|
}
|
|
|
|
-static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
|
|
+static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
|
|
{
|
|
+ u32 reg;
|
|
+
|
|
+ reg = readl_relaxed(nfc->regs + NDSR);
|
|
writel_relaxed(int_mask, nfc->regs + NDSR);
|
|
+
|
|
+ return reg & int_mask;
|
|
}
|
|
|
|
static void marvell_nfc_force_byte_access(struct nand_chip *chip,
|
|
@@ -683,6 +688,7 @@ static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
|
|
static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
|
|
{
|
|
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
|
|
+ u32 pending;
|
|
int ret;
|
|
|
|
/* Timeout is expressed in ms */
|
|
@@ -695,8 +701,13 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
|
|
ret = wait_for_completion_timeout(&nfc->complete,
|
|
msecs_to_jiffies(timeout_ms));
|
|
marvell_nfc_disable_int(nfc, NDCR_RDYM);
|
|
- marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
|
|
- if (!ret) {
|
|
+ pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
|
|
+
|
|
+ /*
|
|
+ * In case the interrupt was not served in the required time frame,
|
|
+ * check if the ISR was not served or if something went actually wrong.
|
|
+ */
|
|
+ if (ret && !pending) {
|
|
dev_err(nfc->dev, "Timeout waiting for RB signal\n");
|
|
return -ETIMEDOUT;
|
|
}
|
|
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
|
|
index 5c26492c841dd..38b5dc22cb30a 100644
|
|
--- a/drivers/mtd/nand/raw/nand_jedec.c
|
|
+++ b/drivers/mtd/nand/raw/nand_jedec.c
|
|
@@ -107,6 +107,8 @@ int nand_jedec_detect(struct nand_chip *chip)
|
|
pr_warn("Invalid codeword size\n");
|
|
}
|
|
|
|
+ ret = 1;
|
|
+
|
|
free_jedec_param_page:
|
|
kfree(p);
|
|
return ret;
|
|
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
|
|
index 886d05c391efe..68e8b9f7f372a 100644
|
|
--- a/drivers/mtd/nand/raw/omap2.c
|
|
+++ b/drivers/mtd/nand/raw/omap2.c
|
|
@@ -1944,7 +1944,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
|
|
case NAND_OMAP_PREFETCH_DMA:
|
|
dma_cap_zero(mask);
|
|
dma_cap_set(DMA_SLAVE, mask);
|
|
- info->dma = dma_request_chan(dev, "rxtx");
|
|
+ info->dma = dma_request_chan(dev->parent, "rxtx");
|
|
|
|
if (IS_ERR(info->dma)) {
|
|
dev_err(dev, "DMA engine request failed\n");
|
|
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
|
|
index 699d3cf49c6da..7c42a57aca1ff 100644
|
|
--- a/drivers/mtd/nand/raw/qcom_nandc.c
|
|
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
|
|
@@ -2833,6 +2833,16 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
|
|
if (ret)
|
|
return ret;
|
|
|
|
+ if (nandc->props->is_bam) {
|
|
+ free_bam_transaction(nandc);
|
|
+ nandc->bam_txn = alloc_bam_transaction(nandc);
|
|
+ if (!nandc->bam_txn) {
|
|
+ dev_err(nandc->dev,
|
|
+ "failed to allocate bam transaction\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ }
|
|
+
|
|
ret = mtd_device_register(mtd, NULL, 0);
|
|
if (ret)
|
|
nand_cleanup(chip);
|
|
@@ -2847,16 +2857,6 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
|
|
struct qcom_nand_host *host;
|
|
int ret;
|
|
|
|
- if (nandc->props->is_bam) {
|
|
- free_bam_transaction(nandc);
|
|
- nandc->bam_txn = alloc_bam_transaction(nandc);
|
|
- if (!nandc->bam_txn) {
|
|
- dev_err(nandc->dev,
|
|
- "failed to allocate bam transaction\n");
|
|
- return -ENOMEM;
|
|
- }
|
|
- }
|
|
-
|
|
for_each_available_child_of_node(dn, child) {
|
|
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
|
|
if (!host) {
|
|
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
|
|
index 30f83649c4814..8c7bf91ce4e1d 100644
|
|
--- a/drivers/mtd/nand/spi/core.c
|
|
+++ b/drivers/mtd/nand/spi/core.c
|
|
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
|
|
struct nand_device *nand = spinand_to_nand(spinand);
|
|
struct mtd_info *mtd = nanddev_to_mtd(nand);
|
|
struct nand_page_io_req adjreq = *req;
|
|
- unsigned int nbytes = 0;
|
|
- void *buf = NULL;
|
|
+ void *buf = spinand->databuf;
|
|
+ unsigned int nbytes;
|
|
u16 column = 0;
|
|
int ret;
|
|
|
|
- memset(spinand->databuf, 0xff,
|
|
- nanddev_page_size(nand) +
|
|
- nanddev_per_page_oobsize(nand));
|
|
+ /*
|
|
+ * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
|
|
+ * the cache content to 0xFF (depends on vendor implementation), so we
|
|
+ * must fill the page cache entirely even if we only want to program
|
|
+ * the data portion of the page, otherwise we might corrupt the BBM or
|
|
+ * user data previously programmed in OOB area.
|
|
+ */
|
|
+ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
|
|
+ memset(spinand->databuf, 0xff, nbytes);
|
|
+ adjreq.dataoffs = 0;
|
|
+ adjreq.datalen = nanddev_page_size(nand);
|
|
+ adjreq.databuf.out = spinand->databuf;
|
|
+ adjreq.ooblen = nanddev_per_page_oobsize(nand);
|
|
+ adjreq.ooboffs = 0;
|
|
+ adjreq.oobbuf.out = spinand->oobbuf;
|
|
|
|
- if (req->datalen) {
|
|
+ if (req->datalen)
|
|
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
|
|
req->datalen);
|
|
- adjreq.dataoffs = 0;
|
|
- adjreq.datalen = nanddev_page_size(nand);
|
|
- adjreq.databuf.out = spinand->databuf;
|
|
- nbytes = adjreq.datalen;
|
|
- buf = spinand->databuf;
|
|
- }
|
|
|
|
if (req->ooblen) {
|
|
if (req->mode == MTD_OPS_AUTO_OOB)
|
|
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
|
|
else
|
|
memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
|
|
req->ooblen);
|
|
-
|
|
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
|
|
- adjreq.ooboffs = 0;
|
|
- nbytes += nanddev_per_page_oobsize(nand);
|
|
- if (!buf) {
|
|
- buf = spinand->oobbuf;
|
|
- column = nanddev_page_size(nand);
|
|
- }
|
|
}
|
|
|
|
spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
|
|
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
|
|
|
|
/*
|
|
* We need to use the RANDOM LOAD CACHE operation if there's
|
|
- * more than one iteration, because the LOAD operation resets
|
|
- * the cache to 0xff.
|
|
+ * more than one iteration, because the LOAD operation might
|
|
+ * reset the cache to 0xff.
|
|
*/
|
|
if (nbytes) {
|
|
column = op.addr.val;
|
|
@@ -1016,11 +1014,11 @@ static int spinand_init(struct spinand_device *spinand)
|
|
for (i = 0; i < nand->memorg.ntargets; i++) {
|
|
ret = spinand_select_target(spinand, i);
|
|
if (ret)
|
|
- goto err_free_bufs;
|
|
+ goto err_manuf_cleanup;
|
|
|
|
ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
|
|
if (ret)
|
|
- goto err_free_bufs;
|
|
+ goto err_manuf_cleanup;
|
|
}
|
|
|
|
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
|
|
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
|
|
index 6cc9c929ff571..37775fc09e095 100644
|
|
--- a/drivers/mtd/spi-nor/Kconfig
|
|
+++ b/drivers/mtd/spi-nor/Kconfig
|
|
@@ -41,7 +41,7 @@ config SPI_ASPEED_SMC
|
|
|
|
config SPI_ATMEL_QUADSPI
|
|
tristate "Atmel Quad SPI Controller"
|
|
- depends on ARCH_AT91 || (ARM && COMPILE_TEST)
|
|
+ depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
|
|
depends on OF && HAS_IOMEM
|
|
help
|
|
This enables support for the Quad SPI controller in master mode.
|
|
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
|
|
index 333387f1f1fe6..62659abf73cd7 100644
|
|
--- a/drivers/net/bonding/bond_main.c
|
|
+++ b/drivers/net/bonding/bond_main.c
|
|
@@ -1948,6 +1948,9 @@ static int __bond_release_one(struct net_device *bond_dev,
|
|
if (!bond_has_slaves(bond)) {
|
|
bond_set_carrier(bond);
|
|
eth_hw_addr_random(bond_dev);
|
|
+ bond->nest_level = SINGLE_DEPTH_NESTING;
|
|
+ } else {
|
|
+ bond->nest_level = dev_get_nest_level(bond_dev) + 1;
|
|
}
|
|
|
|
unblock_netpoll_tx();
|
|
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
|
|
index 3b3f88ffab53c..c05e4d50d43d7 100644
|
|
--- a/drivers/net/can/dev.c
|
|
+++ b/drivers/net/can/dev.c
|
|
@@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
|
|
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
|
|
{
|
|
struct can_priv *priv = netdev_priv(dev);
|
|
- struct sk_buff *skb = priv->echo_skb[idx];
|
|
- struct canfd_frame *cf;
|
|
|
|
if (idx >= priv->echo_skb_max) {
|
|
netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
|
|
@@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
|
|
return NULL;
|
|
}
|
|
|
|
- if (!skb) {
|
|
- netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
|
|
- __func__, idx);
|
|
- return NULL;
|
|
- }
|
|
+ if (priv->echo_skb[idx]) {
|
|
+ /* Using "struct canfd_frame::len" for the frame
|
|
+ * length is supported on both CAN and CANFD frames.
|
|
+ */
|
|
+ struct sk_buff *skb = priv->echo_skb[idx];
|
|
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
|
|
+ u8 len = cf->len;
|
|
|
|
- /* Using "struct canfd_frame::len" for the frame
|
|
- * length is supported on both CAN and CANFD frames.
|
|
- */
|
|
- cf = (struct canfd_frame *)skb->data;
|
|
- *len_ptr = cf->len;
|
|
- priv->echo_skb[idx] = NULL;
|
|
+ *len_ptr = len;
|
|
+ priv->echo_skb[idx] = NULL;
|
|
|
|
- return skb;
|
|
+ return skb;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
|
|
index 75ce11395ee81..ae219b8a7754a 100644
|
|
--- a/drivers/net/can/flexcan.c
|
|
+++ b/drivers/net/can/flexcan.c
|
|
@@ -1004,7 +1004,7 @@ static int flexcan_chip_start(struct net_device *dev)
|
|
}
|
|
} else {
|
|
/* clear and invalidate unused mailboxes first */
|
|
- for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
|
|
+ for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) {
|
|
priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
|
|
®s->mb[i].can_ctrl);
|
|
}
|
|
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
|
|
index 90f5142529870..d9c56a779c088 100644
|
|
--- a/drivers/net/dsa/b53/b53_srab.c
|
|
+++ b/drivers/net/dsa/b53/b53_srab.c
|
|
@@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev)
|
|
/* Clear all pending interrupts */
|
|
writel(0xffffffff, priv->regs + B53_SRAB_INTR);
|
|
|
|
- if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
|
|
- return;
|
|
-
|
|
for (i = 0; i < B53_N_PORTS; i++) {
|
|
port = &priv->port_intrs[i];
|
|
|
|
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
|
|
index 24fb6a6850396..b0113f6fdbb46 100644
|
|
--- a/drivers/net/dsa/mv88e6xxx/chip.c
|
|
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
|
|
@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
|
|
unsigned int sub_irq;
|
|
unsigned int n;
|
|
u16 reg;
|
|
+ u16 ctl1;
|
|
int err;
|
|
|
|
mutex_lock(&chip->reg_lock);
|
|
@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
|
|
if (err)
|
|
goto out;
|
|
|
|
- for (n = 0; n < chip->g1_irq.nirqs; ++n) {
|
|
- if (reg & (1 << n)) {
|
|
- sub_irq = irq_find_mapping(chip->g1_irq.domain, n);
|
|
- handle_nested_irq(sub_irq);
|
|
- ++nhandled;
|
|
+ do {
|
|
+ for (n = 0; n < chip->g1_irq.nirqs; ++n) {
|
|
+ if (reg & (1 << n)) {
|
|
+ sub_irq = irq_find_mapping(chip->g1_irq.domain,
|
|
+ n);
|
|
+ handle_nested_irq(sub_irq);
|
|
+ ++nhandled;
|
|
+ }
|
|
}
|
|
- }
|
|
+
|
|
+ mutex_lock(&chip->reg_lock);
|
|
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
|
|
+ if (err)
|
|
+ goto unlock;
|
|
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, ®);
|
|
+unlock:
|
|
+ mutex_unlock(&chip->reg_lock);
|
|
+ if (err)
|
|
+ goto out;
|
|
+ ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
|
|
+ } while (reg & ctl1);
|
|
+
|
|
out:
|
|
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
|
|
}
|
|
@@ -2403,6 +2419,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
|
|
return mv88e6xxx_g1_stats_clear(chip);
|
|
}
|
|
|
|
+/* The mv88e6390 has some hidden registers used for debug and
|
|
+ * development. The errata also makes use of them.
|
|
+ */
|
|
+static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
|
|
+ int reg, u16 val)
|
|
+{
|
|
+ u16 ctrl;
|
|
+ int err;
|
|
+
|
|
+ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
|
|
+ PORT_RESERVED_1A, val);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
|
|
+ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
|
|
+ reg;
|
|
+
|
|
+ return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
|
|
+ PORT_RESERVED_1A, ctrl);
|
|
+}
|
|
+
|
|
+static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
|
|
+{
|
|
+ return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
|
|
+ PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
|
|
+}
|
|
+
|
|
+
|
|
+static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
|
|
+ int reg, u16 *val)
|
|
+{
|
|
+ u16 ctrl;
|
|
+ int err;
|
|
+
|
|
+ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
|
|
+ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
|
|
+ reg;
|
|
+
|
|
+ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
|
|
+ PORT_RESERVED_1A, ctrl);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ err = mv88e6390_hidden_wait(chip);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
|
|
+ PORT_RESERVED_1A, val);
|
|
+}
|
|
+
|
|
+/* Check if the errata has already been applied. */
|
|
+static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
|
|
+{
|
|
+ int port;
|
|
+ int err;
|
|
+ u16 val;
|
|
+
|
|
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
|
|
+ err = mv88e6390_hidden_read(chip, port, 0, &val);
|
|
+ if (err) {
|
|
+ dev_err(chip->dev,
|
|
+ "Error reading hidden register: %d\n", err);
|
|
+ return false;
|
|
+ }
|
|
+ if (val != 0x01c0)
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/* The 6390 copper ports have an errata which require poking magic
|
|
+ * values into undocumented hidden registers and then performing a
|
|
+ * software reset.
|
|
+ */
|
|
+static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
|
|
+{
|
|
+ int port;
|
|
+ int err;
|
|
+
|
|
+ if (mv88e6390_setup_errata_applied(chip))
|
|
+ return 0;
|
|
+
|
|
+ /* Set the ports into blocking mode */
|
|
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
|
|
+ err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
|
|
+ if (err)
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
|
|
+ err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
|
|
+ if (err)
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ return mv88e6xxx_software_reset(chip);
|
|
+}
|
|
+
|
|
static int mv88e6xxx_setup(struct dsa_switch *ds)
|
|
{
|
|
struct mv88e6xxx_chip *chip = ds->priv;
|
|
@@ -2415,6 +2532,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
|
|
|
|
mutex_lock(&chip->reg_lock);
|
|
|
|
+ if (chip->info->ops->setup_errata) {
|
|
+ err = chip->info->ops->setup_errata(chip);
|
|
+ if (err)
|
|
+ goto unlock;
|
|
+ }
|
|
+
|
|
/* Cache the cmode of each port. */
|
|
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
|
|
if (chip->info->ops->port_get_cmode) {
|
|
@@ -3215,6 +3338,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
|
|
|
|
static const struct mv88e6xxx_ops mv88e6190_ops = {
|
|
/* MV88E6XXX_FAMILY_6390 */
|
|
+ .setup_errata = mv88e6390_setup_errata,
|
|
.irl_init_all = mv88e6390_g2_irl_init_all,
|
|
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
|
|
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
|
|
@@ -3257,6 +3381,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
|
|
|
|
static const struct mv88e6xxx_ops mv88e6190x_ops = {
|
|
/* MV88E6XXX_FAMILY_6390 */
|
|
+ .setup_errata = mv88e6390_setup_errata,
|
|
.irl_init_all = mv88e6390_g2_irl_init_all,
|
|
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
|
|
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
|
|
@@ -3299,6 +3424,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
|
|
|
|
static const struct mv88e6xxx_ops mv88e6191_ops = {
|
|
/* MV88E6XXX_FAMILY_6390 */
|
|
+ .setup_errata = mv88e6390_setup_errata,
|
|
.irl_init_all = mv88e6390_g2_irl_init_all,
|
|
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
|
|
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
|
|
@@ -3390,6 +3516,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
|
|
|
|
static const struct mv88e6xxx_ops mv88e6290_ops = {
|
|
/* MV88E6XXX_FAMILY_6390 */
|
|
+ .setup_errata = mv88e6390_setup_errata,
|
|
.irl_init_all = mv88e6390_g2_irl_init_all,
|
|
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
|
|
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
|
|
@@ -3693,6 +3820,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
|
|
|
|
static const struct mv88e6xxx_ops mv88e6390_ops = {
|
|
/* MV88E6XXX_FAMILY_6390 */
|
|
+ .setup_errata = mv88e6390_setup_errata,
|
|
.irl_init_all = mv88e6390_g2_irl_init_all,
|
|
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
|
|
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
|
|
@@ -3740,6 +3868,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
|
|
|
|
static const struct mv88e6xxx_ops mv88e6390x_ops = {
|
|
/* MV88E6XXX_FAMILY_6390 */
|
|
+ .setup_errata = mv88e6390_setup_errata,
|
|
.irl_init_all = mv88e6390_g2_irl_init_all,
|
|
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
|
|
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
|
|
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
|
|
index f9ecb7872d32c..546651d8c3e1f 100644
|
|
--- a/drivers/net/dsa/mv88e6xxx/chip.h
|
|
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
|
|
@@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus {
|
|
};
|
|
|
|
struct mv88e6xxx_ops {
|
|
+ /* Switch Setup Errata, called early in the switch setup to
|
|
+ * allow any errata actions to be performed
|
|
+ */
|
|
+ int (*setup_errata)(struct mv88e6xxx_chip *chip);
|
|
+
|
|
int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
|
|
int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
|
|
|
|
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
|
|
index 5200e4bdce93d..ea243840ee0fe 100644
|
|
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
|
|
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
|
|
@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
|
|
{
|
|
struct mv88e6xxx_chip *chip = dev_id;
|
|
struct mv88e6xxx_atu_entry entry;
|
|
+ int spid;
|
|
int err;
|
|
u16 val;
|
|
|
|
@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
|
|
if (err)
|
|
goto out;
|
|
|
|
+ spid = entry.state;
|
|
+
|
|
if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
|
|
dev_err_ratelimited(chip->dev,
|
|
"ATU age out violation for %pM\n",
|
|
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
|
|
|
|
if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
|
|
dev_err_ratelimited(chip->dev,
|
|
- "ATU member violation for %pM portvec %x\n",
|
|
- entry.mac, entry.portvec);
|
|
- chip->ports[entry.portvec].atu_member_violation++;
|
|
+ "ATU member violation for %pM portvec %x spid %d\n",
|
|
+ entry.mac, entry.portvec, spid);
|
|
+ chip->ports[spid].atu_member_violation++;
|
|
}
|
|
|
|
if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
|
|
dev_err_ratelimited(chip->dev,
|
|
- "ATU miss violation for %pM portvec %x\n",
|
|
- entry.mac, entry.portvec);
|
|
- chip->ports[entry.portvec].atu_miss_violation++;
|
|
+ "ATU miss violation for %pM portvec %x spid %d\n",
|
|
+ entry.mac, entry.portvec, spid);
|
|
+ chip->ports[spid].atu_miss_violation++;
|
|
}
|
|
|
|
if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
|
|
dev_err_ratelimited(chip->dev,
|
|
- "ATU full violation for %pM portvec %x\n",
|
|
- entry.mac, entry.portvec);
|
|
- chip->ports[entry.portvec].atu_full_violation++;
|
|
+ "ATU full violation for %pM portvec %x spid %d\n",
|
|
+ entry.mac, entry.portvec, spid);
|
|
+ chip->ports[spid].atu_full_violation++;
|
|
}
|
|
mutex_unlock(&chip->reg_lock);
|
|
|
|
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
|
|
index 36904c9bf955f..091aa0057f1f6 100644
|
|
--- a/drivers/net/dsa/mv88e6xxx/port.h
|
|
+++ b/drivers/net/dsa/mv88e6xxx/port.h
|
|
@@ -251,6 +251,16 @@
|
|
/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
|
|
#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
|
|
|
|
+/* Offset 0x1a: Magic undocumented errata register */
|
|
+#define PORT_RESERVED_1A 0x1a
|
|
+#define PORT_RESERVED_1A_BUSY BIT(15)
|
|
+#define PORT_RESERVED_1A_WRITE BIT(14)
|
|
+#define PORT_RESERVED_1A_READ 0
|
|
+#define PORT_RESERVED_1A_PORT_SHIFT 5
|
|
+#define PORT_RESERVED_1A_BLOCK (0xf << 10)
|
|
+#define PORT_RESERVED_1A_CTRL_PORT 4
|
|
+#define PORT_RESERVED_1A_DATA_PORT 5
|
|
+
|
|
int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
|
|
u16 *val);
|
|
int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
|
|
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
|
|
index b4b839a1d0952..ad41ec63cc9f0 100644
|
|
--- a/drivers/net/dsa/realtek-smi.c
|
|
+++ b/drivers/net/dsa/realtek-smi.c
|
|
@@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
|
|
struct device_node *mdio_np;
|
|
int ret;
|
|
|
|
- mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
|
|
- "realtek,smi-mdio");
|
|
+ mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
|
|
if (!mdio_np) {
|
|
dev_err(smi->dev, "no MDIO bus node\n");
|
|
return -ENODEV;
|
|
}
|
|
|
|
smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
|
|
- if (!smi->slave_mii_bus)
|
|
- return -ENOMEM;
|
|
+ if (!smi->slave_mii_bus) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_put_node;
|
|
+ }
|
|
smi->slave_mii_bus->priv = smi;
|
|
smi->slave_mii_bus->name = "SMI slave MII";
|
|
smi->slave_mii_bus->read = realtek_smi_mdio_read;
|
|
@@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
|
|
if (ret) {
|
|
dev_err(smi->dev, "unable to register MDIO bus %s\n",
|
|
smi->slave_mii_bus->id);
|
|
- of_node_put(mdio_np);
|
|
+ goto err_put_node;
|
|
}
|
|
|
|
return 0;
|
|
+
|
|
+err_put_node:
|
|
+ of_node_put(mdio_np);
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
static int realtek_smi_probe(struct platform_device *pdev)
|
|
@@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev)
|
|
struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
|
|
|
|
dsa_unregister_switch(smi->ds);
|
|
+ if (smi->slave_mii_bus)
|
|
+ of_node_put(smi->slave_mii_bus->dev.of_node);
|
|
gpiod_set_value(smi->reset, 1);
|
|
|
|
return 0;
|
|
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
index a70bb1bb90e7d..a6eacf2099c30 100644
|
|
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
|
|
@@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
|
|
goto err_device_destroy;
|
|
}
|
|
|
|
- clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
|
|
- /* Make sure we don't have a race with AENQ Links state handler */
|
|
- if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
|
|
- netif_carrier_on(adapter->netdev);
|
|
-
|
|
rc = ena_enable_msix_and_set_admin_interrupts(adapter,
|
|
adapter->num_queues);
|
|
if (rc) {
|
|
@@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
|
|
}
|
|
|
|
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
|
|
+
|
|
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
|
|
+ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
|
|
+ netif_carrier_on(adapter->netdev);
|
|
+
|
|
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
|
|
dev_err(&pdev->dev,
|
|
"Device reset completed successfully, Driver info: %s\n",
|
|
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
|
|
index d272dc6984ac6..b40d4377cc71d 100644
|
|
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
|
|
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
|
|
@@ -431,8 +431,6 @@
|
|
#define MAC_MDIOSCAR_PA_WIDTH 5
|
|
#define MAC_MDIOSCAR_RA_INDEX 0
|
|
#define MAC_MDIOSCAR_RA_WIDTH 16
|
|
-#define MAC_MDIOSCAR_REG_INDEX 0
|
|
-#define MAC_MDIOSCAR_REG_WIDTH 21
|
|
#define MAC_MDIOSCCDR_BUSY_INDEX 22
|
|
#define MAC_MDIOSCCDR_BUSY_WIDTH 1
|
|
#define MAC_MDIOSCCDR_CMD_INDEX 16
|
|
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
|
|
index 1e929a1e4ca78..4666084eda16a 100644
|
|
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
|
|
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
|
|
@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
|
|
}
|
|
}
|
|
|
|
+static unsigned int xgbe_create_mdio_sca(int port, int reg)
|
|
+{
|
|
+ unsigned int mdio_sca, da;
|
|
+
|
|
+ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
|
|
+
|
|
+ mdio_sca = 0;
|
|
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
|
|
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
|
|
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
|
|
+
|
|
+ return mdio_sca;
|
|
+}
|
|
+
|
|
static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
|
|
int reg, u16 val)
|
|
{
|
|
@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
|
|
|
|
reinit_completion(&pdata->mdio_complete);
|
|
|
|
- mdio_sca = 0;
|
|
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
|
|
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
|
|
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
|
|
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
|
|
|
|
mdio_sccd = 0;
|
|
@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
|
|
|
|
reinit_completion(&pdata->mdio_complete);
|
|
|
|
- mdio_sca = 0;
|
|
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
|
|
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
|
|
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
|
|
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
|
|
|
|
mdio_sccd = 0;
|
|
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
|
|
index 7def1cb8ab9d0..22cbf8dc6fa9b 100644
|
|
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
|
|
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
|
|
@@ -263,6 +263,8 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
|
|
AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
|
|
HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
|
|
10, 1000U);
|
|
+ if (err)
|
|
+ return err;
|
|
}
|
|
|
|
if (self->rbl_enabled)
|
|
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
|
|
index 0e2d99c737e35..baa109040b401 100644
|
|
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
|
|
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
|
|
@@ -520,7 +520,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
|
|
struct ethtool_wolinfo *wol)
|
|
{
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
|
- u32 reg;
|
|
|
|
wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
|
|
wol->wolopts = priv->wolopts;
|
|
@@ -528,11 +527,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
|
|
if (!(priv->wolopts & WAKE_MAGICSECURE))
|
|
return;
|
|
|
|
- /* Return the programmed SecureOn password */
|
|
- reg = umac_readl(priv, UMAC_PSW_MS);
|
|
- put_unaligned_be16(reg, &wol->sopass[0]);
|
|
- reg = umac_readl(priv, UMAC_PSW_LS);
|
|
- put_unaligned_be32(reg, &wol->sopass[2]);
|
|
+ memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
|
|
}
|
|
|
|
static int bcm_sysport_set_wol(struct net_device *dev,
|
|
@@ -548,13 +543,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
|
|
if (wol->wolopts & ~supported)
|
|
return -EINVAL;
|
|
|
|
- /* Program the SecureOn password */
|
|
- if (wol->wolopts & WAKE_MAGICSECURE) {
|
|
- umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
|
|
- UMAC_PSW_MS);
|
|
- umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
|
|
- UMAC_PSW_LS);
|
|
- }
|
|
+ if (wol->wolopts & WAKE_MAGICSECURE)
|
|
+ memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
|
|
|
|
/* Flag the device and relevant IRQ as wakeup capable */
|
|
if (wol->wolopts) {
|
|
@@ -2592,13 +2582,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
|
|
unsigned int index, i = 0;
|
|
u32 reg;
|
|
|
|
- /* Password has already been programmed */
|
|
reg = umac_readl(priv, UMAC_MPD_CTRL);
|
|
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
|
|
reg |= MPD_EN;
|
|
reg &= ~PSW_EN;
|
|
- if (priv->wolopts & WAKE_MAGICSECURE)
|
|
+ if (priv->wolopts & WAKE_MAGICSECURE) {
|
|
+ /* Program the SecureOn password */
|
|
+ umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
|
|
+ UMAC_PSW_MS);
|
|
+ umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
|
|
+ UMAC_PSW_LS);
|
|
reg |= PSW_EN;
|
|
+ }
|
|
umac_writel(priv, reg, UMAC_MPD_CTRL);
|
|
|
|
if (priv->wolopts & WAKE_FILTER) {
|
|
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
|
|
index a7a230884a871..930ab8de3f457 100644
|
|
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
|
|
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
|
|
@@ -12,6 +12,7 @@
|
|
#define __BCM_SYSPORT_H
|
|
|
|
#include <linux/bitmap.h>
|
|
+#include <linux/ethtool.h>
|
|
#include <linux/if_vlan.h>
|
|
#include <linux/net_dim.h>
|
|
|
|
@@ -778,6 +779,7 @@ struct bcm_sysport_priv {
|
|
unsigned int crc_fwd:1;
|
|
u16 rev;
|
|
u32 wolopts;
|
|
+ u8 sopass[SOPASS_MAX];
|
|
unsigned int wol_irq_disabled:1;
|
|
|
|
/* MIB related fields */
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
index b164f705709d0..3b5b47e98c73d 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
@@ -9360,10 +9360,16 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
|
|
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
|
|
rc);
|
|
|
|
- /* Remove all currently configured VLANs */
|
|
- rc = bnx2x_del_all_vlans(bp);
|
|
- if (rc < 0)
|
|
- BNX2X_ERR("Failed to delete all VLANs\n");
|
|
+ /* The whole *vlan_obj structure may be not initialized if VLAN
|
|
+ * filtering offload is not supported by hardware. Currently this is
|
|
+ * true for all hardware covered by CHIP_IS_E1x().
|
|
+ */
|
|
+ if (!CHIP_IS_E1x(bp)) {
|
|
+ /* Remove all currently configured VLANs */
|
|
+ rc = bnx2x_del_all_vlans(bp);
|
|
+ if (rc < 0)
|
|
+ BNX2X_ERR("Failed to delete all VLANs\n");
|
|
+ }
|
|
|
|
/* Disable LLH */
|
|
if (!CHIP_IS_E1(bp))
|
|
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
|
|
index 5d21c14853acc..090207817ad8d 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
|
|
@@ -4903,12 +4903,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
|
|
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
|
|
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
|
|
u32 map_idx = ring->map_idx;
|
|
+ unsigned int vector;
|
|
|
|
+ vector = bp->irq_tbl[map_idx].vector;
|
|
+ disable_irq_nosync(vector);
|
|
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
|
|
- if (rc)
|
|
+ if (rc) {
|
|
+ enable_irq(vector);
|
|
goto err_out;
|
|
+ }
|
|
bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
|
|
bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
|
|
+ enable_irq(vector);
|
|
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
|
|
|
|
if (!i) {
|
|
@@ -7203,23 +7209,26 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
|
|
int bnxt_reserve_rings(struct bnxt *bp)
|
|
{
|
|
int tcs = netdev_get_num_tc(bp->dev);
|
|
+ bool reinit_irq = false;
|
|
int rc;
|
|
|
|
if (!bnxt_need_reserve_rings(bp))
|
|
return 0;
|
|
|
|
- rc = __bnxt_reserve_rings(bp);
|
|
- if (rc) {
|
|
- netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
|
|
- return rc;
|
|
- }
|
|
if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
|
|
bnxt_ulp_irq_stop(bp);
|
|
bnxt_clear_int_mode(bp);
|
|
- rc = bnxt_init_int_mode(bp);
|
|
+ reinit_irq = true;
|
|
+ }
|
|
+ rc = __bnxt_reserve_rings(bp);
|
|
+ if (reinit_irq) {
|
|
+ if (!rc)
|
|
+ rc = bnxt_init_int_mode(bp);
|
|
bnxt_ulp_irq_restart(bp, rc);
|
|
- if (rc)
|
|
- return rc;
|
|
+ }
|
|
+ if (rc) {
|
|
+ netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
|
|
+ return rc;
|
|
}
|
|
if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
|
|
netdev_err(bp->dev, "tx ring reservation failure\n");
|
|
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
|
|
index 60641e2025341..9a7f70db20c7f 100644
|
|
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
|
|
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
|
|
@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
|
* csum is correct or is zero.
|
|
*/
|
|
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
|
|
- tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
|
|
+ tcp_udp_csum_ok && outer_csum_ok &&
|
|
+ (ipv4_csum_ok || ipv6)) {
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
skb->csum_level = encap;
|
|
}
|
|
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
|
|
index 6e0f47f2c8a37..3e53be0fcd7ec 100644
|
|
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
|
|
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
|
|
@@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
|
|
bool nonlinear = skb_is_nonlinear(skb);
|
|
struct rtnl_link_stats64 *percpu_stats;
|
|
struct dpaa_percpu_priv *percpu_priv;
|
|
+ struct netdev_queue *txq;
|
|
struct dpaa_priv *priv;
|
|
struct qm_fd fd;
|
|
int offset = 0;
|
|
@@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
|
|
if (unlikely(err < 0))
|
|
goto skb_to_fd_failed;
|
|
|
|
+ txq = netdev_get_tx_queue(net_dev, queue_mapping);
|
|
+
|
|
+ /* LLTX requires to do our own update of trans_start */
|
|
+ txq->trans_start = jiffies;
|
|
+
|
|
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
|
|
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
|
|
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
|
|
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
|
|
index 84b942b1eccc8..9b150db3b5105 100644
|
|
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
|
|
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
|
|
@@ -140,7 +140,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
|
|
|
|
err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
|
|
if (err) {
|
|
- dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
|
|
+ if (err == -ENXIO)
|
|
+ err = -EPROBE_DEFER;
|
|
+ else
|
|
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
|
|
goto err_exit;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
|
|
index bc6eb30aa20f1..41c6fa200e746 100644
|
|
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
|
|
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
|
|
@@ -928,7 +928,7 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
|
|
hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
|
|
|
|
/* Create element to be added to the driver hash table */
|
|
- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
|
|
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
|
|
if (!hash_entry)
|
|
return -ENOMEM;
|
|
hash_entry->addr = addr;
|
|
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
|
|
index 40705938eeccf..f75b9c11b2d29 100644
|
|
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
|
|
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
|
|
@@ -553,7 +553,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
|
|
hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
|
|
|
|
/* Create element to be added to the driver hash table */
|
|
- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
|
|
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
|
|
if (!hash_entry)
|
|
return -ENOMEM;
|
|
hash_entry->addr = addr;
|
|
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
|
|
index 32e02700feaa1..91d7965b3dab4 100644
|
|
--- a/drivers/net/ethernet/freescale/ucc_geth.c
|
|
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
|
|
@@ -1883,6 +1883,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
|
|
u16 i, j;
|
|
u8 __iomem *bd;
|
|
|
|
+ netdev_reset_queue(ugeth->ndev);
|
|
+
|
|
ug_info = ugeth->ug_info;
|
|
uf_info = &ug_info->uf_info;
|
|
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
|
|
index ad1779fc410e6..a78bfafd212c8 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
|
|
@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
|
|
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
|
|
int i;
|
|
|
|
- vf_cb->mac_cb = NULL;
|
|
-
|
|
- kfree(vf_cb);
|
|
-
|
|
for (i = 0; i < handle->q_num; i++)
|
|
hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
|
|
+
|
|
+ kfree(vf_cb);
|
|
}
|
|
|
|
static int hns_ae_wait_flow_down(struct hnae_handle *handle)
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
|
|
index 20fcf0d1c2ce5..d424d5bc05079 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
|
|
@@ -2332,9 +2332,16 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
|
|
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
|
|
struct sk_buff *skb)
|
|
{
|
|
- struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
|
|
struct hnae3_handle *handle = ring->tqp->handle;
|
|
enum pkt_hash_types rss_type;
|
|
+ struct hns3_desc *desc;
|
|
+ int last_bd;
|
|
+
|
|
+ /* When driver handle the rss type, ring->next_to_clean indicates the
|
|
+ * first descriptor of next packet, need -1 here.
|
|
+ */
|
|
+ last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
|
|
+ desc = &ring->desc[last_bd];
|
|
|
|
if (le32_to_cpu(desc->rx.rss_hash))
|
|
rss_type = handle->kinfo.rss_type;
|
|
@@ -2783,9 +2790,10 @@ err_free_chain:
|
|
cur_chain = head->next;
|
|
while (cur_chain) {
|
|
chain = cur_chain->next;
|
|
- devm_kfree(&pdev->dev, chain);
|
|
+ devm_kfree(&pdev->dev, cur_chain);
|
|
cur_chain = chain;
|
|
}
|
|
+ head->next = NULL;
|
|
|
|
return -ENOMEM;
|
|
}
|
|
@@ -2876,7 +2884,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
|
|
ret = hns3_get_vector_ring_chain(tqp_vector,
|
|
&vector_ring_chain);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto map_ring_fail;
|
|
|
|
ret = h->ae_algo->ops->map_ring_to_vector(h,
|
|
tqp_vector->vector_irq, &vector_ring_chain);
|
|
@@ -2901,6 +2909,8 @@ map_ring_fail:
|
|
|
|
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
|
|
{
|
|
+#define HNS3_VECTOR_PF_MAX_NUM 64
|
|
+
|
|
struct hnae3_handle *h = priv->ae_handle;
|
|
struct hns3_enet_tqp_vector *tqp_vector;
|
|
struct hnae3_vector_info *vector;
|
|
@@ -2913,6 +2923,8 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
|
|
/* RSS size, cpu online and vector_num should be the same */
|
|
/* Should consider 2p/4p later */
|
|
vector_num = min_t(u16, num_online_cpus(), tqp_num);
|
|
+ vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
|
|
+
|
|
vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
|
|
GFP_KERNEL);
|
|
if (!vector)
|
|
@@ -2970,12 +2982,12 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
|
|
|
|
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
|
|
|
|
- if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
|
|
- (void)irq_set_affinity_hint(
|
|
- priv->tqp_vector[i].vector_irq,
|
|
- NULL);
|
|
- free_irq(priv->tqp_vector[i].vector_irq,
|
|
- &priv->tqp_vector[i]);
|
|
+ if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
|
|
+ irq_set_affinity_notifier(tqp_vector->vector_irq,
|
|
+ NULL);
|
|
+ irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
|
|
+ free_irq(tqp_vector->vector_irq, tqp_vector);
|
|
+ tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
|
|
}
|
|
|
|
priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
|
|
index ffdd96020860d..a7895aefe291e 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
|
|
@@ -4339,6 +4339,10 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
|
|
if (!hnae3_dev_fd_supported(hdev))
|
|
return -EOPNOTSUPP;
|
|
|
|
+ /* if fd is disabled, should not restore it when reset */
|
|
+ if (!hdev->fd_cfg.fd_en)
|
|
+ return 0;
|
|
+
|
|
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
|
|
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
|
|
if (!ret)
|
|
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
|
|
index a4681780a55d2..098d8764c0ea9 100644
|
|
--- a/drivers/net/ethernet/ibm/ibmveth.c
|
|
+++ b/drivers/net/ethernet/ibm/ibmveth.c
|
|
@@ -1171,11 +1171,15 @@ out:
|
|
|
|
map_failed_frags:
|
|
last = i+1;
|
|
- for (i = 0; i < last; i++)
|
|
+ for (i = 1; i < last; i++)
|
|
dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
|
|
descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
|
|
DMA_TO_DEVICE);
|
|
|
|
+ dma_unmap_single(&adapter->vdev->dev,
|
|
+ descs[0].fields.address,
|
|
+ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
|
|
+ DMA_TO_DEVICE);
|
|
map_failed:
|
|
if (!firmware_has_feature(FW_FEATURE_CMO))
|
|
netdev_err(netdev, "tx: unable to map xmit buffer\n");
|
|
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
|
|
index 37c76945ad9ba..e1f821edbc21c 100644
|
|
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
|
|
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
|
|
@@ -173,10 +173,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
|
|
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
|
|
ptp_clock_info);
|
|
unsigned long flags;
|
|
- u64 ns;
|
|
+ u64 cycles, ns;
|
|
|
|
spin_lock_irqsave(&adapter->systim_lock, flags);
|
|
- ns = timecounter_read(&adapter->tc);
|
|
+
|
|
+ /* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
|
|
+ cycles = adapter->cc.read(&adapter->cc);
|
|
+ ns = timecounter_cyc2time(&adapter->tc, cycles);
|
|
+
|
|
spin_unlock_irqrestore(&adapter->systim_lock, flags);
|
|
|
|
*ts = ns_to_timespec64(ns);
|
|
@@ -232,9 +236,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
|
|
systim_overflow_work.work);
|
|
struct e1000_hw *hw = &adapter->hw;
|
|
struct timespec64 ts;
|
|
+ u64 ns;
|
|
|
|
- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
|
|
+ /* Update the timecounter */
|
|
+ ns = timecounter_read(&adapter->tc);
|
|
|
|
+ ts = ns_to_timespec64(ns);
|
|
e_dbg("SYSTIM overflow check at %lld.%09lu\n",
|
|
(long long) ts.tv_sec, ts.tv_nsec);
|
|
|
|
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
|
|
index 876cac317e795..8245ff12fd64f 100644
|
|
--- a/drivers/net/ethernet/intel/i40e/i40e.h
|
|
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
|
|
@@ -122,6 +122,7 @@ enum i40e_state_t {
|
|
__I40E_MDD_EVENT_PENDING,
|
|
__I40E_VFLR_EVENT_PENDING,
|
|
__I40E_RESET_RECOVERY_PENDING,
|
|
+ __I40E_TIMEOUT_RECOVERY_PENDING,
|
|
__I40E_MISC_IRQ_REQUESTED,
|
|
__I40E_RESET_INTR_RECEIVED,
|
|
__I40E_REINIT_REQUESTED,
|
|
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
|
|
index 0e5dc74b4ef22..f97c3d5ab884c 100644
|
|
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
|
|
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
|
|
@@ -338,6 +338,10 @@ static void i40e_tx_timeout(struct net_device *netdev)
|
|
(pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
|
|
return; /* don't do any new action before the next timeout */
|
|
|
|
+ /* don't kick off another recovery if one is already pending */
|
|
+ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
|
|
+ return;
|
|
+
|
|
if (tx_ring) {
|
|
head = i40e_get_head(tx_ring);
|
|
/* Read interrupt register */
|
|
@@ -9632,6 +9636,7 @@ end_core_reset:
|
|
clear_bit(__I40E_RESET_FAILED, pf->state);
|
|
clear_recovery:
|
|
clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
|
|
+ clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
|
|
}
|
|
|
|
/**
|
|
@@ -12334,6 +12339,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
|
|
ether_addr_copy(netdev->dev_addr, mac_addr);
|
|
ether_addr_copy(netdev->perm_addr, mac_addr);
|
|
|
|
+ /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
|
|
+ netdev->neigh_priv_len = sizeof(u32) * 4;
|
|
+
|
|
netdev->priv_flags |= IFF_UNICAST_FLT;
|
|
netdev->priv_flags |= IFF_SUPP_NOFCS;
|
|
/* Setup netdev TC information */
|
|
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
|
|
index ac5698ed0b111..c41e8ada23d12 100644
|
|
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
|
|
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
|
|
@@ -1112,7 +1112,8 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
|
|
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
|
|
return I40E_ERR_PARAM;
|
|
|
|
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
|
|
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
|
|
+ (allmulti || alluni)) {
|
|
dev_err(&pf->pdev->dev,
|
|
"Unprivileged VF %d is attempting to configure promiscuous mode\n",
|
|
vf->vf_id);
|
|
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
|
|
index 333312a1d5957..9450004492795 100644
|
|
--- a/drivers/net/ethernet/intel/ice/ice_main.c
|
|
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
|
|
@@ -2563,8 +2563,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
|
|
if (!vsi->netdev)
|
|
return;
|
|
|
|
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
|
|
- napi_enable(&vsi->q_vectors[q_idx]->napi);
|
|
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
|
|
+ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
|
|
+
|
|
+ if (q_vector->rx.ring || q_vector->tx.ring)
|
|
+ napi_enable(&q_vector->napi);
|
|
+ }
|
|
}
|
|
|
|
/**
|
|
@@ -2931,8 +2935,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
|
|
if (!vsi->netdev)
|
|
return;
|
|
|
|
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
|
|
- napi_disable(&vsi->q_vectors[q_idx]->napi);
|
|
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
|
|
+ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
|
|
+
|
|
+ if (q_vector->rx.ring || q_vector->tx.ring)
|
|
+ napi_disable(&q_vector->napi);
|
|
+ }
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
|
|
index 5df88ad8ac819..93f150784cfc9 100644
|
|
--- a/drivers/net/ethernet/intel/igb/igb_main.c
|
|
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
|
|
@@ -8770,9 +8770,11 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
|
|
rtnl_unlock();
|
|
|
|
#ifdef CONFIG_PM
|
|
- retval = pci_save_state(pdev);
|
|
- if (retval)
|
|
- return retval;
|
|
+ if (!runtime) {
|
|
+ retval = pci_save_state(pdev);
|
|
+ if (retval)
|
|
+ return retval;
|
|
+ }
|
|
#endif
|
|
|
|
status = rd32(E1000_STATUS);
|
|
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
|
|
index fd1b0546fd675..4d77f42e035c5 100644
|
|
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
|
|
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
|
|
@@ -4,6 +4,7 @@
|
|
#include "ixgbe.h"
|
|
#include <net/xfrm.h>
|
|
#include <crypto/aead.h>
|
|
+#include <linux/if_bridge.h>
|
|
|
|
#define IXGBE_IPSEC_KEY_BITS 160
|
|
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
|
|
@@ -693,7 +694,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
|
|
} else {
|
|
struct tx_sa tsa;
|
|
|
|
- if (adapter->num_vfs)
|
|
+ if (adapter->num_vfs &&
|
|
+ adapter->bridge_mode != BRIDGE_MODE_VEPA)
|
|
return -EOPNOTSUPP;
|
|
|
|
/* find the first unused index */
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
|
|
index 12db256c8c9f9..ee67d1c4281dd 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
|
|
@@ -668,7 +668,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
if (!cgx->reg_base) {
|
|
dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
|
|
err = -ENOMEM;
|
|
- goto err_release_regions;
|
|
+ goto err_free_irq_vectors;
|
|
}
|
|
|
|
nvec = CGX_NVEC;
|
|
@@ -693,6 +693,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
err_release_lmac:
|
|
cgx_lmac_exit(cgx);
|
|
list_del(&cgx->cgx_list);
|
|
+err_free_irq_vectors:
|
|
+ pci_free_irq_vectors(pdev);
|
|
err_release_regions:
|
|
pci_release_regions(pdev);
|
|
err_disable_device:
|
|
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
|
|
index 9c08c3650c02c..15dea48e01953 100644
|
|
--- a/drivers/net/ethernet/marvell/skge.c
|
|
+++ b/drivers/net/ethernet/marvell/skge.c
|
|
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
|
|
memset(p, 0, regs->len);
|
|
memcpy_fromio(p, io, B3_RAM_ADDR);
|
|
|
|
- memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
|
|
- regs->len - B3_RI_WTO_R1);
|
|
+ if (regs->len > B3_RI_WTO_R1) {
|
|
+ memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
|
|
+ regs->len - B3_RI_WTO_R1);
|
|
+ }
|
|
}
|
|
|
|
/* Wake on Lan only supported on Yukon chips with rev 1 or above */
|
|
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
|
|
index 697d9b374f5e1..ae2f35039343b 100644
|
|
--- a/drivers/net/ethernet/marvell/sky2.c
|
|
+++ b/drivers/net/ethernet/marvell/sky2.c
|
|
@@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
INIT_WORK(&hw->restart_work, sky2_restart);
|
|
|
|
pci_set_drvdata(pdev, hw);
|
|
- pdev->d3_delay = 200;
|
|
+ pdev->d3_delay = 300;
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
|
|
index db00bf1c23f5a..d47d4f86ac11d 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
|
|
@@ -620,6 +620,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
|
|
}
|
|
#endif
|
|
|
|
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
|
|
+
|
|
/* We reach this function only after checking that any of
|
|
* the (IPv4 | IPv6) bits are set in cqe->status.
|
|
*/
|
|
@@ -627,9 +629,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
|
|
netdev_features_t dev_features)
|
|
{
|
|
__wsum hw_checksum = 0;
|
|
+ void *hdr;
|
|
+
|
|
+ /* CQE csum doesn't cover padding octets in short ethernet
|
|
+ * frames. And the pad field is appended prior to calculating
|
|
+ * and appending the FCS field.
|
|
+ *
|
|
+ * Detecting these padded frames requires to verify and parse
|
|
+ * IP headers, so we simply force all those small frames to skip
|
|
+ * checksum complete.
|
|
+ */
|
|
+ if (short_frame(skb->len))
|
|
+ return -EINVAL;
|
|
|
|
- void *hdr = (u8 *)va + sizeof(struct ethhdr);
|
|
-
|
|
+ hdr = (u8 *)va + sizeof(struct ethhdr);
|
|
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
|
|
|
|
if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
|
|
@@ -822,6 +835,11 @@ xdp_drop_no_cnt:
|
|
skb_record_rx_queue(skb, cq_ring);
|
|
|
|
if (likely(dev->features & NETIF_F_RXCSUM)) {
|
|
+ /* TODO: For IP non TCP/UDP packets when csum complete is
|
|
+ * not an option (not supported or any other reason) we can
|
|
+ * actually check cqe IPOK status bit and report
|
|
+ * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
|
|
+ */
|
|
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
|
|
MLX4_CQE_STATUS_UDP)) &&
|
|
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
|
|
index babcfd9c0571f..75213046563ce 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
|
|
@@ -2064,9 +2064,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
|
|
{
|
|
struct mlx4_cmd_mailbox *mailbox;
|
|
__be32 *outbox;
|
|
+ u64 qword_field;
|
|
u32 dword_field;
|
|
- int err;
|
|
+ u16 word_field;
|
|
u8 byte_field;
|
|
+ int err;
|
|
static const u8 a0_dmfs_query_hw_steering[] = {
|
|
[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
|
|
[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
|
|
@@ -2094,19 +2096,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
|
|
|
|
/* QPC/EEC/CQC/EQC/RDMARC attributes */
|
|
|
|
- MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
|
|
- MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
|
|
- MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
|
|
- MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
|
|
- MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
|
|
- MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
|
|
- MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
|
|
- MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
|
|
- MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
|
|
- MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
|
|
- MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
|
|
- MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
|
|
- MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
|
|
+ MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
|
|
+ param->qpc_base = qword_field & ~((u64)0x1f);
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
|
|
+ param->log_num_qps = byte_field & 0x1f;
|
|
+ MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
|
|
+ param->srqc_base = qword_field & ~((u64)0x1f);
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
|
|
+ param->log_num_srqs = byte_field & 0x1f;
|
|
+ MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
|
|
+ param->cqc_base = qword_field & ~((u64)0x1f);
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
|
|
+ param->log_num_cqs = byte_field & 0x1f;
|
|
+ MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
|
|
+ param->altc_base = qword_field;
|
|
+ MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
|
|
+ param->auxc_base = qword_field;
|
|
+ MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
|
|
+ param->eqc_base = qword_field & ~((u64)0x1f);
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
|
|
+ param->log_num_eqs = byte_field & 0x1f;
|
|
+ MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
|
|
+ param->num_sys_eqs = word_field & 0xfff;
|
|
+ MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
|
|
+ param->rdmarc_base = qword_field & ~((u64)0x1f);
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
|
|
+ param->log_rd_per_qp = byte_field & 0x7;
|
|
|
|
MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
|
|
if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
|
|
@@ -2125,22 +2140,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
|
|
/* steering attributes */
|
|
if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
|
|
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
|
|
- MLX4_GET(param->log_mc_entry_sz, outbox,
|
|
- INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
|
|
- MLX4_GET(param->log_mc_table_sz, outbox,
|
|
- INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
|
|
- MLX4_GET(byte_field, outbox,
|
|
- INIT_HCA_FS_A0_OFFSET);
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
|
|
+ param->log_mc_entry_sz = byte_field & 0x1f;
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
|
|
+ param->log_mc_table_sz = byte_field & 0x1f;
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
|
|
param->dmfs_high_steer_mode =
|
|
a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
|
|
} else {
|
|
MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
|
|
- MLX4_GET(param->log_mc_entry_sz, outbox,
|
|
- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
|
|
- MLX4_GET(param->log_mc_hash_sz, outbox,
|
|
- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
|
|
- MLX4_GET(param->log_mc_table_sz, outbox,
|
|
- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
|
|
+ param->log_mc_entry_sz = byte_field & 0x1f;
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
|
|
+ param->log_mc_hash_sz = byte_field & 0x1f;
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
|
|
+ param->log_mc_table_sz = byte_field & 0x1f;
|
|
}
|
|
|
|
/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
|
|
@@ -2164,15 +2178,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
|
|
/* TPT attributes */
|
|
|
|
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
|
|
- MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
|
|
- MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
|
|
+ param->mw_enabled = byte_field >> 7;
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
|
|
+ param->log_mpt_sz = byte_field & 0x3f;
|
|
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
|
|
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
|
|
|
|
/* UAR attributes */
|
|
|
|
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
|
|
- MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
|
|
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
|
|
+ param->log_uar_sz = byte_field & 0xf;
|
|
|
|
/* phv_check enable */
|
|
MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
|
|
index 4b4351141b94c..76b84d08a058b 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
|
|
@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
|
|
int i;
|
|
|
|
if (chunk->nsg > 0)
|
|
- pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
|
|
+ pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
|
|
PCI_DMA_BIDIRECTIONAL);
|
|
|
|
for (i = 0; i < chunk->npages; ++i)
|
|
- __free_pages(sg_page(&chunk->mem[i]),
|
|
- get_order(chunk->mem[i].length));
|
|
+ __free_pages(sg_page(&chunk->sg[i]),
|
|
+ get_order(chunk->sg[i].length));
|
|
}
|
|
|
|
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
|
|
@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
|
|
|
|
for (i = 0; i < chunk->npages; ++i)
|
|
dma_free_coherent(&dev->persist->pdev->dev,
|
|
- chunk->mem[i].length,
|
|
- lowmem_page_address(sg_page(&chunk->mem[i])),
|
|
- sg_dma_address(&chunk->mem[i]));
|
|
+ chunk->buf[i].size,
|
|
+ chunk->buf[i].addr,
|
|
+ chunk->buf[i].dma_addr);
|
|
}
|
|
|
|
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
|
|
@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
|
|
return 0;
|
|
}
|
|
|
|
-static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
|
|
- int order, gfp_t gfp_mask)
|
|
+static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
|
|
+ int order, gfp_t gfp_mask)
|
|
{
|
|
- void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
|
|
- &sg_dma_address(mem), gfp_mask);
|
|
- if (!buf)
|
|
+ buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
|
|
+ &buf->dma_addr, gfp_mask);
|
|
+ if (!buf->addr)
|
|
return -ENOMEM;
|
|
|
|
- if (offset_in_page(buf)) {
|
|
- dma_free_coherent(dev, PAGE_SIZE << order,
|
|
- buf, sg_dma_address(mem));
|
|
+ if (offset_in_page(buf->addr)) {
|
|
+ dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
|
|
+ buf->dma_addr);
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- sg_set_buf(mem, buf, PAGE_SIZE << order);
|
|
- sg_dma_len(mem) = PAGE_SIZE << order;
|
|
+ buf->size = PAGE_SIZE << order;
|
|
return 0;
|
|
}
|
|
|
|
@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
|
|
|
while (npages > 0) {
|
|
if (!chunk) {
|
|
- chunk = kmalloc_node(sizeof(*chunk),
|
|
+ chunk = kzalloc_node(sizeof(*chunk),
|
|
gfp_mask & ~(__GFP_HIGHMEM |
|
|
__GFP_NOWARN),
|
|
dev->numa_node);
|
|
if (!chunk) {
|
|
- chunk = kmalloc(sizeof(*chunk),
|
|
+ chunk = kzalloc(sizeof(*chunk),
|
|
gfp_mask & ~(__GFP_HIGHMEM |
|
|
__GFP_NOWARN));
|
|
if (!chunk)
|
|
goto fail;
|
|
}
|
|
+ chunk->coherent = coherent;
|
|
|
|
- sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
|
|
- chunk->npages = 0;
|
|
- chunk->nsg = 0;
|
|
+ if (!coherent)
|
|
+ sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
|
|
list_add_tail(&chunk->list, &icm->chunk_list);
|
|
}
|
|
|
|
@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
|
|
|
if (coherent)
|
|
ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
|
|
- &chunk->mem[chunk->npages],
|
|
- cur_order, mask);
|
|
+ &chunk->buf[chunk->npages],
|
|
+ cur_order, mask);
|
|
else
|
|
- ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
|
|
+ ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
|
|
cur_order, mask,
|
|
dev->numa_node);
|
|
|
|
@@ -205,7 +204,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
|
if (coherent)
|
|
++chunk->nsg;
|
|
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
|
|
- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
|
|
+ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
|
|
chunk->npages,
|
|
PCI_DMA_BIDIRECTIONAL);
|
|
|
|
@@ -220,7 +219,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
|
}
|
|
|
|
if (!coherent && chunk) {
|
|
- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
|
|
+ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
|
|
chunk->npages,
|
|
PCI_DMA_BIDIRECTIONAL);
|
|
|
|
@@ -320,7 +319,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
|
|
u64 idx;
|
|
struct mlx4_icm_chunk *chunk;
|
|
struct mlx4_icm *icm;
|
|
- struct page *page = NULL;
|
|
+ void *addr = NULL;
|
|
|
|
if (!table->lowmem)
|
|
return NULL;
|
|
@@ -336,28 +335,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
|
|
|
|
list_for_each_entry(chunk, &icm->chunk_list, list) {
|
|
for (i = 0; i < chunk->npages; ++i) {
|
|
+ dma_addr_t dma_addr;
|
|
+ size_t len;
|
|
+
|
|
+ if (table->coherent) {
|
|
+ len = chunk->buf[i].size;
|
|
+ dma_addr = chunk->buf[i].dma_addr;
|
|
+ addr = chunk->buf[i].addr;
|
|
+ } else {
|
|
+ struct page *page;
|
|
+
|
|
+ len = sg_dma_len(&chunk->sg[i]);
|
|
+ dma_addr = sg_dma_address(&chunk->sg[i]);
|
|
+
|
|
+ /* XXX: we should never do this for highmem
|
|
+ * allocation. This function either needs
|
|
+ * to be split, or the kernel virtual address
|
|
+ * return needs to be made optional.
|
|
+ */
|
|
+ page = sg_page(&chunk->sg[i]);
|
|
+ addr = lowmem_page_address(page);
|
|
+ }
|
|
+
|
|
if (dma_handle && dma_offset >= 0) {
|
|
- if (sg_dma_len(&chunk->mem[i]) > dma_offset)
|
|
- *dma_handle = sg_dma_address(&chunk->mem[i]) +
|
|
- dma_offset;
|
|
- dma_offset -= sg_dma_len(&chunk->mem[i]);
|
|
+ if (len > dma_offset)
|
|
+ *dma_handle = dma_addr + dma_offset;
|
|
+ dma_offset -= len;
|
|
}
|
|
+
|
|
/*
|
|
* DMA mapping can merge pages but not split them,
|
|
* so if we found the page, dma_handle has already
|
|
* been assigned to.
|
|
*/
|
|
- if (chunk->mem[i].length > offset) {
|
|
- page = sg_page(&chunk->mem[i]);
|
|
+ if (len > offset)
|
|
goto out;
|
|
- }
|
|
- offset -= chunk->mem[i].length;
|
|
+ offset -= len;
|
|
}
|
|
}
|
|
|
|
+ addr = NULL;
|
|
out:
|
|
mutex_unlock(&table->mutex);
|
|
- return page ? lowmem_page_address(page) + offset : NULL;
|
|
+ return addr ? addr + offset : NULL;
|
|
}
|
|
|
|
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
|
|
index c9169a490557c..d199874b1c074 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
|
|
@@ -47,11 +47,21 @@ enum {
|
|
MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
|
|
};
|
|
|
|
+struct mlx4_icm_buf {
|
|
+ void *addr;
|
|
+ size_t size;
|
|
+ dma_addr_t dma_addr;
|
|
+};
|
|
+
|
|
struct mlx4_icm_chunk {
|
|
struct list_head list;
|
|
int npages;
|
|
int nsg;
|
|
- struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
|
|
+ bool coherent;
|
|
+ union {
|
|
+ struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
|
|
+ struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
|
|
+ };
|
|
};
|
|
|
|
struct mlx4_icm {
|
|
@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
|
|
|
|
static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
|
|
{
|
|
- return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
|
|
+ if (iter->chunk->coherent)
|
|
+ return iter->chunk->buf[iter->page_idx].dma_addr;
|
|
+ else
|
|
+ return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
|
|
}
|
|
|
|
static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
|
|
{
|
|
- return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
|
|
+ if (iter->chunk->coherent)
|
|
+ return iter->chunk->buf[iter->page_idx].size;
|
|
+ else
|
|
+ return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
|
|
}
|
|
|
|
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
|
|
index 1183248029264..7c72b3b5eedfa 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
|
|
@@ -636,6 +636,7 @@ enum {
|
|
MLX5E_STATE_ASYNC_EVENTS_ENABLED,
|
|
MLX5E_STATE_OPENED,
|
|
MLX5E_STATE_DESTROYING,
|
|
+ MLX5E_STATE_XDP_TX_ENABLED,
|
|
};
|
|
|
|
struct mlx5e_rqt {
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
|
|
index ad6d471d00dd4..4a33c9a7cac7e 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
|
|
@@ -262,7 +262,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
|
|
int sq_num;
|
|
int i;
|
|
|
|
- if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
|
|
+ /* this flag is sufficient, no need to test internal sq state */
|
|
+ if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
|
|
return -ENETDOWN;
|
|
|
|
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
|
|
@@ -275,9 +276,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
|
|
|
|
sq = &priv->channels.c[sq_num]->xdpsq;
|
|
|
|
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
|
|
- return -ENETDOWN;
|
|
-
|
|
for (i = 0; i < n; i++) {
|
|
struct xdp_frame *xdpf = frames[i];
|
|
struct mlx5e_xdp_info xdpi;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
|
|
index 6dfab045925f0..4d096623178b9 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
|
|
@@ -49,6 +49,23 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
|
|
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
|
|
u32 flags);
|
|
|
|
+static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
|
|
+{
|
|
+ set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
|
|
+}
|
|
+
|
|
+static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
|
|
+{
|
|
+ clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
|
|
+ /* let other device's napi(s) see our new state */
|
|
+ synchronize_rcu();
|
|
+}
|
|
+
|
|
+static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
|
|
+{
|
|
+ return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
|
|
+}
|
|
+
|
|
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
|
|
{
|
|
struct mlx5_wq_cyc *wq = &sq->wq;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
|
|
index b70cb6fd164c4..1d66a4e22d64f 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
|
|
@@ -1771,7 +1771,7 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
|
|
|
|
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
|
|
{
|
|
- return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
|
|
+ return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
|
|
}
|
|
|
|
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
|
|
@@ -2903,6 +2903,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
|
|
|
|
mlx5e_build_tx2sq_maps(priv);
|
|
mlx5e_activate_channels(&priv->channels);
|
|
+ mlx5e_xdp_tx_enable(priv);
|
|
netif_tx_start_all_queues(priv->netdev);
|
|
|
|
if (MLX5_ESWITCH_MANAGER(priv->mdev))
|
|
@@ -2924,6 +2925,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
|
|
*/
|
|
netif_tx_stop_all_queues(priv->netdev);
|
|
netif_tx_disable(priv->netdev);
|
|
+ mlx5e_xdp_tx_disable(priv);
|
|
mlx5e_deactivate_channels(&priv->channels);
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
|
|
index 820fe85100b08..4dccc84fdcf2c 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
|
|
@@ -143,6 +143,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
|
|
|
|
s->tx_packets += sq_stats->packets;
|
|
s->tx_bytes += sq_stats->bytes;
|
|
+ s->tx_queue_dropped += sq_stats->dropped;
|
|
}
|
|
}
|
|
}
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
|
|
index 0b5ef6d4e8158..7185f0dd58ebd 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
|
|
@@ -732,6 +732,8 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
|
|
((struct ipv6hdr *)ip_p)->nexthdr;
|
|
}
|
|
|
|
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
|
|
+
|
|
static inline void mlx5e_handle_csum(struct net_device *netdev,
|
|
struct mlx5_cqe64 *cqe,
|
|
struct mlx5e_rq *rq,
|
|
@@ -754,6 +756,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
|
|
if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
|
|
goto csum_unnecessary;
|
|
|
|
+ /* CQE csum doesn't cover padding octets in short ethernet
|
|
+ * frames. And the pad field is appended prior to calculating
|
|
+ * and appending the FCS field.
|
|
+ *
|
|
+ * Detecting these padded frames requires to verify and parse
|
|
+ * IP headers, so we simply force all those small frames to be
|
|
+ * CHECKSUM_UNNECESSARY even if they are not padded.
|
|
+ */
|
|
+ if (short_frame(skb->len))
|
|
+ goto csum_unnecessary;
|
|
+
|
|
if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
|
|
if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
|
|
goto csum_unnecessary;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
index 9dabe9d4b2798..3fba80a8b436f 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
|
@@ -98,6 +98,7 @@ struct mlx5e_tc_flow_parse_attr {
|
|
struct ip_tunnel_info tun_info;
|
|
struct mlx5_flow_spec spec;
|
|
int num_mod_hdr_actions;
|
|
+ int max_mod_hdr_actions;
|
|
void *mod_hdr_actions;
|
|
int mirred_ifindex;
|
|
};
|
|
@@ -1888,9 +1889,9 @@ static struct mlx5_fields fields[] = {
|
|
OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
|
|
};
|
|
|
|
-/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
|
|
- * max from the SW pedit action. On success, it says how many HW actions were
|
|
- * actually parsed.
|
|
+/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
|
|
+ * max from the SW pedit action. On success, attr->num_mod_hdr_actions
|
|
+ * says how many HW actions were actually parsed.
|
|
*/
|
|
static int offload_pedit_fields(struct pedit_headers *masks,
|
|
struct pedit_headers *vals,
|
|
@@ -1914,9 +1915,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
|
|
add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
|
|
|
|
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
|
|
- action = parse_attr->mod_hdr_actions;
|
|
- max_actions = parse_attr->num_mod_hdr_actions;
|
|
- nactions = 0;
|
|
+ action = parse_attr->mod_hdr_actions +
|
|
+ parse_attr->num_mod_hdr_actions * action_size;
|
|
+
|
|
+ max_actions = parse_attr->max_mod_hdr_actions;
|
|
+ nactions = parse_attr->num_mod_hdr_actions;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(fields); i++) {
|
|
f = &fields[i];
|
|
@@ -2027,7 +2030,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
|
|
if (!parse_attr->mod_hdr_actions)
|
|
return -ENOMEM;
|
|
|
|
- parse_attr->num_mod_hdr_actions = max_actions;
|
|
+ parse_attr->max_mod_hdr_actions = max_actions;
|
|
return 0;
|
|
}
|
|
|
|
@@ -2073,9 +2076,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
|
|
goto out_err;
|
|
}
|
|
|
|
- err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
|
|
- if (err)
|
|
- goto out_err;
|
|
+ if (!parse_attr->mod_hdr_actions) {
|
|
+ err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
|
|
+ if (err)
|
|
+ goto out_err;
|
|
+ }
|
|
|
|
err = offload_pedit_fields(masks, vals, parse_attr, extack);
|
|
if (err < 0)
|
|
@@ -2133,6 +2138,7 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
|
|
|
|
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
|
|
struct tcf_exts *exts,
|
|
+ u32 match_actions,
|
|
struct netlink_ext_ack *extack)
|
|
{
|
|
const struct tc_action *a;
|
|
@@ -2143,7 +2149,11 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
|
|
u16 ethertype;
|
|
int nkeys, i;
|
|
|
|
- headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
|
|
+ if (match_actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
|
|
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
|
|
+ else
|
|
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
|
|
+
|
|
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
|
|
|
|
/* for non-IP we only re-write MACs, so we're okay */
|
|
@@ -2200,7 +2210,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
|
|
|
|
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
|
|
return modify_header_match_supported(&parse_attr->spec, exts,
|
|
- extack);
|
|
+ actions, extack);
|
|
|
|
return true;
|
|
}
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
|
|
index 6dacaeba2fbff..0b03d65474e93 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
|
|
@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
|
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
|
|
contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
|
|
if (unlikely(contig_wqebbs_room < num_wqebbs)) {
|
|
+#ifdef CONFIG_MLX5_EN_IPSEC
|
|
+ struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
|
|
+#endif
|
|
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
|
|
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
|
|
+#ifdef CONFIG_MLX5_EN_IPSEC
|
|
+ wqe->eth = cur_eth;
|
|
+#endif
|
|
}
|
|
|
|
/* fill wqe */
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
|
|
index d004957328f9c..3908ed5544740 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
|
|
@@ -1133,13 +1133,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
|
|
int err = 0;
|
|
u8 *smac_v;
|
|
|
|
- if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
|
|
- mlx5_core_warn(esw->dev,
|
|
- "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
|
|
- vport->vport);
|
|
- return -EPERM;
|
|
- }
|
|
-
|
|
esw_vport_cleanup_ingress_rules(esw, vport);
|
|
|
|
if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
|
|
@@ -1696,7 +1689,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
|
|
int vport_num;
|
|
int err;
|
|
|
|
- if (!MLX5_ESWITCH_MANAGER(dev))
|
|
+ if (!MLX5_VPORT_MANAGER(dev))
|
|
return 0;
|
|
|
|
esw_info(dev,
|
|
@@ -1765,7 +1758,7 @@ abort:
|
|
|
|
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
|
|
{
|
|
- if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
|
|
+ if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
|
|
return;
|
|
|
|
esw_info(esw->dev, "cleanup\n");
|
|
@@ -1812,13 +1805,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
|
|
mutex_lock(&esw->state_lock);
|
|
evport = &esw->vports[vport];
|
|
|
|
- if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
|
|
+ if (evport->info.spoofchk && !is_valid_ether_addr(mac))
|
|
mlx5_core_warn(esw->dev,
|
|
- "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
|
|
+ "Set invalid MAC while spoofchk is on, vport(%d)\n",
|
|
vport);
|
|
- err = -EPERM;
|
|
- goto unlock;
|
|
- }
|
|
|
|
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
|
|
if (err) {
|
|
@@ -1964,6 +1954,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
|
|
evport = &esw->vports[vport];
|
|
pschk = evport->info.spoofchk;
|
|
evport->info.spoofchk = spoofchk;
|
|
+ if (pschk && !is_valid_ether_addr(evport->info.mac))
|
|
+ mlx5_core_warn(esw->dev,
|
|
+ "Spoofchk in set while MAC is invalid, vport(%d)\n",
|
|
+ evport->vport);
|
|
if (evport->enabled && esw->mode == SRIOV_LEGACY)
|
|
err = esw_vport_ingress_config(esw, evport);
|
|
if (err)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
|
|
index 0d90b1b4a3d38..2d6168ee99e8a 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
|
|
@@ -511,14 +511,14 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
|
|
ktime_to_ns(ktime_get_real()));
|
|
|
|
/* Calculate period in seconds to call the overflow watchdog - to make
|
|
- * sure counter is checked at least once every wrap around.
|
|
+ * sure counter is checked at least twice every wrap around.
|
|
* The period is calculated as the minimum between max HW cycles count
|
|
* (The clock source mask) and max amount of cycles that can be
|
|
* multiplied by clock multiplier where the result doesn't exceed
|
|
* 64bits.
|
|
*/
|
|
overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
|
|
- overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
|
|
+ overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
|
|
|
|
ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
|
|
frac, &frac);
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
index 28132c7dc05f2..d5cea0a36e6a8 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
@@ -640,18 +640,19 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
|
|
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
|
|
{
|
|
struct mlx5_priv *priv = &mdev->priv;
|
|
- int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
|
|
+ int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
|
|
+ int irq = pci_irq_vector(mdev->pdev, vecidx);
|
|
|
|
- if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
|
|
+ if (!zalloc_cpumask_var(&priv->irq_info[vecidx].mask, GFP_KERNEL)) {
|
|
mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
|
|
return -ENOMEM;
|
|
}
|
|
|
|
cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
|
|
- priv->irq_info[i].mask);
|
|
+ priv->irq_info[vecidx].mask);
|
|
|
|
if (IS_ENABLED(CONFIG_SMP) &&
|
|
- irq_set_affinity_hint(irq, priv->irq_info[i].mask))
|
|
+ irq_set_affinity_hint(irq, priv->irq_info[vecidx].mask))
|
|
mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
|
|
|
|
return 0;
|
|
@@ -659,11 +660,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
|
|
|
|
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
|
|
{
|
|
+ int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
|
|
struct mlx5_priv *priv = &mdev->priv;
|
|
- int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
|
|
+ int irq = pci_irq_vector(mdev->pdev, vecidx);
|
|
|
|
irq_set_affinity_hint(irq, NULL);
|
|
- free_cpumask_var(priv->irq_info[i].mask);
|
|
+ free_cpumask_var(priv->irq_info[vecidx].mask);
|
|
}
|
|
|
|
static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
|
|
index 8a291eb36c64c..7338c9bac4e6a 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
|
|
@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
|
|
depends on IPV6 || IPV6=n
|
|
depends on NET_IPGRE || NET_IPGRE=n
|
|
depends on IPV6_GRE || IPV6_GRE=n
|
|
+ depends on VXLAN || VXLAN=n
|
|
select GENERIC_ALLOCATOR
|
|
select PARMAN
|
|
select MLXFW
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
|
|
index 5890fdfd62c37..a903e97793f9a 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
|
|
@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
|
|
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
|
|
u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
|
|
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
|
|
+ char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
|
|
+
|
|
+ memcpy(ncqe, cqe, q->elem_size);
|
|
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
|
|
|
|
if (sendq) {
|
|
struct mlxsw_pci_queue *sdq;
|
|
|
|
sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
|
|
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
|
|
- wqe_counter, cqe);
|
|
+ wqe_counter, ncqe);
|
|
q->u.cq.comp_sdq_count++;
|
|
} else {
|
|
struct mlxsw_pci_queue *rdq;
|
|
|
|
rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
|
|
mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
|
|
- wqe_counter, q->u.cq.v, cqe);
|
|
+ wqe_counter, q->u.cq.v, ncqe);
|
|
q->u.cq.comp_rdq_count++;
|
|
}
|
|
if (++items == credits)
|
|
break;
|
|
}
|
|
- if (items) {
|
|
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
|
|
+ if (items)
|
|
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
|
|
- }
|
|
}
|
|
|
|
static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
|
|
@@ -1365,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
|
|
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
|
|
|
|
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
|
|
- break;
|
|
+ return 0;
|
|
cond_resched();
|
|
} while (time_before(jiffies, end));
|
|
- return 0;
|
|
+ return -EBUSY;
|
|
}
|
|
|
|
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
|
|
index bb99f6d41fe0b..ffee38e36ce89 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
|
|
@@ -27,7 +27,7 @@
|
|
|
|
#define MLXSW_PCI_SW_RESET 0xF0010
|
|
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
|
|
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
|
|
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
|
|
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
|
|
#define MLXSW_PCI_FW_READY 0xA1844
|
|
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
|
|
@@ -53,6 +53,7 @@
|
|
#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
|
|
#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
|
|
#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
|
|
+#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE
|
|
#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
|
|
#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
|
|
#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
|
|
index f84b9c02fcc5e..280173b489624 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
|
|
@@ -845,8 +845,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
|
|
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
|
|
bool configure = false;
|
|
bool pfc = false;
|
|
+ u16 thres_cells;
|
|
+ u16 delay_cells;
|
|
bool lossy;
|
|
- u16 thres;
|
|
|
|
for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
|
|
if (prio_tc[j] == i) {
|
|
@@ -860,10 +861,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
|
|
continue;
|
|
|
|
lossy = !(pfc || pause_en);
|
|
- thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
|
|
- delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
|
|
- pause_en);
|
|
- mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
|
|
+ thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
|
|
+ delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
|
|
+ pfc, pause_en);
|
|
+ mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
|
|
+ thres_cells, lossy);
|
|
}
|
|
|
|
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
|
|
@@ -4298,6 +4300,25 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
|
|
dev_put(mlxsw_sp_port->dev);
|
|
}
|
|
|
|
+static void
|
|
+mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
|
|
+ struct net_device *lag_dev)
|
|
+{
|
|
+ struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
|
|
+ struct net_device *upper_dev;
|
|
+ struct list_head *iter;
|
|
+
|
|
+ if (netif_is_bridge_port(lag_dev))
|
|
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
|
|
+
|
|
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
|
|
+ if (!netif_is_bridge_port(upper_dev))
|
|
+ continue;
|
|
+ br_dev = netdev_master_upper_dev_get(upper_dev);
|
|
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
|
|
+ }
|
|
+}
|
|
+
|
|
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
|
|
{
|
|
char sldr_pl[MLXSW_REG_SLDR_LEN];
|
|
@@ -4490,6 +4511,10 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
|
|
|
|
/* Any VLANs configured on the port are no longer valid */
|
|
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
|
|
+ /* Make the LAG and its directly linked uppers leave bridges they
|
|
+ * are memeber in
|
|
+ */
|
|
+ mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
|
|
|
|
if (lag->ref_count == 1)
|
|
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
|
|
@@ -4738,12 +4763,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
|
|
lower_dev,
|
|
upper_dev);
|
|
} else if (netif_is_lag_master(upper_dev)) {
|
|
- if (info->linking)
|
|
+ if (info->linking) {
|
|
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
|
|
upper_dev);
|
|
- else
|
|
+ } else {
|
|
+ mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
|
|
+ false);
|
|
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
|
|
upper_dev);
|
|
+ }
|
|
} else if (netif_is_ovs_master(upper_dev)) {
|
|
if (info->linking)
|
|
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
|
|
index e3c6fe8b1d406..1dcf152b28138 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
|
|
@@ -75,7 +75,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
|
|
act_set = mlxsw_afa_block_first_set(rulei->act_block);
|
|
mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
|
|
|
|
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
|
|
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
|
|
+ if (err)
|
|
+ goto err_ptce2_write;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_ptce2_write:
|
|
+ cregion->ops->entry_remove(cregion, centry);
|
|
+ return err;
|
|
}
|
|
|
|
static void
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
|
|
index e171513bb32a6..30931a2c025bc 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
|
|
@@ -95,8 +95,9 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
|
|
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
|
|
return -EIO;
|
|
|
|
- max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
|
|
- if (rulei->priority > max_priority)
|
|
+ /* Priority range is 1..cap_kvd_size-1. */
|
|
+ max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
|
|
+ if (rulei->priority >= max_priority)
|
|
return -EINVAL;
|
|
|
|
/* Unlike in TC, in HW, higher number means higher priority. */
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
|
|
index a3db033d73990..b490589ef25c7 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
|
|
@@ -882,8 +882,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
|
|
static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
|
|
.type = MLXSW_SP_FID_TYPE_DUMMY,
|
|
.fid_size = sizeof(struct mlxsw_sp_fid),
|
|
- .start_index = MLXSW_SP_RFID_BASE - 1,
|
|
- .end_index = MLXSW_SP_RFID_BASE - 1,
|
|
+ .start_index = VLAN_N_VID - 1,
|
|
+ .end_index = VLAN_N_VID - 1,
|
|
.ops = &mlxsw_sp_fid_dummy_ops,
|
|
};
|
|
|
|
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
|
|
index 50080c60a2794..b606db9833e9e 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
|
|
@@ -292,30 +292,6 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
|
|
kfree(bridge_port);
|
|
}
|
|
|
|
-static bool
|
|
-mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
|
|
- bridge_port)
|
|
-{
|
|
- struct net_device *dev = bridge_port->dev;
|
|
- struct mlxsw_sp *mlxsw_sp;
|
|
-
|
|
- if (is_vlan_dev(dev))
|
|
- mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
|
|
- else
|
|
- mlxsw_sp = mlxsw_sp_lower_get(dev);
|
|
-
|
|
- /* In case ports were pulled from out of a bridged LAG, then
|
|
- * it's possible the reference count isn't zero, yet the bridge
|
|
- * port should be destroyed, as it's no longer an upper of ours.
|
|
- */
|
|
- if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
|
|
- return true;
|
|
- else if (bridge_port->ref_count == 0)
|
|
- return true;
|
|
- else
|
|
- return false;
|
|
-}
|
|
-
|
|
static struct mlxsw_sp_bridge_port *
|
|
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
|
|
struct net_device *brport_dev)
|
|
@@ -353,8 +329,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
|
|
{
|
|
struct mlxsw_sp_bridge_device *bridge_device;
|
|
|
|
- bridge_port->ref_count--;
|
|
- if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
|
|
+ if (--bridge_port->ref_count != 0)
|
|
return;
|
|
bridge_device = bridge_port->bridge_device;
|
|
mlxsw_sp_bridge_port_destroy(bridge_port);
|
|
@@ -1244,7 +1219,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
|
|
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
|
|
{
|
|
return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
|
|
- MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
|
|
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
|
|
}
|
|
|
|
static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
|
|
@@ -1301,7 +1276,7 @@ out:
|
|
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
|
|
const char *mac, u16 fid, bool adding,
|
|
enum mlxsw_reg_sfd_rec_action action,
|
|
- bool dynamic)
|
|
+ enum mlxsw_reg_sfd_rec_policy policy)
|
|
{
|
|
char *sfd_pl;
|
|
u8 num_rec;
|
|
@@ -1312,8 +1287,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
|
|
return -ENOMEM;
|
|
|
|
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
|
|
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
|
|
- mac, fid, action, local_port);
|
|
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
|
|
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
|
|
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
|
|
if (err)
|
|
@@ -1332,7 +1306,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
|
|
bool dynamic)
|
|
{
|
|
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
|
|
- MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
|
|
+ MLXSW_REG_SFD_REC_ACTION_NOP,
|
|
+ mlxsw_sp_sfd_rec_policy(dynamic));
|
|
}
|
|
|
|
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
|
|
@@ -1340,7 +1315,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
|
|
{
|
|
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
|
|
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
|
|
- false);
|
|
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
|
|
}
|
|
|
|
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
|
|
@@ -1816,7 +1791,7 @@ static void
|
|
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
|
|
struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
|
|
{
|
|
- u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
|
|
+ u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
|
|
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
|
|
|
|
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
|
|
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
|
|
index 20c9377e99cb2..1ce8b729929fe 100644
|
|
--- a/drivers/net/ethernet/microchip/lan743x_main.c
|
|
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
|
|
@@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
|
|
|
|
memset(&ksettings, 0, sizeof(ksettings));
|
|
phy_ethtool_get_link_ksettings(netdev, &ksettings);
|
|
- local_advertisement = phy_read(phydev, MII_ADVERTISE);
|
|
- if (local_advertisement < 0)
|
|
- return;
|
|
-
|
|
- remote_advertisement = phy_read(phydev, MII_LPA);
|
|
- if (remote_advertisement < 0)
|
|
- return;
|
|
+ local_advertisement =
|
|
+ ethtool_adv_to_mii_adv_t(phydev->advertising);
|
|
+ remote_advertisement =
|
|
+ ethtool_adv_to_mii_adv_t(phydev->lp_advertising);
|
|
|
|
lan743x_phy_update_flowcontrol(adapter,
|
|
ksettings.base.duplex,
|
|
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
|
|
index c6f4bab67a5fc..9e728ec82c218 100644
|
|
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
|
|
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
|
|
@@ -1603,6 +1603,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
|
|
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
|
|
rx_prod.bd_prod = cpu_to_le16(bd_prod);
|
|
rx_prod.cqe_prod = cpu_to_le16(cq_prod);
|
|
+
|
|
+ /* Make sure chain element is updated before ringing the doorbell */
|
|
+ dma_wmb();
|
|
+
|
|
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
|
|
index 209566f8097ba..88e60d6d93dc2 100644
|
|
--- a/drivers/net/ethernet/realtek/r8169.c
|
|
+++ b/drivers/net/ethernet/realtek/r8169.c
|
|
@@ -212,6 +212,8 @@ enum cfg_version {
|
|
};
|
|
|
|
static const struct pci_device_id rtl8169_pci_tbl[] = {
|
|
+ { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
|
|
+ { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
|
|
@@ -714,6 +716,7 @@ module_param(use_dac, int, 0);
|
|
MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
|
|
module_param_named(debug, debug.msg_enable, int, 0);
|
|
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
|
|
+MODULE_SOFTDEP("pre: realtek");
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_FIRMWARE(FIRMWARE_8168D_1);
|
|
MODULE_FIRMWARE(FIRMWARE_8168D_2);
|
|
@@ -1728,11 +1731,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp)
|
|
|
|
static bool rtl8169_update_counters(struct rtl8169_private *tp)
|
|
{
|
|
+ u8 val = RTL_R8(tp, ChipCmd);
|
|
+
|
|
/*
|
|
* Some chips are unable to dump tally counters when the receiver
|
|
- * is disabled.
|
|
+ * is disabled. If 0xff chip may be in a PCI power-save state.
|
|
*/
|
|
- if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
|
|
+ if (!(val & CmdRxEnb) || val == 0xff)
|
|
return true;
|
|
|
|
return rtl8169_do_counters(tp, CounterDump);
|
|
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
|
|
index defed0d0c51d3..e7f8ab6e43917 100644
|
|
--- a/drivers/net/ethernet/renesas/ravb_main.c
|
|
+++ b/drivers/net/ethernet/renesas/ravb_main.c
|
|
@@ -350,7 +350,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
|
|
int i;
|
|
|
|
priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
|
|
- ETH_HLEN + VLAN_HLEN;
|
|
+ ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
|
|
|
|
/* Allocate RX and TX skb rings */
|
|
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
|
|
@@ -533,13 +533,15 @@ static void ravb_rx_csum(struct sk_buff *skb)
|
|
{
|
|
u8 *hw_csum;
|
|
|
|
- /* The hardware checksum is 2 bytes appended to packet data */
|
|
- if (unlikely(skb->len < 2))
|
|
+ /* The hardware checksum is contained in sizeof(__sum16) (2) bytes
|
|
+ * appended to packet data
|
|
+ */
|
|
+ if (unlikely(skb->len < sizeof(__sum16)))
|
|
return;
|
|
- hw_csum = skb_tail_pointer(skb) - 2;
|
|
+ hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
|
|
skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
|
|
skb->ip_summed = CHECKSUM_COMPLETE;
|
|
- skb_trim(skb, skb->len - 2);
|
|
+ skb_trim(skb, skb->len - sizeof(__sum16));
|
|
}
|
|
|
|
/* Packet receive function for Ethernet AVB */
|
|
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
|
|
index 7c7cd9d94bcc1..3d0114ba2bfe5 100644
|
|
--- a/drivers/net/ethernet/socionext/sni_ave.c
|
|
+++ b/drivers/net/ethernet/socionext/sni_ave.c
|
|
@@ -1210,9 +1210,13 @@ static int ave_init(struct net_device *ndev)
|
|
|
|
priv->phydev = phydev;
|
|
|
|
- phy_ethtool_get_wol(phydev, &wol);
|
|
+ ave_ethtool_get_wol(ndev, &wol);
|
|
device_set_wakeup_capable(&ndev->dev, !!wol.supported);
|
|
|
|
+ /* set wol initial state disabled */
|
|
+ wol.wolopts = 0;
|
|
+ ave_ethtool_set_wol(ndev, &wol);
|
|
+
|
|
if (!phy_interface_is_rgmii(phydev))
|
|
phy_set_max_speed(phydev, SPEED_100);
|
|
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
|
|
index 20299f6f65fce..736e29635b772 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
|
|
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
|
|
static int dwmac4_rx_check_timestamp(void *desc)
|
|
{
|
|
struct dma_desc *p = (struct dma_desc *)desc;
|
|
+ unsigned int rdes0 = le32_to_cpu(p->des0);
|
|
+ unsigned int rdes1 = le32_to_cpu(p->des1);
|
|
+ unsigned int rdes3 = le32_to_cpu(p->des3);
|
|
u32 own, ctxt;
|
|
int ret = 1;
|
|
|
|
- own = p->des3 & RDES3_OWN;
|
|
- ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
|
|
+ own = rdes3 & RDES3_OWN;
|
|
+ ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
|
|
>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
|
|
|
|
if (likely(!own && ctxt)) {
|
|
- if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
|
|
+ if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
|
|
/* Corrupted value */
|
|
ret = -EINVAL;
|
|
else
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
|
|
index 6c5092e7771cd..c5e25580a43fa 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
|
|
@@ -263,6 +263,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
|
|
struct stmmac_extra_stats *x, u32 chan)
|
|
{
|
|
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
|
|
+ u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
|
|
int ret = 0;
|
|
|
|
/* ABNORMAL interrupts */
|
|
@@ -282,8 +283,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
|
|
x->normal_irq_n++;
|
|
|
|
if (likely(intr_status & XGMAC_RI)) {
|
|
- u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
|
|
- if (likely(value & XGMAC_RIE)) {
|
|
+ if (likely(intr_en & XGMAC_RIE)) {
|
|
x->rx_normal_irq_n++;
|
|
ret |= handle_rx;
|
|
}
|
|
@@ -295,7 +295,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
|
|
}
|
|
|
|
/* Clear interrupts */
|
|
- writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
|
|
+ writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
|
|
index 5710864fa8090..9caf79ba5ef16 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
|
|
@@ -692,25 +692,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
|
|
struct ethtool_eee *edata)
|
|
{
|
|
struct stmmac_priv *priv = netdev_priv(dev);
|
|
+ int ret;
|
|
|
|
- priv->eee_enabled = edata->eee_enabled;
|
|
-
|
|
- if (!priv->eee_enabled)
|
|
+ if (!edata->eee_enabled) {
|
|
stmmac_disable_eee_mode(priv);
|
|
- else {
|
|
+ } else {
|
|
/* We are asking for enabling the EEE but it is safe
|
|
* to verify all by invoking the eee_init function.
|
|
* In case of failure it will return an error.
|
|
*/
|
|
- priv->eee_enabled = stmmac_eee_init(priv);
|
|
- if (!priv->eee_enabled)
|
|
+ edata->eee_enabled = stmmac_eee_init(priv);
|
|
+ if (!edata->eee_enabled)
|
|
return -EOPNOTSUPP;
|
|
-
|
|
- /* Do not change tx_lpi_timer in case of failure */
|
|
- priv->tx_lpi_timer = edata->tx_lpi_timer;
|
|
}
|
|
|
|
- return phy_ethtool_set_eee(dev->phydev, edata);
|
|
+ ret = phy_ethtool_set_eee(dev->phydev, edata);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ priv->eee_enabled = edata->eee_enabled;
|
|
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
|
|
+ return 0;
|
|
}
|
|
|
|
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
index c4a35e932f052..5d83d6a7694b0 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
@@ -3525,27 +3525,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
|
|
struct stmmac_channel *ch =
|
|
container_of(napi, struct stmmac_channel, napi);
|
|
struct stmmac_priv *priv = ch->priv_data;
|
|
- int work_done = 0, work_rem = budget;
|
|
+ int work_done, rx_done = 0, tx_done = 0;
|
|
u32 chan = ch->index;
|
|
|
|
priv->xstats.napi_poll++;
|
|
|
|
- if (ch->has_tx) {
|
|
- int done = stmmac_tx_clean(priv, work_rem, chan);
|
|
+ if (ch->has_tx)
|
|
+ tx_done = stmmac_tx_clean(priv, budget, chan);
|
|
+ if (ch->has_rx)
|
|
+ rx_done = stmmac_rx(priv, budget, chan);
|
|
|
|
- work_done += done;
|
|
- work_rem -= done;
|
|
- }
|
|
-
|
|
- if (ch->has_rx) {
|
|
- int done = stmmac_rx(priv, work_rem, chan);
|
|
+ work_done = max(rx_done, tx_done);
|
|
+ work_done = min(work_done, budget);
|
|
|
|
- work_done += done;
|
|
- work_rem -= done;
|
|
- }
|
|
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
|
|
+ int stat;
|
|
|
|
- if (work_done < budget && napi_complete_done(napi, work_done))
|
|
stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
|
|
+ stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
|
|
+ &priv->xstats, chan);
|
|
+ if (stat && napi_reschedule(napi))
|
|
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
|
|
+ }
|
|
|
|
return work_done;
|
|
}
|
|
@@ -4194,6 +4195,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
|
|
return ret;
|
|
}
|
|
|
|
+ /* Rx Watchdog is available in the COREs newer than the 3.40.
|
|
+ * In some case, for example on bugged HW this feature
|
|
+ * has to be disable and this can be done by passing the
|
|
+ * riwt_off field from the platform.
|
|
+ */
|
|
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
|
|
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
|
|
+ priv->use_riwt = 1;
|
|
+ dev_info(priv->device,
|
|
+ "Enable RX Mitigation via HW Watchdog Timer\n");
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -4326,18 +4339,6 @@ int stmmac_dvr_probe(struct device *device,
|
|
if (flow_ctrl)
|
|
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
|
|
|
|
- /* Rx Watchdog is available in the COREs newer than the 3.40.
|
|
- * In some case, for example on bugged HW this feature
|
|
- * has to be disable and this can be done by passing the
|
|
- * riwt_off field from the platform.
|
|
- */
|
|
- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
|
|
- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
|
|
- priv->use_riwt = 1;
|
|
- dev_info(priv->device,
|
|
- "Enable RX Mitigation via HW Watchdog Timer\n");
|
|
- }
|
|
-
|
|
/* Setup channels NAPI */
|
|
maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
|
|
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
|
|
index c54a50dbd5ac2..d819e8eaba122 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
|
|
@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
|
|
*/
|
|
static void stmmac_pci_remove(struct pci_dev *pdev)
|
|
{
|
|
+ int i;
|
|
+
|
|
stmmac_dvr_remove(&pdev->dev);
|
|
+
|
|
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
|
|
+ if (pci_resource_len(pdev, i) == 0)
|
|
+ continue;
|
|
+ pcim_iounmap_regions(pdev, BIT(i));
|
|
+ break;
|
|
+ }
|
|
+
|
|
pci_disable_device(pdev);
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
|
|
index 531294f4978bc..58ea18af9813a 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
|
|
@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
|
|
/* Queue 0 is not AVB capable */
|
|
if (queue <= 0 || queue >= tx_queues_count)
|
|
return -EINVAL;
|
|
+ if (!priv->dma_cap.av)
|
|
+ return -EOPNOTSUPP;
|
|
if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
|
|
return -EOPNOTSUPP;
|
|
|
|
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
|
|
index 9020b084b9538..7ec4eb74fe216 100644
|
|
--- a/drivers/net/ethernet/sun/cassini.c
|
|
+++ b/drivers/net/ethernet/sun/cassini.c
|
|
@@ -1,22 +1,9 @@
|
|
-// SPDX-License-Identifier: GPL-2.0
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
|
|
*
|
|
* Copyright (C) 2004 Sun Microsystems Inc.
|
|
* Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
|
|
*
|
|
- * This program is free software; you can redistribute it and/or
|
|
- * modify it under the terms of the GNU General Public License as
|
|
- * published by the Free Software Foundation; either version 2 of the
|
|
- * License, or (at your option) any later version.
|
|
- *
|
|
- * This program is distributed in the hope that it will be useful,
|
|
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
- * GNU General Public License for more details.
|
|
- *
|
|
- * You should have received a copy of the GNU General Public License
|
|
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
|
|
- *
|
|
* This driver uses the sungem driver (c) David Miller
|
|
* (davem@redhat.com) as its basis.
|
|
*
|
|
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
|
|
index 13f3860496a86..ae5f05f03f880 100644
|
|
--- a/drivers/net/ethernet/sun/cassini.h
|
|
+++ b/drivers/net/ethernet/sun/cassini.h
|
|
@@ -1,23 +1,10 @@
|
|
-/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
|
|
* cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
|
|
*
|
|
* Copyright (C) 2004 Sun Microsystems Inc.
|
|
* Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
|
|
*
|
|
- * This program is free software; you can redistribute it and/or
|
|
- * modify it under the terms of the GNU General Public License as
|
|
- * published by the Free Software Foundation; either version 2 of the
|
|
- * License, or (at your option) any later version.
|
|
- *
|
|
- * This program is distributed in the hope that it will be useful,
|
|
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
- * GNU General Public License for more details.
|
|
- *
|
|
- * You should have received a copy of the GNU General Public License
|
|
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
|
|
- *
|
|
* vendor id: 0x108E (Sun Microsystems, Inc.)
|
|
* device id: 0xabba (Cassini)
|
|
* revision ids: 0x01 = Cassini
|
|
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
|
|
index 9319d84bf49f0..d84501441edde 100644
|
|
--- a/drivers/net/ethernet/sun/niu.c
|
|
+++ b/drivers/net/ethernet/sun/niu.c
|
|
@@ -8100,6 +8100,8 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
|
|
start += 3;
|
|
|
|
prop_len = niu_pci_eeprom_read(np, start + 4);
|
|
+ if (prop_len < 0)
|
|
+ return prop_len;
|
|
err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
|
|
if (err < 0)
|
|
return err;
|
|
@@ -8144,8 +8146,12 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
|
|
netif_printk(np, probe, KERN_DEBUG, np->dev,
|
|
"VPD_SCAN: Reading in property [%s] len[%d]\n",
|
|
namebuf, prop_len);
|
|
- for (i = 0; i < prop_len; i++)
|
|
- *prop_buf++ = niu_pci_eeprom_read(np, off + i);
|
|
+ for (i = 0; i < prop_len; i++) {
|
|
+ err = niu_pci_eeprom_read(np, off + i);
|
|
+ if (err >= 0)
|
|
+ *prop_buf = err;
|
|
+ ++prop_buf;
|
|
+ }
|
|
}
|
|
|
|
start += len;
|
|
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
|
|
index 1f612268c9987..d847f672a705f 100644
|
|
--- a/drivers/net/ethernet/ti/netcp_core.c
|
|
+++ b/drivers/net/ethernet/ti/netcp_core.c
|
|
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
|
|
const char *name;
|
|
char node_name[32];
|
|
|
|
- if (of_property_read_string(node, "label", &name) < 0) {
|
|
+ if (of_property_read_string(child, "label", &name) < 0) {
|
|
snprintf(node_name, sizeof(node_name), "%pOFn", child);
|
|
name = node_name;
|
|
}
|
|
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
|
|
index a0cd1c41cf5f0..2e6e11d8cf5cb 100644
|
|
--- a/drivers/net/geneve.c
|
|
+++ b/drivers/net/geneve.c
|
|
@@ -1426,9 +1426,13 @@ static void geneve_link_config(struct net_device *dev,
|
|
}
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
case AF_INET6: {
|
|
- struct rt6_info *rt = rt6_lookup(geneve->net,
|
|
- &info->key.u.ipv6.dst, NULL, 0,
|
|
- NULL, 0);
|
|
+ struct rt6_info *rt;
|
|
+
|
|
+ if (!__in6_dev_get(dev))
|
|
+ break;
|
|
+
|
|
+ rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
|
|
+ NULL, 0);
|
|
|
|
if (rt && rt->dst.dev)
|
|
ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
|
|
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
|
|
index 17e6dcd2eb424..c99dd3f1e6a84 100644
|
|
--- a/drivers/net/hamradio/6pack.c
|
|
+++ b/drivers/net/hamradio/6pack.c
|
|
@@ -523,10 +523,7 @@ static void resync_tnc(struct timer_list *t)
|
|
|
|
|
|
/* Start resync timer again -- the TNC might be still absent */
|
|
-
|
|
- del_timer(&sp->resync_t);
|
|
- sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
|
|
- add_timer(&sp->resync_t);
|
|
+ mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
|
|
}
|
|
|
|
static inline int tnc_init(struct sixpack *sp)
|
|
@@ -537,9 +534,7 @@ static inline int tnc_init(struct sixpack *sp)
|
|
|
|
sp->tty->ops->write(sp->tty, &inbyte, 1);
|
|
|
|
- del_timer(&sp->resync_t);
|
|
- sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
|
|
- add_timer(&sp->resync_t);
|
|
+ mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
|
|
|
|
return 0;
|
|
}
|
|
@@ -897,11 +892,8 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
|
|
/* if the state byte has been received, the TNC is present,
|
|
so the resync timer can be reset. */
|
|
|
|
- if (sp->tnc_state == TNC_IN_SYNC) {
|
|
- del_timer(&sp->resync_t);
|
|
- sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT;
|
|
- add_timer(&sp->resync_t);
|
|
- }
|
|
+ if (sp->tnc_state == TNC_IN_SYNC)
|
|
+ mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT);
|
|
|
|
sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
|
|
}
|
|
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
|
|
index 4a949569ec4c5..5fb541897863c 100644
|
|
--- a/drivers/net/ipvlan/ipvlan_main.c
|
|
+++ b/drivers/net/ipvlan/ipvlan_main.c
|
|
@@ -97,12 +97,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
|
|
err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
|
|
if (!err) {
|
|
mdev->l3mdev_ops = &ipvl_l3mdev_ops;
|
|
- mdev->priv_flags |= IFF_L3MDEV_MASTER;
|
|
+ mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
|
|
} else
|
|
goto fail;
|
|
} else if (port->mode == IPVLAN_MODE_L3S) {
|
|
/* Old mode was L3S */
|
|
- mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
|
|
+ mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
|
|
ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
|
|
mdev->l3mdev_ops = NULL;
|
|
}
|
|
@@ -162,7 +162,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
|
|
struct sk_buff *skb;
|
|
|
|
if (port->mode == IPVLAN_MODE_L3S) {
|
|
- dev->priv_flags &= ~IFF_L3MDEV_MASTER;
|
|
+ dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
|
|
ipvlan_unregister_nf_hook(dev_net(dev));
|
|
dev->l3mdev_ops = NULL;
|
|
}
|
|
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
|
|
index f7ebdcff53e47..4d66e4bb904a5 100644
|
|
--- a/drivers/net/phy/bcm87xx.c
|
|
+++ b/drivers/net/phy/bcm87xx.c
|
|
@@ -193,6 +193,7 @@ static struct phy_driver bcm87xx_driver[] = {
|
|
.phy_id = PHY_ID_BCM8706,
|
|
.phy_id_mask = 0xffffffff,
|
|
.name = "Broadcom BCM8706",
|
|
+ .features = PHY_10GBIT_FEC_FEATURES,
|
|
.flags = PHY_HAS_INTERRUPT,
|
|
.config_init = bcm87xx_config_init,
|
|
.config_aneg = bcm87xx_config_aneg,
|
|
@@ -205,6 +206,7 @@ static struct phy_driver bcm87xx_driver[] = {
|
|
.phy_id = PHY_ID_BCM8727,
|
|
.phy_id_mask = 0xffffffff,
|
|
.name = "Broadcom BCM8727",
|
|
+ .features = PHY_10GBIT_FEC_FEATURES,
|
|
.flags = PHY_HAS_INTERRUPT,
|
|
.config_init = bcm87xx_config_init,
|
|
.config_aneg = bcm87xx_config_aneg,
|
|
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
|
|
index 8022cd317f62b..1a4d04afb7f04 100644
|
|
--- a/drivers/net/phy/cortina.c
|
|
+++ b/drivers/net/phy/cortina.c
|
|
@@ -88,6 +88,7 @@ static struct phy_driver cortina_driver[] = {
|
|
.phy_id = PHY_ID_CS4340,
|
|
.phy_id_mask = 0xffffffff,
|
|
.name = "Cortina CS4340",
|
|
+ .features = PHY_10GBIT_FEATURES,
|
|
.config_init = gen10g_config_init,
|
|
.config_aneg = gen10g_config_aneg,
|
|
.read_status = cortina_read_status,
|
|
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
|
|
index edd4d44a386de..d3b36936f81e1 100644
|
|
--- a/drivers/net/phy/dp83640.c
|
|
+++ b/drivers/net/phy/dp83640.c
|
|
@@ -898,14 +898,14 @@ static void decode_txts(struct dp83640_private *dp83640,
|
|
struct phy_txts *phy_txts)
|
|
{
|
|
struct skb_shared_hwtstamps shhwtstamps;
|
|
+ struct dp83640_skb_info *skb_info;
|
|
struct sk_buff *skb;
|
|
- u64 ns;
|
|
u8 overflow;
|
|
+ u64 ns;
|
|
|
|
/* We must already have the skb that triggered this. */
|
|
-
|
|
+again:
|
|
skb = skb_dequeue(&dp83640->tx_queue);
|
|
-
|
|
if (!skb) {
|
|
pr_debug("have timestamp but tx_queue empty\n");
|
|
return;
|
|
@@ -920,6 +920,11 @@ static void decode_txts(struct dp83640_private *dp83640,
|
|
}
|
|
return;
|
|
}
|
|
+ skb_info = (struct dp83640_skb_info *)skb->cb;
|
|
+ if (time_after(jiffies, skb_info->tmo)) {
|
|
+ kfree_skb(skb);
|
|
+ goto again;
|
|
+ }
|
|
|
|
ns = phy2txts(phy_txts);
|
|
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
|
|
@@ -1472,6 +1477,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
|
|
static void dp83640_txtstamp(struct phy_device *phydev,
|
|
struct sk_buff *skb, int type)
|
|
{
|
|
+ struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
|
|
struct dp83640_private *dp83640 = phydev->priv;
|
|
|
|
switch (dp83640->hwts_tx_en) {
|
|
@@ -1484,6 +1490,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
|
|
/* fall through */
|
|
case HWTSTAMP_TX_ON:
|
|
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
|
|
+ skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
|
|
skb_queue_tail(&dp83640->tx_queue, skb);
|
|
break;
|
|
|
|
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
|
|
index cbec296107bdd..f5290adb49f0d 100644
|
|
--- a/drivers/net/phy/marvell.c
|
|
+++ b/drivers/net/phy/marvell.c
|
|
@@ -847,8 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
|
|
|
|
/* SGMII-to-Copper mode initialization */
|
|
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
|
|
- u32 pause;
|
|
-
|
|
/* Select page 18 */
|
|
err = marvell_set_page(phydev, 18);
|
|
if (err < 0)
|
|
@@ -871,16 +869,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
|
|
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
|
|
if (err < 0)
|
|
return err;
|
|
-
|
|
- /* There appears to be a bug in the 88e1512 when used in
|
|
- * SGMII to copper mode, where the AN advertisement register
|
|
- * clears the pause bits each time a negotiation occurs.
|
|
- * This means we can never be truely sure what was advertised,
|
|
- * so disable Pause support.
|
|
- */
|
|
- pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
|
|
- phydev->supported &= ~pause;
|
|
- phydev->advertising &= ~pause;
|
|
}
|
|
|
|
return m88e1318_config_init(phydev);
|
|
@@ -1042,6 +1030,39 @@ static int m88e1145_config_init(struct phy_device *phydev)
|
|
return 0;
|
|
}
|
|
|
|
+/* The VOD can be out of specification on link up. Poke an
|
|
+ * undocumented register, in an undocumented page, with a magic value
|
|
+ * to fix this.
|
|
+ */
|
|
+static int m88e6390_errata(struct phy_device *phydev)
|
|
+{
|
|
+ int err;
|
|
+
|
|
+ err = phy_write(phydev, MII_BMCR,
|
|
+ BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ usleep_range(300, 400);
|
|
+
|
|
+ err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ return genphy_soft_reset(phydev);
|
|
+}
|
|
+
|
|
+static int m88e6390_config_aneg(struct phy_device *phydev)
|
|
+{
|
|
+ int err;
|
|
+
|
|
+ err = m88e6390_errata(phydev);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ return m88e1510_config_aneg(phydev);
|
|
+}
|
|
+
|
|
/**
|
|
* fiber_lpa_to_ethtool_lpa_t
|
|
* @lpa: value of the MII_LPA register for fiber link
|
|
@@ -1397,7 +1418,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
|
|
* before enabling it if !phy_interrupt_is_valid()
|
|
*/
|
|
if (!phy_interrupt_is_valid(phydev))
|
|
- phy_read(phydev, MII_M1011_IEVENT);
|
|
+ __phy_read(phydev, MII_M1011_IEVENT);
|
|
|
|
/* Enable the WOL interrupt */
|
|
err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
|
|
@@ -2292,7 +2313,7 @@ static struct phy_driver marvell_drivers[] = {
|
|
.flags = PHY_HAS_INTERRUPT,
|
|
.probe = m88e6390_probe,
|
|
.config_init = &marvell_config_init,
|
|
- .config_aneg = &m88e1510_config_aneg,
|
|
+ .config_aneg = &m88e6390_config_aneg,
|
|
.read_status = &marvell_read_status,
|
|
.ack_interrupt = &marvell_ack_interrupt,
|
|
.config_intr = &marvell_config_intr,
|
|
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
|
|
index 2e59a8419b17e..66b9cfe692fc7 100644
|
|
--- a/drivers/net/phy/mdio_bus.c
|
|
+++ b/drivers/net/phy/mdio_bus.c
|
|
@@ -390,6 +390,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
|
|
if (IS_ERR(gpiod)) {
|
|
dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
|
|
bus->id);
|
|
+ device_del(&bus->dev);
|
|
return PTR_ERR(gpiod);
|
|
} else if (gpiod) {
|
|
bus->reset_gpiod = gpiod;
|
|
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
|
|
index ddc2c5ea37873..6ace118502b9f 100644
|
|
--- a/drivers/net/phy/meson-gxl.c
|
|
+++ b/drivers/net/phy/meson-gxl.c
|
|
@@ -233,6 +233,7 @@ static struct phy_driver meson_gxl_phy[] = {
|
|
.name = "Meson GXL Internal PHY",
|
|
.features = PHY_BASIC_FEATURES,
|
|
.flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT,
|
|
+ .soft_reset = genphy_soft_reset,
|
|
.config_init = meson_gxl_config_init,
|
|
.aneg_done = genphy_aneg_done,
|
|
.read_status = meson_gxl_read_status,
|
|
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
|
|
index 9265dea794120..22dfbd4c6aaf4 100644
|
|
--- a/drivers/net/phy/micrel.c
|
|
+++ b/drivers/net/phy/micrel.c
|
|
@@ -1076,6 +1076,7 @@ static struct phy_driver ksphy_driver[] = {
|
|
.driver_data = &ksz9021_type,
|
|
.probe = kszphy_probe,
|
|
.config_init = ksz9031_config_init,
|
|
+ .soft_reset = genphy_soft_reset,
|
|
.read_status = ksz9031_read_status,
|
|
.ack_interrupt = kszphy_ack_interrupt,
|
|
.config_intr = kszphy_config_intr,
|
|
@@ -1105,6 +1106,7 @@ static struct phy_driver ksphy_driver[] = {
|
|
.phy_id = PHY_ID_KSZ8873MLL,
|
|
.phy_id_mask = MICREL_PHY_ID_MASK,
|
|
.name = "Micrel KSZ8873MLL Switch",
|
|
+ .features = PHY_BASIC_FEATURES,
|
|
.config_init = kszphy_config_init,
|
|
.config_aneg = ksz8873mll_config_aneg,
|
|
.read_status = ksz8873mll_read_status,
|
|
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
|
|
index 26c41ede54a4f..5dd661fb662fe 100644
|
|
--- a/drivers/net/phy/phy_device.c
|
|
+++ b/drivers/net/phy/phy_device.c
|
|
@@ -61,6 +61,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
|
|
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
|
|
EXPORT_SYMBOL_GPL(phy_10gbit_features);
|
|
|
|
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
|
|
+EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
|
|
+
|
|
static const int phy_basic_ports_array[] = {
|
|
ETHTOOL_LINK_MODE_Autoneg_BIT,
|
|
ETHTOOL_LINK_MODE_TP_BIT,
|
|
@@ -102,6 +105,11 @@ static const int phy_10gbit_features_array[] = {
|
|
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
|
|
};
|
|
|
|
+const int phy_10gbit_fec_features_array[1] = {
|
|
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
|
|
+};
|
|
+EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array);
|
|
+
|
|
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
|
|
EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
|
|
|
|
@@ -184,6 +192,10 @@ static void features_init(void)
|
|
linkmode_set_bit_array(phy_10gbit_full_features_array,
|
|
ARRAY_SIZE(phy_10gbit_full_features_array),
|
|
phy_10gbit_full_features);
|
|
+ /* 10G FEC only */
|
|
+ linkmode_set_bit_array(phy_10gbit_fec_features_array,
|
|
+ ARRAY_SIZE(phy_10gbit_fec_features_array),
|
|
+ phy_10gbit_fec_features);
|
|
}
|
|
|
|
void phy_device_free(struct phy_device *phydev)
|
|
@@ -2184,6 +2196,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
|
|
{
|
|
int retval;
|
|
|
|
+ if (WARN_ON(!new_driver->features)) {
|
|
+ pr_err("%s: Driver features are missing\n", new_driver->name);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
|
|
new_driver->mdiodrv.driver.name = new_driver->name;
|
|
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
|
|
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
|
|
index 9b8dd0d0ee42c..b60c82065fd11 100644
|
|
--- a/drivers/net/phy/phylink.c
|
|
+++ b/drivers/net/phy/phylink.c
|
|
@@ -475,6 +475,17 @@ static void phylink_run_resolve(struct phylink *pl)
|
|
queue_work(system_power_efficient_wq, &pl->resolve);
|
|
}
|
|
|
|
+static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
|
|
+{
|
|
+ unsigned long state = pl->phylink_disable_state;
|
|
+
|
|
+ set_bit(bit, &pl->phylink_disable_state);
|
|
+ if (state == 0) {
|
|
+ queue_work(system_power_efficient_wq, &pl->resolve);
|
|
+ flush_work(&pl->resolve);
|
|
+ }
|
|
+}
|
|
+
|
|
static void phylink_fixed_poll(struct timer_list *t)
|
|
{
|
|
struct phylink *pl = container_of(t, struct phylink, link_poll);
|
|
@@ -928,9 +939,7 @@ void phylink_stop(struct phylink *pl)
|
|
if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
|
|
del_timer_sync(&pl->link_poll);
|
|
|
|
- set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
|
|
- queue_work(system_power_efficient_wq, &pl->resolve);
|
|
- flush_work(&pl->resolve);
|
|
+ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
|
|
}
|
|
EXPORT_SYMBOL_GPL(phylink_stop);
|
|
|
|
@@ -1637,9 +1646,7 @@ static void phylink_sfp_link_down(void *upstream)
|
|
|
|
ASSERT_RTNL();
|
|
|
|
- set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
|
|
- queue_work(system_power_efficient_wq, &pl->resolve);
|
|
- flush_work(&pl->resolve);
|
|
+ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
|
|
}
|
|
|
|
static void phylink_sfp_link_up(void *upstream)
|
|
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
|
|
index ad9db652874dc..fef701bfad62e 100644
|
|
--- a/drivers/net/phy/sfp-bus.c
|
|
+++ b/drivers/net/phy/sfp-bus.c
|
|
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
|
|
return ret;
|
|
}
|
|
}
|
|
+ bus->socket_ops->attach(bus->sfp);
|
|
if (bus->started)
|
|
bus->socket_ops->start(bus->sfp);
|
|
bus->netdev->sfp_bus = bus;
|
|
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
|
|
if (bus->registered) {
|
|
if (bus->started)
|
|
bus->socket_ops->stop(bus->sfp);
|
|
+ bus->socket_ops->detach(bus->sfp);
|
|
if (bus->phydev && ops && ops->disconnect_phy)
|
|
ops->disconnect_phy(bus->upstream);
|
|
}
|
|
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
|
|
index fd8bb998ae52d..68c8fbf099f87 100644
|
|
--- a/drivers/net/phy/sfp.c
|
|
+++ b/drivers/net/phy/sfp.c
|
|
@@ -184,6 +184,7 @@ struct sfp {
|
|
|
|
struct gpio_desc *gpio[GPIO_MAX];
|
|
|
|
+ bool attached;
|
|
unsigned int state;
|
|
struct delayed_work poll;
|
|
struct delayed_work timeout;
|
|
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
|
|
*/
|
|
switch (sfp->sm_mod_state) {
|
|
default:
|
|
- if (event == SFP_E_INSERT) {
|
|
+ if (event == SFP_E_INSERT && sfp->attached) {
|
|
sfp_module_tx_disable(sfp);
|
|
sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
|
|
}
|
|
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
|
|
mutex_unlock(&sfp->sm_mutex);
|
|
}
|
|
|
|
+static void sfp_attach(struct sfp *sfp)
|
|
+{
|
|
+ sfp->attached = true;
|
|
+ if (sfp->state & SFP_F_PRESENT)
|
|
+ sfp_sm_event(sfp, SFP_E_INSERT);
|
|
+}
|
|
+
|
|
+static void sfp_detach(struct sfp *sfp)
|
|
+{
|
|
+ sfp->attached = false;
|
|
+ sfp_sm_event(sfp, SFP_E_REMOVE);
|
|
+}
|
|
+
|
|
static void sfp_start(struct sfp *sfp)
|
|
{
|
|
sfp_sm_event(sfp, SFP_E_DEV_UP);
|
|
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
|
|
}
|
|
|
|
static const struct sfp_socket_ops sfp_module_ops = {
|
|
+ .attach = sfp_attach,
|
|
+ .detach = sfp_detach,
|
|
.start = sfp_start,
|
|
.stop = sfp_stop,
|
|
.module_info = sfp_module_info,
|
|
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
|
|
dev_info(sfp->dev, "Host maximum power %u.%uW\n",
|
|
sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
|
|
|
|
- sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
|
|
- if (!sfp->sfp_bus)
|
|
- return -ENOMEM;
|
|
-
|
|
/* Get the initial state, and always signal TX disable,
|
|
* since the network interface will not be up.
|
|
*/
|
|
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
|
|
sfp->state |= SFP_F_RATE_SELECT;
|
|
sfp_set_state(sfp, sfp->state);
|
|
sfp_module_tx_disable(sfp);
|
|
- rtnl_lock();
|
|
- if (sfp->state & SFP_F_PRESENT)
|
|
- sfp_sm_event(sfp, SFP_E_INSERT);
|
|
- rtnl_unlock();
|
|
|
|
for (i = 0; i < GPIO_MAX; i++) {
|
|
if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
|
|
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
|
|
dev_warn(sfp->dev,
|
|
"No tx_disable pin: SFP modules will always be emitting.\n");
|
|
|
|
+ sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
|
|
+ if (!sfp->sfp_bus)
|
|
+ return -ENOMEM;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
|
|
index 31b0acf337e27..64f54b0bbd8c4 100644
|
|
--- a/drivers/net/phy/sfp.h
|
|
+++ b/drivers/net/phy/sfp.h
|
|
@@ -7,6 +7,8 @@
|
|
struct sfp;
|
|
|
|
struct sfp_socket_ops {
|
|
+ void (*attach)(struct sfp *sfp);
|
|
+ void (*detach)(struct sfp *sfp);
|
|
void (*start)(struct sfp *sfp);
|
|
void (*stop)(struct sfp *sfp);
|
|
int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
|
|
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
|
|
index 22f3bdd8206cf..91247182bc527 100644
|
|
--- a/drivers/net/phy/teranetics.c
|
|
+++ b/drivers/net/phy/teranetics.c
|
|
@@ -80,6 +80,7 @@ static struct phy_driver teranetics_driver[] = {
|
|
.phy_id = PHY_ID_TN2020,
|
|
.phy_id_mask = 0xffffffff,
|
|
.name = "Teranetics TN2020",
|
|
+ .features = PHY_10GBIT_FEATURES,
|
|
.soft_reset = gen10g_no_soft_reset,
|
|
.aneg_done = teranetics_aneg_done,
|
|
.config_init = gen10g_config_init,
|
|
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
|
|
index 74a8782313cf5..bd6084e315de2 100644
|
|
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
|
|
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
|
|
@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
|
|
u16 val = 0;
|
|
int err;
|
|
|
|
- err = priv->phy_drv->read_status(phydev);
|
|
+ if (priv->phy_drv->read_status)
|
|
+ err = priv->phy_drv->read_status(phydev);
|
|
+ else
|
|
+ err = genphy_read_status(phydev);
|
|
if (err < 0)
|
|
return err;
|
|
|
|
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
|
|
index 62dc564b251d5..f22639f0116a4 100644
|
|
--- a/drivers/net/ppp/pppoe.c
|
|
+++ b/drivers/net/ppp/pppoe.c
|
|
@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
|
|
if (pskb_trim_rcsum(skb, len))
|
|
goto drop;
|
|
|
|
+ ph = pppoe_hdr(skb);
|
|
pn = pppoe_pernet(dev_net(dev));
|
|
|
|
/* Note that get_item does a sock_hold(), so sk_pppox(po)
|
|
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
|
|
index f03004f37eca6..276f800ed57fd 100644
|
|
--- a/drivers/net/tap.c
|
|
+++ b/drivers/net/tap.c
|
|
@@ -1177,8 +1177,6 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
|
|
goto err_kfree;
|
|
}
|
|
|
|
- skb_probe_transport_header(skb, ETH_HLEN);
|
|
-
|
|
/* Move network header to the right position for VLAN tagged packets */
|
|
if ((skb->protocol == htons(ETH_P_8021Q) ||
|
|
skb->protocol == htons(ETH_P_8021AD)) &&
|
|
@@ -1189,6 +1187,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
|
|
tap = rcu_dereference(q->tap);
|
|
if (tap) {
|
|
skb->dev = tap->dev;
|
|
+ skb_probe_transport_header(skb, ETH_HLEN);
|
|
dev_queue_xmit(skb);
|
|
} else {
|
|
kfree_skb(skb);
|
|
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
|
|
index 364f514d56d87..86db1205a3968 100644
|
|
--- a/drivers/net/team/team.c
|
|
+++ b/drivers/net/team/team.c
|
|
@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
|
|
}
|
|
}
|
|
|
|
-static bool __team_option_inst_tmp_find(const struct list_head *opts,
|
|
- const struct team_option_inst *needle)
|
|
-{
|
|
- struct team_option_inst *opt_inst;
|
|
-
|
|
- list_for_each_entry(opt_inst, opts, tmp_list)
|
|
- if (opt_inst == needle)
|
|
- return true;
|
|
- return false;
|
|
-}
|
|
-
|
|
static int __team_options_register(struct team *team,
|
|
const struct team_option *option,
|
|
size_t option_count)
|
|
@@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
|
|
int err = 0;
|
|
int i;
|
|
struct nlattr *nl_option;
|
|
- LIST_HEAD(opt_inst_list);
|
|
|
|
rtnl_lock();
|
|
|
|
@@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
|
|
struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
|
|
struct nlattr *attr;
|
|
struct nlattr *attr_data;
|
|
+ LIST_HEAD(opt_inst_list);
|
|
enum team_option_type opt_type;
|
|
int opt_port_ifindex = 0; /* != 0 for per-port options */
|
|
u32 opt_array_index = 0;
|
|
@@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
|
|
if (err)
|
|
goto team_put;
|
|
opt_inst->changed = true;
|
|
-
|
|
- /* dumb/evil user-space can send us duplicate opt,
|
|
- * keep only the last one
|
|
- */
|
|
- if (__team_option_inst_tmp_find(&opt_inst_list,
|
|
- opt_inst))
|
|
- continue;
|
|
-
|
|
list_add(&opt_inst->tmp_list, &opt_inst_list);
|
|
}
|
|
if (!opt_found) {
|
|
err = -ENOENT;
|
|
goto team_put;
|
|
}
|
|
- }
|
|
|
|
- err = team_nl_send_event_options_get(team, &opt_inst_list);
|
|
+ err = team_nl_send_event_options_get(team, &opt_inst_list);
|
|
+ if (err)
|
|
+ break;
|
|
+ }
|
|
|
|
team_put:
|
|
team_nl_team_put(team);
|
|
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
|
|
index 005020042be94..1e6f0da1fa8e7 100644
|
|
--- a/drivers/net/tun.c
|
|
+++ b/drivers/net/tun.c
|
|
@@ -852,10 +852,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
|
|
err = 0;
|
|
}
|
|
|
|
- rcu_assign_pointer(tfile->tun, tun);
|
|
- rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
|
|
- tun->numqueues++;
|
|
-
|
|
if (tfile->detached) {
|
|
tun_enable_queue(tfile);
|
|
} else {
|
|
@@ -866,12 +862,18 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
|
|
if (rtnl_dereference(tun->xdp_prog))
|
|
sock_set_flag(&tfile->sk, SOCK_XDP);
|
|
|
|
- tun_set_real_num_queues(tun);
|
|
-
|
|
/* device is allowed to go away first, so no need to hold extra
|
|
* refcnt.
|
|
*/
|
|
|
|
+ /* Publish tfile->tun and tun->tfiles only after we've fully
|
|
+ * initialized tfile; otherwise we risk using half-initialized
|
|
+ * object.
|
|
+ */
|
|
+ rcu_assign_pointer(tfile->tun, tun);
|
|
+ rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
|
|
+ tun->numqueues++;
|
|
+ tun_set_real_num_queues(tun);
|
|
out:
|
|
return err;
|
|
}
|
|
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
|
|
index 774e1ff01c9a9..735ad838e2ba8 100644
|
|
--- a/drivers/net/usb/qmi_wwan.c
|
|
+++ b/drivers/net/usb/qmi_wwan.c
|
|
@@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev)
|
|
dev->addr_len = 0;
|
|
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
|
|
dev->netdev_ops = &qmimux_netdev_ops;
|
|
+ dev->mtu = 1500;
|
|
dev->needs_free_netdev = true;
|
|
}
|
|
|
|
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
|
|
index f2d01cb6f958c..6e971628bb50a 100644
|
|
--- a/drivers/net/usb/smsc95xx.c
|
|
+++ b/drivers/net/usb/smsc95xx.c
|
|
@@ -1295,6 +1295,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
|
|
dev->net->features |= NETIF_F_RXCSUM;
|
|
|
|
dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
|
|
+ set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
|
|
|
|
smsc95xx_init_mac_address(dev);
|
|
|
|
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
|
|
index ea672145f6a66..f6a69b56f3e39 100644
|
|
--- a/drivers/net/virtio_net.c
|
|
+++ b/drivers/net/virtio_net.c
|
|
@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
|
|
#define VIRTIO_XDP_TX BIT(0)
|
|
#define VIRTIO_XDP_REDIR BIT(1)
|
|
|
|
+#define VIRTIO_XDP_FLAG BIT(0)
|
|
+
|
|
/* RX packet size EWMA. The average packet size is used to determine the packet
|
|
* buffer size when refilling RX rings. As the entire RX ring may be refilled
|
|
* at once, the weight is chosen so that the EWMA will be insensitive to short-
|
|
@@ -251,6 +253,21 @@ struct padded_vnet_hdr {
|
|
char padding[4];
|
|
};
|
|
|
|
+static bool is_xdp_frame(void *ptr)
|
|
+{
|
|
+ return (unsigned long)ptr & VIRTIO_XDP_FLAG;
|
|
+}
|
|
+
|
|
+static void *xdp_to_ptr(struct xdp_frame *ptr)
|
|
+{
|
|
+ return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
|
|
+}
|
|
+
|
|
+static struct xdp_frame *ptr_to_xdp(void *ptr)
|
|
+{
|
|
+ return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
|
|
+}
|
|
+
|
|
/* Converting between virtqueue no. and kernel tx/rx queue no.
|
|
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
|
|
*/
|
|
@@ -461,7 +478,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
|
|
|
|
sg_init_one(sq->sg, xdpf->data, xdpf->len);
|
|
|
|
- err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
|
|
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
|
|
+ GFP_ATOMIC);
|
|
if (unlikely(err))
|
|
return -ENOSPC; /* Caller handle free/refcnt */
|
|
|
|
@@ -481,36 +499,47 @@ static int virtnet_xdp_xmit(struct net_device *dev,
|
|
{
|
|
struct virtnet_info *vi = netdev_priv(dev);
|
|
struct receive_queue *rq = vi->rq;
|
|
- struct xdp_frame *xdpf_sent;
|
|
struct bpf_prog *xdp_prog;
|
|
struct send_queue *sq;
|
|
unsigned int len;
|
|
+ int packets = 0;
|
|
+ int bytes = 0;
|
|
int drops = 0;
|
|
int kicks = 0;
|
|
int ret, err;
|
|
+ void *ptr;
|
|
int i;
|
|
|
|
- sq = virtnet_xdp_sq(vi);
|
|
-
|
|
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
|
|
- ret = -EINVAL;
|
|
- drops = n;
|
|
- goto out;
|
|
- }
|
|
-
|
|
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
|
|
* indicate XDP resources have been successfully allocated.
|
|
*/
|
|
xdp_prog = rcu_dereference(rq->xdp_prog);
|
|
- if (!xdp_prog) {
|
|
- ret = -ENXIO;
|
|
+ if (!xdp_prog)
|
|
+ return -ENXIO;
|
|
+
|
|
+ sq = virtnet_xdp_sq(vi);
|
|
+
|
|
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
|
|
+ ret = -EINVAL;
|
|
drops = n;
|
|
goto out;
|
|
}
|
|
|
|
/* Free up any pending old buffers before queueing new ones. */
|
|
- while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
|
|
- xdp_return_frame(xdpf_sent);
|
|
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
|
|
+ if (likely(is_xdp_frame(ptr))) {
|
|
+ struct xdp_frame *frame = ptr_to_xdp(ptr);
|
|
+
|
|
+ bytes += frame->len;
|
|
+ xdp_return_frame(frame);
|
|
+ } else {
|
|
+ struct sk_buff *skb = ptr;
|
|
+
|
|
+ bytes += skb->len;
|
|
+ napi_consume_skb(skb, false);
|
|
+ }
|
|
+ packets++;
|
|
+ }
|
|
|
|
for (i = 0; i < n; i++) {
|
|
struct xdp_frame *xdpf = frames[i];
|
|
@@ -529,6 +558,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
|
|
}
|
|
out:
|
|
u64_stats_update_begin(&sq->stats.syncp);
|
|
+ sq->stats.bytes += bytes;
|
|
+ sq->stats.packets += packets;
|
|
sq->stats.xdp_tx += n;
|
|
sq->stats.xdp_tx_drops += drops;
|
|
sq->stats.kicks += kicks;
|
|
@@ -1329,20 +1360,28 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
|
|
return stats.packets;
|
|
}
|
|
|
|
-static void free_old_xmit_skbs(struct send_queue *sq)
|
|
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
|
|
{
|
|
- struct sk_buff *skb;
|
|
unsigned int len;
|
|
unsigned int packets = 0;
|
|
unsigned int bytes = 0;
|
|
+ void *ptr;
|
|
|
|
- while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
|
|
- pr_debug("Sent skb %p\n", skb);
|
|
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
|
|
+ if (likely(!is_xdp_frame(ptr))) {
|
|
+ struct sk_buff *skb = ptr;
|
|
|
|
- bytes += skb->len;
|
|
- packets++;
|
|
+ pr_debug("Sent skb %p\n", skb);
|
|
+
|
|
+ bytes += skb->len;
|
|
+ napi_consume_skb(skb, in_napi);
|
|
+ } else {
|
|
+ struct xdp_frame *frame = ptr_to_xdp(ptr);
|
|
|
|
- dev_consume_skb_any(skb);
|
|
+ bytes += frame->len;
|
|
+ xdp_return_frame(frame);
|
|
+ }
|
|
+ packets++;
|
|
}
|
|
|
|
/* Avoid overhead when no packets have been processed
|
|
@@ -1357,6 +1396,16 @@ static void free_old_xmit_skbs(struct send_queue *sq)
|
|
u64_stats_update_end(&sq->stats.syncp);
|
|
}
|
|
|
|
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
|
|
+{
|
|
+ if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
|
|
+ return false;
|
|
+ else if (q < vi->curr_queue_pairs)
|
|
+ return true;
|
|
+ else
|
|
+ return false;
|
|
+}
|
|
+
|
|
static void virtnet_poll_cleantx(struct receive_queue *rq)
|
|
{
|
|
struct virtnet_info *vi = rq->vq->vdev->priv;
|
|
@@ -1364,11 +1413,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
|
|
struct send_queue *sq = &vi->sq[index];
|
|
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
|
|
|
|
- if (!sq->napi.weight)
|
|
+ if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
|
|
return;
|
|
|
|
if (__netif_tx_trylock(txq)) {
|
|
- free_old_xmit_skbs(sq);
|
|
+ free_old_xmit_skbs(sq, true);
|
|
__netif_tx_unlock(txq);
|
|
}
|
|
|
|
@@ -1441,10 +1490,18 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
|
|
{
|
|
struct send_queue *sq = container_of(napi, struct send_queue, napi);
|
|
struct virtnet_info *vi = sq->vq->vdev->priv;
|
|
- struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
|
|
+ unsigned int index = vq2txq(sq->vq);
|
|
+ struct netdev_queue *txq;
|
|
|
|
+ if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
|
|
+ /* We don't need to enable cb for XDP */
|
|
+ napi_complete_done(napi, 0);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ txq = netdev_get_tx_queue(vi->dev, index);
|
|
__netif_tx_lock(txq, raw_smp_processor_id());
|
|
- free_old_xmit_skbs(sq);
|
|
+ free_old_xmit_skbs(sq, true);
|
|
__netif_tx_unlock(txq);
|
|
|
|
virtqueue_napi_complete(napi, sq->vq, 0);
|
|
@@ -1513,7 +1570,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
bool use_napi = sq->napi.weight;
|
|
|
|
/* Free up any pending old buffers before queueing new ones. */
|
|
- free_old_xmit_skbs(sq);
|
|
+ free_old_xmit_skbs(sq, false);
|
|
|
|
if (use_napi && kick)
|
|
virtqueue_enable_cb_delayed(sq->vq);
|
|
@@ -1556,7 +1613,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
if (!use_napi &&
|
|
unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
|
|
/* More just got used, free them then recheck. */
|
|
- free_old_xmit_skbs(sq);
|
|
+ free_old_xmit_skbs(sq, false);
|
|
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
|
|
netif_start_subqueue(dev, qnum);
|
|
virtqueue_disable_cb(sq->vq);
|
|
@@ -2394,6 +2451,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
|
|
return -ENOMEM;
|
|
}
|
|
|
|
+ old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
|
|
+ if (!prog && !old_prog)
|
|
+ return 0;
|
|
+
|
|
if (prog) {
|
|
prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
|
|
if (IS_ERR(prog))
|
|
@@ -2401,36 +2462,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
|
|
}
|
|
|
|
/* Make sure NAPI is not using any XDP TX queues for RX. */
|
|
- if (netif_running(dev))
|
|
- for (i = 0; i < vi->max_queue_pairs; i++)
|
|
+ if (netif_running(dev)) {
|
|
+ for (i = 0; i < vi->max_queue_pairs; i++) {
|
|
napi_disable(&vi->rq[i].napi);
|
|
+ virtnet_napi_tx_disable(&vi->sq[i].napi);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!prog) {
|
|
+ for (i = 0; i < vi->max_queue_pairs; i++) {
|
|
+ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
|
|
+ if (i == 0)
|
|
+ virtnet_restore_guest_offloads(vi);
|
|
+ }
|
|
+ synchronize_net();
|
|
+ }
|
|
|
|
- netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
|
|
err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
|
|
if (err)
|
|
goto err;
|
|
+ netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
|
|
vi->xdp_queue_pairs = xdp_qp;
|
|
|
|
- for (i = 0; i < vi->max_queue_pairs; i++) {
|
|
- old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
|
|
- rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
|
|
- if (i == 0) {
|
|
- if (!old_prog)
|
|
+ if (prog) {
|
|
+ for (i = 0; i < vi->max_queue_pairs; i++) {
|
|
+ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
|
|
+ if (i == 0 && !old_prog)
|
|
virtnet_clear_guest_offloads(vi);
|
|
- if (!prog)
|
|
- virtnet_restore_guest_offloads(vi);
|
|
}
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < vi->max_queue_pairs; i++) {
|
|
if (old_prog)
|
|
bpf_prog_put(old_prog);
|
|
- if (netif_running(dev))
|
|
+ if (netif_running(dev)) {
|
|
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
|
|
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
|
|
+ &vi->sq[i].napi);
|
|
+ }
|
|
}
|
|
|
|
return 0;
|
|
|
|
err:
|
|
- for (i = 0; i < vi->max_queue_pairs; i++)
|
|
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
|
|
+ if (!prog) {
|
|
+ virtnet_clear_guest_offloads(vi);
|
|
+ for (i = 0; i < vi->max_queue_pairs; i++)
|
|
+ rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
|
|
+ }
|
|
+
|
|
+ if (netif_running(dev)) {
|
|
+ for (i = 0; i < vi->max_queue_pairs; i++) {
|
|
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
|
|
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
|
|
+ &vi->sq[i].napi);
|
|
+ }
|
|
+ }
|
|
if (prog)
|
|
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
|
|
return err;
|
|
@@ -2586,16 +2673,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
|
|
put_page(vi->rq[i].alloc_frag.page);
|
|
}
|
|
|
|
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
|
|
-{
|
|
- if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
|
|
- return false;
|
|
- else if (q < vi->curr_queue_pairs)
|
|
- return true;
|
|
- else
|
|
- return false;
|
|
-}
|
|
-
|
|
static void free_unused_bufs(struct virtnet_info *vi)
|
|
{
|
|
void *buf;
|
|
@@ -2604,10 +2681,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
|
|
for (i = 0; i < vi->max_queue_pairs; i++) {
|
|
struct virtqueue *vq = vi->sq[i].vq;
|
|
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
|
|
- if (!is_xdp_raw_buffer_queue(vi, i))
|
|
+ if (!is_xdp_frame(buf))
|
|
dev_kfree_skb(buf);
|
|
else
|
|
- put_page(virt_to_head_page(buf));
|
|
+ xdp_return_frame(ptr_to_xdp(buf));
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
|
|
index 0565f8880199d..8f022964b2d1a 100644
|
|
--- a/drivers/net/vxlan.c
|
|
+++ b/drivers/net/vxlan.c
|
|
@@ -2072,7 +2072,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
|
|
struct pcpu_sw_netstats *tx_stats, *rx_stats;
|
|
union vxlan_addr loopback;
|
|
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
|
|
- struct net_device *dev = skb->dev;
|
|
+ struct net_device *dev;
|
|
int len = skb->len;
|
|
|
|
tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
|
|
@@ -2092,9 +2092,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
|
|
#endif
|
|
}
|
|
|
|
+ rcu_read_lock();
|
|
+ dev = skb->dev;
|
|
+ if (unlikely(!(dev->flags & IFF_UP))) {
|
|
+ kfree_skb(skb);
|
|
+ goto drop;
|
|
+ }
|
|
+
|
|
if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
|
|
- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
|
|
- vni);
|
|
+ vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
|
|
|
|
u64_stats_update_begin(&tx_stats->syncp);
|
|
tx_stats->tx_packets++;
|
|
@@ -2107,8 +2113,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
|
|
rx_stats->rx_bytes += len;
|
|
u64_stats_update_end(&rx_stats->syncp);
|
|
} else {
|
|
+drop:
|
|
dev->stats.rx_dropped++;
|
|
}
|
|
+ rcu_read_unlock();
|
|
}
|
|
|
|
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
|
|
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
|
|
index 4d6409605207c..af13d8cf94ad4 100644
|
|
--- a/drivers/net/wan/fsl_ucc_hdlc.c
|
|
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
|
|
@@ -1049,6 +1049,54 @@ static const struct net_device_ops uhdlc_ops = {
|
|
.ndo_tx_timeout = uhdlc_tx_timeout,
|
|
};
|
|
|
|
+static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
|
|
+{
|
|
+ struct device_node *np;
|
|
+ struct platform_device *pdev;
|
|
+ struct resource *res;
|
|
+ static int siram_init_flag;
|
|
+ int ret = 0;
|
|
+
|
|
+ np = of_find_compatible_node(NULL, NULL, name);
|
|
+ if (!np)
|
|
+ return -EINVAL;
|
|
+
|
|
+ pdev = of_find_device_by_node(np);
|
|
+ if (!pdev) {
|
|
+ pr_err("%pOFn: failed to lookup pdev\n", np);
|
|
+ of_node_put(np);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ of_node_put(np);
|
|
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
+ if (!res) {
|
|
+ ret = -EINVAL;
|
|
+ goto error_put_device;
|
|
+ }
|
|
+ *ptr = ioremap(res->start, resource_size(res));
|
|
+ if (!*ptr) {
|
|
+ ret = -ENOMEM;
|
|
+ goto error_put_device;
|
|
+ }
|
|
+
|
|
+ /* We've remapped the addresses, and we don't need the device any
|
|
+ * more, so we should release it.
|
|
+ */
|
|
+ put_device(&pdev->dev);
|
|
+
|
|
+ if (init_flag && siram_init_flag == 0) {
|
|
+ memset_io(*ptr, 0, resource_size(res));
|
|
+ siram_init_flag = 1;
|
|
+ }
|
|
+ return 0;
|
|
+
|
|
+error_put_device:
|
|
+ put_device(&pdev->dev);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
static int ucc_hdlc_probe(struct platform_device *pdev)
|
|
{
|
|
struct device_node *np = pdev->dev.of_node;
|
|
@@ -1143,6 +1191,15 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
|
|
ret = ucc_of_parse_tdm(np, utdm, ut_info);
|
|
if (ret)
|
|
goto free_utdm;
|
|
+
|
|
+ ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
|
|
+ (void __iomem **)&utdm->si_regs);
|
|
+ if (ret)
|
|
+ goto free_utdm;
|
|
+ ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
|
|
+ (void __iomem **)&utdm->siram);
|
|
+ if (ret)
|
|
+ goto unmap_si_regs;
|
|
}
|
|
|
|
if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
|
|
@@ -1151,7 +1208,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
|
|
ret = uhdlc_init(uhdlc_priv);
|
|
if (ret) {
|
|
dev_err(&pdev->dev, "Failed to init uhdlc\n");
|
|
- goto free_utdm;
|
|
+ goto undo_uhdlc_init;
|
|
}
|
|
|
|
dev = alloc_hdlcdev(uhdlc_priv);
|
|
@@ -1181,6 +1238,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
|
|
free_dev:
|
|
free_netdev(dev);
|
|
undo_uhdlc_init:
|
|
+ iounmap(utdm->siram);
|
|
+unmap_si_regs:
|
|
+ iounmap(utdm->si_regs);
|
|
free_utdm:
|
|
if (uhdlc_priv->tsa)
|
|
kfree(utdm);
|
|
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
|
|
index 1098263ab862e..46c3d983b7b75 100644
|
|
--- a/drivers/net/wan/x25_asy.c
|
|
+++ b/drivers/net/wan/x25_asy.c
|
|
@@ -485,8 +485,10 @@ static int x25_asy_open(struct net_device *dev)
|
|
|
|
/* Cleanup */
|
|
kfree(sl->xbuff);
|
|
+ sl->xbuff = NULL;
|
|
noxbuff:
|
|
kfree(sl->rbuff);
|
|
+ sl->rbuff = NULL;
|
|
norbuff:
|
|
return -ENOMEM;
|
|
}
|
|
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
|
|
index d210b0ed59beb..59fdda67f89f4 100644
|
|
--- a/drivers/net/wireless/ath/ath10k/core.c
|
|
+++ b/drivers/net/wireless/ath/ath10k/core.c
|
|
@@ -561,6 +561,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
|
.hw_ops = &wcn3990_ops,
|
|
.decap_align_bytes = 1,
|
|
.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
|
|
+ .n_cipher_suites = 8,
|
|
.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
|
|
.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
|
|
.target_64bit = true,
|
|
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
|
|
index b09cdc699c698..38afbbd9fb44f 100644
|
|
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
|
|
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
|
|
@@ -71,7 +71,7 @@ void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid
|
|
spin_lock_bh(&ar->data_lock);
|
|
|
|
peer = ath10k_peer_find_by_id(ar, peer_id);
|
|
- if (!peer)
|
|
+ if (!peer || !peer->sta)
|
|
goto out;
|
|
|
|
arsta = (struct ath10k_sta *)peer->sta->drv_priv;
|
|
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
|
|
index ffec98f7be505..2c2761d04d017 100644
|
|
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
|
|
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
|
|
@@ -2832,7 +2832,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
|
|
rcu_read_lock();
|
|
spin_lock_bh(&ar->data_lock);
|
|
peer = ath10k_peer_find_by_id(ar, peer_id);
|
|
- if (!peer) {
|
|
+ if (!peer || !peer->sta) {
|
|
ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
|
|
peer_id);
|
|
goto out;
|
|
@@ -2885,7 +2885,7 @@ static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
|
|
rcu_read_lock();
|
|
spin_lock_bh(&ar->data_lock);
|
|
peer = ath10k_peer_find_by_id(ar, peer_id);
|
|
- if (!peer) {
|
|
+ if (!peer || !peer->sta) {
|
|
ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
|
|
peer_id);
|
|
goto out;
|
|
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
|
|
index 7e49342bae384..400495858e4e9 100644
|
|
--- a/drivers/net/wireless/ath/ath10k/mac.c
|
|
+++ b/drivers/net/wireless/ath/ath10k/mac.c
|
|
@@ -6293,13 +6293,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
|
ar->num_stations + 1, ar->max_num_stations,
|
|
ar->num_peers + 1, ar->max_num_peers);
|
|
|
|
- if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
|
|
- arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
|
|
- GFP_KERNEL);
|
|
- if (!arsta->tx_stats)
|
|
- goto exit;
|
|
- }
|
|
-
|
|
num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
|
|
num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
|
|
|
|
@@ -6321,12 +6314,22 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
|
goto exit;
|
|
}
|
|
|
|
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
|
|
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
|
|
+ GFP_KERNEL);
|
|
+ if (!arsta->tx_stats) {
|
|
+ ret = -ENOMEM;
|
|
+ goto exit;
|
|
+ }
|
|
+ }
|
|
+
|
|
ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
|
|
sta->addr, peer_type);
|
|
if (ret) {
|
|
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
|
|
sta->addr, arvif->vdev_id, ret);
|
|
ath10k_mac_dec_num_stations(arvif, sta);
|
|
+ kfree(arsta->tx_stats);
|
|
goto exit;
|
|
}
|
|
|
|
@@ -6339,6 +6342,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
|
spin_unlock_bh(&ar->data_lock);
|
|
ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
|
|
ath10k_mac_dec_num_stations(arvif, sta);
|
|
+ kfree(arsta->tx_stats);
|
|
ret = -ENOENT;
|
|
goto exit;
|
|
}
|
|
@@ -6359,6 +6363,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
|
ath10k_peer_delete(ar, arvif->vdev_id,
|
|
sta->addr);
|
|
ath10k_mac_dec_num_stations(arvif, sta);
|
|
+ kfree(arsta->tx_stats);
|
|
goto exit;
|
|
}
|
|
|
|
@@ -6370,6 +6375,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
|
sta->addr, arvif->vdev_id, ret);
|
|
ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
|
|
ath10k_mac_dec_num_stations(arvif, sta);
|
|
+ kfree(arsta->tx_stats);
|
|
|
|
if (num_tdls_stations != 0)
|
|
goto exit;
|
|
@@ -6385,9 +6391,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
|
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
|
|
arvif->vdev_id, sta->addr, sta);
|
|
|
|
- if (ath10k_debug_is_extd_tx_stats_enabled(ar))
|
|
- kfree(arsta->tx_stats);
|
|
-
|
|
if (sta->tdls) {
|
|
ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
|
|
sta,
|
|
@@ -6427,6 +6430,11 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
|
}
|
|
spin_unlock_bh(&ar->data_lock);
|
|
|
|
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
|
|
+ kfree(arsta->tx_stats);
|
|
+ arsta->tx_stats = NULL;
|
|
+ }
|
|
+
|
|
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
|
|
ath10k_mac_txq_unref(ar, sta->txq[i]);
|
|
|
|
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
|
|
index 21ba20981a80b..0fca44e91a712 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
|
|
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
|
|
@@ -272,7 +272,7 @@ struct ath_node {
|
|
#endif
|
|
u8 key_idx[4];
|
|
|
|
- u32 ackto;
|
|
+ int ackto;
|
|
struct list_head list;
|
|
};
|
|
|
|
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
|
|
index 7334c9b09e82c..6e236a4854311 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/dynack.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
|
|
@@ -29,9 +29,13 @@
|
|
* ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
|
|
*
|
|
*/
|
|
-static inline u32 ath_dynack_ewma(u32 old, u32 new)
|
|
+static inline int ath_dynack_ewma(int old, int new)
|
|
{
|
|
- return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV;
|
|
+ if (old > 0)
|
|
+ return (new * (EWMA_DIV - EWMA_LEVEL) +
|
|
+ old * EWMA_LEVEL) / EWMA_DIV;
|
|
+ else
|
|
+ return new;
|
|
}
|
|
|
|
/**
|
|
@@ -82,10 +86,10 @@ static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac)
|
|
*/
|
|
static void ath_dynack_compute_ackto(struct ath_hw *ah)
|
|
{
|
|
- struct ath_node *an;
|
|
- u32 to = 0;
|
|
- struct ath_dynack *da = &ah->dynack;
|
|
struct ath_common *common = ath9k_hw_common(ah);
|
|
+ struct ath_dynack *da = &ah->dynack;
|
|
+ struct ath_node *an;
|
|
+ int to = 0;
|
|
|
|
list_for_each_entry(an, &da->nodes, list)
|
|
if (an->ackto > to)
|
|
@@ -144,7 +148,8 @@ static void ath_dynack_compute_to(struct ath_hw *ah)
|
|
an->ackto = ath_dynack_ewma(an->ackto,
|
|
ackto);
|
|
ath_dbg(ath9k_hw_common(ah), DYNACK,
|
|
- "%pM to %u\n", dst, an->ackto);
|
|
+ "%pM to %d [%u]\n", dst,
|
|
+ an->ackto, ackto);
|
|
if (time_is_before_jiffies(da->lto)) {
|
|
ath_dynack_compute_ackto(ah);
|
|
da->lto = jiffies + COMPUTE_TO;
|
|
@@ -166,10 +171,12 @@ static void ath_dynack_compute_to(struct ath_hw *ah)
|
|
* @ah: ath hw
|
|
* @skb: socket buffer
|
|
* @ts: tx status info
|
|
+ * @sta: station pointer
|
|
*
|
|
*/
|
|
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
|
|
- struct ath_tx_status *ts)
|
|
+ struct ath_tx_status *ts,
|
|
+ struct ieee80211_sta *sta)
|
|
{
|
|
u8 ridx;
|
|
struct ieee80211_hdr *hdr;
|
|
@@ -177,7 +184,7 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
|
|
struct ath_common *common = ath9k_hw_common(ah);
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
|
|
- if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled)
|
|
+ if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK))
|
|
return;
|
|
|
|
spin_lock_bh(&da->qlock);
|
|
@@ -187,11 +194,19 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
|
|
/* late ACK */
|
|
if (ts->ts_status & ATH9K_TXERR_XRETRY) {
|
|
if (ieee80211_is_assoc_req(hdr->frame_control) ||
|
|
- ieee80211_is_assoc_resp(hdr->frame_control)) {
|
|
+ ieee80211_is_assoc_resp(hdr->frame_control) ||
|
|
+ ieee80211_is_auth(hdr->frame_control)) {
|
|
ath_dbg(common, DYNACK, "late ack\n");
|
|
+
|
|
ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2);
|
|
ath9k_hw_set_ack_timeout(ah, LATEACK_TO);
|
|
ath9k_hw_set_cts_timeout(ah, LATEACK_TO);
|
|
+ if (sta) {
|
|
+ struct ath_node *an;
|
|
+
|
|
+ an = (struct ath_node *)sta->drv_priv;
|
|
+ an->ackto = -1;
|
|
+ }
|
|
da->lto = jiffies + LATEACK_DELAY;
|
|
}
|
|
|
|
@@ -251,7 +266,7 @@ void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
|
|
struct ath_common *common = ath9k_hw_common(ah);
|
|
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
|
|
|
- if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled)
|
|
+ if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1))
|
|
return;
|
|
|
|
spin_lock_bh(&da->qlock);
|
|
diff --git a/drivers/net/wireless/ath/ath9k/dynack.h b/drivers/net/wireless/ath/ath9k/dynack.h
|
|
index 6d7bef976742c..cf60224d40dff 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/dynack.h
|
|
+++ b/drivers/net/wireless/ath/ath9k/dynack.h
|
|
@@ -86,7 +86,8 @@ void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an);
|
|
void ath_dynack_init(struct ath_hw *ah);
|
|
void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts);
|
|
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
|
|
- struct ath_tx_status *ts);
|
|
+ struct ath_tx_status *ts,
|
|
+ struct ieee80211_sta *sta);
|
|
#else
|
|
static inline void ath_dynack_init(struct ath_hw *ah) {}
|
|
static inline void ath_dynack_node_init(struct ath_hw *ah,
|
|
@@ -97,7 +98,8 @@ static inline void ath_dynack_sample_ack_ts(struct ath_hw *ah,
|
|
struct sk_buff *skb, u32 ts) {}
|
|
static inline void ath_dynack_sample_tx_ts(struct ath_hw *ah,
|
|
struct sk_buff *skb,
|
|
- struct ath_tx_status *ts) {}
|
|
+ struct ath_tx_status *ts,
|
|
+ struct ieee80211_sta *sta) {}
|
|
#endif
|
|
|
|
#endif /* DYNACK_H */
|
|
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
|
|
index 25b3fc82d4ac8..f448d57166398 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/xmit.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
|
|
@@ -629,7 +629,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
|
|
if (bf == bf->bf_lastbf)
|
|
ath_dynack_sample_tx_ts(sc->sc_ah,
|
|
bf->bf_mpdu,
|
|
- ts);
|
|
+ ts, sta);
|
|
}
|
|
|
|
ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
|
|
@@ -773,7 +773,8 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
|
|
memcpy(info->control.rates, bf->rates,
|
|
sizeof(info->control.rates));
|
|
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
|
|
- ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
|
|
+ ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts,
|
|
+ sta);
|
|
}
|
|
ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
|
|
} else
|
|
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
|
|
index 398900a1c29e2..c54b008996ee6 100644
|
|
--- a/drivers/net/wireless/ath/wil6210/main.c
|
|
+++ b/drivers/net/wireless/ath/wil6210/main.c
|
|
@@ -998,10 +998,13 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
|
|
|
|
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
|
|
|
|
- /* Clear MAC link up */
|
|
- wil_s(wil, RGF_HP_CTRL, BIT(15));
|
|
- wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
|
|
- wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
|
|
+ if (wil->hw_version < HW_VER_TALYN) {
|
|
+ /* Clear MAC link up */
|
|
+ wil_s(wil, RGF_HP_CTRL, BIT(15));
|
|
+ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0,
|
|
+ BIT_HPAL_PERST_FROM_PAD);
|
|
+ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
|
|
+ }
|
|
|
|
wil_halt_cpu(wil);
|
|
|
|
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
|
|
index cc5f263cc9653..005c4ba9e8234 100644
|
|
--- a/drivers/net/wireless/ath/wil6210/txrx.c
|
|
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
|
|
@@ -1403,6 +1403,8 @@ found:
|
|
wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
|
|
wil_set_da_for_vring(wil, skb2, i);
|
|
wil_tx_ring(wil, vif, v2, skb2);
|
|
+ /* successful call to wil_tx_ring takes skb2 ref */
|
|
+ dev_kfree_skb_any(skb2);
|
|
} else {
|
|
wil_err(wil, "skb_copy failed\n");
|
|
}
|
|
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
|
|
index 85f2ca9895656..ef3ffa5ad4668 100644
|
|
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
|
|
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
|
|
@@ -616,7 +616,7 @@ struct b43_c32 b43_cordic(int theta)
|
|
u8 i;
|
|
s32 tmp;
|
|
s8 signx = 1;
|
|
- u32 angle = 0;
|
|
+ s32 angle = 0;
|
|
struct b43_c32 ret = { .i = 39797, .q = 0, };
|
|
|
|
while (theta > (180 << 16))
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
|
index 7f0a5bade70a6..c0e3ae7bf2ae0 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
|
@@ -5196,10 +5196,17 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
|
|
.del_pmk = brcmf_cfg80211_del_pmk,
|
|
};
|
|
|
|
-struct cfg80211_ops *brcmf_cfg80211_get_ops(void)
|
|
+struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
|
|
{
|
|
- return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
|
|
+ struct cfg80211_ops *ops;
|
|
+
|
|
+ ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
|
|
GFP_KERNEL);
|
|
+
|
|
+ if (ops && settings->roamoff)
|
|
+ ops->update_connect_params = NULL;
|
|
+
|
|
+ return ops;
|
|
}
|
|
|
|
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
|
|
index a4aec0004e4f1..9a6287f084a92 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
|
|
@@ -404,7 +404,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
|
|
void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
|
|
s32 brcmf_cfg80211_up(struct net_device *ndev);
|
|
s32 brcmf_cfg80211_down(struct net_device *ndev);
|
|
-struct cfg80211_ops *brcmf_cfg80211_get_ops(void);
|
|
+struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings);
|
|
enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
|
|
|
|
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
|
|
index b1f702faff4fb..860a4372cb564 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
|
|
@@ -1130,7 +1130,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
|
|
|
|
brcmf_dbg(TRACE, "Enter\n");
|
|
|
|
- ops = brcmf_cfg80211_get_ops();
|
|
+ ops = brcmf_cfg80211_get_ops(settings);
|
|
if (!ops)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
|
|
index 9095b830ae4d7..9927079a9ace4 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
|
|
@@ -641,8 +641,9 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
|
|
struct brcmf_fw_request *fwreq;
|
|
char chipname[12];
|
|
const char *mp_path;
|
|
+ size_t mp_path_len;
|
|
u32 i, j;
|
|
- char end;
|
|
+ char end = '\0';
|
|
size_t reqsz;
|
|
|
|
for (i = 0; i < table_size; i++) {
|
|
@@ -667,7 +668,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
|
|
mapping_table[i].fw_base, chipname);
|
|
|
|
mp_path = brcmf_mp_global.firmware_path;
|
|
- end = mp_path[strlen(mp_path) - 1];
|
|
+ mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN);
|
|
+ if (mp_path_len)
|
|
+ end = mp_path[mp_path_len - 1];
|
|
+
|
|
fwreq->n_items = n_fwnames;
|
|
|
|
for (j = 0; j < n_fwnames; j++) {
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
|
|
index 1dd23f846fb9c..f3ccd79483401 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
|
|
@@ -442,7 +442,7 @@ struct iwl_he_backoff_conf {
|
|
* Support for Nss x BW (or RU) matrix:
|
|
* (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
|
|
* Each entry contains 2 QAM thresholds for 8us and 16us:
|
|
- * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
|
|
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
|
|
* i.e. QAM_th1 < QAM_th2 such if TX uses QAM_tx:
|
|
* QAM_tx < QAM_th1 --> PPE=0us
|
|
* QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
index 00f831d88366d..5a42c617c54c1 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
@@ -2005,7 +2005,13 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
|
|
if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
|
|
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
|
|
|
|
- /* If PPE Thresholds exist, parse them into a FW-familiar format */
|
|
+ /*
|
|
+ * Initialize the PPE thresholds to "None" (7), as described in Table
|
|
+ * 9-262ac of 80211.ax/D3.0.
|
|
+ */
|
|
+ memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
|
|
+
|
|
+ /* If PPE Thresholds exist, parse them into a FW-familiar format. */
|
|
if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
|
|
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
|
|
u8 nss = (sta->he_cap.ppe_thres[0] &
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
|
|
index 7a98e1a1dc407..dabbc04853aca 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
|
|
@@ -98,8 +98,12 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
|
|
{
|
|
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
|
|
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
|
|
+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
|
|
u8 supp = 0;
|
|
|
|
+ if (he_cap && he_cap->has_he)
|
|
+ return 0;
|
|
+
|
|
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
|
|
supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
|
|
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
|
|
index 9e015212c2c0f..8d4711590dfc2 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
|
|
@@ -513,6 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
|
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
|
|
|
|
/* 9000 Series */
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
|
|
+ {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
|
|
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
|
|
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
|
|
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
|
|
index 3bfa7f5e35138..2e5bcb3fdff7a 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
|
|
@@ -1,3 +1,4 @@
|
|
+
|
|
/*
|
|
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
|
|
*
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
|
|
index 9273d2d2764ab..732f4b87fdcb6 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
|
|
@@ -116,9 +116,6 @@ void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
|
|
MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
|
|
}
|
|
|
|
- if (changed & BSS_CHANGED_ASSOC)
|
|
- mt76x0_phy_recalibrate_after_assoc(dev);
|
|
-
|
|
mutex_unlock(&dev->mt76.mutex);
|
|
}
|
|
EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed);
|
|
@@ -138,6 +135,12 @@ void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
|
|
struct mt76x02_dev *dev = hw->priv;
|
|
|
|
clear_bit(MT76_SCANNING, &dev->mt76.state);
|
|
+
|
|
+ if (dev->cal.gain_init_done) {
|
|
+ /* Restore AGC gain and resume calibration after scanning. */
|
|
+ dev->cal.low_gain = -1;
|
|
+ ieee80211_queue_delayed_work(hw, &dev->cal_work, 0);
|
|
+ }
|
|
}
|
|
EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
|
|
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
|
|
index 2187bafaf2e9b..0057f69d0c361 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
|
|
@@ -41,6 +41,11 @@ static inline bool is_mt7610e(struct mt76x02_dev *dev)
|
|
|
|
void mt76x0_init_debugfs(struct mt76x02_dev *dev);
|
|
|
|
+static inline bool is_mt7630(struct mt76x02_dev *dev)
|
|
+{
|
|
+ return mt76_chip(&dev->mt76) == 0x7630;
|
|
+}
|
|
+
|
|
/* Init */
|
|
struct mt76x02_dev *
|
|
mt76x0_alloc_device(struct device *pdev,
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
|
|
index cf024950e0ed0..c34abd1c6030f 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
|
|
@@ -215,62 +215,6 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
|
|
return 0;
|
|
}
|
|
|
|
-static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
|
|
-{
|
|
- u8 val;
|
|
-
|
|
- val = rf_rr(dev, MT_RF(0, 4));
|
|
- if ((val & 0x70) != 0x30)
|
|
- return;
|
|
-
|
|
- /*
|
|
- * Calibration Mode - Open loop, closed loop, and amplitude:
|
|
- * B0.R06.[0]: 1
|
|
- * B0.R06.[3:1] bp_close_code: 100
|
|
- * B0.R05.[7:0] bp_open_code: 0x0
|
|
- * B0.R04.[2:0] cal_bits: 000
|
|
- * B0.R03.[2:0] startup_time: 011
|
|
- * B0.R03.[6:4] settle_time:
|
|
- * 80MHz channel: 110
|
|
- * 40MHz channel: 101
|
|
- * 20MHz channel: 100
|
|
- */
|
|
- val = rf_rr(dev, MT_RF(0, 6));
|
|
- val &= ~0xf;
|
|
- val |= 0x09;
|
|
- rf_wr(dev, MT_RF(0, 6), val);
|
|
-
|
|
- val = rf_rr(dev, MT_RF(0, 5));
|
|
- if (val != 0)
|
|
- rf_wr(dev, MT_RF(0, 5), 0x0);
|
|
-
|
|
- val = rf_rr(dev, MT_RF(0, 4));
|
|
- val &= ~0x07;
|
|
- rf_wr(dev, MT_RF(0, 4), val);
|
|
-
|
|
- val = rf_rr(dev, MT_RF(0, 3));
|
|
- val &= ~0x77;
|
|
- if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) {
|
|
- val |= 0x63;
|
|
- } else if (channel == 3 || channel == 4 || channel == 10) {
|
|
- val |= 0x53;
|
|
- } else if (channel == 2 || channel == 5 || channel == 6 ||
|
|
- channel == 8 || channel == 11 || channel == 12) {
|
|
- val |= 0x43;
|
|
- } else {
|
|
- WARN(1, "Unknown channel %u\n", channel);
|
|
- return;
|
|
- }
|
|
- rf_wr(dev, MT_RF(0, 3), val);
|
|
-
|
|
- /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */
|
|
- val = rf_rr(dev, MT_RF(0, 4));
|
|
- val = ((val & ~(0x80)) | 0x80);
|
|
- rf_wr(dev, MT_RF(0, 4), val);
|
|
-
|
|
- msleep(2);
|
|
-}
|
|
-
|
|
static void
|
|
mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
|
|
{
|
|
@@ -518,21 +462,47 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
|
|
|
|
static void mt76x0_ant_select(struct mt76x02_dev *dev)
|
|
{
|
|
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
|
|
-
|
|
- /* single antenna mode */
|
|
- if (chan->band == NL80211_BAND_2GHZ) {
|
|
- mt76_rmw(dev, MT_COEXCFG3,
|
|
- BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
|
|
- mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
|
|
+ u16 ee_ant = mt76x02_eeprom_get(dev, MT_EE_ANTENNA);
|
|
+ u16 nic_conf2 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
|
|
+ u32 wlan, coex3, cmb;
|
|
+ bool ant_div;
|
|
+
|
|
+ wlan = mt76_rr(dev, MT_WLAN_FUN_CTRL);
|
|
+ cmb = mt76_rr(dev, MT_CMB_CTRL);
|
|
+ coex3 = mt76_rr(dev, MT_COEXCFG3);
|
|
+
|
|
+ cmb &= ~(BIT(14) | BIT(12));
|
|
+ wlan &= ~(BIT(6) | BIT(5));
|
|
+ coex3 &= ~GENMASK(5, 2);
|
|
+
|
|
+ if (ee_ant & MT_EE_ANTENNA_DUAL) {
|
|
+ /* dual antenna mode */
|
|
+ ant_div = !(nic_conf2 & MT_EE_NIC_CONF_2_ANT_OPT) &&
|
|
+ (nic_conf2 & MT_EE_NIC_CONF_2_ANT_DIV);
|
|
+ if (ant_div)
|
|
+ cmb |= BIT(12);
|
|
+ else
|
|
+ coex3 |= BIT(4);
|
|
+ coex3 |= BIT(3);
|
|
+ if (dev->mt76.cap.has_2ghz)
|
|
+ wlan |= BIT(6);
|
|
} else {
|
|
- mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(2),
|
|
- BIT(4) | BIT(3));
|
|
- mt76_clear(dev, MT_WLAN_FUN_CTRL,
|
|
- BIT(6) | BIT(5));
|
|
+ /* single antenna mode */
|
|
+ if (dev->mt76.cap.has_5ghz) {
|
|
+ coex3 |= BIT(3) | BIT(4);
|
|
+ } else {
|
|
+ wlan |= BIT(6);
|
|
+ coex3 |= BIT(1);
|
|
+ }
|
|
}
|
|
- mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
|
|
+
|
|
+ if (is_mt7630(dev))
|
|
+ cmb |= BIT(14) | BIT(11);
|
|
+
|
|
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, wlan);
|
|
+ mt76_wr(dev, MT_CMB_CTRL, cmb);
|
|
mt76_clear(dev, MT_COEXCFG0, BIT(2));
|
|
+ mt76_wr(dev, MT_COEXCFG3, coex3);
|
|
}
|
|
|
|
static void
|
|
@@ -585,8 +555,12 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
|
|
void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
|
|
{
|
|
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
|
|
+ int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
|
|
u32 val, tx_alc, reg_val;
|
|
|
|
+ if (is_mt7630(dev))
|
|
+ return;
|
|
+
|
|
if (power_on) {
|
|
mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
|
|
mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value,
|
|
@@ -602,7 +576,7 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
|
|
reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
|
|
mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
|
|
|
|
- if (chan->band == NL80211_BAND_5GHZ) {
|
|
+ if (is_5ghz) {
|
|
if (chan->hw_value < 100)
|
|
val = 0x701;
|
|
else if (chan->hw_value < 140)
|
|
@@ -615,7 +589,7 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
|
|
|
|
mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false);
|
|
msleep(350);
|
|
- mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false);
|
|
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
|
|
usleep_range(15000, 20000);
|
|
|
|
mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
|
|
@@ -696,7 +670,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
|
|
mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
|
|
mt76x02_phy_set_band(dev, chandef->chan->band,
|
|
ch_group_index & 1);
|
|
- mt76x0_ant_select(dev);
|
|
|
|
mt76_rmw(dev, MT_EXT_CCA_CFG,
|
|
(MT_EXT_CCA_CFG_CCA0 |
|
|
@@ -719,20 +692,16 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
|
|
|
|
mt76x0_read_rx_gain(dev);
|
|
mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
|
|
- mt76x02_init_agc_gain(dev);
|
|
|
|
- if (mt76_is_usb(dev)) {
|
|
- mt76x0_vco_cal(dev, channel);
|
|
- } else {
|
|
- /* enable vco */
|
|
- rf_set(dev, MT_RF(0, 4), BIT(7));
|
|
- }
|
|
+ /* enable vco */
|
|
+ rf_set(dev, MT_RF(0, 4), BIT(7));
|
|
|
|
if (scan)
|
|
return 0;
|
|
|
|
- if (mt76_is_mmio(dev))
|
|
- mt76x0_phy_calibrate(dev, false);
|
|
+ mt76x0_phy_calibrate(dev, false);
|
|
+ mt76x02_init_agc_gain(dev);
|
|
+
|
|
mt76x0_phy_set_txpower(dev);
|
|
|
|
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
|
|
@@ -741,39 +710,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
|
|
return 0;
|
|
}
|
|
|
|
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
|
|
-{
|
|
- u32 tx_alc, reg_val;
|
|
- u8 channel = dev->mt76.chandef.chan->hw_value;
|
|
- int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
|
|
-
|
|
- mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
|
|
-
|
|
- mt76x0_vco_cal(dev, channel);
|
|
-
|
|
- tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
|
|
- mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
|
|
- usleep_range(500, 700);
|
|
-
|
|
- reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
|
|
- mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
|
|
-
|
|
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false);
|
|
-
|
|
- mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
|
|
- mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false);
|
|
- mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
|
|
- mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false);
|
|
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false);
|
|
- mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false);
|
|
-
|
|
- mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
|
|
- mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
|
|
- msleep(100);
|
|
-
|
|
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
|
|
-}
|
|
-
|
|
static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
|
|
{
|
|
u8 rf_b7_73, rf_b0_66, rf_b0_67;
|
|
@@ -817,10 +753,8 @@ done:
|
|
static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
|
|
{
|
|
u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
|
|
- u32 val = 0x122c << 16 | 0xf2;
|
|
|
|
- mt76_wr(dev, MT_BBP(AGC, 8),
|
|
- val | FIELD_PREP(MT_BBP_AGC_GAIN, gain));
|
|
+ mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain);
|
|
}
|
|
|
|
static void
|
|
@@ -835,7 +769,8 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
|
|
low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
|
|
(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
|
|
|
|
- gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
|
|
+ gain_change = dev->cal.low_gain < 0 ||
|
|
+ (dev->cal.low_gain & 2) ^ (low_gain & 2);
|
|
dev->cal.low_gain = low_gain;
|
|
|
|
if (!gain_change) {
|
|
@@ -924,6 +859,7 @@ void mt76x0_phy_init(struct mt76x02_dev *dev)
|
|
{
|
|
INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
|
|
|
|
+ mt76x0_ant_select(dev);
|
|
mt76x0_rf_init(dev);
|
|
mt76x02_phy_set_rxpath(dev);
|
|
mt76x02_phy_set_txdac(dev);
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
|
|
index a7fd36c2f6330..76f25008491a5 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
|
|
@@ -117,6 +117,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
|
|
if (ret)
|
|
goto out;
|
|
|
|
+ mt76x0_phy_calibrate(dev, true);
|
|
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
|
|
MT_CALIBRATE_INTERVAL);
|
|
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
|
|
@@ -158,39 +159,49 @@ static const struct ieee80211_ops mt76x0u_ops = {
|
|
.wake_tx_queue = mt76_wake_tx_queue,
|
|
};
|
|
|
|
-static int mt76x0u_register_device(struct mt76x02_dev *dev)
|
|
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
|
|
{
|
|
- struct ieee80211_hw *hw = dev->mt76.hw;
|
|
int err;
|
|
|
|
- err = mt76u_alloc_queues(&dev->mt76);
|
|
- if (err < 0)
|
|
- goto out_err;
|
|
-
|
|
- err = mt76u_mcu_init_rx(&dev->mt76);
|
|
- if (err < 0)
|
|
- goto out_err;
|
|
-
|
|
mt76x0_chip_onoff(dev, true, true);
|
|
- if (!mt76x02_wait_for_mac(&dev->mt76)) {
|
|
- err = -ETIMEDOUT;
|
|
- goto out_err;
|
|
- }
|
|
+
|
|
+ if (!mt76x02_wait_for_mac(&dev->mt76))
|
|
+ return -ETIMEDOUT;
|
|
|
|
err = mt76x0u_mcu_init(dev);
|
|
if (err < 0)
|
|
- goto out_err;
|
|
+ return err;
|
|
|
|
mt76x0_init_usb_dma(dev);
|
|
err = mt76x0_init_hardware(dev);
|
|
if (err < 0)
|
|
- goto out_err;
|
|
+ return err;
|
|
|
|
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
|
|
mt76_wr(dev, MT_TXOP_CTRL_CFG,
|
|
FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
|
|
FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
|
|
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
|
|
+{
|
|
+ struct ieee80211_hw *hw = dev->mt76.hw;
|
|
+ int err;
|
|
+
|
|
+ err = mt76u_alloc_queues(&dev->mt76);
|
|
+ if (err < 0)
|
|
+ goto out_err;
|
|
+
|
|
+ err = mt76u_mcu_init_rx(&dev->mt76);
|
|
+ if (err < 0)
|
|
+ goto out_err;
|
|
+
|
|
+ err = mt76x0u_init_hardware(dev);
|
|
+ if (err < 0)
|
|
+ goto out_err;
|
|
+
|
|
err = mt76x0_register_device(dev);
|
|
if (err < 0)
|
|
goto out_err;
|
|
@@ -299,6 +310,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
|
|
|
|
mt76u_stop_queues(&dev->mt76);
|
|
mt76x0u_mac_stop(dev);
|
|
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
|
|
+ mt76x0_chip_onoff(dev, false, false);
|
|
usb_kill_urb(usb->mcu.res.urb);
|
|
|
|
return 0;
|
|
@@ -326,7 +339,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
|
|
tasklet_enable(&usb->rx_tasklet);
|
|
tasklet_enable(&usb->tx_tasklet);
|
|
|
|
- ret = mt76x0_init_hardware(dev);
|
|
+ ret = mt76x0u_init_hardware(dev);
|
|
if (ret)
|
|
goto err;
|
|
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
|
|
index 7806963b19052..9a5ae5c06840a 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
|
|
@@ -63,6 +63,7 @@ struct mt76x02_calibration {
|
|
bool tssi_comp_pending;
|
|
bool dpd_cal_done;
|
|
bool channel_cal_done;
|
|
+ bool gain_init_done;
|
|
};
|
|
|
|
struct mt76x02_dev {
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
|
|
index b3ec74835d10b..1de041590050b 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
|
|
@@ -25,6 +25,7 @@ enum mt76x02_eeprom_field {
|
|
MT_EE_VERSION = 0x002,
|
|
MT_EE_MAC_ADDR = 0x004,
|
|
MT_EE_PCI_ID = 0x00A,
|
|
+ MT_EE_ANTENNA = 0x022,
|
|
MT_EE_NIC_CONF_0 = 0x034,
|
|
MT_EE_NIC_CONF_1 = 0x036,
|
|
MT_EE_COUNTRY_REGION_5GHZ = 0x038,
|
|
@@ -104,6 +105,8 @@ enum mt76x02_eeprom_field {
|
|
__MT_EE_MAX
|
|
};
|
|
|
|
+#define MT_EE_ANTENNA_DUAL BIT(15)
|
|
+
|
|
#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
|
|
#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
|
|
#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
|
|
@@ -118,12 +121,9 @@ enum mt76x02_eeprom_field {
|
|
#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
|
|
#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
|
|
|
|
-#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
|
|
-#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
|
|
-#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
|
|
+#define MT_EE_NIC_CONF_2_ANT_OPT BIT(3)
|
|
+#define MT_EE_NIC_CONF_2_ANT_DIV BIT(4)
|
|
#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
|
|
-#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
|
|
-#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
|
|
|
|
#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
|
|
MT_EE_USAGE_MAP_START + 1)
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
|
|
index 0f1d7b5c9f68e..977a8e7e26dfd 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
|
|
@@ -254,5 +254,6 @@ void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
|
|
memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
|
|
sizeof(dev->cal.agc_gain_cur));
|
|
dev->cal.low_gain = -1;
|
|
+ dev->cal.gain_init_done = true;
|
|
}
|
|
EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c
|
|
index b56febae89452..764528c9f48ad 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c
|
|
@@ -800,7 +800,7 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev)
|
|
|
|
/* enable detection*/
|
|
mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
|
|
- mt76_wr(dev, 0x212c, 0x0c350001);
|
|
+ mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001);
|
|
}
|
|
|
|
void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev)
|
|
@@ -843,7 +843,11 @@ void mt76x2_dfs_init_params(struct mt76x02_dev *dev)
|
|
mt76_wr(dev, MT_BBP(DFS, 0), 0);
|
|
/* clear detector status */
|
|
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
|
|
- mt76_wr(dev, 0x212c, 0);
|
|
+ if (mt76_chip(&dev->mt76) == 0x7610 ||
|
|
+ mt76_chip(&dev->mt76) == 0x7630)
|
|
+ mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081);
|
|
+ else
|
|
+ mt76_wr(dev, MT_BBP(IBI, 11), 0);
|
|
|
|
mt76x02_irq_disable(dev, MT_INT_GPTIMER);
|
|
mt76_rmw_field(dev, MT_INT_TIMER_EN,
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
|
|
index 1971a1b000384..9471b44ce5589 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
|
|
@@ -156,6 +156,9 @@ mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
|
struct mt76x02_dev *dev = hw->priv;
|
|
|
|
clear_bit(MT76_SCANNING, &dev->mt76.state);
|
|
+
|
|
+ if (dev->cal.gain_init_done)
|
|
+ ieee80211_queue_delayed_work(hw, &dev->cal_work, 0);
|
|
}
|
|
|
|
const struct ieee80211_ops mt76x2u_ops = {
|
|
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
|
|
index bfdc1ad30c13a..659e7649fe22f 100644
|
|
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
|
|
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
|
|
@@ -84,7 +84,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
|
|
size_t *var_resp_size)
|
|
{
|
|
struct qlink_cmd *cmd;
|
|
- const struct qlink_resp *resp;
|
|
+ struct qlink_resp *resp = NULL;
|
|
struct sk_buff *resp_skb = NULL;
|
|
u16 cmd_id;
|
|
u8 mac_id;
|
|
@@ -113,7 +113,12 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
|
|
if (ret)
|
|
goto out;
|
|
|
|
- resp = (const struct qlink_resp *)resp_skb->data;
|
|
+ if (WARN_ON(!resp_skb || !resp_skb->data)) {
|
|
+ ret = -EFAULT;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ resp = (struct qlink_resp *)resp_skb->data;
|
|
ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
|
|
const_resp_size);
|
|
if (ret)
|
|
@@ -686,7 +691,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
|
|
struct sk_buff *cmd_skb, *resp_skb = NULL;
|
|
struct qlink_cmd_get_sta_info *cmd;
|
|
const struct qlink_resp_get_sta_info *resp;
|
|
- size_t var_resp_len;
|
|
+ size_t var_resp_len = 0;
|
|
int ret = 0;
|
|
|
|
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
|
|
@@ -1650,7 +1655,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
|
|
{
|
|
struct sk_buff *cmd_skb, *resp_skb = NULL;
|
|
const struct qlink_resp_get_mac_info *resp;
|
|
- size_t var_data_len;
|
|
+ size_t var_data_len = 0;
|
|
int ret = 0;
|
|
|
|
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
|
|
@@ -1680,8 +1685,8 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
|
|
{
|
|
struct sk_buff *cmd_skb, *resp_skb = NULL;
|
|
const struct qlink_resp_get_hw_info *resp;
|
|
+ size_t info_len = 0;
|
|
int ret = 0;
|
|
- size_t info_len;
|
|
|
|
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
|
|
QLINK_CMD_GET_HW_INFO,
|
|
@@ -1709,9 +1714,9 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
|
|
struct ieee80211_supported_band *band)
|
|
{
|
|
struct sk_buff *cmd_skb, *resp_skb = NULL;
|
|
- size_t info_len;
|
|
struct qlink_cmd_band_info_get *cmd;
|
|
struct qlink_resp_band_info_get *resp;
|
|
+ size_t info_len = 0;
|
|
int ret = 0;
|
|
u8 qband;
|
|
|
|
@@ -1764,8 +1769,8 @@ out:
|
|
int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
|
|
{
|
|
struct sk_buff *cmd_skb, *resp_skb = NULL;
|
|
- size_t response_size;
|
|
struct qlink_resp_phy_params *resp;
|
|
+ size_t response_size = 0;
|
|
int ret = 0;
|
|
|
|
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
|
|
@@ -2431,7 +2436,7 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
|
|
struct sk_buff *cmd_skb, *resp_skb = NULL;
|
|
struct qlink_cmd_get_chan_stats *cmd;
|
|
struct qlink_resp_get_chan_stats *resp;
|
|
- size_t var_data_len;
|
|
+ size_t var_data_len = 0;
|
|
int ret = 0;
|
|
|
|
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
|
|
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
|
|
index 67213f11acbd2..0a9eac93dd01a 100644
|
|
--- a/drivers/net/wireless/st/cw1200/scan.c
|
|
+++ b/drivers/net/wireless/st/cw1200/scan.c
|
|
@@ -78,6 +78,10 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
|
|
if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
|
|
return -EINVAL;
|
|
|
|
+ /* will be unlocked in cw1200_scan_work() */
|
|
+ down(&priv->scan.lock);
|
|
+ mutex_lock(&priv->conf_mutex);
|
|
+
|
|
frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
|
|
req->ie_len);
|
|
if (!frame.skb)
|
|
@@ -86,19 +90,15 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
|
|
if (req->ie_len)
|
|
skb_put_data(frame.skb, req->ie, req->ie_len);
|
|
|
|
- /* will be unlocked in cw1200_scan_work() */
|
|
- down(&priv->scan.lock);
|
|
- mutex_lock(&priv->conf_mutex);
|
|
-
|
|
ret = wsm_set_template_frame(priv, &frame);
|
|
if (!ret) {
|
|
/* Host want to be the probe responder. */
|
|
ret = wsm_set_probe_responder(priv, true);
|
|
}
|
|
if (ret) {
|
|
+ dev_kfree_skb(frame.skb);
|
|
mutex_unlock(&priv->conf_mutex);
|
|
up(&priv->scan.lock);
|
|
- dev_kfree_skb(frame.skb);
|
|
return ret;
|
|
}
|
|
|
|
@@ -120,10 +120,9 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
|
|
++priv->scan.n_ssids;
|
|
}
|
|
|
|
- mutex_unlock(&priv->conf_mutex);
|
|
-
|
|
if (frame.skb)
|
|
dev_kfree_skb(frame.skb);
|
|
+ mutex_unlock(&priv->conf_mutex);
|
|
queue_work(priv->workqueue, &priv->scan.work);
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
|
|
index 0e39e3d1846fe..d28418b05a04b 100644
|
|
--- a/drivers/nvdimm/pmem.c
|
|
+++ b/drivers/nvdimm/pmem.c
|
|
@@ -309,8 +309,11 @@ static void pmem_release_queue(void *q)
|
|
blk_cleanup_queue(q);
|
|
}
|
|
|
|
-static void pmem_freeze_queue(void *q)
|
|
+static void pmem_freeze_queue(struct percpu_ref *ref)
|
|
{
|
|
+ struct request_queue *q;
|
|
+
|
|
+ q = container_of(ref, typeof(*q), q_usage_counter);
|
|
blk_freeze_queue_start(q);
|
|
}
|
|
|
|
@@ -402,6 +405,7 @@ static int pmem_attach_disk(struct device *dev,
|
|
|
|
pmem->pfn_flags = PFN_DEV;
|
|
pmem->pgmap.ref = &q->q_usage_counter;
|
|
+ pmem->pgmap.kill = pmem_freeze_queue;
|
|
if (is_nd_pfn(dev)) {
|
|
if (setup_pagemap_fsdax(dev, &pmem->pgmap))
|
|
return -ENOMEM;
|
|
@@ -427,13 +431,6 @@ static int pmem_attach_disk(struct device *dev,
|
|
memcpy(&bb_res, &nsio->res, sizeof(bb_res));
|
|
}
|
|
|
|
- /*
|
|
- * At release time the queue must be frozen before
|
|
- * devm_memremap_pages is unwound
|
|
- */
|
|
- if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
|
|
- return -ENOMEM;
|
|
-
|
|
if (IS_ERR(addr))
|
|
return PTR_ERR(addr);
|
|
pmem->virt_addr = addr;
|
|
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
|
|
index 962012135b62a..5f9a5ef939696 100644
|
|
--- a/drivers/nvme/host/core.c
|
|
+++ b/drivers/nvme/host/core.c
|
|
@@ -2084,18 +2084,20 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
|
|
size_t nqnlen;
|
|
int off;
|
|
|
|
- nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
|
|
- if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
|
|
- strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
|
|
- return;
|
|
- }
|
|
+ if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
|
|
+ nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
|
|
+ if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
|
|
+ strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
|
|
+ return;
|
|
+ }
|
|
|
|
- if (ctrl->vs >= NVME_VS(1, 2, 1))
|
|
- dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
|
|
+ if (ctrl->vs >= NVME_VS(1, 2, 1))
|
|
+ dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
|
|
+ }
|
|
|
|
/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
|
|
off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
|
|
- "nqn.2014.08.org.nvmexpress:%4x%4x",
|
|
+ "nqn.2014.08.org.nvmexpress:%04x%04x",
|
|
le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
|
|
memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
|
|
off += sizeof(id->sn);
|
|
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
|
|
index a4f3b263cd6c6..d64805dc8efba 100644
|
|
--- a/drivers/nvme/host/lightnvm.c
|
|
+++ b/drivers/nvme/host/lightnvm.c
|
|
@@ -577,7 +577,8 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
|
|
struct ppa_addr ppa;
|
|
size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
|
|
size_t log_pos, offset, len;
|
|
- int ret, i, max_len;
|
|
+ int i, max_len;
|
|
+ int ret = 0;
|
|
|
|
/*
|
|
* limit requests to maximum 256K to avoid issuing arbitrary large
|
|
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
|
|
index 9901afd804ce3..2b1d1f066efaf 100644
|
|
--- a/drivers/nvme/host/multipath.c
|
|
+++ b/drivers/nvme/host/multipath.c
|
|
@@ -586,6 +586,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
|
|
return 0;
|
|
out_free_ana_log_buf:
|
|
kfree(ctrl->ana_log_buf);
|
|
+ ctrl->ana_log_buf = NULL;
|
|
out:
|
|
return error;
|
|
}
|
|
@@ -593,5 +594,6 @@ out:
|
|
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
|
|
{
|
|
kfree(ctrl->ana_log_buf);
|
|
+ ctrl->ana_log_buf = NULL;
|
|
}
|
|
|
|
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
|
|
index 081cbdcce8803..6ffa99a10a60b 100644
|
|
--- a/drivers/nvme/host/nvme.h
|
|
+++ b/drivers/nvme/host/nvme.h
|
|
@@ -90,6 +90,11 @@ enum nvme_quirks {
|
|
* Set MEDIUM priority on SQ creation
|
|
*/
|
|
NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
|
|
+
|
|
+ /*
|
|
+ * Ignore device provided subnqn.
|
|
+ */
|
|
+ NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),
|
|
};
|
|
|
|
/*
|
|
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
|
|
index c33bb201b8846..c0d01048ce4de 100644
|
|
--- a/drivers/nvme/host/pci.c
|
|
+++ b/drivers/nvme/host/pci.c
|
|
@@ -913,9 +913,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
|
|
|
|
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
|
|
{
|
|
- if (++nvmeq->cq_head == nvmeq->q_depth) {
|
|
+ if (nvmeq->cq_head == nvmeq->q_depth - 1) {
|
|
nvmeq->cq_head = 0;
|
|
nvmeq->cq_phase = !nvmeq->cq_phase;
|
|
+ } else {
|
|
+ nvmeq->cq_head++;
|
|
}
|
|
}
|
|
|
|
@@ -1748,8 +1750,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
|
|
struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
|
|
size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
|
|
|
|
- dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
|
|
- le64_to_cpu(desc->addr));
|
|
+ dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
|
|
+ le64_to_cpu(desc->addr),
|
|
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
|
|
}
|
|
|
|
kfree(dev->host_mem_desc_bufs);
|
|
@@ -1815,8 +1818,9 @@ out_free_bufs:
|
|
while (--i >= 0) {
|
|
size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
|
|
|
|
- dma_free_coherent(dev->dev, size, bufs[i],
|
|
- le64_to_cpu(descs[i].addr));
|
|
+ dma_free_attrs(dev->dev, size, bufs[i],
|
|
+ le64_to_cpu(descs[i].addr),
|
|
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
|
|
}
|
|
|
|
kfree(bufs);
|
|
@@ -2696,6 +2700,8 @@ static const struct pci_device_id nvme_id_table[] = {
|
|
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
|
|
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
|
|
NVME_QUIRK_MEDIUM_PRIO_SQ },
|
|
+ { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
|
|
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
|
|
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
|
|
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
|
|
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
|
|
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
|
|
index 583086dd9cb9a..bfc5ef6d85b76 100644
|
|
--- a/drivers/nvme/target/rdma.c
|
|
+++ b/drivers/nvme/target/rdma.c
|
|
@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
|
|
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
|
|
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
|
|
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
|
|
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
|
|
+ struct nvmet_rdma_rsp *r);
|
|
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
|
|
+ struct nvmet_rdma_rsp *r);
|
|
|
|
static const struct nvmet_fabrics_ops nvmet_rdma_ops;
|
|
|
|
@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
|
|
spin_unlock_irqrestore(&queue->rsps_lock, flags);
|
|
|
|
if (unlikely(!rsp)) {
|
|
- rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
|
|
+ int ret;
|
|
+
|
|
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
|
|
if (unlikely(!rsp))
|
|
return NULL;
|
|
+ ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
|
|
+ if (unlikely(ret)) {
|
|
+ kfree(rsp);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
rsp->allocated = true;
|
|
}
|
|
|
|
@@ -196,7 +208,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
|
|
{
|
|
unsigned long flags;
|
|
|
|
- if (rsp->allocated) {
|
|
+ if (unlikely(rsp->allocated)) {
|
|
+ nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
|
|
kfree(rsp);
|
|
return;
|
|
}
|
|
diff --git a/drivers/of/base.c b/drivers/of/base.c
|
|
index 09692c9b32a71..6d20b6dcf0340 100644
|
|
--- a/drivers/of/base.c
|
|
+++ b/drivers/of/base.c
|
|
@@ -116,9 +116,6 @@ int __weak of_node_to_nid(struct device_node *np)
|
|
}
|
|
#endif
|
|
|
|
-static struct device_node **phandle_cache;
|
|
-static u32 phandle_cache_mask;
|
|
-
|
|
/*
|
|
* Assumptions behind phandle_cache implementation:
|
|
* - phandle property values are in a contiguous range of 1..n
|
|
@@ -127,6 +124,66 @@ static u32 phandle_cache_mask;
|
|
* - the phandle lookup overhead reduction provided by the cache
|
|
* will likely be less
|
|
*/
|
|
+
|
|
+static struct device_node **phandle_cache;
|
|
+static u32 phandle_cache_mask;
|
|
+
|
|
+/*
|
|
+ * Caller must hold devtree_lock.
|
|
+ */
|
|
+static void __of_free_phandle_cache(void)
|
|
+{
|
|
+ u32 cache_entries = phandle_cache_mask + 1;
|
|
+ u32 k;
|
|
+
|
|
+ if (!phandle_cache)
|
|
+ return;
|
|
+
|
|
+ for (k = 0; k < cache_entries; k++)
|
|
+ of_node_put(phandle_cache[k]);
|
|
+
|
|
+ kfree(phandle_cache);
|
|
+ phandle_cache = NULL;
|
|
+}
|
|
+
|
|
+int of_free_phandle_cache(void)
|
|
+{
|
|
+ unsigned long flags;
|
|
+
|
|
+ raw_spin_lock_irqsave(&devtree_lock, flags);
|
|
+
|
|
+ __of_free_phandle_cache();
|
|
+
|
|
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#if !defined(CONFIG_MODULES)
|
|
+late_initcall_sync(of_free_phandle_cache);
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * Caller must hold devtree_lock.
|
|
+ */
|
|
+void __of_free_phandle_cache_entry(phandle handle)
|
|
+{
|
|
+ phandle masked_handle;
|
|
+ struct device_node *np;
|
|
+
|
|
+ if (!handle)
|
|
+ return;
|
|
+
|
|
+ masked_handle = handle & phandle_cache_mask;
|
|
+
|
|
+ if (phandle_cache) {
|
|
+ np = phandle_cache[masked_handle];
|
|
+ if (np && handle == np->phandle) {
|
|
+ of_node_put(np);
|
|
+ phandle_cache[masked_handle] = NULL;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
void of_populate_phandle_cache(void)
|
|
{
|
|
unsigned long flags;
|
|
@@ -136,8 +193,7 @@ void of_populate_phandle_cache(void)
|
|
|
|
raw_spin_lock_irqsave(&devtree_lock, flags);
|
|
|
|
- kfree(phandle_cache);
|
|
- phandle_cache = NULL;
|
|
+ __of_free_phandle_cache();
|
|
|
|
for_each_of_allnodes(np)
|
|
if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
|
|
@@ -155,30 +211,15 @@ void of_populate_phandle_cache(void)
|
|
goto out;
|
|
|
|
for_each_of_allnodes(np)
|
|
- if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
|
|
+ if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
|
|
+ of_node_get(np);
|
|
phandle_cache[np->phandle & phandle_cache_mask] = np;
|
|
+ }
|
|
|
|
out:
|
|
raw_spin_unlock_irqrestore(&devtree_lock, flags);
|
|
}
|
|
|
|
-int of_free_phandle_cache(void)
|
|
-{
|
|
- unsigned long flags;
|
|
-
|
|
- raw_spin_lock_irqsave(&devtree_lock, flags);
|
|
-
|
|
- kfree(phandle_cache);
|
|
- phandle_cache = NULL;
|
|
-
|
|
- raw_spin_unlock_irqrestore(&devtree_lock, flags);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-#if !defined(CONFIG_MODULES)
|
|
-late_initcall_sync(of_free_phandle_cache);
|
|
-#endif
|
|
-
|
|
void __init of_core_init(void)
|
|
{
|
|
struct device_node *np;
|
|
@@ -1190,13 +1231,23 @@ struct device_node *of_find_node_by_phandle(phandle handle)
|
|
if (phandle_cache[masked_handle] &&
|
|
handle == phandle_cache[masked_handle]->phandle)
|
|
np = phandle_cache[masked_handle];
|
|
+ if (np && of_node_check_flag(np, OF_DETACHED)) {
|
|
+ WARN_ON(1); /* did not uncache np on node removal */
|
|
+ of_node_put(np);
|
|
+ phandle_cache[masked_handle] = NULL;
|
|
+ np = NULL;
|
|
+ }
|
|
}
|
|
|
|
if (!np) {
|
|
for_each_of_allnodes(np)
|
|
- if (np->phandle == handle) {
|
|
- if (phandle_cache)
|
|
+ if (np->phandle == handle &&
|
|
+ !of_node_check_flag(np, OF_DETACHED)) {
|
|
+ if (phandle_cache) {
|
|
+ /* will put when removed from cache */
|
|
+ of_node_get(np);
|
|
phandle_cache[masked_handle] = np;
|
|
+ }
|
|
break;
|
|
}
|
|
}
|
|
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
|
|
index f4f8ed9b5454c..45c0b1f4cb69f 100644
|
|
--- a/drivers/of/dynamic.c
|
|
+++ b/drivers/of/dynamic.c
|
|
@@ -268,13 +268,13 @@ void __of_detach_node(struct device_node *np)
|
|
}
|
|
|
|
of_node_set_flag(np, OF_DETACHED);
|
|
+
|
|
+ /* race with of_find_node_by_phandle() prevented by devtree_lock */
|
|
+ __of_free_phandle_cache_entry(np->phandle);
|
|
}
|
|
|
|
/**
|
|
* of_detach_node() - "Unplug" a node from the device tree.
|
|
- *
|
|
- * The caller must hold a reference to the node. The memory associated with
|
|
- * the node is not freed until its refcount goes to zero.
|
|
*/
|
|
int of_detach_node(struct device_node *np)
|
|
{
|
|
@@ -330,6 +330,25 @@ void of_node_release(struct kobject *kobj)
|
|
if (!of_node_check_flag(node, OF_DYNAMIC))
|
|
return;
|
|
|
|
+ if (of_node_check_flag(node, OF_OVERLAY)) {
|
|
+
|
|
+ if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) {
|
|
+ /* premature refcount of zero, do not free memory */
|
|
+ pr_err("ERROR: memory leak before free overlay changeset, %pOF\n",
|
|
+ node);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If node->properties non-empty then properties were added
|
|
+ * to this node either by different overlay that has not
|
|
+ * yet been removed, or by a non-overlay mechanism.
|
|
+ */
|
|
+ if (node->properties)
|
|
+ pr_err("ERROR: %s(), unexpected properties in %pOF\n",
|
|
+ __func__, node);
|
|
+ }
|
|
+
|
|
property_list_free(node->properties);
|
|
property_list_free(node->deadprops);
|
|
|
|
@@ -434,6 +453,16 @@ struct device_node *__of_node_dup(const struct device_node *np,
|
|
|
|
static void __of_changeset_entry_destroy(struct of_changeset_entry *ce)
|
|
{
|
|
+ if (ce->action == OF_RECONFIG_ATTACH_NODE &&
|
|
+ of_node_check_flag(ce->np, OF_OVERLAY)) {
|
|
+ if (kref_read(&ce->np->kobj.kref) > 1) {
|
|
+ pr_err("ERROR: memory leak, expected refcount 1 instead of %d, of_node_get()/of_node_put() unbalanced - destroy cset entry: attach overlay node %pOF\n",
|
|
+ kref_read(&ce->np->kobj.kref), ce->np);
|
|
+ } else {
|
|
+ of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET);
|
|
+ }
|
|
+ }
|
|
+
|
|
of_node_put(ce->np);
|
|
list_del(&ce->node);
|
|
kfree(ce);
|
|
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
|
|
index 7a0a18980b98b..c72eef9880417 100644
|
|
--- a/drivers/of/kobj.c
|
|
+++ b/drivers/of/kobj.c
|
|
@@ -133,6 +133,9 @@ int __of_attach_node_sysfs(struct device_node *np)
|
|
}
|
|
if (!name)
|
|
return -ENOMEM;
|
|
+
|
|
+ of_node_get(np);
|
|
+
|
|
rc = kobject_add(&np->kobj, parent, "%s", name);
|
|
kfree(name);
|
|
if (rc)
|
|
@@ -159,6 +162,5 @@ void __of_detach_node_sysfs(struct device_node *np)
|
|
kobject_del(&np->kobj);
|
|
}
|
|
|
|
- /* finally remove the kobj_init ref */
|
|
of_node_put(np);
|
|
}
|
|
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
|
|
index 5d15670253587..24786818e32e5 100644
|
|
--- a/drivers/of/of_private.h
|
|
+++ b/drivers/of/of_private.h
|
|
@@ -84,6 +84,10 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {}
|
|
int of_resolve_phandles(struct device_node *tree);
|
|
#endif
|
|
|
|
+#if defined(CONFIG_OF_DYNAMIC)
|
|
+void __of_free_phandle_cache_entry(phandle handle);
|
|
+#endif
|
|
+
|
|
#if defined(CONFIG_OF_OVERLAY)
|
|
void of_overlay_mutex_lock(void);
|
|
void of_overlay_mutex_unlock(void);
|
|
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
|
|
index 42b1f73ac5f61..9808aae4621ad 100644
|
|
--- a/drivers/of/overlay.c
|
|
+++ b/drivers/of/overlay.c
|
|
@@ -23,6 +23,26 @@
|
|
|
|
#include "of_private.h"
|
|
|
|
+/**
|
|
+ * struct target - info about current target node as recursing through overlay
|
|
+ * @np: node where current level of overlay will be applied
|
|
+ * @in_livetree: @np is a node in the live devicetree
|
|
+ *
|
|
+ * Used in the algorithm to create the portion of a changeset that describes
|
|
+ * an overlay fragment, which is a devicetree subtree. Initially @np is a node
|
|
+ * in the live devicetree where the overlay subtree is targeted to be grafted
|
|
+ * into. When recursing to the next level of the overlay subtree, the target
|
|
+ * also recurses to the next level of the live devicetree, as long as overlay
|
|
+ * subtree node also exists in the live devicetree. When a node in the overlay
|
|
+ * subtree does not exist at the same level in the live devicetree, target->np
|
|
+ * points to a newly allocated node, and all subsequent targets in the subtree
|
|
+ * will be newly allocated nodes.
|
|
+ */
|
|
+struct target {
|
|
+ struct device_node *np;
|
|
+ bool in_livetree;
|
|
+};
|
|
+
|
|
/**
|
|
* struct fragment - info about fragment nodes in overlay expanded device tree
|
|
* @target: target of the overlay operation
|
|
@@ -72,8 +92,7 @@ static int devicetree_corrupt(void)
|
|
}
|
|
|
|
static int build_changeset_next_level(struct overlay_changeset *ovcs,
|
|
- struct device_node *target_node,
|
|
- const struct device_node *overlay_node);
|
|
+ struct target *target, const struct device_node *overlay_node);
|
|
|
|
/*
|
|
* of_resolve_phandles() finds the largest phandle in the live tree.
|
|
@@ -257,14 +276,17 @@ err_free_target_path:
|
|
/**
|
|
* add_changeset_property() - add @overlay_prop to overlay changeset
|
|
* @ovcs: overlay changeset
|
|
- * @target_node: where to place @overlay_prop in live tree
|
|
+ * @target: where @overlay_prop will be placed
|
|
* @overlay_prop: property to add or update, from overlay tree
|
|
* @is_symbols_prop: 1 if @overlay_prop is from node "/__symbols__"
|
|
*
|
|
- * If @overlay_prop does not already exist in @target_node, add changeset entry
|
|
- * to add @overlay_prop in @target_node, else add changeset entry to update
|
|
+ * If @overlay_prop does not already exist in live devicetree, add changeset
|
|
+ * entry to add @overlay_prop in @target, else add changeset entry to update
|
|
* value of @overlay_prop.
|
|
*
|
|
+ * @target may be either in the live devicetree or in a new subtree that
|
|
+ * is contained in the changeset.
|
|
+ *
|
|
* Some special properties are not updated (no error returned).
|
|
*
|
|
* Update of property in symbols node is not allowed.
|
|
@@ -273,20 +295,22 @@ err_free_target_path:
|
|
* invalid @overlay.
|
|
*/
|
|
static int add_changeset_property(struct overlay_changeset *ovcs,
|
|
- struct device_node *target_node,
|
|
- struct property *overlay_prop,
|
|
+ struct target *target, struct property *overlay_prop,
|
|
bool is_symbols_prop)
|
|
{
|
|
struct property *new_prop = NULL, *prop;
|
|
int ret = 0;
|
|
|
|
- prop = of_find_property(target_node, overlay_prop->name, NULL);
|
|
-
|
|
if (!of_prop_cmp(overlay_prop->name, "name") ||
|
|
!of_prop_cmp(overlay_prop->name, "phandle") ||
|
|
!of_prop_cmp(overlay_prop->name, "linux,phandle"))
|
|
return 0;
|
|
|
|
+ if (target->in_livetree)
|
|
+ prop = of_find_property(target->np, overlay_prop->name, NULL);
|
|
+ else
|
|
+ prop = NULL;
|
|
+
|
|
if (is_symbols_prop) {
|
|
if (prop)
|
|
return -EINVAL;
|
|
@@ -299,10 +323,10 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
|
|
return -ENOMEM;
|
|
|
|
if (!prop)
|
|
- ret = of_changeset_add_property(&ovcs->cset, target_node,
|
|
+ ret = of_changeset_add_property(&ovcs->cset, target->np,
|
|
new_prop);
|
|
else
|
|
- ret = of_changeset_update_property(&ovcs->cset, target_node,
|
|
+ ret = of_changeset_update_property(&ovcs->cset, target->np,
|
|
new_prop);
|
|
|
|
if (ret) {
|
|
@@ -315,14 +339,14 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
|
|
|
|
/**
|
|
* add_changeset_node() - add @node (and children) to overlay changeset
|
|
- * @ovcs: overlay changeset
|
|
- * @target_node: where to place @node in live tree
|
|
- * @node: node from within overlay device tree fragment
|
|
+ * @ovcs: overlay changeset
|
|
+ * @target: where @node will be placed in live tree or changeset
|
|
+ * @node: node from within overlay device tree fragment
|
|
*
|
|
- * If @node does not already exist in @target_node, add changeset entry
|
|
- * to add @node in @target_node.
|
|
+ * If @node does not already exist in @target, add changeset entry
|
|
+ * to add @node in @target.
|
|
*
|
|
- * If @node already exists in @target_node, and the existing node has
|
|
+ * If @node already exists in @target, and the existing node has
|
|
* a phandle, the overlay node is not allowed to have a phandle.
|
|
*
|
|
* If @node has child nodes, add the children recursively via
|
|
@@ -355,36 +379,46 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
|
|
* invalid @overlay.
|
|
*/
|
|
static int add_changeset_node(struct overlay_changeset *ovcs,
|
|
- struct device_node *target_node, struct device_node *node)
|
|
+ struct target *target, struct device_node *node)
|
|
{
|
|
const char *node_kbasename;
|
|
struct device_node *tchild;
|
|
+ struct target target_child;
|
|
int ret = 0;
|
|
|
|
node_kbasename = kbasename(node->full_name);
|
|
|
|
- for_each_child_of_node(target_node, tchild)
|
|
+ for_each_child_of_node(target->np, tchild)
|
|
if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name)))
|
|
break;
|
|
|
|
if (!tchild) {
|
|
- tchild = __of_node_dup(node, node_kbasename);
|
|
+ tchild = __of_node_dup(NULL, node_kbasename);
|
|
if (!tchild)
|
|
return -ENOMEM;
|
|
|
|
- tchild->parent = target_node;
|
|
+ tchild->parent = target->np;
|
|
+ of_node_set_flag(tchild, OF_OVERLAY);
|
|
|
|
ret = of_changeset_attach_node(&ovcs->cset, tchild);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- return build_changeset_next_level(ovcs, tchild, node);
|
|
+ target_child.np = tchild;
|
|
+ target_child.in_livetree = false;
|
|
+
|
|
+ ret = build_changeset_next_level(ovcs, &target_child, node);
|
|
+ of_node_put(tchild);
|
|
+ return ret;
|
|
}
|
|
|
|
- if (node->phandle && tchild->phandle)
|
|
+ if (node->phandle && tchild->phandle) {
|
|
ret = -EINVAL;
|
|
- else
|
|
- ret = build_changeset_next_level(ovcs, tchild, node);
|
|
+ } else {
|
|
+ target_child.np = tchild;
|
|
+ target_child.in_livetree = target->in_livetree;
|
|
+ ret = build_changeset_next_level(ovcs, &target_child, node);
|
|
+ }
|
|
of_node_put(tchild);
|
|
|
|
return ret;
|
|
@@ -393,7 +427,7 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
|
|
/**
|
|
* build_changeset_next_level() - add level of overlay changeset
|
|
* @ovcs: overlay changeset
|
|
- * @target_node: where to place @overlay_node in live tree
|
|
+ * @target: where to place @overlay_node in live tree
|
|
* @overlay_node: node from within an overlay device tree fragment
|
|
*
|
|
* Add the properties (if any) and nodes (if any) from @overlay_node to the
|
|
@@ -406,27 +440,26 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
|
|
* invalid @overlay_node.
|
|
*/
|
|
static int build_changeset_next_level(struct overlay_changeset *ovcs,
|
|
- struct device_node *target_node,
|
|
- const struct device_node *overlay_node)
|
|
+ struct target *target, const struct device_node *overlay_node)
|
|
{
|
|
struct device_node *child;
|
|
struct property *prop;
|
|
int ret;
|
|
|
|
for_each_property_of_node(overlay_node, prop) {
|
|
- ret = add_changeset_property(ovcs, target_node, prop, 0);
|
|
+ ret = add_changeset_property(ovcs, target, prop, 0);
|
|
if (ret) {
|
|
pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
|
|
- target_node, prop->name, ret);
|
|
+ target->np, prop->name, ret);
|
|
return ret;
|
|
}
|
|
}
|
|
|
|
for_each_child_of_node(overlay_node, child) {
|
|
- ret = add_changeset_node(ovcs, target_node, child);
|
|
+ ret = add_changeset_node(ovcs, target, child);
|
|
if (ret) {
|
|
pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
|
|
- target_node, child, ret);
|
|
+ target->np, child, ret);
|
|
of_node_put(child);
|
|
return ret;
|
|
}
|
|
@@ -439,17 +472,17 @@ static int build_changeset_next_level(struct overlay_changeset *ovcs,
|
|
* Add the properties from __overlay__ node to the @ovcs->cset changeset.
|
|
*/
|
|
static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
|
|
- struct device_node *target_node,
|
|
+ struct target *target,
|
|
const struct device_node *overlay_symbols_node)
|
|
{
|
|
struct property *prop;
|
|
int ret;
|
|
|
|
for_each_property_of_node(overlay_symbols_node, prop) {
|
|
- ret = add_changeset_property(ovcs, target_node, prop, 1);
|
|
+ ret = add_changeset_property(ovcs, target, prop, 1);
|
|
if (ret) {
|
|
pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
|
|
- target_node, prop->name, ret);
|
|
+ target->np, prop->name, ret);
|
|
return ret;
|
|
}
|
|
}
|
|
@@ -472,6 +505,7 @@ static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
|
|
static int build_changeset(struct overlay_changeset *ovcs)
|
|
{
|
|
struct fragment *fragment;
|
|
+ struct target target;
|
|
int fragments_count, i, ret;
|
|
|
|
/*
|
|
@@ -486,7 +520,9 @@ static int build_changeset(struct overlay_changeset *ovcs)
|
|
for (i = 0; i < fragments_count; i++) {
|
|
fragment = &ovcs->fragments[i];
|
|
|
|
- ret = build_changeset_next_level(ovcs, fragment->target,
|
|
+ target.np = fragment->target;
|
|
+ target.in_livetree = true;
|
|
+ ret = build_changeset_next_level(ovcs, &target,
|
|
fragment->overlay);
|
|
if (ret) {
|
|
pr_debug("apply failed '%pOF'\n", fragment->target);
|
|
@@ -496,7 +532,10 @@ static int build_changeset(struct overlay_changeset *ovcs)
|
|
|
|
if (ovcs->symbols_fragment) {
|
|
fragment = &ovcs->fragments[ovcs->count - 1];
|
|
- ret = build_changeset_symbols_node(ovcs, fragment->target,
|
|
+
|
|
+ target.np = fragment->target;
|
|
+ target.in_livetree = true;
|
|
+ ret = build_changeset_symbols_node(ovcs, &target,
|
|
fragment->overlay);
|
|
if (ret) {
|
|
pr_debug("apply failed '%pOF'\n", fragment->target);
|
|
@@ -514,7 +553,7 @@ static int build_changeset(struct overlay_changeset *ovcs)
|
|
* 1) "target" property containing the phandle of the target
|
|
* 2) "target-path" property containing the path of the target
|
|
*/
|
|
-static struct device_node *find_target_node(struct device_node *info_node)
|
|
+static struct device_node *find_target(struct device_node *info_node)
|
|
{
|
|
struct device_node *node;
|
|
const char *path;
|
|
@@ -620,7 +659,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
|
|
|
|
fragment = &fragments[cnt];
|
|
fragment->overlay = overlay_node;
|
|
- fragment->target = find_target_node(node);
|
|
+ fragment->target = find_target(node);
|
|
if (!fragment->target) {
|
|
of_node_put(fragment->overlay);
|
|
ret = -EINVAL;
|
|
diff --git a/drivers/of/property.c b/drivers/of/property.c
|
|
index f46828e3b082b..43720c2de138b 100644
|
|
--- a/drivers/of/property.c
|
|
+++ b/drivers/of/property.c
|
|
@@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
|
|
|
|
if (!of_device_is_available(remote)) {
|
|
pr_debug("not available for remote node\n");
|
|
+ of_node_put(remote);
|
|
return NULL;
|
|
}
|
|
|
|
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
|
|
index 2c2df4e4fc14d..82baed4b80d40 100644
|
|
--- a/drivers/opp/core.c
|
|
+++ b/drivers/opp/core.c
|
|
@@ -196,12 +196,12 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
|
|
if (IS_ERR(opp_table))
|
|
return 0;
|
|
|
|
- count = opp_table->regulator_count;
|
|
-
|
|
/* Regulator may not be required for the device */
|
|
- if (!count)
|
|
+ if (!opp_table->regulators)
|
|
goto put_opp_table;
|
|
|
|
+ count = opp_table->regulator_count;
|
|
+
|
|
uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
|
|
if (!uV)
|
|
goto put_opp_table;
|
|
@@ -951,11 +951,9 @@ void _opp_free(struct dev_pm_opp *opp)
|
|
kfree(opp);
|
|
}
|
|
|
|
-static void _opp_kref_release(struct kref *kref)
|
|
+static void _opp_kref_release(struct dev_pm_opp *opp,
|
|
+ struct opp_table *opp_table)
|
|
{
|
|
- struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
|
|
- struct opp_table *opp_table = opp->opp_table;
|
|
-
|
|
/*
|
|
* Notify the changes in the availability of the operable
|
|
* frequency/voltage list.
|
|
@@ -964,7 +962,22 @@ static void _opp_kref_release(struct kref *kref)
|
|
opp_debug_remove_one(opp);
|
|
list_del(&opp->node);
|
|
kfree(opp);
|
|
+}
|
|
+
|
|
+static void _opp_kref_release_unlocked(struct kref *kref)
|
|
+{
|
|
+ struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
|
|
+ struct opp_table *opp_table = opp->opp_table;
|
|
+
|
|
+ _opp_kref_release(opp, opp_table);
|
|
+}
|
|
+
|
|
+static void _opp_kref_release_locked(struct kref *kref)
|
|
+{
|
|
+ struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
|
|
+ struct opp_table *opp_table = opp->opp_table;
|
|
|
|
+ _opp_kref_release(opp, opp_table);
|
|
mutex_unlock(&opp_table->lock);
|
|
}
|
|
|
|
@@ -975,10 +988,16 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
|
|
|
|
void dev_pm_opp_put(struct dev_pm_opp *opp)
|
|
{
|
|
- kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
|
|
+ kref_put_mutex(&opp->kref, _opp_kref_release_locked,
|
|
+ &opp->opp_table->lock);
|
|
}
|
|
EXPORT_SYMBOL_GPL(dev_pm_opp_put);
|
|
|
|
+static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
|
|
+{
|
|
+ kref_put(&opp->kref, _opp_kref_release_unlocked);
|
|
+}
|
|
+
|
|
/**
|
|
* dev_pm_opp_remove() - Remove an OPP from OPP table
|
|
* @dev: device for which we do this operation
|
|
@@ -1022,6 +1041,40 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
|
|
}
|
|
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
|
|
|
|
+/**
|
|
+ * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
|
|
+ * @dev: device for which we do this operation
|
|
+ *
|
|
+ * This function removes all dynamically created OPPs from the opp table.
|
|
+ */
|
|
+void dev_pm_opp_remove_all_dynamic(struct device *dev)
|
|
+{
|
|
+ struct opp_table *opp_table;
|
|
+ struct dev_pm_opp *opp, *temp;
|
|
+ int count = 0;
|
|
+
|
|
+ opp_table = _find_opp_table(dev);
|
|
+ if (IS_ERR(opp_table))
|
|
+ return;
|
|
+
|
|
+ mutex_lock(&opp_table->lock);
|
|
+ list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
|
|
+ if (opp->dynamic) {
|
|
+ dev_pm_opp_put_unlocked(opp);
|
|
+ count++;
|
|
+ }
|
|
+ }
|
|
+ mutex_unlock(&opp_table->lock);
|
|
+
|
|
+ /* Drop the references taken by dev_pm_opp_add() */
|
|
+ while (count--)
|
|
+ dev_pm_opp_put_opp_table(opp_table);
|
|
+
|
|
+ /* Drop the reference taken by _find_opp_table() */
|
|
+ dev_pm_opp_put_opp_table(opp_table);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
|
|
+
|
|
struct dev_pm_opp *_opp_allocate(struct opp_table *table)
|
|
{
|
|
struct dev_pm_opp *opp;
|
|
@@ -1049,6 +1102,9 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
|
|
struct regulator *reg;
|
|
int i;
|
|
|
|
+ if (!opp_table->regulators)
|
|
+ return true;
|
|
+
|
|
for (i = 0; i < opp_table->regulator_count; i++) {
|
|
reg = opp_table->regulators[i];
|
|
|
|
@@ -1333,7 +1389,7 @@ static int _allocate_set_opp_data(struct opp_table *opp_table)
|
|
struct dev_pm_set_opp_data *data;
|
|
int len, count = opp_table->regulator_count;
|
|
|
|
- if (WARN_ON(!count))
|
|
+ if (WARN_ON(!opp_table->regulators))
|
|
return -EINVAL;
|
|
|
|
/* space for set_opp_data */
|
|
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
|
|
index 88af6bff945f3..6b54528b5f0c3 100644
|
|
--- a/drivers/pci/controller/dwc/pci-imx6.c
|
|
+++ b/drivers/pci/controller/dwc/pci-imx6.c
|
|
@@ -67,6 +67,7 @@ struct imx6_pcie {
|
|
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
|
|
|
|
/* PCIe Root Complex registers (memory-mapped) */
|
|
+#define PCIE_RC_IMX6_MSI_CAP 0x50
|
|
#define PCIE_RC_LCR 0x7c
|
|
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
|
|
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
|
|
@@ -840,6 +841,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
|
|
struct resource *dbi_base;
|
|
struct device_node *node = dev->of_node;
|
|
int ret;
|
|
+ u16 val;
|
|
|
|
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
|
|
if (!imx6_pcie)
|
|
@@ -981,6 +983,14 @@ static int imx6_pcie_probe(struct platform_device *pdev)
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
+ if (pci_msi_enabled()) {
|
|
+ val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
|
|
+ PCI_MSI_FLAGS);
|
|
+ val |= PCI_MSI_FLAGS_ENABLE;
|
|
+ dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
|
|
+ val);
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
|
|
index 29a05759a2942..0fa9e8fdce66e 100644
|
|
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
|
|
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
|
|
@@ -99,9 +99,6 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
|
|
(i * MAX_MSI_IRQS_PER_CTRL) +
|
|
pos);
|
|
generic_handle_irq(irq);
|
|
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
|
|
- (i * MSI_REG_CTRL_BLOCK_SIZE),
|
|
- 4, 1 << pos);
|
|
pos++;
|
|
}
|
|
}
|
|
@@ -168,8 +165,8 @@ static void dw_pci_bottom_mask(struct irq_data *data)
|
|
bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
|
|
|
|
pp->irq_status[ctrl] &= ~(1 << bit);
|
|
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
|
|
- pp->irq_status[ctrl]);
|
|
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
|
|
+ ~pp->irq_status[ctrl]);
|
|
}
|
|
|
|
raw_spin_unlock_irqrestore(&pp->lock, flags);
|
|
@@ -191,8 +188,8 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
|
|
bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
|
|
|
|
pp->irq_status[ctrl] |= 1 << bit;
|
|
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
|
|
- pp->irq_status[ctrl]);
|
|
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
|
|
+ ~pp->irq_status[ctrl]);
|
|
}
|
|
|
|
raw_spin_unlock_irqrestore(&pp->lock, flags);
|
|
@@ -200,13 +197,22 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
|
|
|
|
static void dw_pci_bottom_ack(struct irq_data *d)
|
|
{
|
|
- struct msi_desc *msi = irq_data_get_msi_desc(d);
|
|
- struct pcie_port *pp;
|
|
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
|
|
+ unsigned int res, bit, ctrl;
|
|
+ unsigned long flags;
|
|
+
|
|
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
|
|
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
|
|
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
|
|
+
|
|
+ raw_spin_lock_irqsave(&pp->lock, flags);
|
|
|
|
- pp = msi_desc_to_pci_sysdata(msi);
|
|
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
|
|
|
|
if (pp->ops->msi_irq_ack)
|
|
pp->ops->msi_irq_ack(d->hwirq, pp);
|
|
+
|
|
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
|
|
}
|
|
|
|
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
|
|
@@ -658,10 +664,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
|
|
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
|
|
|
|
/* Initialize IRQ Status array */
|
|
- for (ctrl = 0; ctrl < num_ctrls; ctrl++)
|
|
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
|
|
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
|
|
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
|
|
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
|
|
- 4, &pp->irq_status[ctrl]);
|
|
+ 4, ~0);
|
|
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
|
|
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
|
|
+ 4, ~0);
|
|
+ pp->irq_status[ctrl] = 0;
|
|
+ }
|
|
|
|
/* Setup RC BARs */
|
|
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
|
|
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
|
|
index ae3c5b25dcc7d..a2eb25271c969 100644
|
|
--- a/drivers/pci/p2pdma.c
|
|
+++ b/drivers/pci/p2pdma.c
|
|
@@ -82,10 +82,8 @@ static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
|
|
complete_all(&p2p->devmap_ref_done);
|
|
}
|
|
|
|
-static void pci_p2pdma_percpu_kill(void *data)
|
|
+static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
|
|
{
|
|
- struct percpu_ref *ref = data;
|
|
-
|
|
/*
|
|
* pci_p2pdma_add_resource() may be called multiple times
|
|
* by a driver and may register the percpu_kill devm action multiple
|
|
@@ -198,6 +196,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
|
|
pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
|
|
pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
|
|
pci_resource_start(pdev, bar);
|
|
+ pgmap->kill = pci_p2pdma_percpu_kill;
|
|
|
|
addr = devm_memremap_pages(&pdev->dev, pgmap);
|
|
if (IS_ERR(addr)) {
|
|
@@ -211,11 +210,6 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
|
|
if (error)
|
|
goto pgmap_free;
|
|
|
|
- error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_percpu_kill,
|
|
- &pdev->p2pdma->devmap_ref);
|
|
- if (error)
|
|
- goto pgmap_free;
|
|
-
|
|
pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
|
|
&pgmap->res);
|
|
|
|
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
|
|
index bef17c3fca67c..33f3f475e5c6b 100644
|
|
--- a/drivers/pci/pci-driver.c
|
|
+++ b/drivers/pci/pci-driver.c
|
|
@@ -1251,30 +1251,29 @@ static int pci_pm_runtime_suspend(struct device *dev)
|
|
return 0;
|
|
}
|
|
|
|
- if (!pm || !pm->runtime_suspend)
|
|
- return -ENOSYS;
|
|
-
|
|
pci_dev->state_saved = false;
|
|
- error = pm->runtime_suspend(dev);
|
|
- if (error) {
|
|
+ if (pm && pm->runtime_suspend) {
|
|
+ error = pm->runtime_suspend(dev);
|
|
/*
|
|
* -EBUSY and -EAGAIN is used to request the runtime PM core
|
|
* to schedule a new suspend, so log the event only with debug
|
|
* log level.
|
|
*/
|
|
- if (error == -EBUSY || error == -EAGAIN)
|
|
+ if (error == -EBUSY || error == -EAGAIN) {
|
|
dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
|
|
pm->runtime_suspend, error);
|
|
- else
|
|
+ return error;
|
|
+ } else if (error) {
|
|
dev_err(dev, "can't suspend (%pf returned %d)\n",
|
|
pm->runtime_suspend, error);
|
|
-
|
|
- return error;
|
|
+ return error;
|
|
+ }
|
|
}
|
|
|
|
pci_fixup_device(pci_fixup_suspend, pci_dev);
|
|
|
|
- if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
|
|
+ if (pm && pm->runtime_suspend
|
|
+ && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
|
|
&& pci_dev->current_state != PCI_UNKNOWN) {
|
|
WARN_ONCE(pci_dev->current_state != prev,
|
|
"PCI PM: State of device not saved by %pF\n",
|
|
@@ -1292,7 +1291,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
|
|
|
|
static int pci_pm_runtime_resume(struct device *dev)
|
|
{
|
|
- int rc;
|
|
+ int rc = 0;
|
|
struct pci_dev *pci_dev = to_pci_dev(dev);
|
|
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
|
|
|
|
@@ -1306,14 +1305,12 @@ static int pci_pm_runtime_resume(struct device *dev)
|
|
if (!pci_dev->driver)
|
|
return 0;
|
|
|
|
- if (!pm || !pm->runtime_resume)
|
|
- return -ENOSYS;
|
|
-
|
|
pci_fixup_device(pci_fixup_resume_early, pci_dev);
|
|
pci_enable_wake(pci_dev, PCI_D0, false);
|
|
pci_fixup_device(pci_fixup_resume, pci_dev);
|
|
|
|
- rc = pm->runtime_resume(dev);
|
|
+ if (pm && pm->runtime_resume)
|
|
+ rc = pm->runtime_resume(dev);
|
|
|
|
pci_dev->runtime_d3cold = false;
|
|
|
|
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
|
|
index c9d8e3c837de7..c25acace7d914 100644
|
|
--- a/drivers/pci/pci.c
|
|
+++ b/drivers/pci/pci.c
|
|
@@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str)
|
|
} else if (!strncmp(str, "pcie_scan_all", 13)) {
|
|
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
|
|
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
|
|
- disable_acs_redir_param = str + 18;
|
|
+ disable_acs_redir_param =
|
|
+ kstrdup(str + 18, GFP_KERNEL);
|
|
} else {
|
|
printk(KERN_ERR "PCI: Unknown option `%s'\n",
|
|
str);
|
|
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
|
|
index 54a8b30dda38c..37d0c15c9eeb0 100644
|
|
--- a/drivers/pci/switch/switchtec.c
|
|
+++ b/drivers/pci/switch/switchtec.c
|
|
@@ -800,6 +800,7 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
|
|
{
|
|
int ret;
|
|
int nr_idxs;
|
|
+ unsigned int event_flags;
|
|
struct switchtec_ioctl_event_ctl ctl;
|
|
|
|
if (copy_from_user(&ctl, uctl, sizeof(ctl)))
|
|
@@ -821,7 +822,9 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
|
|
else
|
|
return -EINVAL;
|
|
|
|
+ event_flags = ctl.flags;
|
|
for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
|
|
+ ctl.flags = event_flags;
|
|
ret = event_ctl(stdev, &ctl);
|
|
if (ret < 0)
|
|
return ret;
|
|
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
|
|
index 54ec278d2fc46..e1a77b2de78ae 100644
|
|
--- a/drivers/perf/arm_spe_pmu.c
|
|
+++ b/drivers/perf/arm_spe_pmu.c
|
|
@@ -927,6 +927,11 @@ static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
|
|
|
|
idx = atomic_inc_return(&pmu_idx);
|
|
name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
|
|
+ if (!name) {
|
|
+ dev_err(dev, "failed to allocate name for pmu %d\n", idx);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
return perf_pmu_register(&spe_pmu->pmu, name, -1);
|
|
}
|
|
|
|
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
|
|
index 1b10ea05a9149..69372e2bc93c7 100644
|
|
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
|
|
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
|
|
@@ -30,8 +30,8 @@
|
|
#define DDRC_FLUX_RCMD 0x38c
|
|
#define DDRC_PRE_CMD 0x3c0
|
|
#define DDRC_ACT_CMD 0x3c4
|
|
-#define DDRC_BNK_CHG 0x3c8
|
|
#define DDRC_RNK_CHG 0x3cc
|
|
+#define DDRC_RW_CHG 0x3d0
|
|
#define DDRC_EVENT_CTRL 0x6C0
|
|
#define DDRC_INT_MASK 0x6c8
|
|
#define DDRC_INT_STATUS 0x6cc
|
|
@@ -51,7 +51,7 @@
|
|
|
|
static const u32 ddrc_reg_off[] = {
|
|
DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
|
|
- DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
|
|
+ DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
|
|
};
|
|
|
|
/*
|
|
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
|
|
index d4dcd39b8d76f..881078ff73f60 100644
|
|
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
|
|
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
|
|
@@ -126,6 +126,7 @@ struct sun4i_usb_phy_cfg {
|
|
bool dedicated_clocks;
|
|
bool enable_pmu_unk1;
|
|
bool phy0_dual_route;
|
|
+ int missing_phys;
|
|
};
|
|
|
|
struct sun4i_usb_phy_data {
|
|
@@ -646,6 +647,9 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
|
|
if (args->args[0] >= data->cfg->num_phys)
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
+ if (data->cfg->missing_phys & BIT(args->args[0]))
|
|
+ return ERR_PTR(-ENODEV);
|
|
+
|
|
return data->phys[args->args[0]].phy;
|
|
}
|
|
|
|
@@ -741,6 +745,9 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
|
|
struct sun4i_usb_phy *phy = data->phys + i;
|
|
char name[16];
|
|
|
|
+ if (data->cfg->missing_phys & BIT(i))
|
|
+ continue;
|
|
+
|
|
snprintf(name, sizeof(name), "usb%d_vbus", i);
|
|
phy->vbus = devm_regulator_get_optional(dev, name);
|
|
if (IS_ERR(phy->vbus)) {
|
|
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
|
|
index fa530913a2c8f..08925d24180b0 100644
|
|
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
|
|
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
|
|
@@ -90,7 +90,7 @@ struct bcm2835_pinctrl {
|
|
struct gpio_chip gpio_chip;
|
|
struct pinctrl_gpio_range gpio_range;
|
|
|
|
- spinlock_t irq_lock[BCM2835_NUM_BANKS];
|
|
+ raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
|
|
};
|
|
|
|
/* pins are just named GPIO0..GPIO53 */
|
|
@@ -461,10 +461,10 @@ static void bcm2835_gpio_irq_enable(struct irq_data *data)
|
|
unsigned bank = GPIO_REG_OFFSET(gpio);
|
|
unsigned long flags;
|
|
|
|
- spin_lock_irqsave(&pc->irq_lock[bank], flags);
|
|
+ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
|
|
set_bit(offset, &pc->enabled_irq_map[bank]);
|
|
bcm2835_gpio_irq_config(pc, gpio, true);
|
|
- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
|
|
+ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
|
|
}
|
|
|
|
static void bcm2835_gpio_irq_disable(struct irq_data *data)
|
|
@@ -476,12 +476,12 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
|
|
unsigned bank = GPIO_REG_OFFSET(gpio);
|
|
unsigned long flags;
|
|
|
|
- spin_lock_irqsave(&pc->irq_lock[bank], flags);
|
|
+ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
|
|
bcm2835_gpio_irq_config(pc, gpio, false);
|
|
/* Clear events that were latched prior to clearing event sources */
|
|
bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
|
|
clear_bit(offset, &pc->enabled_irq_map[bank]);
|
|
- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
|
|
+ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
|
|
}
|
|
|
|
static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc,
|
|
@@ -584,7 +584,7 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
|
|
unsigned long flags;
|
|
int ret;
|
|
|
|
- spin_lock_irqsave(&pc->irq_lock[bank], flags);
|
|
+ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
|
|
|
|
if (test_bit(offset, &pc->enabled_irq_map[bank]))
|
|
ret = __bcm2835_gpio_irq_set_type_enabled(pc, gpio, type);
|
|
@@ -596,7 +596,7 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
|
|
else
|
|
irq_set_handler_locked(data, handle_level_irq);
|
|
|
|
- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
|
|
+ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
|
|
|
|
return ret;
|
|
}
|
|
@@ -1047,7 +1047,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
|
|
for_each_set_bit(offset, &events, 32)
|
|
bcm2835_gpio_wr(pc, GPEDS0 + i * 4, BIT(offset));
|
|
|
|
- spin_lock_init(&pc->irq_lock[i]);
|
|
+ raw_spin_lock_init(&pc->irq_lock[i]);
|
|
}
|
|
|
|
err = gpiochip_add_data(&pc->gpio_chip, pc);
|
|
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
|
|
index 9b0f4b9ef4829..8efe8ea45602f 100644
|
|
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
|
|
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
|
|
@@ -1507,7 +1507,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
|
|
.matches = {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
|
|
DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
|
|
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
|
|
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
|
|
},
|
|
},
|
|
{
|
|
@@ -1515,7 +1515,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
|
|
.matches = {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
|
|
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
|
|
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
|
|
},
|
|
},
|
|
{
|
|
@@ -1523,7 +1523,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
|
|
.matches = {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
|
|
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
|
|
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
|
|
},
|
|
},
|
|
{
|
|
@@ -1531,7 +1531,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
|
|
.matches = {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
|
|
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
|
|
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
|
|
},
|
|
},
|
|
{}
|
|
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
|
|
index 86466173114da..e482672e833a4 100644
|
|
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
|
|
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
|
|
@@ -807,7 +807,9 @@ static const char * const gpio_groups[] = {
|
|
"BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
|
|
"BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
|
|
"BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
|
|
+};
|
|
|
|
+static const char * const gpio_aobus_groups[] = {
|
|
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
|
|
"GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
|
|
"GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
|
|
@@ -1030,6 +1032,7 @@ static struct meson_pmx_func meson8_cbus_functions[] = {
|
|
};
|
|
|
|
static struct meson_pmx_func meson8_aobus_functions[] = {
|
|
+ FUNCTION(gpio_aobus),
|
|
FUNCTION(uart_ao),
|
|
FUNCTION(remote),
|
|
FUNCTION(i2c_slave_ao),
|
|
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
|
|
index 647ad15d5c3c4..91cffc051055f 100644
|
|
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
|
|
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
|
|
@@ -646,16 +646,18 @@ static const char * const gpio_groups[] = {
|
|
"BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
|
|
"BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
|
|
|
|
- "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
|
|
- "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
|
|
- "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
|
|
- "GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N",
|
|
-
|
|
"DIF_0_P", "DIF_0_N", "DIF_1_P", "DIF_1_N",
|
|
"DIF_2_P", "DIF_2_N", "DIF_3_P", "DIF_3_N",
|
|
"DIF_4_P", "DIF_4_N"
|
|
};
|
|
|
|
+static const char * const gpio_aobus_groups[] = {
|
|
+ "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
|
|
+ "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
|
|
+ "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
|
|
+ "GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N"
|
|
+};
|
|
+
|
|
static const char * const sd_a_groups[] = {
|
|
"sd_d0_a", "sd_d1_a", "sd_d2_a", "sd_d3_a", "sd_clk_a",
|
|
"sd_cmd_a"
|
|
@@ -871,6 +873,7 @@ static struct meson_pmx_func meson8b_cbus_functions[] = {
|
|
};
|
|
|
|
static struct meson_pmx_func meson8b_aobus_functions[] = {
|
|
+ FUNCTION(gpio_aobus),
|
|
FUNCTION(uart_ao),
|
|
FUNCTION(uart_ao_b),
|
|
FUNCTION(i2c_slave_ao),
|
|
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
|
|
index 7ad50d9268aac..3bda620d18bba 100644
|
|
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
|
|
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
|
|
@@ -1932,6 +1932,9 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
|
|
pctrl->gpio_bank[id].gc.label =
|
|
devm_kasprintf(pctrl->dev, GFP_KERNEL, "%pOF",
|
|
np);
|
|
+ if (pctrl->gpio_bank[id].gc.label == NULL)
|
|
+ return -ENOMEM;
|
|
+
|
|
pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show;
|
|
pctrl->gpio_bank[id].direction_input =
|
|
pctrl->gpio_bank[id].gc.direction_input;
|
|
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
|
|
index a7f37063518ec..3d05bc1937d40 100644
|
|
--- a/drivers/pinctrl/pinctrl-max77620.c
|
|
+++ b/drivers/pinctrl/pinctrl-max77620.c
|
|
@@ -34,14 +34,12 @@ enum max77620_pin_ppdrv {
|
|
MAX77620_PIN_PP_DRV,
|
|
};
|
|
|
|
-enum max77620_pinconf_param {
|
|
- MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
|
|
- MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
|
|
- MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
|
|
- MAX77620_SUSPEND_FPS_SOURCE,
|
|
- MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
|
|
- MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
|
|
-};
|
|
+#define MAX77620_ACTIVE_FPS_SOURCE (PIN_CONFIG_END + 1)
|
|
+#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 2)
|
|
+#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 3)
|
|
+#define MAX77620_SUSPEND_FPS_SOURCE (PIN_CONFIG_END + 4)
|
|
+#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 5)
|
|
+#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 6)
|
|
|
|
struct max77620_pin_function {
|
|
const char *name;
|
|
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
|
|
index 14eb576c04a2b..9cfe9d0520aca 100644
|
|
--- a/drivers/pinctrl/pinctrl-rza1.c
|
|
+++ b/drivers/pinctrl/pinctrl-rza1.c
|
|
@@ -1225,6 +1225,9 @@ static int rza1_parse_gpiochip(struct rza1_pinctrl *rza1_pctl,
|
|
chip->base = -1;
|
|
chip->label = devm_kasprintf(rza1_pctl->dev, GFP_KERNEL, "%pOFn",
|
|
np);
|
|
+ if (!chip->label)
|
|
+ return -ENOMEM;
|
|
+
|
|
chip->ngpio = of_args.args[2];
|
|
chip->of_node = np;
|
|
chip->parent = rza1_pctl->dev;
|
|
@@ -1326,6 +1329,8 @@ static int rza1_pinctrl_register(struct rza1_pinctrl *rza1_pctl)
|
|
pins[i].number = i;
|
|
pins[i].name = devm_kasprintf(rza1_pctl->dev, GFP_KERNEL,
|
|
"P%u-%u", port, pin);
|
|
+ if (!pins[i].name)
|
|
+ return -ENOMEM;
|
|
|
|
if (i % RZA1_PINS_PER_PORT == 0) {
|
|
/*
|
|
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
|
|
index cbf58a10113df..4d87d75b9c6ea 100644
|
|
--- a/drivers/pinctrl/pinctrl-sx150x.c
|
|
+++ b/drivers/pinctrl/pinctrl-sx150x.c
|
|
@@ -1166,7 +1166,6 @@ static int sx150x_probe(struct i2c_client *client,
|
|
}
|
|
|
|
/* Register GPIO controller */
|
|
- pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
|
|
pctl->gpio.base = -1;
|
|
pctl->gpio.ngpio = pctl->data->npins;
|
|
pctl->gpio.get_direction = sx150x_gpio_get_direction;
|
|
@@ -1180,6 +1179,10 @@ static int sx150x_probe(struct i2c_client *client,
|
|
pctl->gpio.of_node = dev->of_node;
|
|
#endif
|
|
pctl->gpio.can_sleep = true;
|
|
+ pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
|
|
+ if (!pctl->gpio.label)
|
|
+ return -ENOMEM;
|
|
+
|
|
/*
|
|
* Setting multiple pins is not safe when all pins are not
|
|
* handled by the same regmap register. The oscio pin (present
|
|
@@ -1200,13 +1203,15 @@ static int sx150x_probe(struct i2c_client *client,
|
|
|
|
/* Add Interrupt support if an irq is specified */
|
|
if (client->irq > 0) {
|
|
- pctl->irq_chip.name = devm_kstrdup(dev, client->name,
|
|
- GFP_KERNEL);
|
|
pctl->irq_chip.irq_mask = sx150x_irq_mask;
|
|
pctl->irq_chip.irq_unmask = sx150x_irq_unmask;
|
|
pctl->irq_chip.irq_set_type = sx150x_irq_set_type;
|
|
pctl->irq_chip.irq_bus_lock = sx150x_irq_bus_lock;
|
|
pctl->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
|
|
+ pctl->irq_chip.name = devm_kstrdup(dev, client->name,
|
|
+ GFP_KERNEL);
|
|
+ if (!pctl->irq_chip.name)
|
|
+ return -ENOMEM;
|
|
|
|
pctl->irq.masked = ~0;
|
|
pctl->irq.sense = 0;
|
|
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
|
|
index 7aae52a09ff03..4ffd56ff809eb 100644
|
|
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
|
|
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
|
|
@@ -79,7 +79,7 @@ enum {
|
|
.intr_cfg_reg = 0, \
|
|
.intr_status_reg = 0, \
|
|
.intr_target_reg = 0, \
|
|
- .tile = NORTH, \
|
|
+ .tile = SOUTH, \
|
|
.mux_bit = -1, \
|
|
.pull_bit = pull, \
|
|
.drv_bit = drv, \
|
|
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
|
|
index aa8b58125568d..ef4268cc62275 100644
|
|
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
|
|
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
|
|
@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
|
|
static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
|
|
.pins = h6_pins,
|
|
.npins = ARRAY_SIZE(h6_pins),
|
|
- .irq_banks = 3,
|
|
+ .irq_banks = 4,
|
|
.irq_bank_map = h6_irq_bank_map,
|
|
.irq_read_needs_mux = true,
|
|
};
|
|
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
|
|
index b6fd4838f60f3..e5d5b1adb5a95 100644
|
|
--- a/drivers/platform/chrome/cros_ec_proto.c
|
|
+++ b/drivers/platform/chrome/cros_ec_proto.c
|
|
@@ -575,6 +575,7 @@ static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
|
|
|
|
int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
|
|
{
|
|
+ u8 event_type;
|
|
u32 host_event;
|
|
int ret;
|
|
|
|
@@ -594,11 +595,22 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
|
|
return ret;
|
|
|
|
if (wake_event) {
|
|
+ event_type = ec_dev->event_data.event_type;
|
|
host_event = cros_ec_get_host_event(ec_dev);
|
|
|
|
- /* Consider non-host_event as wake event */
|
|
- *wake_event = !host_event ||
|
|
- !!(host_event & ec_dev->host_event_wake_mask);
|
|
+ /*
|
|
+ * Sensor events need to be parsed by the sensor sub-device.
|
|
+ * Defer them, and don't report the wakeup here.
|
|
+ */
|
|
+ if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
|
|
+ *wake_event = false;
|
|
+ /* Masked host-events should not count as wake events. */
|
|
+ else if (host_event &&
|
|
+ !(host_event & ec_dev->host_event_wake_mask))
|
|
+ *wake_event = false;
|
|
+ /* Consider all other events as wake events. */
|
|
+ else
|
|
+ *wake_event = true;
|
|
}
|
|
|
|
return ret;
|
|
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
|
|
index f66521c7f8462..42efcb850722c 100644
|
|
--- a/drivers/platform/mips/cpu_hwmon.c
|
|
+++ b/drivers/platform/mips/cpu_hwmon.c
|
|
@@ -25,9 +25,10 @@ int loongson3_cpu_temp(int cpu)
|
|
case PRID_REV_LOONGSON3A_R1:
|
|
reg = (reg >> 8) & 0xff;
|
|
break;
|
|
- case PRID_REV_LOONGSON3A_R2:
|
|
case PRID_REV_LOONGSON3B_R1:
|
|
case PRID_REV_LOONGSON3B_R2:
|
|
+ case PRID_REV_LOONGSON3A_R2_0:
|
|
+ case PRID_REV_LOONGSON3A_R2_1:
|
|
reg = ((reg >> 8) & 0xff) - 100;
|
|
break;
|
|
case PRID_REV_LOONGSON3A_R3_0:
|
|
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
|
|
index db2af09067dbc..b6f2ff95c3ed9 100644
|
|
--- a/drivers/platform/x86/asus-nb-wmi.c
|
|
+++ b/drivers/platform/x86/asus-nb-wmi.c
|
|
@@ -442,8 +442,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
|
|
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
|
|
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
|
|
{ KE_KEY, 0x32, { KEY_MUTE } },
|
|
- { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
|
|
- { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
|
|
+ { KE_KEY, 0x35, { KEY_SCREENLOCK } },
|
|
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
|
|
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
|
|
{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
|
|
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
|
|
index c285a16675ee7..37b5de5412704 100644
|
|
--- a/drivers/platform/x86/asus-wmi.c
|
|
+++ b/drivers/platform/x86/asus-wmi.c
|
|
@@ -2131,7 +2131,8 @@ static int asus_wmi_add(struct platform_device *pdev)
|
|
err = asus_wmi_backlight_init(asus);
|
|
if (err && err != -ENODEV)
|
|
goto fail_backlight;
|
|
- }
|
|
+ } else
|
|
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
|
|
|
|
status = wmi_install_notify_handler(asus->driver->event_guid,
|
|
asus_wmi_notify, asus);
|
|
diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c
|
|
index 9371603a0ac90..2a8c7a4cea359 100644
|
|
--- a/drivers/platform/x86/intel_atomisp2_pm.c
|
|
+++ b/drivers/platform/x86/intel_atomisp2_pm.c
|
|
@@ -33,46 +33,45 @@
|
|
#define ISPSSPM0_IUNIT_POWER_ON 0x0
|
|
#define ISPSSPM0_IUNIT_POWER_OFF 0x3
|
|
|
|
-static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
|
+static int isp_set_power(struct pci_dev *dev, bool enable)
|
|
{
|
|
unsigned long timeout;
|
|
- u32 val;
|
|
-
|
|
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, 0);
|
|
-
|
|
- /*
|
|
- * MRFLD IUNIT DPHY is located in an always-power-on island
|
|
- * MRFLD HW design need all CSI ports are disabled before
|
|
- * powering down the IUNIT.
|
|
- */
|
|
- pci_read_config_dword(dev, PCI_CSI_CONTROL, &val);
|
|
- val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
|
|
- pci_write_config_dword(dev, PCI_CSI_CONTROL, val);
|
|
+ u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON :
|
|
+ ISPSSPM0_IUNIT_POWER_OFF;
|
|
|
|
- /* Write 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */
|
|
+ /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
|
|
iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
|
|
- ISPSSPM0_IUNIT_POWER_OFF, ISPSSPM0_ISPSSC_MASK);
|
|
+ val, ISPSSPM0_ISPSSC_MASK);
|
|
|
|
/*
|
|
* There should be no IUNIT access while power-down is
|
|
* in progress HW sighting: 4567865
|
|
* Wait up to 50 ms for the IUNIT to shut down.
|
|
+ * And we do the same for power on.
|
|
*/
|
|
timeout = jiffies + msecs_to_jiffies(50);
|
|
while (1) {
|
|
- /* Wait until ISPSSPM0 bit[25:24] shows 0x3 */
|
|
- iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &val);
|
|
- val = (val & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
|
|
- if (val == ISPSSPM0_IUNIT_POWER_OFF)
|
|
+ u32 tmp;
|
|
+
|
|
+ /* Wait until ISPSSPM0 bit[25:24] shows the right value */
|
|
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
|
|
+ tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
|
|
+ if (tmp == val)
|
|
break;
|
|
|
|
if (time_after(jiffies, timeout)) {
|
|
- dev_err(&dev->dev, "IUNIT power-off timeout.\n");
|
|
+ dev_err(&dev->dev, "IUNIT power-%s timeout.\n",
|
|
+ enable ? "on" : "off");
|
|
return -EBUSY;
|
|
}
|
|
usleep_range(1000, 2000);
|
|
}
|
|
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
|
+{
|
|
pm_runtime_allow(&dev->dev);
|
|
pm_runtime_put_sync_suspend(&dev->dev);
|
|
|
|
@@ -87,11 +86,40 @@ static void isp_remove(struct pci_dev *dev)
|
|
|
|
static int isp_pci_suspend(struct device *dev)
|
|
{
|
|
+ struct pci_dev *pdev = to_pci_dev(dev);
|
|
+ u32 val;
|
|
+
|
|
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, 0);
|
|
+
|
|
+ /*
|
|
+ * MRFLD IUNIT DPHY is located in an always-power-on island
|
|
+ * MRFLD HW design need all CSI ports are disabled before
|
|
+ * powering down the IUNIT.
|
|
+ */
|
|
+ pci_read_config_dword(pdev, PCI_CSI_CONTROL, &val);
|
|
+ val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
|
|
+ pci_write_config_dword(pdev, PCI_CSI_CONTROL, val);
|
|
+
|
|
+ /*
|
|
+ * We lose config space access when punit power gates
|
|
+ * the ISP. Can't use pci_set_power_state() because
|
|
+ * pmcsr won't actually change when we write to it.
|
|
+ */
|
|
+ pci_save_state(pdev);
|
|
+ pdev->current_state = PCI_D3cold;
|
|
+ isp_set_power(pdev, false);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
static int isp_pci_resume(struct device *dev)
|
|
{
|
|
+ struct pci_dev *pdev = to_pci_dev(dev);
|
|
+
|
|
+ isp_set_power(pdev, true);
|
|
+ pdev->current_state = PCI_D0;
|
|
+ pci_restore_state(pdev);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
|
|
index c2c3a1a198795..14f88bfabd5d4 100644
|
|
--- a/drivers/platform/x86/mlx-platform.c
|
|
+++ b/drivers/platform/x86/mlx-platform.c
|
|
@@ -83,12 +83,12 @@
|
|
#define MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET 0xe7
|
|
#define MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET 0xe8
|
|
#define MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET 0xe9
|
|
-#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xea
|
|
-#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xeb
|
|
-#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xec
|
|
-#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xed
|
|
-#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xee
|
|
-#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xef
|
|
+#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xeb
|
|
+#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xec
|
|
+#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xed
|
|
+#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xee
|
|
+#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xef
|
|
+#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xf0
|
|
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
|
|
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
|
|
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
|
|
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
|
|
index 6da79ae148601..5a97e42a35473 100644
|
|
--- a/drivers/power/supply/olpc_battery.c
|
|
+++ b/drivers/power/supply/olpc_battery.c
|
|
@@ -428,14 +428,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
|
|
if (ret)
|
|
return ret;
|
|
|
|
- val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
|
|
+ val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
|
|
break;
|
|
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
|
|
ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
|
|
if (ret)
|
|
return ret;
|
|
|
|
- val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
|
|
+ val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
|
|
break;
|
|
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
|
|
ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
|
|
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
|
|
index 2012551d93e02..796eeffdf93b1 100644
|
|
--- a/drivers/ptp/ptp_chardev.c
|
|
+++ b/drivers/ptp/ptp_chardev.c
|
|
@@ -228,7 +228,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
|
|
pct->sec = ts.tv_sec;
|
|
pct->nsec = ts.tv_nsec;
|
|
pct++;
|
|
- ptp->info->gettime64(ptp->info, &ts);
|
|
+ err = ptp->info->gettime64(ptp->info, &ts);
|
|
+ if (err)
|
|
+ goto out;
|
|
pct->sec = ts.tv_sec;
|
|
pct->nsec = ts.tv_nsec;
|
|
pct++;
|
|
@@ -281,6 +283,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
|
|
break;
|
|
}
|
|
|
|
+out:
|
|
kfree(sysoff);
|
|
return err;
|
|
}
|
|
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
|
|
index 5419a89d300e3..9b8aa6718ee73 100644
|
|
--- a/drivers/ptp/ptp_clock.c
|
|
+++ b/drivers/ptp/ptp_clock.c
|
|
@@ -249,8 +249,10 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
|
|
ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
|
|
ptp, ptp->pin_attr_groups,
|
|
"ptp%d", ptp->index);
|
|
- if (IS_ERR(ptp->dev))
|
|
+ if (IS_ERR(ptp->dev)) {
|
|
+ err = PTR_ERR(ptp->dev);
|
|
goto no_device;
|
|
+ }
|
|
|
|
/* Register a new PPS source. */
|
|
if (info->pps) {
|
|
@@ -261,6 +263,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
|
|
pps.owner = info->owner;
|
|
ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
|
|
if (!ptp->pps_source) {
|
|
+ err = -EINVAL;
|
|
pr_err("failed to register pps source\n");
|
|
goto no_pps;
|
|
}
|
|
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
|
|
index de21f620b8822..21b22a150930b 100644
|
|
--- a/drivers/remoteproc/remoteproc_virtio.c
|
|
+++ b/drivers/remoteproc/remoteproc_virtio.c
|
|
@@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
|
|
const bool * ctx,
|
|
struct irq_affinity *desc)
|
|
{
|
|
- int i, ret;
|
|
+ int i, ret, queue_idx = 0;
|
|
|
|
for (i = 0; i < nvqs; ++i) {
|
|
- vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i],
|
|
+ if (!names[i]) {
|
|
+ vqs[i] = NULL;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
|
|
ctx ? ctx[i] : false);
|
|
if (IS_ERR(vqs[i])) {
|
|
ret = PTR_ERR(vqs[i]);
|
|
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
|
|
index a3fb235fea0d1..7431a795a6247 100644
|
|
--- a/drivers/rtc/rtc-m41t80.c
|
|
+++ b/drivers/rtc/rtc-m41t80.c
|
|
@@ -393,7 +393,7 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
|
|
alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f);
|
|
alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
|
|
alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
|
|
- alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f);
|
|
+ alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f) - 1;
|
|
|
|
alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
|
|
alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
|
|
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
|
|
index 194ffd5c85804..039b2074db7e5 100644
|
|
--- a/drivers/s390/char/sclp_config.c
|
|
+++ b/drivers/s390/char/sclp_config.c
|
|
@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
|
|
|
|
static void __ref sclp_cpu_change_notify(struct work_struct *work)
|
|
{
|
|
+ lock_device_hotplug();
|
|
smp_rescan_cpus();
|
|
+ unlock_device_hotplug();
|
|
}
|
|
|
|
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
|
|
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
|
|
index 9f5a201c4c878..02b52cacde33a 100644
|
|
--- a/drivers/s390/crypto/ap_bus.c
|
|
+++ b/drivers/s390/crypto/ap_bus.c
|
|
@@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
|
|
static inline int ap_test_config_card_id(unsigned int id)
|
|
{
|
|
if (!ap_configuration) /* QCI not supported */
|
|
- return 1;
|
|
+ /* only ids 0...3F may be probed */
|
|
+ return id < 0x40 ? 1 : 0;
|
|
return ap_test_config(ap_configuration->apm, id);
|
|
}
|
|
|
|
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
|
|
index 240b27f3f5f6a..f34ee41cbed88 100644
|
|
--- a/drivers/s390/crypto/zcrypt_error.h
|
|
+++ b/drivers/s390/crypto/zcrypt_error.h
|
|
@@ -51,6 +51,7 @@ struct error_hdr {
|
|
#define REP82_ERROR_FORMAT_FIELD 0x29
|
|
#define REP82_ERROR_INVALID_COMMAND 0x30
|
|
#define REP82_ERROR_MALFORMED_MSG 0x40
|
|
+#define REP82_ERROR_INVALID_SPECIAL_CMD 0x41
|
|
#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
|
|
#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
|
|
#define REP82_ERROR_WORD_ALIGNMENT 0x60
|
|
@@ -89,6 +90,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
|
|
case REP88_ERROR_MESSAGE_MALFORMD:
|
|
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
|
|
case REP82_ERROR_INVALID_DOMAIN_PENDING:
|
|
+ case REP82_ERROR_INVALID_SPECIAL_CMD:
|
|
// REP88_ERROR_INVALID_KEY // '82' CEX2A
|
|
// REP88_ERROR_OPERAND // '84' CEX2A
|
|
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
|
|
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
|
|
index 04e294d1d16d7..99af1a0a33147 100644
|
|
--- a/drivers/s390/net/qeth_core.h
|
|
+++ b/drivers/s390/net/qeth_core.h
|
|
@@ -665,7 +665,6 @@ struct qeth_card_blkt {
|
|
|
|
#define QETH_BROADCAST_WITH_ECHO 0x01
|
|
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
|
|
-#define QETH_LAYER2_MAC_READ 0x01
|
|
#define QETH_LAYER2_MAC_REGISTERED 0x02
|
|
struct qeth_card_info {
|
|
unsigned short unit_addr2;
|
|
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
|
|
index 2540652718671..0c9a5250dd93f 100644
|
|
--- a/drivers/s390/net/qeth_core_main.c
|
|
+++ b/drivers/s390/net/qeth_core_main.c
|
|
@@ -4235,16 +4235,18 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
|
|
struct qeth_reply *reply, unsigned long data)
|
|
{
|
|
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
|
+ struct qeth_ipacmd_setadpparms *adp_cmd;
|
|
|
|
QETH_CARD_TEXT(card, 4, "chgmaccb");
|
|
if (qeth_setadpparms_inspect_rc(cmd))
|
|
return 0;
|
|
|
|
- if (IS_LAYER3(card) || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
|
|
- ether_addr_copy(card->dev->dev_addr,
|
|
- cmd->data.setadapterparms.data.change_addr.addr);
|
|
- card->info.mac_bits |= QETH_LAYER2_MAC_READ;
|
|
- }
|
|
+ adp_cmd = &cmd->data.setadapterparms;
|
|
+ if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
|
|
+ !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
|
|
+ return 0;
|
|
+
|
|
+ ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
|
|
index 3e54be201b279..10cc487c16d6a 100644
|
|
--- a/drivers/s390/net/qeth_core_mpc.h
|
|
+++ b/drivers/s390/net/qeth_core_mpc.h
|
|
@@ -80,7 +80,9 @@ enum qeth_card_types {
|
|
};
|
|
|
|
#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD)
|
|
+#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD)
|
|
#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
|
|
+#define IS_VM_NIC(card) ((card)->info.guestlan)
|
|
|
|
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
|
|
/* only the first two bytes are looked at in qeth_get_cardname_short */
|
|
@@ -529,17 +531,20 @@ struct qeth_query_switch_attributes {
|
|
__u8 reserved3[8];
|
|
};
|
|
|
|
+#define QETH_SETADP_FLAGS_VIRTUAL_MAC 0x80 /* for CHANGE_ADDR_READ_MAC */
|
|
+
|
|
struct qeth_ipacmd_setadpparms_hdr {
|
|
- __u32 supp_hw_cmds;
|
|
- __u32 reserved1;
|
|
- __u16 cmdlength;
|
|
- __u16 reserved2;
|
|
- __u32 command_code;
|
|
- __u16 return_code;
|
|
- __u8 used_total;
|
|
- __u8 seq_no;
|
|
- __u32 reserved3;
|
|
-} __attribute__ ((packed));
|
|
+ u32 supp_hw_cmds;
|
|
+ u32 reserved1;
|
|
+ u16 cmdlength;
|
|
+ u16 reserved2;
|
|
+ u32 command_code;
|
|
+ u16 return_code;
|
|
+ u8 used_total;
|
|
+ u8 seq_no;
|
|
+ u8 flags;
|
|
+ u8 reserved3[3];
|
|
+};
|
|
|
|
struct qeth_ipacmd_setadpparms {
|
|
struct qeth_ipacmd_setadpparms_hdr hdr;
|
|
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
|
|
index 2914a1a69f830..8d3601891c62d 100644
|
|
--- a/drivers/s390/net/qeth_l2_main.c
|
|
+++ b/drivers/s390/net/qeth_l2_main.c
|
|
@@ -461,12 +461,9 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
|
|
/* fall back to alternative mechanism: */
|
|
}
|
|
|
|
- if (card->info.type == QETH_CARD_TYPE_IQD ||
|
|
- card->info.type == QETH_CARD_TYPE_OSM ||
|
|
- card->info.type == QETH_CARD_TYPE_OSX ||
|
|
- card->info.guestlan) {
|
|
+ if (!IS_OSN(card)) {
|
|
rc = qeth_setadpparms_change_macaddr(card);
|
|
- if (!rc)
|
|
+ if (!rc && is_valid_ether_addr(card->dev->dev_addr))
|
|
goto out;
|
|
QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
|
|
CARD_DEVID(card), rc);
|
|
@@ -917,7 +914,8 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
|
|
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
|
|
}
|
|
|
|
- qeth_l2_request_initial_mac(card);
|
|
+ if (!is_valid_ether_addr(card->dev->dev_addr))
|
|
+ qeth_l2_request_initial_mac(card);
|
|
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
|
|
rc = register_netdev(card->dev);
|
|
if (!rc && carrier_ok)
|
|
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
|
|
index 94f4d8fe85e0e..d1b531fe9ada1 100644
|
|
--- a/drivers/s390/scsi/zfcp_aux.c
|
|
+++ b/drivers/s390/scsi/zfcp_aux.c
|
|
@@ -275,16 +275,16 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
|
|
*/
|
|
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
|
|
{
|
|
- while (atomic_read(&adapter->stat_miss) > 0)
|
|
+ while (atomic_add_unless(&adapter->stat_miss, -1, 0))
|
|
if (zfcp_fsf_status_read(adapter->qdio)) {
|
|
+ atomic_inc(&adapter->stat_miss); /* undo add -1 */
|
|
if (atomic_read(&adapter->stat_miss) >=
|
|
adapter->stat_read_buf_num) {
|
|
zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
|
|
return 1;
|
|
}
|
|
break;
|
|
- } else
|
|
- atomic_dec(&adapter->stat_miss);
|
|
+ }
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
|
|
index c9c57b4a0b718..4e1bdd03d2aa5 100644
|
|
--- a/drivers/s390/virtio/virtio_ccw.c
|
|
+++ b/drivers/s390/virtio/virtio_ccw.c
|
|
@@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
|
|
{
|
|
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
|
|
unsigned long *indicatorp = NULL;
|
|
- int ret, i;
|
|
+ int ret, i, queue_idx = 0;
|
|
struct ccw1 *ccw;
|
|
|
|
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
|
|
@@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
|
|
return -ENOMEM;
|
|
|
|
for (i = 0; i < nvqs; ++i) {
|
|
- vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
|
|
- ctx ? ctx[i] : false, ccw);
|
|
+ if (!names[i]) {
|
|
+ vqs[i] = NULL;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
|
|
+ names[i], ctx ? ctx[i] : false,
|
|
+ ccw);
|
|
if (IS_ERR(vqs[i])) {
|
|
ret = PTR_ERR(vqs[i]);
|
|
vqs[i] = NULL;
|
|
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
|
|
index 41c4d8abdd4a1..38e7680571296 100644
|
|
--- a/drivers/scsi/aic94xx/aic94xx_init.c
|
|
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
|
|
@@ -281,7 +281,7 @@ static ssize_t asd_show_dev_rev(struct device *dev,
|
|
return snprintf(buf, PAGE_SIZE, "%s\n",
|
|
asd_dev_rev[asd_ha->revision_id]);
|
|
}
|
|
-static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
|
|
+static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
|
|
|
|
static ssize_t asd_show_dev_bios_build(struct device *dev,
|
|
struct device_attribute *attr,char *buf)
|
|
@@ -478,7 +478,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
|
|
{
|
|
int err;
|
|
|
|
- err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
|
|
+ err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
|
|
if (err)
|
|
return err;
|
|
|
|
@@ -500,13 +500,13 @@ err_update_bios:
|
|
err_biosb:
|
|
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
|
|
err_rev:
|
|
- device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
|
|
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
|
|
return err;
|
|
}
|
|
|
|
static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
|
|
{
|
|
- device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
|
|
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
|
|
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
|
|
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
|
|
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
|
|
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
|
|
index bf07735275a49..0fc382cb977bf 100644
|
|
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
|
|
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
|
|
@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
|
|
}
|
|
|
|
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
|
|
- unsigned int tid, int pg_idx, bool reply)
|
|
+ unsigned int tid, int pg_idx)
|
|
{
|
|
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
|
|
GFP_KERNEL);
|
|
@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
|
|
req = (struct cpl_set_tcb_field *)skb->head;
|
|
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
|
|
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
|
|
- req->reply = V_NO_REPLY(reply ? 0 : 1);
|
|
+ req->reply = V_NO_REPLY(1);
|
|
req->cpu_idx = 0;
|
|
req->word = htons(31);
|
|
req->mask = cpu_to_be64(0xF0000000);
|
|
@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
|
|
* @tid: connection id
|
|
* @hcrc: header digest enabled
|
|
* @dcrc: data digest enabled
|
|
- * @reply: request reply from h/w
|
|
* set up the iscsi digest settings for a connection identified by tid
|
|
*/
|
|
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
|
|
- int hcrc, int dcrc, int reply)
|
|
+ int hcrc, int dcrc)
|
|
{
|
|
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
|
|
GFP_KERNEL);
|
|
@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
|
|
req = (struct cpl_set_tcb_field *)skb->head;
|
|
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
|
|
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
|
|
- req->reply = V_NO_REPLY(reply ? 0 : 1);
|
|
+ req->reply = V_NO_REPLY(1);
|
|
req->cpu_idx = 0;
|
|
req->word = htons(31);
|
|
req->mask = cpu_to_be64(0x0F000000);
|
|
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
|
|
index 594f593c88212..f36b76e8e12c2 100644
|
|
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
|
|
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
|
|
@@ -1,8 +1,8 @@
|
|
config SCSI_CXGB4_ISCSI
|
|
tristate "Chelsio T4 iSCSI support"
|
|
depends on PCI && INET && (IPV6 || IPV6=n)
|
|
- select NETDEVICES
|
|
- select ETHERNET
|
|
+ depends on THERMAL || !THERMAL
|
|
+ depends on ETHERNET
|
|
select NET_VENDOR_CHELSIO
|
|
select CHELSIO_T4
|
|
select CHELSIO_LIB
|
|
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
|
|
index 064ef57351828..bd6cc014cab04 100644
|
|
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
|
|
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
|
|
@@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
|
|
struct cxgbi_sock *csk;
|
|
|
|
csk = lookup_tid(t, tid);
|
|
- if (!csk)
|
|
+ if (!csk) {
|
|
pr_err("can't find conn. for tid %u.\n", tid);
|
|
+ return;
|
|
+ }
|
|
|
|
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
|
|
"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
|
|
csk, csk->state, csk->flags, csk->tid, rpl->status);
|
|
|
|
- if (rpl->status != CPL_ERR_NONE)
|
|
+ if (rpl->status != CPL_ERR_NONE) {
|
|
pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
|
|
csk, tid, rpl->status);
|
|
+ csk->err = -EINVAL;
|
|
+ }
|
|
+
|
|
+ complete(&csk->cmpl);
|
|
|
|
__kfree_skb(skb);
|
|
}
|
|
@@ -1984,7 +1990,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
|
|
}
|
|
|
|
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
|
|
- int pg_idx, bool reply)
|
|
+ int pg_idx)
|
|
{
|
|
struct sk_buff *skb;
|
|
struct cpl_set_tcb_field *req;
|
|
@@ -2000,7 +2006,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
|
|
req = (struct cpl_set_tcb_field *)skb->head;
|
|
INIT_TP_WR(req, csk->tid);
|
|
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
|
|
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
|
|
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
|
|
req->word_cookie = htons(0);
|
|
req->mask = cpu_to_be64(0x3 << 8);
|
|
req->val = cpu_to_be64(pg_idx << 8);
|
|
@@ -2009,12 +2015,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
|
|
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
|
|
"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
|
|
|
|
+ reinit_completion(&csk->cmpl);
|
|
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
|
|
- return 0;
|
|
+ wait_for_completion(&csk->cmpl);
|
|
+
|
|
+ return csk->err;
|
|
}
|
|
|
|
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
|
|
- int hcrc, int dcrc, int reply)
|
|
+ int hcrc, int dcrc)
|
|
{
|
|
struct sk_buff *skb;
|
|
struct cpl_set_tcb_field *req;
|
|
@@ -2032,7 +2041,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
|
|
req = (struct cpl_set_tcb_field *)skb->head;
|
|
INIT_TP_WR(req, tid);
|
|
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
|
|
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
|
|
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
|
|
req->word_cookie = htons(0);
|
|
req->mask = cpu_to_be64(0x3 << 4);
|
|
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
|
|
@@ -2042,8 +2051,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
|
|
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
|
|
"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
|
|
|
|
+ reinit_completion(&csk->cmpl);
|
|
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
|
|
- return 0;
|
|
+ wait_for_completion(&csk->cmpl);
|
|
+
|
|
+ return csk->err;
|
|
}
|
|
|
|
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
|
|
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
|
|
index 75f876409fb9d..245742557c036 100644
|
|
--- a/drivers/scsi/cxgbi/libcxgbi.c
|
|
+++ b/drivers/scsi/cxgbi/libcxgbi.c
|
|
@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
|
|
skb_queue_head_init(&csk->receive_queue);
|
|
skb_queue_head_init(&csk->write_queue);
|
|
timer_setup(&csk->retry_timer, NULL, 0);
|
|
+ init_completion(&csk->cmpl);
|
|
rwlock_init(&csk->callback_lock);
|
|
csk->cdev = cdev;
|
|
csk->flags = 0;
|
|
@@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
|
|
if (!err && conn->hdrdgst_en)
|
|
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
|
|
conn->hdrdgst_en,
|
|
- conn->datadgst_en, 0);
|
|
+ conn->datadgst_en);
|
|
break;
|
|
case ISCSI_PARAM_DATADGST_EN:
|
|
err = iscsi_set_param(cls_conn, param, buf, buflen);
|
|
if (!err && conn->datadgst_en)
|
|
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
|
|
conn->hdrdgst_en,
|
|
- conn->datadgst_en, 0);
|
|
+ conn->datadgst_en);
|
|
break;
|
|
case ISCSI_PARAM_MAX_R2T:
|
|
return iscsi_tcp_set_max_r2t(conn, buf);
|
|
@@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
|
|
|
|
ppm = csk->cdev->cdev2ppm(csk->cdev);
|
|
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
|
|
- ppm->tformat.pgsz_idx_dflt, 0);
|
|
+ ppm->tformat.pgsz_idx_dflt);
|
|
if (err < 0)
|
|
return err;
|
|
|
|
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
|
|
index 5d5d8b50d8426..1917ff57651d7 100644
|
|
--- a/drivers/scsi/cxgbi/libcxgbi.h
|
|
+++ b/drivers/scsi/cxgbi/libcxgbi.h
|
|
@@ -149,6 +149,7 @@ struct cxgbi_sock {
|
|
struct sk_buff_head receive_queue;
|
|
struct sk_buff_head write_queue;
|
|
struct timer_list retry_timer;
|
|
+ struct completion cmpl;
|
|
int err;
|
|
rwlock_t callback_lock;
|
|
void *user_data;
|
|
@@ -490,9 +491,9 @@ struct cxgbi_device {
|
|
struct cxgbi_ppm *,
|
|
struct cxgbi_task_tag_info *);
|
|
int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
|
|
- unsigned int, int, int, int);
|
|
+ unsigned int, int, int);
|
|
int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
|
|
- unsigned int, int, bool);
|
|
+ unsigned int, int);
|
|
|
|
void (*csk_release_offload_resources)(struct cxgbi_sock *);
|
|
int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
|
|
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
|
|
index 6637116529aa3..f987c40c47a13 100644
|
|
--- a/drivers/scsi/cxlflash/main.c
|
|
+++ b/drivers/scsi/cxlflash/main.c
|
|
@@ -3694,6 +3694,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
|
|
host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
|
|
|
|
cfg = shost_priv(host);
|
|
+ cfg->state = STATE_PROBING;
|
|
cfg->host = host;
|
|
rc = alloc_mem(cfg);
|
|
if (rc) {
|
|
@@ -3782,6 +3783,7 @@ out:
|
|
return rc;
|
|
|
|
out_remove:
|
|
+ cfg->state = STATE_PROBED;
|
|
cxlflash_remove(pdev);
|
|
goto out;
|
|
}
|
|
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
|
|
index a369450a1fa7b..c3e0be90e19f9 100644
|
|
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
|
|
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
|
|
@@ -494,7 +494,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
|
|
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
|
|
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
|
|
hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
|
|
-
|
|
+ hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
|
|
/* used for 12G negotiate */
|
|
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
|
|
hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
|
|
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
|
|
index 08c7b1e25fe48..dde84f7443136 100644
|
|
--- a/drivers/scsi/isci/init.c
|
|
+++ b/drivers/scsi/isci/init.c
|
|
@@ -588,6 +588,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
|
|
shost->max_lun = ~0;
|
|
shost->max_cmd_len = MAX_COMMAND_SIZE;
|
|
|
|
+ /* turn on DIF support */
|
|
+ scsi_host_set_prot(shost,
|
|
+ SHOST_DIF_TYPE1_PROTECTION |
|
|
+ SHOST_DIF_TYPE2_PROTECTION |
|
|
+ SHOST_DIF_TYPE3_PROTECTION);
|
|
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
|
|
+
|
|
err = scsi_add_host(shost, &pdev->dev);
|
|
if (err)
|
|
goto err_shost;
|
|
@@ -675,13 +682,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
goto err_host_alloc;
|
|
}
|
|
pci_info->hosts[i] = h;
|
|
-
|
|
- /* turn on DIF support */
|
|
- scsi_host_set_prot(to_shost(h),
|
|
- SHOST_DIF_TYPE1_PROTECTION |
|
|
- SHOST_DIF_TYPE2_PROTECTION |
|
|
- SHOST_DIF_TYPE3_PROTECTION);
|
|
- scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
|
|
}
|
|
|
|
err = isci_setup_interrupts(pdev);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
|
|
index f1c1faa74b46c..c2dae02f193ef 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_els.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_els.c
|
|
@@ -242,6 +242,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
|
|
icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
|
|
if (elscmd == ELS_CMD_FLOGI)
|
|
icmd->ulpTimeout = FF_DEF_RATOV * 2;
|
|
+ else if (elscmd == ELS_CMD_LOGO)
|
|
+ icmd->ulpTimeout = phba->fc_ratov;
|
|
else
|
|
icmd->ulpTimeout = phba->fc_ratov * 2;
|
|
} else {
|
|
@@ -2682,16 +2684,15 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|
goto out;
|
|
}
|
|
|
|
+ /* The LOGO will not be retried on failure. A LOGO was
|
|
+ * issued to the remote rport and a ACC or RJT or no Answer are
|
|
+ * all acceptable. Note the failure and move forward with
|
|
+ * discovery. The PLOGI will retry.
|
|
+ */
|
|
if (irsp->ulpStatus) {
|
|
- /* Check for retry */
|
|
- if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
|
|
- /* ELS command is being retried */
|
|
- skip_recovery = 1;
|
|
- goto out;
|
|
- }
|
|
/* LOGO failed */
|
|
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
|
|
- "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
|
|
+ "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
|
|
ndlp->nlp_DID, irsp->ulpStatus,
|
|
irsp->un.ulpWord[4]);
|
|
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
|
|
@@ -2737,7 +2738,8 @@ out:
|
|
* For any other port type, the rpi is unregistered as an implicit
|
|
* LOGO.
|
|
*/
|
|
- if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
|
|
+ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
|
|
+ skip_recovery == 0) {
|
|
lpfc_cancel_retry_delay_tmo(vport, ndlp);
|
|
spin_lock_irqsave(shost->host_lock, flags);
|
|
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
|
|
@@ -2770,6 +2772,8 @@ out:
|
|
* will be stored into the context1 field of the IOCB for the completion
|
|
* callback function to the LOGO ELS command.
|
|
*
|
|
+ * Callers of this routine are expected to unregister the RPI first
|
|
+ *
|
|
* Return code
|
|
* 0 - successfully issued logo
|
|
* 1 - failed to issue logo
|
|
@@ -2811,22 +2815,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
"Issue LOGO: did:x%x",
|
|
ndlp->nlp_DID, 0, 0);
|
|
|
|
- /*
|
|
- * If we are issuing a LOGO, we may try to recover the remote NPort
|
|
- * by issuing a PLOGI later. Even though we issue ELS cmds by the
|
|
- * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
|
|
- * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
|
|
- * for that ELS cmd. To avoid this situation, lets get rid of the
|
|
- * RPI right now, before any ELS cmds are sent.
|
|
- */
|
|
- spin_lock_irq(shost->host_lock);
|
|
- ndlp->nlp_flag |= NLP_ISSUE_LOGO;
|
|
- spin_unlock_irq(shost->host_lock);
|
|
- if (lpfc_unreg_rpi(vport, ndlp)) {
|
|
- lpfc_els_free_iocb(phba, elsiocb);
|
|
- return 0;
|
|
- }
|
|
-
|
|
phba->fc_stat.elsXmitLOGO++;
|
|
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
|
|
spin_lock_irq(shost->host_lock);
|
|
@@ -2834,7 +2822,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
|
|
spin_unlock_irq(shost->host_lock);
|
|
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
|
|
-
|
|
if (rc == IOCB_ERROR) {
|
|
spin_lock_irq(shost->host_lock);
|
|
ndlp->nlp_flag &= ~NLP_LOGO_SND;
|
|
@@ -2842,6 +2829,11 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|
lpfc_els_free_iocb(phba, elsiocb);
|
|
return 1;
|
|
}
|
|
+
|
|
+ spin_lock_irq(shost->host_lock);
|
|
+ ndlp->nlp_prev_state = ndlp->nlp_state;
|
|
+ spin_unlock_irq(shost->host_lock);
|
|
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
|
|
return 0;
|
|
}
|
|
|
|
@@ -5701,6 +5693,9 @@ error:
|
|
stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
|
|
stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
|
|
|
|
+ if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
|
|
+ stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
|
|
+
|
|
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
|
|
phba->fc_stat.elsXmitLSRJT++;
|
|
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
|
|
@@ -9505,7 +9500,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
|
|
"rport in state 0x%x\n", ndlp->nlp_state);
|
|
return;
|
|
}
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
|
+ lpfc_printf_log(phba, KERN_ERR,
|
|
+ LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
|
|
"3094 Start rport recovery on shost id 0x%x "
|
|
"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
|
|
"flags 0x%x\n",
|
|
@@ -9518,8 +9514,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
|
|
*/
|
|
spin_lock_irqsave(shost->host_lock, flags);
|
|
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
|
|
+ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
|
|
spin_unlock_irqrestore(shost->host_lock, flags);
|
|
- lpfc_issue_els_logo(vport, ndlp, 0);
|
|
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
|
|
+ lpfc_unreg_rpi(vport, ndlp);
|
|
}
|
|
|
|
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
index 269808e8480f1..394ffbe9cb6d7 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
|
|
@@ -836,7 +836,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
|
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
|
|
|
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
|
|
+ spin_lock_irq(shost->host_lock);
|
|
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
|
|
+ spin_unlock_irq(shost->host_lock);
|
|
return 0;
|
|
}
|
|
|
|
@@ -851,7 +853,10 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
|
return 1;
|
|
}
|
|
}
|
|
+
|
|
+ spin_lock_irq(shost->host_lock);
|
|
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
|
|
+ spin_unlock_irq(shost->host_lock);
|
|
lpfc_unreg_rpi(vport, ndlp);
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
|
|
index b9e5cd79931a2..462ed4ad21d25 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_sli.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_sli.c
|
|
@@ -14501,7 +14501,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
|
|
hw_page_size))/hw_page_size;
|
|
|
|
/* If needed, Adjust page count to match the max the adapter supports */
|
|
- if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
|
|
+ if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
|
|
+ (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
|
|
queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
|
|
|
|
INIT_LIST_HEAD(&queue->list);
|
|
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
|
|
index 59ecbb3b53b52..a336285504254 100644
|
|
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
|
|
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
|
|
@@ -1266,7 +1266,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
|
|
|
|
for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
|
|
ld = MR_TargetIdToLdGet(ldCount, drv_map);
|
|
- if (ld >= MAX_LOGICAL_DRIVES_EXT) {
|
|
+ if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
|
|
lbInfo[ldCount].loadBalanceFlag = 0;
|
|
continue;
|
|
}
|
|
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
index f74b5ea24f0f3..49eaa87608f69 100644
|
|
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
@@ -2832,7 +2832,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
|
|
device_id < instance->fw_supported_vd_count)) {
|
|
|
|
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
|
|
- if (ld >= instance->fw_supported_vd_count)
|
|
+ if (ld >= instance->fw_supported_vd_count - 1)
|
|
fp_possible = 0;
|
|
else {
|
|
raid = MR_LdRaidGet(ld, local_map_ptr);
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
index 2500377d07231..bfd826deabbe8 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
@@ -3319,8 +3319,9 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
|
|
static inline void
|
|
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
|
|
{
|
|
+ wmb();
|
|
__raw_writeq(b, addr);
|
|
- mmiowb();
|
|
+ barrier();
|
|
}
|
|
#else
|
|
static inline void
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
index 03c52847ed071..adac18ba84d42 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
@@ -9641,6 +9641,7 @@ static void scsih_remove(struct pci_dev *pdev)
|
|
|
|
/* release all the volumes */
|
|
_scsih_ir_shutdown(ioc);
|
|
+ sas_remove_host(shost);
|
|
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
|
|
list) {
|
|
if (raid_device->starget) {
|
|
@@ -9682,7 +9683,6 @@ static void scsih_remove(struct pci_dev *pdev)
|
|
ioc->sas_hba.num_phys = 0;
|
|
}
|
|
|
|
- sas_remove_host(shost);
|
|
mpt3sas_base_detach(ioc);
|
|
spin_lock(&gioc_lock);
|
|
list_del(&ioc->list);
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
|
|
index 6a8a3c09b4b11..8338b4db0e31a 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
|
|
@@ -821,10 +821,13 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
|
|
mpt3sas_port->remote_identify.sas_address,
|
|
mpt3sas_phy->phy_id);
|
|
mpt3sas_phy->phy_belongs_to_port = 0;
|
|
- sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
|
|
+ if (!ioc->remove_host)
|
|
+ sas_port_delete_phy(mpt3sas_port->port,
|
|
+ mpt3sas_phy->phy);
|
|
list_del(&mpt3sas_phy->port_siblings);
|
|
}
|
|
- sas_port_delete(mpt3sas_port->port);
|
|
+ if (!ioc->remove_host)
|
|
+ sas_port_delete(mpt3sas_port->port);
|
|
kfree(mpt3sas_port);
|
|
}
|
|
|
|
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
|
|
index 2f0a4f2c5ff80..d4821b9dea45d 100644
|
|
--- a/drivers/scsi/qedi/qedi_iscsi.c
|
|
+++ b/drivers/scsi/qedi/qedi_iscsi.c
|
|
@@ -954,6 +954,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
|
|
|
|
qedi_ep = ep->dd_data;
|
|
if (qedi_ep->state == EP_STATE_IDLE ||
|
|
+ qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
|
|
qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
|
|
return -1;
|
|
|
|
@@ -1036,6 +1037,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
|
|
|
|
switch (qedi_ep->state) {
|
|
case EP_STATE_OFLDCONN_START:
|
|
+ case EP_STATE_OFLDCONN_NONE:
|
|
goto ep_release_conn;
|
|
case EP_STATE_OFLDCONN_FAILED:
|
|
break;
|
|
@@ -1226,6 +1228,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
|
|
|
|
if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
|
|
QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
|
|
+ qedi_ep->state = EP_STATE_OFLDCONN_NONE;
|
|
ret = -EIO;
|
|
goto set_path_exit;
|
|
}
|
|
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
|
|
index 11260776212fa..892d70d545537 100644
|
|
--- a/drivers/scsi/qedi/qedi_iscsi.h
|
|
+++ b/drivers/scsi/qedi/qedi_iscsi.h
|
|
@@ -59,6 +59,7 @@ enum {
|
|
EP_STATE_OFLDCONN_FAILED = 0x2000,
|
|
EP_STATE_CONNECT_FAILED = 0x4000,
|
|
EP_STATE_DISCONN_TIMEDOUT = 0x8000,
|
|
+ EP_STATE_OFLDCONN_NONE = 0x10000,
|
|
};
|
|
|
|
struct qedi_conn;
|
|
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
|
|
index 105b0e4d78180..5d7d018dad6e7 100644
|
|
--- a/drivers/scsi/qedi/qedi_main.c
|
|
+++ b/drivers/scsi/qedi/qedi_main.c
|
|
@@ -952,6 +952,9 @@ static int qedi_find_boot_info(struct qedi_ctx *qedi,
|
|
cls_sess = iscsi_conn_to_session(cls_conn);
|
|
sess = cls_sess->dd_data;
|
|
|
|
+ if (!iscsi_is_session_online(cls_sess))
|
|
+ continue;
|
|
+
|
|
if (pri_ctrl_flags) {
|
|
if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
|
|
!strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
|
|
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
|
|
index 15a50cc7e4b36..c8589926014d4 100644
|
|
--- a/drivers/scsi/qla1280.c
|
|
+++ b/drivers/scsi/qla1280.c
|
|
@@ -4259,7 +4259,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
ha->devnum = devnum; /* specifies microcode load address */
|
|
|
|
#ifdef QLA_64BIT_PTR
|
|
- if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
|
|
+ if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
|
|
if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
|
|
printk(KERN_WARNING "scsi(%li): Unable to set a "
|
|
"suitable DMA mask - aborting\n", ha->host_no);
|
|
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
|
|
index 051164f755a4c..a13396c56a6a1 100644
|
|
--- a/drivers/scsi/qla4xxx/ql4_os.c
|
|
+++ b/drivers/scsi/qla4xxx/ql4_os.c
|
|
@@ -7237,6 +7237,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
|
|
|
|
rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
|
|
fw_ddb_entry);
|
|
+ if (rc)
|
|
+ goto free_sess;
|
|
|
|
ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
|
|
__func__, fnode_sess->dev.kobj.name);
|
|
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
|
|
index a2b4179bfdf7b..7639df91b1108 100644
|
|
--- a/drivers/scsi/scsi_pm.c
|
|
+++ b/drivers/scsi/scsi_pm.c
|
|
@@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev,
|
|
|
|
if (err == 0) {
|
|
pm_runtime_disable(dev);
|
|
- pm_runtime_set_active(dev);
|
|
+ err = pm_runtime_set_active(dev);
|
|
pm_runtime_enable(dev);
|
|
+
|
|
+ /*
|
|
+ * Forcibly set runtime PM status of request queue to "active"
|
|
+ * to make sure we can again get requests from the queue
|
|
+ * (see also blk_pm_peek_request()).
|
|
+ *
|
|
+ * The resume hook will correct runtime PM status of the disk.
|
|
+ */
|
|
+ if (!err && scsi_is_sdev_device(dev)) {
|
|
+ struct scsi_device *sdev = to_scsi_device(dev);
|
|
+
|
|
+ if (sdev->request_queue->dev)
|
|
+ blk_set_runtime_active(sdev->request_queue);
|
|
+ }
|
|
}
|
|
|
|
return err;
|
|
@@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev,
|
|
else
|
|
fn = NULL;
|
|
|
|
- /*
|
|
- * Forcibly set runtime PM status of request queue to "active" to
|
|
- * make sure we can again get requests from the queue (see also
|
|
- * blk_pm_peek_request()).
|
|
- *
|
|
- * The resume hook will correct runtime PM status of the disk.
|
|
- */
|
|
- if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
|
|
- blk_set_runtime_active(to_scsi_device(dev)->request_queue);
|
|
-
|
|
if (fn) {
|
|
async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
|
|
|
|
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
|
|
index bd0a5c694a97e..c0e21433b1d84 100644
|
|
--- a/drivers/scsi/sd.c
|
|
+++ b/drivers/scsi/sd.c
|
|
@@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
|
|
sp = buffer_data[0] & 0x80 ? 1 : 0;
|
|
buffer_data[0] &= ~0x80;
|
|
|
|
+ /*
|
|
+ * Ensure WP, DPOFUA, and RESERVED fields are cleared in
|
|
+ * received mode parameter buffer before doing MODE SELECT.
|
|
+ */
|
|
+ data.device_specific = 0;
|
|
+
|
|
if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
|
|
SD_MAX_RETRIES, &data, &sshdr)) {
|
|
if (scsi_sense_valid(&sshdr))
|
|
@@ -2954,9 +2960,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
|
|
if (rot == 1) {
|
|
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
|
|
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
|
|
- } else {
|
|
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
|
|
- blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
|
|
}
|
|
|
|
if (sdkp->device->type == TYPE_ZBC) {
|
|
@@ -3093,6 +3096,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
|
|
if (sdkp->media_present) {
|
|
sd_read_capacity(sdkp, buffer);
|
|
|
|
+ /*
|
|
+ * set the default to rotational. All non-rotational devices
|
|
+ * support the block characteristics VPD page, which will
|
|
+ * cause this to be updated correctly and any device which
|
|
+ * doesn't support it should be treated as rotational.
|
|
+ */
|
|
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
|
|
+ blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
|
|
+
|
|
if (scsi_device_supports_vpd(sdp)) {
|
|
sd_read_block_provisioning(sdkp);
|
|
sd_read_block_limits(sdkp);
|
|
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
|
|
index e06c48c866e4b..7d8442c377dfa 100644
|
|
--- a/drivers/scsi/sd_zbc.c
|
|
+++ b/drivers/scsi/sd_zbc.c
|
|
@@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
|
|
return -EOPNOTSUPP;
|
|
|
|
/*
|
|
- * Get a reply buffer for the number of requested zones plus a header.
|
|
- * For ATA, buffers must be aligned to 512B.
|
|
+ * Get a reply buffer for the number of requested zones plus a header,
|
|
+ * without exceeding the device maximum command size. For ATA disks,
|
|
+ * buffers must be aligned to 512B.
|
|
*/
|
|
- buflen = roundup((nrz + 1) * 64, 512);
|
|
+ buflen = min(queue_max_hw_sectors(disk->queue) << 9,
|
|
+ roundup((nrz + 1) * 64, 512));
|
|
buf = kmalloc(buflen, gfp_mask);
|
|
if (!buf)
|
|
return -ENOMEM;
|
|
@@ -462,12 +464,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
|
|
sdkp->device->use_10_for_rw = 0;
|
|
|
|
/*
|
|
- * If something changed, revalidate the disk zone bitmaps once we have
|
|
- * the capacity, that is on the second revalidate execution during disk
|
|
- * scan and always during normal revalidate.
|
|
+ * Revalidate the disk zone bitmaps once the block device capacity is
|
|
+ * set on the second revalidate execution during disk scan and if
|
|
+ * something changed when executing a normal revalidate.
|
|
*/
|
|
- if (sdkp->first_scan)
|
|
+ if (sdkp->first_scan) {
|
|
+ sdkp->zone_blocks = zone_blocks;
|
|
+ sdkp->nr_zones = nr_zones;
|
|
return 0;
|
|
+ }
|
|
+
|
|
if (sdkp->zone_blocks != zone_blocks ||
|
|
sdkp->nr_zones != nr_zones ||
|
|
disk->queue->nr_zones != nr_zones) {
|
|
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
|
|
index a25a07a0b7f0a..489e5cbbcbba8 100644
|
|
--- a/drivers/scsi/smartpqi/smartpqi_init.c
|
|
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
|
|
@@ -640,6 +640,7 @@ struct bmic_host_wellness_driver_version {
|
|
u8 driver_version_tag[2];
|
|
__le16 driver_version_length;
|
|
char driver_version[32];
|
|
+ u8 dont_write_tag[2];
|
|
u8 end_tag[2];
|
|
};
|
|
|
|
@@ -669,6 +670,8 @@ static int pqi_write_driver_version_to_host_wellness(
|
|
strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
|
|
sizeof(buffer->driver_version) - 1);
|
|
buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
|
|
+ buffer->dont_write_tag[0] = 'D';
|
|
+ buffer->dont_write_tag[1] = 'W';
|
|
buffer->end_tag[0] = 'Z';
|
|
buffer->end_tag[1] = 'Z';
|
|
|
|
@@ -1165,6 +1168,9 @@ static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
|
|
if (rc)
|
|
goto out;
|
|
|
|
+ if (vpd->page_code != CISS_VPD_LV_STATUS)
|
|
+ goto out;
|
|
+
|
|
page_length = offsetof(struct ciss_vpd_logical_volume_status,
|
|
volume_status) + vpd->page_length;
|
|
if (page_length < sizeof(*vpd))
|
|
@@ -2704,6 +2710,9 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
|
|
switch (response->header.iu_type) {
|
|
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
|
|
case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
|
|
+ if (io_request->scmd)
|
|
+ io_request->scmd->result = 0;
|
|
+ /* fall through */
|
|
case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
|
|
break;
|
|
case PQI_RESPONSE_IU_TASK_MANAGEMENT:
|
|
@@ -6670,6 +6679,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
|
|
* storage.
|
|
*/
|
|
rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
|
|
+ pqi_free_interrupts(ctrl_info);
|
|
pqi_reset(ctrl_info);
|
|
if (rc == 0)
|
|
return;
|
|
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
|
|
index ea91658c7060d..9d3043df22af4 100644
|
|
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
|
|
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
|
|
@@ -59,7 +59,7 @@
|
|
|
|
#define SIS_CTRL_KERNEL_UP 0x80
|
|
#define SIS_CTRL_KERNEL_PANIC 0x100
|
|
-#define SIS_CTRL_READY_TIMEOUT_SECS 30
|
|
+#define SIS_CTRL_READY_TIMEOUT_SECS 180
|
|
#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90
|
|
#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
|
|
|
|
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
|
|
index 58087d3916d05..5417ce09b1054 100644
|
|
--- a/drivers/scsi/ufs/ufs.h
|
|
+++ b/drivers/scsi/ufs/ufs.h
|
|
@@ -195,7 +195,7 @@ enum ufs_desc_def_size {
|
|
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
|
|
QUERY_DESC_UNIT_DEF_SIZE = 0x23,
|
|
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
|
|
- QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
|
|
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
|
|
QUERY_DESC_POWER_DEF_SIZE = 0x62,
|
|
QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
|
|
};
|
|
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
|
|
index f1c57cd33b5ba..2772ff4357fc4 100644
|
|
--- a/drivers/scsi/ufs/ufshcd.c
|
|
+++ b/drivers/scsi/ufs/ufshcd.c
|
|
@@ -110,13 +110,19 @@
|
|
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
|
|
const char *prefix)
|
|
{
|
|
- u8 *regs;
|
|
+ u32 *regs;
|
|
+ size_t pos;
|
|
+
|
|
+ if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
|
|
+ return -EINVAL;
|
|
|
|
regs = kzalloc(len, GFP_KERNEL);
|
|
if (!regs)
|
|
return -ENOMEM;
|
|
|
|
- memcpy_fromio(regs, hba->mmio_base + offset, len);
|
|
+ for (pos = 0; pos < len; pos += 4)
|
|
+ regs[pos / 4] = ufshcd_readl(hba, offset + pos);
|
|
+
|
|
ufshcd_hex_dump(prefix, regs, len);
|
|
kfree(regs);
|
|
|
|
@@ -7918,6 +7924,8 @@ out:
|
|
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
|
|
ktime_to_us(ktime_sub(ktime_get(), start)),
|
|
hba->curr_dev_pwr_mode, hba->uic_link_state);
|
|
+ if (!ret)
|
|
+ hba->is_sys_suspended = false;
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(ufshcd_system_resume);
|
|
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
|
|
index 14185451901de..bf9123f727e80 100644
|
|
--- a/drivers/soc/bcm/brcmstb/common.c
|
|
+++ b/drivers/soc/bcm/brcmstb/common.c
|
|
@@ -31,13 +31,17 @@ static const struct of_device_id brcmstb_machine_match[] = {
|
|
|
|
bool soc_is_brcmstb(void)
|
|
{
|
|
+ const struct of_device_id *match;
|
|
struct device_node *root;
|
|
|
|
root = of_find_node_by_path("/");
|
|
if (!root)
|
|
return false;
|
|
|
|
- return of_match_node(brcmstb_machine_match, root) != NULL;
|
|
+ match = of_match_node(brcmstb_machine_match, root);
|
|
+ of_node_put(root);
|
|
+
|
|
+ return match != NULL;
|
|
}
|
|
|
|
u32 brcmstb_get_family_id(void)
|
|
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
|
|
index f78c34647ca2d..76480df195a87 100644
|
|
--- a/drivers/soc/fsl/qe/qe_tdm.c
|
|
+++ b/drivers/soc/fsl/qe/qe_tdm.c
|
|
@@ -44,10 +44,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
|
|
const char *sprop;
|
|
int ret = 0;
|
|
u32 val;
|
|
- struct resource *res;
|
|
- struct device_node *np2;
|
|
- static int siram_init_flag;
|
|
- struct platform_device *pdev;
|
|
|
|
sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
|
|
if (sprop) {
|
|
@@ -124,57 +120,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
|
|
utdm->siram_entry_id = val;
|
|
|
|
set_si_param(utdm, ut_info);
|
|
-
|
|
- np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
|
|
- if (!np2)
|
|
- return -EINVAL;
|
|
-
|
|
- pdev = of_find_device_by_node(np2);
|
|
- if (!pdev) {
|
|
- pr_err("%pOFn: failed to lookup pdev\n", np2);
|
|
- of_node_put(np2);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- of_node_put(np2);
|
|
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
- utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
|
|
- if (IS_ERR(utdm->si_regs)) {
|
|
- ret = PTR_ERR(utdm->si_regs);
|
|
- goto err_miss_siram_property;
|
|
- }
|
|
-
|
|
- np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
|
|
- if (!np2) {
|
|
- ret = -EINVAL;
|
|
- goto err_miss_siram_property;
|
|
- }
|
|
-
|
|
- pdev = of_find_device_by_node(np2);
|
|
- if (!pdev) {
|
|
- ret = -EINVAL;
|
|
- pr_err("%pOFn: failed to lookup pdev\n", np2);
|
|
- of_node_put(np2);
|
|
- goto err_miss_siram_property;
|
|
- }
|
|
-
|
|
- of_node_put(np2);
|
|
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
- utdm->siram = devm_ioremap_resource(&pdev->dev, res);
|
|
- if (IS_ERR(utdm->siram)) {
|
|
- ret = PTR_ERR(utdm->siram);
|
|
- goto err_miss_siram_property;
|
|
- }
|
|
-
|
|
- if (siram_init_flag == 0) {
|
|
- memset_io(utdm->siram, 0, resource_size(res));
|
|
- siram_init_flag = 1;
|
|
- }
|
|
-
|
|
- return ret;
|
|
-
|
|
-err_miss_siram_property:
|
|
- devm_iounmap(&pdev->dev, utdm->si_regs);
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(ucc_of_parse_tdm);
|
|
diff --git a/drivers/soc/renesas/r8a774c0-sysc.c b/drivers/soc/renesas/r8a774c0-sysc.c
|
|
index e1ac4c0f66408..11050e17ea81f 100644
|
|
--- a/drivers/soc/renesas/r8a774c0-sysc.c
|
|
+++ b/drivers/soc/renesas/r8a774c0-sysc.c
|
|
@@ -28,19 +28,6 @@ static struct rcar_sysc_area r8a774c0_areas[] __initdata = {
|
|
{ "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A },
|
|
};
|
|
|
|
-static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas,
|
|
- unsigned int num_areas, u8 id,
|
|
- int new_parent)
|
|
-{
|
|
- unsigned int i;
|
|
-
|
|
- for (i = 0; i < num_areas; i++)
|
|
- if (areas[i].isr_bit == id) {
|
|
- areas[i].parent = new_parent;
|
|
- return;
|
|
- }
|
|
-}
|
|
-
|
|
/* Fixups for RZ/G2E ES1.0 revision */
|
|
static const struct soc_device_attribute r8a774c0[] __initconst = {
|
|
{ .soc_id = "r8a774c0", .revision = "ES1.0" },
|
|
@@ -50,12 +37,10 @@ static const struct soc_device_attribute r8a774c0[] __initconst = {
|
|
static int __init r8a774c0_sysc_init(void)
|
|
{
|
|
if (soc_device_match(r8a774c0)) {
|
|
- rcar_sysc_fix_parent(r8a774c0_areas,
|
|
- ARRAY_SIZE(r8a774c0_areas),
|
|
- R8A774C0_PD_3DG_A, R8A774C0_PD_3DG_B);
|
|
- rcar_sysc_fix_parent(r8a774c0_areas,
|
|
- ARRAY_SIZE(r8a774c0_areas),
|
|
- R8A774C0_PD_3DG_B, R8A774C0_PD_ALWAYS_ON);
|
|
+ /* Fix incorrect 3DG hierarchy */
|
|
+ swap(r8a774c0_areas[6], r8a774c0_areas[7]);
|
|
+ r8a774c0_areas[6].parent = R8A774C0_PD_ALWAYS_ON;
|
|
+ r8a774c0_areas[7].parent = R8A774C0_PD_3DG_B;
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
|
|
index cd8f41351addf..7bfb154d6fa5e 100644
|
|
--- a/drivers/soc/tegra/common.c
|
|
+++ b/drivers/soc/tegra/common.c
|
|
@@ -22,11 +22,15 @@ static const struct of_device_id tegra_machine_match[] = {
|
|
|
|
bool soc_is_tegra(void)
|
|
{
|
|
+ const struct of_device_id *match;
|
|
struct device_node *root;
|
|
|
|
root = of_find_node_by_path("/");
|
|
if (!root)
|
|
return false;
|
|
|
|
- return of_match_node(tegra_machine_match, root) != NULL;
|
|
+ match = of_match_node(tegra_machine_match, root);
|
|
+ of_node_put(root);
|
|
+
|
|
+ return match != NULL;
|
|
}
|
|
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
|
|
index a924657642fa8..08bcbd1f9aa25 100644
|
|
--- a/drivers/spi/spi-at91-usart.c
|
|
+++ b/drivers/spi/spi-at91-usart.c
|
|
@@ -12,6 +12,7 @@
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
#include <linux/of_gpio.h>
|
|
+#include <linux/pinctrl/consumer.h>
|
|
#include <linux/platform_device.h>
|
|
|
|
#include <linux/spi/spi.h>
|
|
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
|
|
index f35cc10772f66..25abf2d1732a0 100644
|
|
--- a/drivers/spi/spi-bcm2835.c
|
|
+++ b/drivers/spi/spi-bcm2835.c
|
|
@@ -88,7 +88,7 @@ struct bcm2835_spi {
|
|
u8 *rx_buf;
|
|
int tx_len;
|
|
int rx_len;
|
|
- bool dma_pending;
|
|
+ unsigned int dma_pending;
|
|
};
|
|
|
|
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
|
|
@@ -155,8 +155,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
|
|
/* Write as many bytes as possible to FIFO */
|
|
bcm2835_wr_fifo(bs);
|
|
|
|
- /* based on flags decide if we can finish the transfer */
|
|
- if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
|
|
+ if (!bs->rx_len) {
|
|
/* Transfer complete - reset SPI HW */
|
|
bcm2835_spi_reset_hw(master);
|
|
/* wake up the framework */
|
|
@@ -233,10 +232,9 @@ static void bcm2835_spi_dma_done(void *data)
|
|
* is called the tx-dma must have finished - can't get to this
|
|
* situation otherwise...
|
|
*/
|
|
- dmaengine_terminate_all(master->dma_tx);
|
|
-
|
|
- /* mark as no longer pending */
|
|
- bs->dma_pending = 0;
|
|
+ if (cmpxchg(&bs->dma_pending, true, false)) {
|
|
+ dmaengine_terminate_all(master->dma_tx);
|
|
+ }
|
|
|
|
/* and mark as completed */;
|
|
complete(&master->xfer_completion);
|
|
@@ -342,6 +340,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
|
|
if (ret) {
|
|
/* need to reset on errors */
|
|
dmaengine_terminate_all(master->dma_tx);
|
|
+ bs->dma_pending = false;
|
|
bcm2835_spi_reset_hw(master);
|
|
return ret;
|
|
}
|
|
@@ -617,10 +616,9 @@ static void bcm2835_spi_handle_err(struct spi_master *master,
|
|
struct bcm2835_spi *bs = spi_master_get_devdata(master);
|
|
|
|
/* if an error occurred and we have an active dma, then terminate */
|
|
- if (bs->dma_pending) {
|
|
+ if (cmpxchg(&bs->dma_pending, true, false)) {
|
|
dmaengine_terminate_all(master->dma_tx);
|
|
dmaengine_terminate_all(master->dma_rx);
|
|
- bs->dma_pending = 0;
|
|
}
|
|
/* and reset */
|
|
bcm2835_spi_reset_hw(master);
|
|
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
|
|
index d1cb0d78ab844..e44ca93dcdc68 100644
|
|
--- a/drivers/staging/erofs/dir.c
|
|
+++ b/drivers/staging/erofs/dir.c
|
|
@@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct dir_context *ctx,
|
|
strnlen(de_name, maxsize - nameoff) :
|
|
le16_to_cpu(de[1].nameoff) - nameoff;
|
|
|
|
- /* the corrupted directory found */
|
|
- BUG_ON(de_namelen < 0);
|
|
+ /* a corrupted entry is found */
|
|
+ if (unlikely(de_namelen < 0)) {
|
|
+ DBG_BUGON(1);
|
|
+ return -EIO;
|
|
+ }
|
|
|
|
#ifdef CONFIG_EROFS_FS_DEBUG
|
|
dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
|
|
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
|
|
index 04c61a9d7b766..d7fbf5f4600f3 100644
|
|
--- a/drivers/staging/erofs/inode.c
|
|
+++ b/drivers/staging/erofs/inode.c
|
|
@@ -133,7 +133,13 @@ static int fill_inline_data(struct inode *inode, void *data,
|
|
return -ENOMEM;
|
|
|
|
m_pofs += vi->inode_isize + vi->xattr_isize;
|
|
- BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
|
|
+
|
|
+ /* inline symlink data shouldn't across page boundary as well */
|
|
+ if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
|
|
+ DBG_BUGON(1);
|
|
+ kfree(lnk);
|
|
+ return -EIO;
|
|
+ }
|
|
|
|
/* get in-page inline data */
|
|
memcpy(lnk, data + m_pofs, inode->i_size);
|
|
@@ -171,7 +177,7 @@ static int fill_inode(struct inode *inode, int isdir)
|
|
return PTR_ERR(page);
|
|
}
|
|
|
|
- BUG_ON(!PageUptodate(page));
|
|
+ DBG_BUGON(!PageUptodate(page));
|
|
data = page_address(page);
|
|
|
|
err = read_inode(inode, data + ofs);
|
|
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
|
|
index 57575c7f56355..8929443558676 100644
|
|
--- a/drivers/staging/erofs/internal.h
|
|
+++ b/drivers/staging/erofs/internal.h
|
|
@@ -39,7 +39,7 @@
|
|
#define debugln(x, ...) ((void)0)
|
|
|
|
#define dbg_might_sleep() ((void)0)
|
|
-#define DBG_BUGON(...) ((void)0)
|
|
+#define DBG_BUGON(x) ((void)(x))
|
|
#endif
|
|
|
|
enum {
|
|
@@ -194,50 +194,70 @@ struct erofs_workgroup {
|
|
|
|
#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
|
|
|
|
-static inline bool erofs_workgroup_try_to_freeze(
|
|
- struct erofs_workgroup *grp, int v)
|
|
+#if defined(CONFIG_SMP)
|
|
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
|
|
+ int val)
|
|
{
|
|
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
|
|
- if (v != atomic_cmpxchg(&grp->refcount,
|
|
- v, EROFS_LOCKED_MAGIC))
|
|
- return false;
|
|
preempt_disable();
|
|
+ if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
|
|
+ preempt_enable();
|
|
+ return false;
|
|
+ }
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
|
|
+ int orig_val)
|
|
+{
|
|
+ /*
|
|
+ * other observers should notice all modifications
|
|
+ * in the freezing period.
|
|
+ */
|
|
+ smp_mb();
|
|
+ atomic_set(&grp->refcount, orig_val);
|
|
+ preempt_enable();
|
|
+}
|
|
+
|
|
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
|
|
+{
|
|
+ return atomic_cond_read_relaxed(&grp->refcount,
|
|
+ VAL != EROFS_LOCKED_MAGIC);
|
|
+}
|
|
#else
|
|
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
|
|
+ int val)
|
|
+{
|
|
preempt_disable();
|
|
- if (atomic_read(&grp->refcount) != v) {
|
|
+ /* no need to spin on UP platforms, let's just disable preemption. */
|
|
+ if (val != atomic_read(&grp->refcount)) {
|
|
preempt_enable();
|
|
return false;
|
|
}
|
|
-#endif
|
|
return true;
|
|
}
|
|
|
|
-static inline void erofs_workgroup_unfreeze(
|
|
- struct erofs_workgroup *grp, int v)
|
|
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
|
|
+ int orig_val)
|
|
{
|
|
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
|
|
- atomic_set(&grp->refcount, v);
|
|
-#endif
|
|
preempt_enable();
|
|
}
|
|
|
|
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
|
|
+{
|
|
+ int v = atomic_read(&grp->refcount);
|
|
+
|
|
+ /* workgroup is never freezed on uniprocessor systems */
|
|
+ DBG_BUGON(v == EROFS_LOCKED_MAGIC);
|
|
+ return v;
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
|
|
{
|
|
- const int locked = (int)EROFS_LOCKED_MAGIC;
|
|
int o;
|
|
|
|
repeat:
|
|
- o = atomic_read(&grp->refcount);
|
|
-
|
|
- /* spin if it is temporarily locked at the reclaim path */
|
|
- if (unlikely(o == locked)) {
|
|
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
|
|
- do
|
|
- cpu_relax();
|
|
- while (atomic_read(&grp->refcount) == locked);
|
|
-#endif
|
|
- goto repeat;
|
|
- }
|
|
+ o = erofs_wait_on_workgroup_freezed(grp);
|
|
|
|
if (unlikely(o <= 0))
|
|
return -1;
|
|
@@ -250,6 +270,7 @@ repeat:
|
|
}
|
|
|
|
#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
|
|
+#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount)
|
|
|
|
extern int erofs_workgroup_put(struct erofs_workgroup *grp);
|
|
|
|
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
|
|
index f69e619807a17..1c2eb69682efc 100644
|
|
--- a/drivers/staging/erofs/super.c
|
|
+++ b/drivers/staging/erofs/super.c
|
|
@@ -40,7 +40,6 @@ static int __init erofs_init_inode_cache(void)
|
|
|
|
static void erofs_exit_inode_cache(void)
|
|
{
|
|
- BUG_ON(erofs_inode_cachep == NULL);
|
|
kmem_cache_destroy(erofs_inode_cachep);
|
|
}
|
|
|
|
@@ -303,8 +302,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
|
|
int ret = 1; /* 0 - busy */
|
|
struct address_space *const mapping = page->mapping;
|
|
|
|
- BUG_ON(!PageLocked(page));
|
|
- BUG_ON(mapping->a_ops != &managed_cache_aops);
|
|
+ DBG_BUGON(!PageLocked(page));
|
|
+ DBG_BUGON(mapping->a_ops != &managed_cache_aops);
|
|
|
|
if (PagePrivate(page))
|
|
ret = erofs_try_to_free_cached_page(mapping, page);
|
|
@@ -317,10 +316,10 @@ static void managed_cache_invalidatepage(struct page *page,
|
|
{
|
|
const unsigned int stop = length + offset;
|
|
|
|
- BUG_ON(!PageLocked(page));
|
|
+ DBG_BUGON(!PageLocked(page));
|
|
|
|
- /* Check for overflow */
|
|
- BUG_ON(stop > PAGE_SIZE || stop < length);
|
|
+ /* Check for potential overflow in debug mode */
|
|
+ DBG_BUGON(stop > PAGE_SIZE || stop < length);
|
|
|
|
if (offset == 0 && stop == PAGE_SIZE)
|
|
while (!managed_cache_releasepage(page, GFP_NOFS))
|
|
@@ -442,12 +441,6 @@ static int erofs_read_super(struct super_block *sb,
|
|
|
|
erofs_register_super(sb);
|
|
|
|
- /*
|
|
- * We already have a positive dentry, which was instantiated
|
|
- * by d_make_root. Just need to d_rehash it.
|
|
- */
|
|
- d_rehash(sb->s_root);
|
|
-
|
|
if (!silent)
|
|
infoln("mounted on %s with opts: %s.", dev_name,
|
|
(char *)data);
|
|
@@ -655,7 +648,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
|
|
unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
|
|
int err;
|
|
|
|
- BUG_ON(!sb_rdonly(sb));
|
|
+ DBG_BUGON(!sb_rdonly(sb));
|
|
err = parse_options(sb, data);
|
|
if (err)
|
|
goto out;
|
|
diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
|
|
index 0956615b86f72..23856ba2742d8 100644
|
|
--- a/drivers/staging/erofs/unzip_pagevec.h
|
|
+++ b/drivers/staging/erofs/unzip_pagevec.h
|
|
@@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
|
|
erofs_vtptr_t t;
|
|
|
|
if (unlikely(ctor->index >= ctor->nr)) {
|
|
- BUG_ON(ctor->next == NULL);
|
|
+ DBG_BUGON(!ctor->next);
|
|
z_erofs_pagevec_ctor_pagedown(ctor, true);
|
|
}
|
|
|
|
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
|
|
index 79d3ba62b2985..1c4b3e0343f58 100644
|
|
--- a/drivers/staging/erofs/unzip_vle.c
|
|
+++ b/drivers/staging/erofs/unzip_vle.c
|
|
@@ -20,9 +20,6 @@ static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
|
|
|
|
void z_erofs_exit_zip_subsystem(void)
|
|
{
|
|
- BUG_ON(z_erofs_workqueue == NULL);
|
|
- BUG_ON(z_erofs_workgroup_cachep == NULL);
|
|
-
|
|
destroy_workqueue(z_erofs_workqueue);
|
|
kmem_cache_destroy(z_erofs_workgroup_cachep);
|
|
}
|
|
@@ -366,7 +363,10 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
|
|
struct z_erofs_vle_work *work;
|
|
|
|
/* if multiref is disabled, grp should never be nullptr */
|
|
- BUG_ON(grp != NULL);
|
|
+ if (unlikely(grp)) {
|
|
+ DBG_BUGON(1);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
|
|
/* no available workgroup, let's allocate one */
|
|
grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
|
|
@@ -717,13 +717,18 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
|
|
struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
|
|
bool background = tagptr_unfold_tags(t);
|
|
|
|
- if (atomic_add_return(bios, &io->pending_bios))
|
|
+ if (!background) {
|
|
+ unsigned long flags;
|
|
+
|
|
+ spin_lock_irqsave(&io->u.wait.lock, flags);
|
|
+ if (!atomic_add_return(bios, &io->pending_bios))
|
|
+ wake_up_locked(&io->u.wait);
|
|
+ spin_unlock_irqrestore(&io->u.wait.lock, flags);
|
|
return;
|
|
+ }
|
|
|
|
- if (background)
|
|
+ if (!atomic_add_return(bios, &io->pending_bios))
|
|
queue_work(z_erofs_workqueue, &io->u.work);
|
|
- else
|
|
- wake_up(&io->u.wait);
|
|
}
|
|
|
|
static inline void z_erofs_vle_read_endio(struct bio *bio)
|
|
@@ -740,7 +745,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
|
|
bool cachemngd = false;
|
|
|
|
DBG_BUGON(PageUptodate(page));
|
|
- BUG_ON(page->mapping == NULL);
|
|
+ DBG_BUGON(!page->mapping);
|
|
|
|
#ifdef EROFS_FS_HAS_MANAGED_CACHE
|
|
if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
|
|
@@ -798,7 +803,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
|
|
|
|
might_sleep();
|
|
work = z_erofs_vle_grab_primary_work(grp);
|
|
- BUG_ON(!READ_ONCE(work->nr_pages));
|
|
+ DBG_BUGON(!READ_ONCE(work->nr_pages));
|
|
|
|
mutex_lock(&work->lock);
|
|
nr_pages = work->nr_pages;
|
|
@@ -847,8 +852,8 @@ repeat:
|
|
else
|
|
pagenr = z_erofs_onlinepage_index(page);
|
|
|
|
- BUG_ON(pagenr >= nr_pages);
|
|
- BUG_ON(pages[pagenr] != NULL);
|
|
+ DBG_BUGON(pagenr >= nr_pages);
|
|
+ DBG_BUGON(pages[pagenr]);
|
|
|
|
pages[pagenr] = page;
|
|
}
|
|
@@ -871,9 +876,8 @@ repeat:
|
|
if (z_erofs_is_stagingpage(page))
|
|
continue;
|
|
#ifdef EROFS_FS_HAS_MANAGED_CACHE
|
|
- else if (page->mapping == mngda) {
|
|
- BUG_ON(PageLocked(page));
|
|
- BUG_ON(!PageUptodate(page));
|
|
+ if (page->mapping == mngda) {
|
|
+ DBG_BUGON(!PageUptodate(page));
|
|
continue;
|
|
}
|
|
#endif
|
|
@@ -881,8 +885,8 @@ repeat:
|
|
/* only non-head page could be reused as a compressed page */
|
|
pagenr = z_erofs_onlinepage_index(page);
|
|
|
|
- BUG_ON(pagenr >= nr_pages);
|
|
- BUG_ON(pages[pagenr] != NULL);
|
|
+ DBG_BUGON(pagenr >= nr_pages);
|
|
+ DBG_BUGON(pages[pagenr]);
|
|
++sparsemem_pages;
|
|
pages[pagenr] = page;
|
|
|
|
@@ -892,9 +896,6 @@ repeat:
|
|
llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
|
|
|
|
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
|
|
- /* FIXME! this should be fixed in the future */
|
|
- BUG_ON(grp->llen != llen);
|
|
-
|
|
err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
|
|
pages, nr_pages, work->pageofs);
|
|
goto out;
|
|
@@ -909,10 +910,8 @@ repeat:
|
|
if (err != -ENOTSUPP)
|
|
goto out_percpu;
|
|
|
|
- if (sparsemem_pages >= nr_pages) {
|
|
- BUG_ON(sparsemem_pages > nr_pages);
|
|
+ if (sparsemem_pages >= nr_pages)
|
|
goto skip_allocpage;
|
|
- }
|
|
|
|
for (i = 0; i < nr_pages; ++i) {
|
|
if (pages[i] != NULL)
|
|
@@ -1005,7 +1004,7 @@ static void z_erofs_vle_unzip_wq(struct work_struct *work)
|
|
struct z_erofs_vle_unzip_io_sb, io.u.work);
|
|
LIST_HEAD(page_pool);
|
|
|
|
- BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
|
|
+ DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
|
|
z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
|
|
|
|
put_pages_list(&page_pool);
|
|
@@ -1339,7 +1338,6 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
|
|
continue;
|
|
}
|
|
|
|
- BUG_ON(PagePrivate(page));
|
|
set_page_private(page, (unsigned long)head);
|
|
head = page;
|
|
}
|
|
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
|
|
index 1a428658cbea2..16ac335ee59f4 100644
|
|
--- a/drivers/staging/erofs/unzip_vle_lz4.c
|
|
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
|
|
@@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
|
|
if (compressed_pages[j] != page)
|
|
continue;
|
|
|
|
- BUG_ON(mirrored[j]);
|
|
+ DBG_BUGON(mirrored[j]);
|
|
memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
|
|
mirrored[j] = true;
|
|
break;
|
|
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
|
|
index ea8a962e5c950..b535898ca753f 100644
|
|
--- a/drivers/staging/erofs/utils.c
|
|
+++ b/drivers/staging/erofs/utils.c
|
|
@@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
|
|
list_del(&page->lru);
|
|
} else {
|
|
page = alloc_pages(gfp | __GFP_NOFAIL, 0);
|
|
-
|
|
- BUG_ON(page == NULL);
|
|
- BUG_ON(page->mapping != NULL);
|
|
}
|
|
return page;
|
|
}
|
|
@@ -58,7 +55,7 @@ repeat:
|
|
/* decrease refcount added by erofs_workgroup_put */
|
|
if (unlikely(oldcount == 1))
|
|
atomic_long_dec(&erofs_global_shrink_cnt);
|
|
- BUG_ON(index != grp->index);
|
|
+ DBG_BUGON(index != grp->index);
|
|
}
|
|
rcu_read_unlock();
|
|
return grp;
|
|
@@ -71,8 +68,11 @@ int erofs_register_workgroup(struct super_block *sb,
|
|
struct erofs_sb_info *sbi;
|
|
int err;
|
|
|
|
- /* grp->refcount should not < 1 */
|
|
- BUG_ON(!atomic_read(&grp->refcount));
|
|
+ /* grp shouldn't be broken or used before */
|
|
+ if (unlikely(atomic_read(&grp->refcount) != 1)) {
|
|
+ DBG_BUGON(1);
|
|
+ return -EINVAL;
|
|
+ }
|
|
|
|
err = radix_tree_preload(GFP_NOFS);
|
|
if (err)
|
|
@@ -83,12 +83,21 @@ int erofs_register_workgroup(struct super_block *sb,
|
|
|
|
grp = xa_tag_pointer(grp, tag);
|
|
|
|
- err = radix_tree_insert(&sbi->workstn_tree,
|
|
- grp->index, grp);
|
|
+ /*
|
|
+ * Bump up reference count before making this workgroup
|
|
+ * visible to other users in order to avoid potential UAF
|
|
+ * without serialized by erofs_workstn_lock.
|
|
+ */
|
|
+ __erofs_workgroup_get(grp);
|
|
|
|
- if (!err) {
|
|
- __erofs_workgroup_get(grp);
|
|
- }
|
|
+ err = radix_tree_insert(&sbi->workstn_tree,
|
|
+ grp->index, grp);
|
|
+ if (unlikely(err))
|
|
+ /*
|
|
+ * it's safe to decrease since the workgroup isn't visible
|
|
+ * and refcount >= 2 (cannot be freezed).
|
|
+ */
|
|
+ __erofs_workgroup_put(grp);
|
|
|
|
erofs_workstn_unlock(sbi);
|
|
radix_tree_preload_end();
|
|
@@ -97,19 +106,94 @@ int erofs_register_workgroup(struct super_block *sb,
|
|
|
|
extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
|
|
|
|
+static void __erofs_workgroup_free(struct erofs_workgroup *grp)
|
|
+{
|
|
+ atomic_long_dec(&erofs_global_shrink_cnt);
|
|
+ erofs_workgroup_free_rcu(grp);
|
|
+}
|
|
+
|
|
int erofs_workgroup_put(struct erofs_workgroup *grp)
|
|
{
|
|
int count = atomic_dec_return(&grp->refcount);
|
|
|
|
if (count == 1)
|
|
atomic_long_inc(&erofs_global_shrink_cnt);
|
|
- else if (!count) {
|
|
- atomic_long_dec(&erofs_global_shrink_cnt);
|
|
- erofs_workgroup_free_rcu(grp);
|
|
- }
|
|
+ else if (!count)
|
|
+ __erofs_workgroup_free(grp);
|
|
return count;
|
|
}
|
|
|
|
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
|
|
+/* for cache-managed case, customized reclaim paths exist */
|
|
+static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
|
|
+{
|
|
+ erofs_workgroup_unfreeze(grp, 0);
|
|
+ __erofs_workgroup_free(grp);
|
|
+}
|
|
+
|
|
+bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
|
|
+ struct erofs_workgroup *grp,
|
|
+ bool cleanup)
|
|
+{
|
|
+ /*
|
|
+ * for managed cache enabled, the refcount of workgroups
|
|
+ * themselves could be < 0 (freezed). So there is no guarantee
|
|
+ * that all refcount > 0 if managed cache is enabled.
|
|
+ */
|
|
+ if (!erofs_workgroup_try_to_freeze(grp, 1))
|
|
+ return false;
|
|
+
|
|
+ /*
|
|
+ * note that all cached pages should be unlinked
|
|
+ * before delete it from the radix tree.
|
|
+ * Otherwise some cached pages of an orphan old workgroup
|
|
+ * could be still linked after the new one is available.
|
|
+ */
|
|
+ if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
|
|
+ erofs_workgroup_unfreeze(grp, 1);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * it is impossible to fail after the workgroup is freezed,
|
|
+ * however in order to avoid some race conditions, add a
|
|
+ * DBG_BUGON to observe this in advance.
|
|
+ */
|
|
+ DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
|
|
+ grp->index)) != grp);
|
|
+
|
|
+ /*
|
|
+ * if managed cache is enable, the last refcount
|
|
+ * should indicate the related workstation.
|
|
+ */
|
|
+ erofs_workgroup_unfreeze_final(grp);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+#else
|
|
+/* for nocache case, no customized reclaim path at all */
|
|
+bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
|
|
+ struct erofs_workgroup *grp,
|
|
+ bool cleanup)
|
|
+{
|
|
+ int cnt = atomic_read(&grp->refcount);
|
|
+
|
|
+ DBG_BUGON(cnt <= 0);
|
|
+ DBG_BUGON(cleanup && cnt != 1);
|
|
+
|
|
+ if (cnt > 1)
|
|
+ return false;
|
|
+
|
|
+ DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
|
|
+ grp->index)) != grp);
|
|
+
|
|
+ /* (rarely) could be grabbed again when freeing */
|
|
+ erofs_workgroup_put(grp);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
|
|
unsigned long nr_shrink,
|
|
bool cleanup)
|
|
@@ -126,41 +210,13 @@ repeat:
|
|
batch, first_index, PAGEVEC_SIZE);
|
|
|
|
for (i = 0; i < found; ++i) {
|
|
- int cnt;
|
|
struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
|
|
|
|
first_index = grp->index + 1;
|
|
|
|
- cnt = atomic_read(&grp->refcount);
|
|
- BUG_ON(cnt <= 0);
|
|
-
|
|
- if (cleanup)
|
|
- BUG_ON(cnt != 1);
|
|
-
|
|
-#ifndef EROFS_FS_HAS_MANAGED_CACHE
|
|
- else if (cnt > 1)
|
|
-#else
|
|
- if (!erofs_workgroup_try_to_freeze(grp, 1))
|
|
-#endif
|
|
- continue;
|
|
-
|
|
- if (xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
|
|
- grp->index)) != grp) {
|
|
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
|
|
-skip:
|
|
- erofs_workgroup_unfreeze(grp, 1);
|
|
-#endif
|
|
+ /* try to shrink each valid workgroup */
|
|
+ if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
|
|
continue;
|
|
- }
|
|
-
|
|
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
|
|
- if (erofs_try_to_free_all_cached_pages(sbi, grp))
|
|
- goto skip;
|
|
-
|
|
- erofs_workgroup_unfreeze(grp, 1);
|
|
-#endif
|
|
- /* (rarely) grabbed again when freeing */
|
|
- erofs_workgroup_put(grp);
|
|
|
|
++freed;
|
|
if (unlikely(!--nr_shrink))
|
|
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
|
|
index 58420dcb406d9..cbeb52485a319 100644
|
|
--- a/drivers/staging/iio/adc/ad7280a.c
|
|
+++ b/drivers/staging/iio/adc/ad7280a.c
|
|
@@ -256,7 +256,9 @@ static int ad7280_read(struct ad7280_state *st, unsigned int devaddr,
|
|
if (ret)
|
|
return ret;
|
|
|
|
- __ad7280_read32(st, &tmp);
|
|
+ ret = __ad7280_read32(st, &tmp);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
if (ad7280_check_crc(st, tmp))
|
|
return -EIO;
|
|
@@ -294,7 +296,9 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned int devaddr,
|
|
|
|
ad7280_delay(st);
|
|
|
|
- __ad7280_read32(st, &tmp);
|
|
+ ret = __ad7280_read32(st, &tmp);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
if (ad7280_check_crc(st, tmp))
|
|
return -EIO;
|
|
@@ -327,7 +331,9 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned int cnt,
|
|
ad7280_delay(st);
|
|
|
|
for (i = 0; i < cnt; i++) {
|
|
- __ad7280_read32(st, &tmp);
|
|
+ ret = __ad7280_read32(st, &tmp);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
if (ad7280_check_crc(st, tmp))
|
|
return -EIO;
|
|
@@ -370,7 +376,10 @@ static int ad7280_chain_setup(struct ad7280_state *st)
|
|
return ret;
|
|
|
|
for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
|
|
- __ad7280_read32(st, &val);
|
|
+ ret = __ad7280_read32(st, &val);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
if (val == 0)
|
|
return n - 1;
|
|
|
|
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
|
|
index b67412db0318a..c7cb05cedbbcd 100644
|
|
--- a/drivers/staging/iio/adc/ad7780.c
|
|
+++ b/drivers/staging/iio/adc/ad7780.c
|
|
@@ -87,12 +87,16 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
|
|
long m)
|
|
{
|
|
struct ad7780_state *st = iio_priv(indio_dev);
|
|
+ int voltage_uv;
|
|
|
|
switch (m) {
|
|
case IIO_CHAN_INFO_RAW:
|
|
return ad_sigma_delta_single_conversion(indio_dev, chan, val);
|
|
case IIO_CHAN_INFO_SCALE:
|
|
- *val = st->int_vref_mv * st->gain;
|
|
+ voltage_uv = regulator_get_voltage(st->reg);
|
|
+ if (voltage_uv < 0)
|
|
+ return voltage_uv;
|
|
+ *val = (voltage_uv / 1000) * st->gain;
|
|
*val2 = chan->scan_type.realbits - 1;
|
|
return IIO_VAL_FRACTIONAL_LOG2;
|
|
case IIO_CHAN_INFO_OFFSET:
|
|
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
|
|
index 59586947a9366..51cda91514124 100644
|
|
--- a/drivers/staging/iio/resolver/ad2s90.c
|
|
+++ b/drivers/staging/iio/resolver/ad2s90.c
|
|
@@ -85,7 +85,12 @@ static int ad2s90_probe(struct spi_device *spi)
|
|
/* need 600ns between CS and the first falling edge of SCLK */
|
|
spi->max_speed_hz = 830000;
|
|
spi->mode = SPI_MODE_3;
|
|
- spi_setup(spi);
|
|
+ ret = spi_setup(spi);
|
|
+
|
|
+ if (ret < 0) {
|
|
+ dev_err(&spi->dev, "spi_setup failed!\n");
|
|
+ return ret;
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
|
|
index c85a805a12431..a497ec1978721 100644
|
|
--- a/drivers/staging/pi433/pi433_if.c
|
|
+++ b/drivers/staging/pi433/pi433_if.c
|
|
@@ -1255,6 +1255,10 @@ static int pi433_probe(struct spi_device *spi)
|
|
|
|
/* create cdev */
|
|
device->cdev = cdev_alloc();
|
|
+ if (!device->cdev) {
|
|
+ dev_dbg(device->dev, "allocation of cdev failed");
|
|
+ goto cdev_failed;
|
|
+ }
|
|
device->cdev->owner = THIS_MODULE;
|
|
cdev_init(device->cdev, &pi433_fops);
|
|
retval = cdev_add(device->cdev, device->devt, 1);
|
|
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
|
|
index f7407632e80bd..bab96c8700424 100644
|
|
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
|
|
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
|
|
@@ -154,7 +154,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
|
|
|
|
pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
|
|
|
|
- crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
|
|
+ crypto_ops = lib80211_get_crypto_ops("WEP");
|
|
|
|
if (!crypto_ops)
|
|
return;
|
|
@@ -210,7 +210,7 @@ int rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
|
|
void *crypto_private = NULL;
|
|
int status = _SUCCESS;
|
|
const int keyindex = prxattrib->key_index;
|
|
- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
|
|
+ struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP");
|
|
char iv[4], icv[4];
|
|
|
|
if (!crypto_ops) {
|
|
@@ -1291,7 +1291,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
|
|
struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
|
|
void *crypto_private = NULL;
|
|
u8 *key, *pframe = skb->data;
|
|
- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp");
|
|
+ struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP");
|
|
struct security_priv *psecuritypriv = &padapter->securitypriv;
|
|
char iv[8], icv[8];
|
|
|
|
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
|
|
index 28cbd6b3d26c3..dfee6985efa61 100644
|
|
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
|
|
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
|
|
@@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
|
|
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
|
|
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
|
|
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
|
|
+ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
|
|
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
|
|
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
|
|
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
|
|
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
|
|
index 979e3ae249c13..033be0ad03e75 100644
|
|
--- a/drivers/staging/speakup/spk_ttyio.c
|
|
+++ b/drivers/staging/speakup/spk_ttyio.c
|
|
@@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch)
|
|
return;
|
|
}
|
|
|
|
- speakup_tty->ops->send_xchar(speakup_tty, ch);
|
|
+ if (speakup_tty->ops->send_xchar)
|
|
+ speakup_tty->ops->send_xchar(speakup_tty, ch);
|
|
mutex_unlock(&speakup_tty_mutex);
|
|
}
|
|
|
|
@@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
|
|
return;
|
|
}
|
|
|
|
- speakup_tty->ops->tiocmset(speakup_tty, set, clear);
|
|
+ if (speakup_tty->ops->tiocmset)
|
|
+ speakup_tty->ops->tiocmset(speakup_tty, set, clear);
|
|
mutex_unlock(&speakup_tty_mutex);
|
|
}
|
|
|
|
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
|
|
index 781754f36da73..8da66e996d231 100644
|
|
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
|
|
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
|
|
@@ -143,7 +143,6 @@ vc_vchi_audio_init(VCHI_INSTANCE_T vchi_instance,
|
|
dev_err(instance->dev,
|
|
"failed to open VCHI service connection (status=%d)\n",
|
|
status);
|
|
- kfree(instance);
|
|
return -EPERM;
|
|
}
|
|
|
|
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
|
|
index ca351c9503449..5c3e4df804ebf 100644
|
|
--- a/drivers/staging/wilc1000/wilc_sdio.c
|
|
+++ b/drivers/staging/wilc1000/wilc_sdio.c
|
|
@@ -841,6 +841,7 @@ static int sdio_read_int(struct wilc *wilc, u32 *int_status)
|
|
if (!sdio_priv->irq_gpio) {
|
|
int i;
|
|
|
|
+ cmd.read_write = 0;
|
|
cmd.function = 1;
|
|
cmd.address = 0x04;
|
|
cmd.data = 0;
|
|
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
|
|
index f459118bc11ba..c37dd36ec77d2 100644
|
|
--- a/drivers/target/target_core_spc.c
|
|
+++ b/drivers/target/target_core_spc.c
|
|
@@ -108,12 +108,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
|
|
|
|
buf[7] = 0x2; /* CmdQue=1 */
|
|
|
|
- memcpy(&buf[8], "LIO-ORG ", 8);
|
|
- memset(&buf[16], 0x20, 16);
|
|
+ /*
|
|
+ * ASCII data fields described as being left-aligned shall have any
|
|
+ * unused bytes at the end of the field (i.e., highest offset) and the
|
|
+ * unused bytes shall be filled with ASCII space characters (20h).
|
|
+ */
|
|
+ memset(&buf[8], 0x20, 8 + 16 + 4);
|
|
+ memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
|
|
memcpy(&buf[16], dev->t10_wwn.model,
|
|
- min_t(size_t, strlen(dev->t10_wwn.model), 16));
|
|
+ strnlen(dev->t10_wwn.model, 16));
|
|
memcpy(&buf[32], dev->t10_wwn.revision,
|
|
- min_t(size_t, strlen(dev->t10_wwn.revision), 4));
|
|
+ strnlen(dev->t10_wwn.revision, 4));
|
|
buf[4] = 31; /* Set additional length to 31 */
|
|
|
|
return 0;
|
|
@@ -251,7 +256,9 @@ check_t10_vend_desc:
|
|
buf[off] = 0x2; /* ASCII */
|
|
buf[off+1] = 0x1; /* T10 Vendor ID */
|
|
buf[off+2] = 0x0;
|
|
- memcpy(&buf[off+4], "LIO-ORG", 8);
|
|
+ /* left align Vendor ID and pad with spaces */
|
|
+ memset(&buf[off+4], 0x20, 8);
|
|
+ memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
|
|
/* Extra Byte for NULL Terminator */
|
|
id_len++;
|
|
/* Identifier Length */
|
|
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
|
|
index 2cfd61d62e973..900442605c72f 100644
|
|
--- a/drivers/target/target_core_transport.c
|
|
+++ b/drivers/target/target_core_transport.c
|
|
@@ -224,19 +224,28 @@ void transport_subsystem_check_init(void)
|
|
sub_api_initialized = 1;
|
|
}
|
|
|
|
+static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
|
|
+{
|
|
+ struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
|
|
+
|
|
+ wake_up(&sess->cmd_list_wq);
|
|
+}
|
|
+
|
|
/**
|
|
* transport_init_session - initialize a session object
|
|
* @se_sess: Session object pointer.
|
|
*
|
|
* The caller must have zero-initialized @se_sess before calling this function.
|
|
*/
|
|
-void transport_init_session(struct se_session *se_sess)
|
|
+int transport_init_session(struct se_session *se_sess)
|
|
{
|
|
INIT_LIST_HEAD(&se_sess->sess_list);
|
|
INIT_LIST_HEAD(&se_sess->sess_acl_list);
|
|
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
|
|
spin_lock_init(&se_sess->sess_cmd_lock);
|
|
init_waitqueue_head(&se_sess->cmd_list_wq);
|
|
+ return percpu_ref_init(&se_sess->cmd_count,
|
|
+ target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
|
|
}
|
|
EXPORT_SYMBOL(transport_init_session);
|
|
|
|
@@ -247,6 +256,7 @@ EXPORT_SYMBOL(transport_init_session);
|
|
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
|
|
{
|
|
struct se_session *se_sess;
|
|
+ int ret;
|
|
|
|
se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
|
|
if (!se_sess) {
|
|
@@ -254,7 +264,11 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
|
|
" se_sess_cache\n");
|
|
return ERR_PTR(-ENOMEM);
|
|
}
|
|
- transport_init_session(se_sess);
|
|
+ ret = transport_init_session(se_sess);
|
|
+ if (ret < 0) {
|
|
+ kmem_cache_free(se_sess_cache, se_sess);
|
|
+ return ERR_PTR(ret);
|
|
+ }
|
|
se_sess->sup_prot_ops = sup_prot_ops;
|
|
|
|
return se_sess;
|
|
@@ -581,6 +595,7 @@ void transport_free_session(struct se_session *se_sess)
|
|
sbitmap_queue_free(&se_sess->sess_tag_pool);
|
|
kvfree(se_sess->sess_cmd_map);
|
|
}
|
|
+ percpu_ref_exit(&se_sess->cmd_count);
|
|
kmem_cache_free(se_sess_cache, se_sess);
|
|
}
|
|
EXPORT_SYMBOL(transport_free_session);
|
|
@@ -2719,6 +2734,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
|
|
}
|
|
se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
|
|
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
|
|
+ percpu_ref_get(&se_sess->cmd_count);
|
|
out:
|
|
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
|
|
|
|
@@ -2749,8 +2765,6 @@ static void target_release_cmd_kref(struct kref *kref)
|
|
if (se_sess) {
|
|
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
|
|
list_del_init(&se_cmd->se_cmd_list);
|
|
- if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
|
|
- wake_up(&se_sess->cmd_list_wq);
|
|
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
|
|
}
|
|
|
|
@@ -2758,6 +2772,8 @@ static void target_release_cmd_kref(struct kref *kref)
|
|
se_cmd->se_tfo->release_cmd(se_cmd);
|
|
if (compl)
|
|
complete(compl);
|
|
+
|
|
+ percpu_ref_put(&se_sess->cmd_count);
|
|
}
|
|
|
|
/**
|
|
@@ -2886,6 +2902,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
|
|
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
|
|
se_sess->sess_tearing_down = 1;
|
|
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
|
|
+
|
|
+ percpu_ref_kill(&se_sess->cmd_count);
|
|
}
|
|
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
|
|
|
|
@@ -2900,17 +2918,14 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
|
|
|
|
WARN_ON_ONCE(!se_sess->sess_tearing_down);
|
|
|
|
- spin_lock_irq(&se_sess->sess_cmd_lock);
|
|
do {
|
|
- ret = wait_event_lock_irq_timeout(
|
|
- se_sess->cmd_list_wq,
|
|
- list_empty(&se_sess->sess_cmd_list),
|
|
- se_sess->sess_cmd_lock, 180 * HZ);
|
|
+ ret = wait_event_timeout(se_sess->cmd_list_wq,
|
|
+ percpu_ref_is_zero(&se_sess->cmd_count),
|
|
+ 180 * HZ);
|
|
list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
|
|
target_show_cmd("session shutdown: still waiting for ",
|
|
cmd);
|
|
} while (ret <= 0);
|
|
- spin_unlock_irq(&se_sess->sess_cmd_lock);
|
|
}
|
|
EXPORT_SYMBOL(target_wait_for_sess_cmds);
|
|
|
|
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
|
|
index 9cd404acdb82b..ac7620120491b 100644
|
|
--- a/drivers/target/target_core_user.c
|
|
+++ b/drivers/target/target_core_user.c
|
|
@@ -148,7 +148,7 @@ struct tcmu_dev {
|
|
size_t ring_size;
|
|
|
|
struct mutex cmdr_lock;
|
|
- struct list_head cmdr_queue;
|
|
+ struct list_head qfull_queue;
|
|
|
|
uint32_t dbi_max;
|
|
uint32_t dbi_thresh;
|
|
@@ -159,6 +159,7 @@ struct tcmu_dev {
|
|
|
|
struct timer_list cmd_timer;
|
|
unsigned int cmd_time_out;
|
|
+ struct list_head inflight_queue;
|
|
|
|
struct timer_list qfull_timer;
|
|
int qfull_time_out;
|
|
@@ -179,7 +180,7 @@ struct tcmu_dev {
|
|
struct tcmu_cmd {
|
|
struct se_cmd *se_cmd;
|
|
struct tcmu_dev *tcmu_dev;
|
|
- struct list_head cmdr_queue_entry;
|
|
+ struct list_head queue_entry;
|
|
|
|
uint16_t cmd_id;
|
|
|
|
@@ -192,6 +193,7 @@ struct tcmu_cmd {
|
|
unsigned long deadline;
|
|
|
|
#define TCMU_CMD_BIT_EXPIRED 0
|
|
+#define TCMU_CMD_BIT_INFLIGHT 1
|
|
unsigned long flags;
|
|
};
|
|
/*
|
|
@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
|
|
if (!tcmu_cmd)
|
|
return NULL;
|
|
|
|
- INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
|
|
+ INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
|
|
tcmu_cmd->se_cmd = se_cmd;
|
|
tcmu_cmd->tcmu_dev = udev;
|
|
|
|
@@ -915,11 +917,13 @@ setup_timer:
|
|
return 0;
|
|
|
|
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
|
|
- mod_timer(timer, tcmu_cmd->deadline);
|
|
+ if (!timer_pending(timer))
|
|
+ mod_timer(timer, tcmu_cmd->deadline);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
-static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
|
|
+static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
|
|
{
|
|
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
|
|
unsigned int tmo;
|
|
@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
|
|
if (ret)
|
|
return ret;
|
|
|
|
- list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
|
|
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
|
|
pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
|
|
tcmu_cmd->cmd_id, udev->name);
|
|
return 0;
|
|
@@ -999,7 +1003,7 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
|
|
base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
|
|
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
|
|
|
|
- if (!list_empty(&udev->cmdr_queue))
|
|
+ if (!list_empty(&udev->qfull_queue))
|
|
goto queue;
|
|
|
|
mb = udev->mb_addr;
|
|
@@ -1096,13 +1100,16 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
|
|
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
|
|
tcmu_flush_dcache_range(mb, sizeof(*mb));
|
|
|
|
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
|
|
+ set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
|
|
+
|
|
/* TODO: only if FLUSH and FUA? */
|
|
uio_event_notify(&udev->uio_info);
|
|
|
|
return 0;
|
|
|
|
queue:
|
|
- if (add_to_cmdr_queue(tcmu_cmd)) {
|
|
+ if (add_to_qfull_queue(tcmu_cmd)) {
|
|
*scsi_err = TCM_OUT_OF_RESOURCES;
|
|
return -1;
|
|
}
|
|
@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
|
|
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
|
|
goto out;
|
|
|
|
+ list_del_init(&cmd->queue_entry);
|
|
+
|
|
tcmu_cmd_reset_dbi_cur(cmd);
|
|
|
|
if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
|
|
@@ -1194,9 +1203,29 @@ out:
|
|
tcmu_free_cmd(cmd);
|
|
}
|
|
|
|
+static void tcmu_set_next_deadline(struct list_head *queue,
|
|
+ struct timer_list *timer)
|
|
+{
|
|
+ struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
|
|
+ unsigned long deadline = 0;
|
|
+
|
|
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
|
|
+ if (!time_after(jiffies, tcmu_cmd->deadline)) {
|
|
+ deadline = tcmu_cmd->deadline;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (deadline)
|
|
+ mod_timer(timer, deadline);
|
|
+ else
|
|
+ del_timer(timer);
|
|
+}
|
|
+
|
|
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
|
|
{
|
|
struct tcmu_mailbox *mb;
|
|
+ struct tcmu_cmd *cmd;
|
|
int handled = 0;
|
|
|
|
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
|
|
@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
|
|
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
|
|
|
|
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
|
|
- struct tcmu_cmd *cmd;
|
|
|
|
tcmu_flush_dcache_range(entry, sizeof(*entry));
|
|
|
|
@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
|
|
/* no more pending commands */
|
|
del_timer(&udev->cmd_timer);
|
|
|
|
- if (list_empty(&udev->cmdr_queue)) {
|
|
+ if (list_empty(&udev->qfull_queue)) {
|
|
/*
|
|
* no more pending or waiting commands so try to
|
|
* reclaim blocks if needed.
|
|
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
|
|
tcmu_global_max_blocks)
|
|
schedule_delayed_work(&tcmu_unmap_work, 0);
|
|
}
|
|
+ } else if (udev->cmd_time_out) {
|
|
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
|
|
}
|
|
|
|
return handled;
|
|
@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
|
|
if (!time_after(jiffies, cmd->deadline))
|
|
return 0;
|
|
|
|
- is_running = list_empty(&cmd->cmdr_queue_entry);
|
|
+ is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
|
|
se_cmd = cmd->se_cmd;
|
|
|
|
if (is_running) {
|
|
@@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
|
|
*/
|
|
scsi_status = SAM_STAT_CHECK_CONDITION;
|
|
} else {
|
|
- list_del_init(&cmd->cmdr_queue_entry);
|
|
-
|
|
idr_remove(&udev->commands, id);
|
|
tcmu_free_cmd(cmd);
|
|
scsi_status = SAM_STAT_TASK_SET_FULL;
|
|
}
|
|
+ list_del_init(&cmd->queue_entry);
|
|
|
|
pr_debug("Timing out cmd %u on dev %s that is %s.\n",
|
|
id, udev->name, is_running ? "inflight" : "queued");
|
|
@@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
|
|
|
|
INIT_LIST_HEAD(&udev->node);
|
|
INIT_LIST_HEAD(&udev->timedout_entry);
|
|
- INIT_LIST_HEAD(&udev->cmdr_queue);
|
|
+ INIT_LIST_HEAD(&udev->qfull_queue);
|
|
+ INIT_LIST_HEAD(&udev->inflight_queue);
|
|
idr_init(&udev->commands);
|
|
|
|
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
|
|
@@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
|
|
return &udev->se_dev;
|
|
}
|
|
|
|
-static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
|
|
+static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
|
|
{
|
|
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
|
|
LIST_HEAD(cmds);
|
|
@@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
|
|
sense_reason_t scsi_ret;
|
|
int ret;
|
|
|
|
- if (list_empty(&udev->cmdr_queue))
|
|
+ if (list_empty(&udev->qfull_queue))
|
|
return true;
|
|
|
|
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
|
|
|
|
- list_splice_init(&udev->cmdr_queue, &cmds);
|
|
+ list_splice_init(&udev->qfull_queue, &cmds);
|
|
|
|
- list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
|
|
- list_del_init(&tcmu_cmd->cmdr_queue_entry);
|
|
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
|
|
+ list_del_init(&tcmu_cmd->queue_entry);
|
|
|
|
pr_debug("removing cmd %u on dev %s from queue\n",
|
|
tcmu_cmd->cmd_id, udev->name);
|
|
@@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
|
|
* cmd was requeued, so just put all cmds back in
|
|
* the queue
|
|
*/
|
|
- list_splice_tail(&cmds, &udev->cmdr_queue);
|
|
+ list_splice_tail(&cmds, &udev->qfull_queue);
|
|
drained = false;
|
|
- goto done;
|
|
+ break;
|
|
}
|
|
}
|
|
- if (list_empty(&udev->cmdr_queue))
|
|
- del_timer(&udev->qfull_timer);
|
|
-done:
|
|
+
|
|
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
|
|
return drained;
|
|
}
|
|
|
|
@@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
|
|
|
|
mutex_lock(&udev->cmdr_lock);
|
|
tcmu_handle_completions(udev);
|
|
- run_cmdr_queue(udev, false);
|
|
+ run_qfull_queue(udev, false);
|
|
mutex_unlock(&udev->cmdr_lock);
|
|
|
|
return 0;
|
|
@@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
|
|
/* complete IO that has executed successfully */
|
|
tcmu_handle_completions(udev);
|
|
/* fail IO waiting to be queued */
|
|
- run_cmdr_queue(udev, true);
|
|
+ run_qfull_queue(udev, true);
|
|
|
|
unlock:
|
|
mutex_unlock(&udev->cmdr_lock);
|
|
@@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
|
|
mutex_lock(&udev->cmdr_lock);
|
|
|
|
idr_for_each_entry(&udev->commands, cmd, i) {
|
|
- if (!list_empty(&cmd->cmdr_queue_entry))
|
|
+ if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
|
|
continue;
|
|
|
|
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
|
|
@@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
|
|
|
|
idr_remove(&udev->commands, i);
|
|
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
|
|
+ list_del_init(&cmd->queue_entry);
|
|
if (err_level == 1) {
|
|
/*
|
|
* Userspace was not able to start the
|
|
@@ -2666,6 +2696,10 @@ static void check_timedout_devices(void)
|
|
|
|
mutex_lock(&udev->cmdr_lock);
|
|
idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
|
|
+
|
|
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
|
|
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
|
|
+
|
|
mutex_unlock(&udev->cmdr_lock);
|
|
|
|
spin_lock_bh(&timed_out_udevs_lock);
|
|
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
|
|
index 70adcfdca8d13..124495f953fa9 100644
|
|
--- a/drivers/target/target_core_xcopy.c
|
|
+++ b/drivers/target/target_core_xcopy.c
|
|
@@ -479,6 +479,8 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
|
|
|
|
int target_xcopy_setup_pt(void)
|
|
{
|
|
+ int ret;
|
|
+
|
|
xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
|
|
if (!xcopy_wq) {
|
|
pr_err("Unable to allocate xcopy_wq\n");
|
|
@@ -496,7 +498,9 @@ int target_xcopy_setup_pt(void)
|
|
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
|
|
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
|
|
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
|
|
- transport_init_session(&xcopy_pt_sess);
|
|
+ ret = transport_init_session(&xcopy_pt_sess);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
|
|
xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
|
|
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
|
|
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
|
|
index df35fc01fd3e5..43626e15703a8 100644
|
|
--- a/drivers/tee/optee/supp.c
|
|
+++ b/drivers/tee/optee/supp.c
|
|
@@ -19,7 +19,7 @@
|
|
struct optee_supp_req {
|
|
struct list_head link;
|
|
|
|
- bool busy;
|
|
+ bool in_queue;
|
|
u32 func;
|
|
u32 ret;
|
|
size_t num_params;
|
|
@@ -54,7 +54,6 @@ void optee_supp_release(struct optee_supp *supp)
|
|
|
|
/* Abort all request retrieved by supplicant */
|
|
idr_for_each_entry(&supp->idr, req, id) {
|
|
- req->busy = false;
|
|
idr_remove(&supp->idr, id);
|
|
req->ret = TEEC_ERROR_COMMUNICATION;
|
|
complete(&req->c);
|
|
@@ -63,6 +62,7 @@ void optee_supp_release(struct optee_supp *supp)
|
|
/* Abort all queued requests */
|
|
list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
|
|
list_del(&req->link);
|
|
+ req->in_queue = false;
|
|
req->ret = TEEC_ERROR_COMMUNICATION;
|
|
complete(&req->c);
|
|
}
|
|
@@ -103,6 +103,7 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
|
|
/* Insert the request in the request list */
|
|
mutex_lock(&supp->mutex);
|
|
list_add_tail(&req->link, &supp->reqs);
|
|
+ req->in_queue = true;
|
|
mutex_unlock(&supp->mutex);
|
|
|
|
/* Tell an eventual waiter there's a new request */
|
|
@@ -130,9 +131,10 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
|
|
* will serve all requests in a timely manner and
|
|
* interrupting then wouldn't make sense.
|
|
*/
|
|
- interruptable = !req->busy;
|
|
- if (!req->busy)
|
|
+ if (req->in_queue) {
|
|
list_del(&req->link);
|
|
+ req->in_queue = false;
|
|
+ }
|
|
}
|
|
mutex_unlock(&supp->mutex);
|
|
|
|
@@ -176,7 +178,7 @@ static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
list_del(&req->link);
|
|
- req->busy = true;
|
|
+ req->in_queue = false;
|
|
|
|
return req;
|
|
}
|
|
@@ -318,7 +320,6 @@ static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
|
|
if ((num_params - nm) != req->num_params)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- req->busy = false;
|
|
idr_remove(&supp->idr, id);
|
|
supp->req_id = -1;
|
|
*num_meta = nm;
|
|
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
|
|
index b9d90f0ed504d..720760cd493fe 100644
|
|
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
|
|
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
|
|
@@ -18,6 +18,8 @@
|
|
#include <linux/platform_device.h>
|
|
#include <linux/thermal.h>
|
|
|
|
+#include "../thermal_hwmon.h"
|
|
+
|
|
#define BCM2835_TS_TSENSCTL 0x00
|
|
#define BCM2835_TS_TSENSSTAT 0x04
|
|
|
|
@@ -266,6 +268,15 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
|
|
|
|
platform_set_drvdata(pdev, tz);
|
|
|
|
+ /*
|
|
+ * Thermal_zone doesn't enable hwmon as default,
|
|
+ * enable it here
|
|
+ */
|
|
+ tz->tzp->no_hwmon = false;
|
|
+ err = thermal_add_hwmon_sysfs(tz);
|
|
+ if (err)
|
|
+ goto err_tz;
|
|
+
|
|
bcm2835_thermal_debugfs(pdev);
|
|
|
|
return 0;
|
|
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
|
|
index 3be4be2e04659..78652cac7f3df 100644
|
|
--- a/drivers/thermal/qcom/tsens-common.c
|
|
+++ b/drivers/thermal/qcom/tsens-common.c
|
|
@@ -114,6 +114,14 @@ int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
|
|
}
|
|
|
|
static const struct regmap_config tsens_config = {
|
|
+ .name = "tm",
|
|
+ .reg_bits = 32,
|
|
+ .val_bits = 32,
|
|
+ .reg_stride = 4,
|
|
+};
|
|
+
|
|
+static const struct regmap_config tsens_srot_config = {
|
|
+ .name = "srot",
|
|
.reg_bits = 32,
|
|
.val_bits = 32,
|
|
.reg_stride = 4,
|
|
@@ -139,8 +147,8 @@ int __init init_common(struct tsens_device *tmdev)
|
|
if (IS_ERR(srot_base))
|
|
return PTR_ERR(srot_base);
|
|
|
|
- tmdev->srot_map = devm_regmap_init_mmio(tmdev->dev,
|
|
- srot_base, &tsens_config);
|
|
+ tmdev->srot_map = devm_regmap_init_mmio(tmdev->dev, srot_base,
|
|
+ &tsens_srot_config);
|
|
if (IS_ERR(tmdev->srot_map))
|
|
return PTR_ERR(tmdev->srot_map);
|
|
|
|
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
|
|
index bf1c628d4a7ad..e22fc60ad36dc 100644
|
|
--- a/drivers/thermal/thermal-generic-adc.c
|
|
+++ b/drivers/thermal/thermal-generic-adc.c
|
|
@@ -26,7 +26,7 @@ struct gadc_thermal_info {
|
|
|
|
static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
|
|
{
|
|
- int temp, adc_hi, adc_lo;
|
|
+ int temp, temp_hi, temp_lo, adc_hi, adc_lo;
|
|
int i;
|
|
|
|
for (i = 0; i < gti->nlookup_table; i++) {
|
|
@@ -36,13 +36,17 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
|
|
|
|
if (i == 0) {
|
|
temp = gti->lookup_table[0];
|
|
- } else if (i >= (gti->nlookup_table - 1)) {
|
|
+ } else if (i >= gti->nlookup_table) {
|
|
temp = gti->lookup_table[2 * (gti->nlookup_table - 1)];
|
|
} else {
|
|
adc_hi = gti->lookup_table[2 * i - 1];
|
|
adc_lo = gti->lookup_table[2 * i + 1];
|
|
- temp = gti->lookup_table[2 * i];
|
|
- temp -= ((val - adc_lo) * 1000) / (adc_hi - adc_lo);
|
|
+
|
|
+ temp_hi = gti->lookup_table[2 * i - 2];
|
|
+ temp_lo = gti->lookup_table[2 * i];
|
|
+
|
|
+ temp = temp_hi + mult_frac(temp_lo - temp_hi, val - adc_hi,
|
|
+ adc_lo - adc_hi);
|
|
}
|
|
|
|
return temp;
|
|
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
|
|
index d6ebc1cf6aa9b..3a014cd8daf02 100644
|
|
--- a/drivers/thermal/thermal_core.c
|
|
+++ b/drivers/thermal/thermal_core.c
|
|
@@ -453,16 +453,20 @@ static void update_temperature(struct thermal_zone_device *tz)
|
|
tz->last_temperature, tz->temperature);
|
|
}
|
|
|
|
-static void thermal_zone_device_reset(struct thermal_zone_device *tz)
|
|
+static void thermal_zone_device_init(struct thermal_zone_device *tz)
|
|
{
|
|
struct thermal_instance *pos;
|
|
-
|
|
tz->temperature = THERMAL_TEMP_INVALID;
|
|
- tz->passive = 0;
|
|
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
|
|
pos->initialized = false;
|
|
}
|
|
|
|
+static void thermal_zone_device_reset(struct thermal_zone_device *tz)
|
|
+{
|
|
+ tz->passive = 0;
|
|
+ thermal_zone_device_init(tz);
|
|
+}
|
|
+
|
|
void thermal_zone_device_update(struct thermal_zone_device *tz,
|
|
enum thermal_notify_event event)
|
|
{
|
|
@@ -1504,7 +1508,7 @@ static int thermal_pm_notify(struct notifier_block *nb,
|
|
case PM_POST_SUSPEND:
|
|
atomic_set(&in_suspend, 0);
|
|
list_for_each_entry(tz, &thermal_tz_list, node) {
|
|
- thermal_zone_device_reset(tz);
|
|
+ thermal_zone_device_init(tz);
|
|
thermal_zone_device_update(tz,
|
|
THERMAL_EVENT_UNSPECIFIED);
|
|
}
|
|
diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h
|
|
index 019f6f88224e9..a160b9d62dd0a 100644
|
|
--- a/drivers/thermal/thermal_hwmon.h
|
|
+++ b/drivers/thermal/thermal_hwmon.h
|
|
@@ -19,13 +19,13 @@
|
|
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
|
|
void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
|
|
#else
|
|
-static int
|
|
+static inline int
|
|
thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
-static void
|
|
+static inline void
|
|
thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
|
|
{
|
|
}
|
|
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
|
|
index 2241ceae7d7f1..aa99edb4dff7d 100644
|
|
--- a/drivers/thermal/thermal_sysfs.c
|
|
+++ b/drivers/thermal/thermal_sysfs.c
|
|
@@ -712,11 +712,14 @@ cur_state_store(struct device *dev, struct device_attribute *attr,
|
|
if ((long)state < 0)
|
|
return -EINVAL;
|
|
|
|
+ mutex_lock(&cdev->lock);
|
|
+
|
|
result = cdev->ops->set_cur_state(cdev, state);
|
|
- if (result)
|
|
- return result;
|
|
- thermal_cooling_device_stats_update(cdev, state);
|
|
- return count;
|
|
+ if (!result)
|
|
+ thermal_cooling_device_stats_update(cdev, state);
|
|
+
|
|
+ mutex_unlock(&cdev->lock);
|
|
+ return result ? result : count;
|
|
}
|
|
|
|
static struct device_attribute
|
|
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
|
|
index dabb391909aad..bb63519db7ae4 100644
|
|
--- a/drivers/tty/n_hdlc.c
|
|
+++ b/drivers/tty/n_hdlc.c
|
|
@@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
|
|
/* too large for caller's buffer */
|
|
ret = -EOVERFLOW;
|
|
} else {
|
|
+ __set_current_state(TASK_RUNNING);
|
|
if (copy_to_user(buf, rbuf->buf, rbuf->count))
|
|
ret = -EFAULT;
|
|
else
|
|
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
|
|
index f80a300b5d68f..48bd694a5fa1f 100644
|
|
--- a/drivers/tty/serial/8250/8250_pci.c
|
|
+++ b/drivers/tty/serial/8250/8250_pci.c
|
|
@@ -3420,6 +3420,11 @@ static int
|
|
serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
|
|
{
|
|
int num_iomem, num_port, first_port = -1, i;
|
|
+ int rc;
|
|
+
|
|
+ rc = serial_pci_is_class_communication(dev);
|
|
+ if (rc)
|
|
+ return rc;
|
|
|
|
/*
|
|
* Should we try to make guesses for multiport serial devices later?
|
|
@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
|
|
|
|
board = &pci_boards[ent->driver_data];
|
|
|
|
- rc = serial_pci_is_class_communication(dev);
|
|
- if (rc)
|
|
- return rc;
|
|
-
|
|
rc = serial_pci_is_blacklisted(dev);
|
|
if (rc)
|
|
return rc;
|
|
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
|
|
index ebd33c0232e63..89ade213a1a9a 100644
|
|
--- a/drivers/tty/serial/amba-pl011.c
|
|
+++ b/drivers/tty/serial/amba-pl011.c
|
|
@@ -2780,6 +2780,7 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
|
|
.name = "sbsa-uart",
|
|
.of_match_table = of_match_ptr(sbsa_uart_of_match),
|
|
.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
|
|
+ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
|
|
},
|
|
};
|
|
|
|
@@ -2808,6 +2809,7 @@ static struct amba_driver pl011_driver = {
|
|
.drv = {
|
|
.name = "uart-pl011",
|
|
.pm = &pl011_dev_pm_ops,
|
|
+ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
|
|
},
|
|
.id_table = pl011_ids,
|
|
.probe = pl011_probe,
|
|
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
|
|
index 00c220e4f43c0..086642ea4b26e 100644
|
|
--- a/drivers/tty/serial/fsl_lpuart.c
|
|
+++ b/drivers/tty/serial/fsl_lpuart.c
|
|
@@ -1479,6 +1479,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
|
|
else
|
|
cr1 &= ~UARTCR1_PT;
|
|
}
|
|
+ } else {
|
|
+ cr1 &= ~UARTCR1_PE;
|
|
}
|
|
|
|
/* ask the core to calculate the divisor */
|
|
@@ -1690,6 +1692,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
|
|
else
|
|
ctrl &= ~UARTCTRL_PT;
|
|
}
|
|
+ } else {
|
|
+ ctrl &= ~UARTCTRL_PE;
|
|
}
|
|
|
|
/* ask the core to calculate the divisor */
|
|
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
|
|
index fd80d999308d4..0bdf1687983f5 100644
|
|
--- a/drivers/tty/serial/pic32_uart.c
|
|
+++ b/drivers/tty/serial/pic32_uart.c
|
|
@@ -919,6 +919,7 @@ static struct platform_driver pic32_uart_platform_driver = {
|
|
.driver = {
|
|
.name = PIC32_DEV_NAME,
|
|
.of_match_table = of_match_ptr(pic32_serial_dt_ids),
|
|
+ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_PIC32),
|
|
},
|
|
};
|
|
|
|
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
|
|
index da1bd4bba8a94..2a49b6d876b87 100644
|
|
--- a/drivers/tty/serial/samsung.c
|
|
+++ b/drivers/tty/serial/samsung.c
|
|
@@ -1365,11 +1365,14 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
|
|
wr_regl(port, S3C2410_ULCON, ulcon);
|
|
wr_regl(port, S3C2410_UBRDIV, quot);
|
|
|
|
+ port->status &= ~UPSTAT_AUTOCTS;
|
|
+
|
|
umcon = rd_regl(port, S3C2410_UMCON);
|
|
if (termios->c_cflag & CRTSCTS) {
|
|
umcon |= S3C2410_UMCOM_AFC;
|
|
/* Disable RTS when RX FIFO contains 63 bytes */
|
|
umcon &= ~S3C2412_UMCON_AFC_8;
|
|
+ port->status = UPSTAT_AUTOCTS;
|
|
} else {
|
|
umcon &= ~S3C2410_UMCOM_AFC;
|
|
}
|
|
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
|
|
index c439a5a1e6c07..556f50aa1b586 100644
|
|
--- a/drivers/tty/serial/serial_core.c
|
|
+++ b/drivers/tty/serial/serial_core.c
|
|
@@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty)
|
|
struct uart_port *port;
|
|
unsigned long flags;
|
|
|
|
+ if (!state)
|
|
+ return;
|
|
+
|
|
port = uart_port_lock(state, flags);
|
|
__uart_start(tty);
|
|
uart_port_unlock(port, flags);
|
|
@@ -205,10 +208,15 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
|
|
if (!state->xmit.buf) {
|
|
state->xmit.buf = (unsigned char *) page;
|
|
uart_circ_clear(&state->xmit);
|
|
+ uart_port_unlock(uport, flags);
|
|
} else {
|
|
+ uart_port_unlock(uport, flags);
|
|
+ /*
|
|
+ * Do not free() the page under the port lock, see
|
|
+ * uart_shutdown().
|
|
+ */
|
|
free_page(page);
|
|
}
|
|
- uart_port_unlock(uport, flags);
|
|
|
|
retval = uport->ops->startup(uport);
|
|
if (retval == 0) {
|
|
@@ -268,6 +276,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
|
|
struct uart_port *uport = uart_port_check(state);
|
|
struct tty_port *port = &state->port;
|
|
unsigned long flags = 0;
|
|
+ char *xmit_buf = NULL;
|
|
|
|
/*
|
|
* Set the TTY IO error marker
|
|
@@ -298,14 +307,18 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
|
|
tty_port_set_suspended(port, 0);
|
|
|
|
/*
|
|
- * Free the transmit buffer page.
|
|
+ * Do not free() the transmit buffer page under the port lock since
|
|
+ * this can create various circular locking scenarios. For instance,
|
|
+ * console driver may need to allocate/free a debug object, which
|
|
+ * can endup in printk() recursion.
|
|
*/
|
|
uart_port_lock(state, flags);
|
|
- if (state->xmit.buf) {
|
|
- free_page((unsigned long)state->xmit.buf);
|
|
- state->xmit.buf = NULL;
|
|
- }
|
|
+ xmit_buf = state->xmit.buf;
|
|
+ state->xmit.buf = NULL;
|
|
uart_port_unlock(uport, flags);
|
|
+
|
|
+ if (xmit_buf)
|
|
+ free_page((unsigned long)xmit_buf);
|
|
}
|
|
|
|
/**
|
|
@@ -540,10 +553,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
|
|
int ret = 0;
|
|
|
|
circ = &state->xmit;
|
|
- if (!circ->buf)
|
|
+ port = uart_port_lock(state, flags);
|
|
+ if (!circ->buf) {
|
|
+ uart_port_unlock(port, flags);
|
|
return 0;
|
|
+ }
|
|
|
|
- port = uart_port_lock(state, flags);
|
|
if (port && uart_circ_chars_free(circ) != 0) {
|
|
circ->buf[circ->head] = c;
|
|
circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
|
|
@@ -576,11 +591,13 @@ static int uart_write(struct tty_struct *tty,
|
|
return -EL3HLT;
|
|
}
|
|
|
|
+ port = uart_port_lock(state, flags);
|
|
circ = &state->xmit;
|
|
- if (!circ->buf)
|
|
+ if (!circ->buf) {
|
|
+ uart_port_unlock(port, flags);
|
|
return 0;
|
|
+ }
|
|
|
|
- port = uart_port_lock(state, flags);
|
|
while (port) {
|
|
c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
|
|
if (count < c)
|
|
@@ -713,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty)
|
|
upstat_t mask = UPSTAT_SYNC_FIFO;
|
|
struct uart_port *port;
|
|
|
|
+ if (!state)
|
|
+ return;
|
|
+
|
|
port = uart_port_ref(state);
|
|
if (!port)
|
|
return;
|
|
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
|
|
index cc56cb3b3ecaa..014944a41613a 100644
|
|
--- a/drivers/tty/serial/sh-sci.c
|
|
+++ b/drivers/tty/serial/sh-sci.c
|
|
@@ -1331,7 +1331,7 @@ static void sci_tx_dma_release(struct sci_port *s)
|
|
dma_release_channel(chan);
|
|
}
|
|
|
|
-static void sci_submit_rx(struct sci_port *s)
|
|
+static int sci_submit_rx(struct sci_port *s, bool port_lock_held)
|
|
{
|
|
struct dma_chan *chan = s->chan_rx;
|
|
struct uart_port *port = &s->port;
|
|
@@ -1359,19 +1359,22 @@ static void sci_submit_rx(struct sci_port *s)
|
|
s->active_rx = s->cookie_rx[0];
|
|
|
|
dma_async_issue_pending(chan);
|
|
- return;
|
|
+ return 0;
|
|
|
|
fail:
|
|
+ /* Switch to PIO */
|
|
+ if (!port_lock_held)
|
|
+ spin_lock_irqsave(&port->lock, flags);
|
|
if (i)
|
|
dmaengine_terminate_async(chan);
|
|
for (i = 0; i < 2; i++)
|
|
s->cookie_rx[i] = -EINVAL;
|
|
s->active_rx = -EINVAL;
|
|
- /* Switch to PIO */
|
|
- spin_lock_irqsave(&port->lock, flags);
|
|
s->chan_rx = NULL;
|
|
sci_start_rx(port);
|
|
- spin_unlock_irqrestore(&port->lock, flags);
|
|
+ if (!port_lock_held)
|
|
+ spin_unlock_irqrestore(&port->lock, flags);
|
|
+ return -EAGAIN;
|
|
}
|
|
|
|
static void work_fn_tx(struct work_struct *work)
|
|
@@ -1491,7 +1494,7 @@ static enum hrtimer_restart rx_timer_fn(struct hrtimer *t)
|
|
}
|
|
|
|
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
|
|
- sci_submit_rx(s);
|
|
+ sci_submit_rx(s, true);
|
|
|
|
/* Direct new serial port interrupts back to CPU */
|
|
scr = serial_port_in(port, SCSCR);
|
|
@@ -1617,7 +1620,7 @@ static void sci_request_dma(struct uart_port *port)
|
|
s->chan_rx_saved = s->chan_rx = chan;
|
|
|
|
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
|
|
- sci_submit_rx(s);
|
|
+ sci_submit_rx(s, false);
|
|
}
|
|
}
|
|
|
|
@@ -1666,8 +1669,10 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
|
|
disable_irq_nosync(irq);
|
|
scr |= SCSCR_RDRQE;
|
|
} else {
|
|
+ if (sci_submit_rx(s, false) < 0)
|
|
+ goto handle_pio;
|
|
+
|
|
scr &= ~SCSCR_RIE;
|
|
- sci_submit_rx(s);
|
|
}
|
|
serial_port_out(port, SCSCR, scr);
|
|
/* Clear current interrupt */
|
|
@@ -1679,6 +1684,8 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
|
|
|
|
return IRQ_HANDLED;
|
|
}
|
|
+
|
|
+handle_pio:
|
|
#endif
|
|
|
|
if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) {
|
|
@@ -1914,7 +1921,7 @@ out_nomem:
|
|
|
|
static void sci_free_irq(struct sci_port *port)
|
|
{
|
|
- int i;
|
|
+ int i, j;
|
|
|
|
/*
|
|
* Intentionally in reverse order so we iterate over the muxed
|
|
@@ -1930,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port)
|
|
if (unlikely(irq < 0))
|
|
continue;
|
|
|
|
+ /* Check if already freed (irq was muxed) */
|
|
+ for (j = 0; j < i; j++)
|
|
+ if (port->irqs[j] == irq)
|
|
+ j = i + 1;
|
|
+ if (j > i)
|
|
+ continue;
|
|
+
|
|
free_irq(port->irqs[i], port);
|
|
kfree(port->irqstr[i]);
|
|
|
|
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
|
|
index 57c66d2c34714..6df252648e406 100644
|
|
--- a/drivers/tty/serial/xilinx_uartps.c
|
|
+++ b/drivers/tty/serial/xilinx_uartps.c
|
|
@@ -123,7 +123,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
|
|
#define CDNS_UART_IXR_RXTRIG 0x00000001 /* RX FIFO trigger interrupt */
|
|
#define CDNS_UART_IXR_RXFULL 0x00000004 /* RX FIFO full interrupt. */
|
|
#define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
|
|
-#define CDNS_UART_IXR_MASK 0x00001FFF /* Valid bit mask */
|
|
+#define CDNS_UART_IXR_RXMASK 0x000021e7 /* Valid RX bit mask */
|
|
|
|
/*
|
|
* Do not enable parity error interrupt for the following
|
|
@@ -364,7 +364,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
|
|
cdns_uart_handle_tx(dev_id);
|
|
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
|
|
}
|
|
- if (isrstatus & CDNS_UART_IXR_MASK)
|
|
+ if (isrstatus & CDNS_UART_IXR_RXMASK)
|
|
cdns_uart_handle_rx(dev_id, isrstatus);
|
|
|
|
spin_unlock(&port->lock);
|
|
@@ -1719,6 +1719,7 @@ static struct platform_driver cdns_uart_platform_driver = {
|
|
.name = CDNS_UART_NAME,
|
|
.of_match_table = cdns_uart_of_match,
|
|
.pm = &cdns_uart_dev_pm_ops,
|
|
+ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_XILINX_PS_UART),
|
|
},
|
|
};
|
|
|
|
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
|
|
index 687250ec80323..21ffcce169271 100644
|
|
--- a/drivers/tty/tty_io.c
|
|
+++ b/drivers/tty/tty_io.c
|
|
@@ -1256,7 +1256,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
|
|
static int tty_reopen(struct tty_struct *tty)
|
|
{
|
|
struct tty_driver *driver = tty->driver;
|
|
- int retval;
|
|
+ struct tty_ldisc *ld;
|
|
+ int retval = 0;
|
|
|
|
if (driver->type == TTY_DRIVER_TYPE_PTY &&
|
|
driver->subtype == PTY_TYPE_MASTER)
|
|
@@ -1268,14 +1269,21 @@ static int tty_reopen(struct tty_struct *tty)
|
|
if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
|
|
return -EBUSY;
|
|
|
|
- tty->count++;
|
|
+ ld = tty_ldisc_ref_wait(tty);
|
|
+ if (ld) {
|
|
+ tty_ldisc_deref(ld);
|
|
+ } else {
|
|
+ retval = tty_ldisc_lock(tty, 5 * HZ);
|
|
+ if (retval)
|
|
+ return retval;
|
|
|
|
- if (tty->ldisc)
|
|
- return 0;
|
|
+ if (!tty->ldisc)
|
|
+ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
|
|
+ tty_ldisc_unlock(tty);
|
|
+ }
|
|
|
|
- retval = tty_ldisc_reinit(tty, tty->termios.c_line);
|
|
- if (retval)
|
|
- tty->count--;
|
|
+ if (retval == 0)
|
|
+ tty->count++;
|
|
|
|
return retval;
|
|
}
|
|
@@ -2181,7 +2189,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
|
|
ld = tty_ldisc_ref_wait(tty);
|
|
if (!ld)
|
|
return -EIO;
|
|
- ld->ops->receive_buf(tty, &ch, &mbz, 1);
|
|
+ if (ld->ops->receive_buf)
|
|
+ ld->ops->receive_buf(tty, &ch, &mbz, 1);
|
|
tty_ldisc_deref(ld);
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
|
|
index 0c98d88f795a7..b989ca26fc788 100644
|
|
--- a/drivers/tty/tty_ldsem.c
|
|
+++ b/drivers/tty/tty_ldsem.c
|
|
@@ -293,6 +293,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
|
|
if (!locked)
|
|
atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
|
|
list_del(&waiter.list);
|
|
+
|
|
+ /*
|
|
+ * In case of timeout, wake up every reader who gave the right of way
|
|
+ * to writer. Prevent separation readers into two groups:
|
|
+ * one that helds semaphore and another that sleeps.
|
|
+ * (in case of no contention with a writer)
|
|
+ */
|
|
+ if (!locked && list_empty(&sem->write_wait))
|
|
+ __ldsem_wake_readers(sem);
|
|
+
|
|
raw_spin_unlock_irq(&sem->wait_lock);
|
|
|
|
__set_current_state(TASK_RUNNING);
|
|
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
|
|
index 41ec8e5010f30..bba75560d11e2 100644
|
|
--- a/drivers/tty/vt/vt.c
|
|
+++ b/drivers/tty/vt/vt.c
|
|
@@ -1272,6 +1272,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
|
|
if (con_is_visible(vc))
|
|
update_screen(vc);
|
|
vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
|
|
+ notify_update(vc);
|
|
return err;
|
|
}
|
|
|
|
@@ -2764,8 +2765,8 @@ rescan_last_byte:
|
|
con_flush(vc, draw_from, draw_to, &draw_x);
|
|
vc_uniscr_debug_check(vc);
|
|
console_conditional_schedule();
|
|
- console_unlock();
|
|
notify_update(vc);
|
|
+ console_unlock();
|
|
return n;
|
|
}
|
|
|
|
@@ -2884,8 +2885,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
|
|
unsigned char c;
|
|
static DEFINE_SPINLOCK(printing_lock);
|
|
const ushort *start;
|
|
- ushort cnt = 0;
|
|
- ushort myx;
|
|
+ ushort start_x, cnt;
|
|
int kmsg_console;
|
|
|
|
/* console busy or not yet initialized */
|
|
@@ -2898,10 +2898,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
|
|
if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
|
|
vc = vc_cons[kmsg_console - 1].d;
|
|
|
|
- /* read `x' only after setting currcons properly (otherwise
|
|
- the `x' macro will read the x of the foreground console). */
|
|
- myx = vc->vc_x;
|
|
-
|
|
if (!vc_cons_allocated(fg_console)) {
|
|
/* impossible */
|
|
/* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
|
|
@@ -2916,53 +2912,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
|
|
hide_cursor(vc);
|
|
|
|
start = (ushort *)vc->vc_pos;
|
|
-
|
|
- /* Contrived structure to try to emulate original need_wrap behaviour
|
|
- * Problems caused when we have need_wrap set on '\n' character */
|
|
+ start_x = vc->vc_x;
|
|
+ cnt = 0;
|
|
while (count--) {
|
|
c = *b++;
|
|
if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
|
|
- if (cnt > 0) {
|
|
- if (con_is_visible(vc))
|
|
- vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
|
|
- vc->vc_x += cnt;
|
|
- if (vc->vc_need_wrap)
|
|
- vc->vc_x--;
|
|
- cnt = 0;
|
|
- }
|
|
+ if (cnt && con_is_visible(vc))
|
|
+ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
|
|
+ cnt = 0;
|
|
if (c == 8) { /* backspace */
|
|
bs(vc);
|
|
start = (ushort *)vc->vc_pos;
|
|
- myx = vc->vc_x;
|
|
+ start_x = vc->vc_x;
|
|
continue;
|
|
}
|
|
if (c != 13)
|
|
lf(vc);
|
|
cr(vc);
|
|
start = (ushort *)vc->vc_pos;
|
|
- myx = vc->vc_x;
|
|
+ start_x = vc->vc_x;
|
|
if (c == 10 || c == 13)
|
|
continue;
|
|
}
|
|
+ vc_uniscr_putc(vc, c);
|
|
scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
|
|
notify_write(vc, c);
|
|
cnt++;
|
|
- if (myx == vc->vc_cols - 1) {
|
|
- vc->vc_need_wrap = 1;
|
|
- continue;
|
|
- }
|
|
- vc->vc_pos += 2;
|
|
- myx++;
|
|
- }
|
|
- if (cnt > 0) {
|
|
- if (con_is_visible(vc))
|
|
- vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
|
|
- vc->vc_x += cnt;
|
|
- if (vc->vc_x == vc->vc_cols) {
|
|
- vc->vc_x--;
|
|
+ if (vc->vc_x == vc->vc_cols - 1) {
|
|
vc->vc_need_wrap = 1;
|
|
+ } else {
|
|
+ vc->vc_pos += 2;
|
|
+ vc->vc_x++;
|
|
}
|
|
}
|
|
+ if (cnt && con_is_visible(vc))
|
|
+ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
|
|
set_cursor(vc);
|
|
notify_update(vc);
|
|
|
|
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
|
|
index 987fc5ba63211..70e6c956c23ce 100644
|
|
--- a/drivers/usb/Kconfig
|
|
+++ b/drivers/usb/Kconfig
|
|
@@ -205,8 +205,4 @@ config USB_ULPI_BUS
|
|
To compile this driver as a module, choose M here: the module will
|
|
be called ulpi.
|
|
|
|
-config USB_ROLE_SWITCH
|
|
- tristate
|
|
- select USB_COMMON
|
|
-
|
|
endif # USB_SUPPORT
|
|
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
|
|
index 1b68fed464cb9..739f8960811ac 100644
|
|
--- a/drivers/usb/class/cdc-acm.c
|
|
+++ b/drivers/usb/class/cdc-acm.c
|
|
@@ -581,6 +581,13 @@ static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
|
|
if (retval)
|
|
goto error_init_termios;
|
|
|
|
+ /*
|
|
+ * Suppress initial echoing for some devices which might send data
|
|
+ * immediately after acm driver has been installed.
|
|
+ */
|
|
+ if (acm->quirks & DISABLE_ECHO)
|
|
+ tty->termios.c_lflag &= ~ECHO;
|
|
+
|
|
tty->driver_data = acm;
|
|
|
|
return 0;
|
|
@@ -1657,6 +1664,9 @@ static const struct usb_device_id acm_ids[] = {
|
|
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
|
|
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
|
|
},
|
|
+ { USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
|
|
+ .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
|
|
+ },
|
|
{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
|
|
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
|
|
},
|
|
@@ -1855,6 +1865,13 @@ static const struct usb_device_id acm_ids[] = {
|
|
.driver_info = IGNORE_DEVICE,
|
|
},
|
|
|
|
+ { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
|
|
+ .driver_info = SEND_ZERO_PACKET,
|
|
+ },
|
|
+ { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
|
|
+ .driver_info = SEND_ZERO_PACKET,
|
|
+ },
|
|
+
|
|
/* control interfaces without any protocol set */
|
|
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
|
|
USB_CDC_PROTO_NONE) },
|
|
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
|
|
index ca06b20d7af9c..515aad0847ee8 100644
|
|
--- a/drivers/usb/class/cdc-acm.h
|
|
+++ b/drivers/usb/class/cdc-acm.h
|
|
@@ -140,3 +140,4 @@ struct acm {
|
|
#define QUIRK_CONTROL_LINE_STATE BIT(6)
|
|
#define CLEAR_HALT_CONDITIONS BIT(7)
|
|
#define SEND_ZERO_PACKET BIT(8)
|
|
+#define DISABLE_ECHO BIT(9)
|
|
diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile
|
|
index fb4d5ef4165c7..0a7c45e854813 100644
|
|
--- a/drivers/usb/common/Makefile
|
|
+++ b/drivers/usb/common/Makefile
|
|
@@ -9,4 +9,3 @@ usb-common-$(CONFIG_USB_LED_TRIG) += led.o
|
|
|
|
obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o
|
|
obj-$(CONFIG_USB_ULPI_BUS) += ulpi.o
|
|
-obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
|
|
diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c
|
|
deleted file mode 100644
|
|
index 99116af07f1d9..0000000000000
|
|
--- a/drivers/usb/common/roles.c
|
|
+++ /dev/null
|
|
@@ -1,314 +0,0 @@
|
|
-// SPDX-License-Identifier: GPL-2.0
|
|
-/*
|
|
- * USB Role Switch Support
|
|
- *
|
|
- * Copyright (C) 2018 Intel Corporation
|
|
- * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
|
|
- * Hans de Goede <hdegoede@redhat.com>
|
|
- */
|
|
-
|
|
-#include <linux/usb/role.h>
|
|
-#include <linux/device.h>
|
|
-#include <linux/module.h>
|
|
-#include <linux/mutex.h>
|
|
-#include <linux/slab.h>
|
|
-
|
|
-static struct class *role_class;
|
|
-
|
|
-struct usb_role_switch {
|
|
- struct device dev;
|
|
- struct mutex lock; /* device lock*/
|
|
- enum usb_role role;
|
|
-
|
|
- /* From descriptor */
|
|
- struct device *usb2_port;
|
|
- struct device *usb3_port;
|
|
- struct device *udc;
|
|
- usb_role_switch_set_t set;
|
|
- usb_role_switch_get_t get;
|
|
- bool allow_userspace_control;
|
|
-};
|
|
-
|
|
-#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
|
|
-
|
|
-/**
|
|
- * usb_role_switch_set_role - Set USB role for a switch
|
|
- * @sw: USB role switch
|
|
- * @role: USB role to be switched to
|
|
- *
|
|
- * Set USB role @role for @sw.
|
|
- */
|
|
-int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
|
|
-{
|
|
- int ret;
|
|
-
|
|
- if (IS_ERR_OR_NULL(sw))
|
|
- return 0;
|
|
-
|
|
- mutex_lock(&sw->lock);
|
|
-
|
|
- ret = sw->set(sw->dev.parent, role);
|
|
- if (!ret)
|
|
- sw->role = role;
|
|
-
|
|
- mutex_unlock(&sw->lock);
|
|
-
|
|
- return ret;
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
|
|
-
|
|
-/**
|
|
- * usb_role_switch_get_role - Get the USB role for a switch
|
|
- * @sw: USB role switch
|
|
- *
|
|
- * Depending on the role-switch-driver this function returns either a cached
|
|
- * value of the last set role, or reads back the actual value from the hardware.
|
|
- */
|
|
-enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
|
|
-{
|
|
- enum usb_role role;
|
|
-
|
|
- if (IS_ERR_OR_NULL(sw))
|
|
- return USB_ROLE_NONE;
|
|
-
|
|
- mutex_lock(&sw->lock);
|
|
-
|
|
- if (sw->get)
|
|
- role = sw->get(sw->dev.parent);
|
|
- else
|
|
- role = sw->role;
|
|
-
|
|
- mutex_unlock(&sw->lock);
|
|
-
|
|
- return role;
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
|
|
-
|
|
-static int __switch_match(struct device *dev, const void *name)
|
|
-{
|
|
- return !strcmp((const char *)name, dev_name(dev));
|
|
-}
|
|
-
|
|
-static void *usb_role_switch_match(struct device_connection *con, int ep,
|
|
- void *data)
|
|
-{
|
|
- struct device *dev;
|
|
-
|
|
- dev = class_find_device(role_class, NULL, con->endpoint[ep],
|
|
- __switch_match);
|
|
-
|
|
- return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
|
|
-}
|
|
-
|
|
-/**
|
|
- * usb_role_switch_get - Find USB role switch linked with the caller
|
|
- * @dev: The caller device
|
|
- *
|
|
- * Finds and returns role switch linked with @dev. The reference count for the
|
|
- * found switch is incremented.
|
|
- */
|
|
-struct usb_role_switch *usb_role_switch_get(struct device *dev)
|
|
-{
|
|
- struct usb_role_switch *sw;
|
|
-
|
|
- sw = device_connection_find_match(dev, "usb-role-switch", NULL,
|
|
- usb_role_switch_match);
|
|
-
|
|
- if (!IS_ERR_OR_NULL(sw))
|
|
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
|
|
-
|
|
- return sw;
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(usb_role_switch_get);
|
|
-
|
|
-/**
|
|
- * usb_role_switch_put - Release handle to a switch
|
|
- * @sw: USB Role Switch
|
|
- *
|
|
- * Decrement reference count for @sw.
|
|
- */
|
|
-void usb_role_switch_put(struct usb_role_switch *sw)
|
|
-{
|
|
- if (!IS_ERR_OR_NULL(sw)) {
|
|
- put_device(&sw->dev);
|
|
- module_put(sw->dev.parent->driver->owner);
|
|
- }
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(usb_role_switch_put);
|
|
-
|
|
-static umode_t
|
|
-usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
|
|
-{
|
|
- struct device *dev = container_of(kobj, typeof(*dev), kobj);
|
|
- struct usb_role_switch *sw = to_role_switch(dev);
|
|
-
|
|
- if (sw->allow_userspace_control)
|
|
- return attr->mode;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static const char * const usb_roles[] = {
|
|
- [USB_ROLE_NONE] = "none",
|
|
- [USB_ROLE_HOST] = "host",
|
|
- [USB_ROLE_DEVICE] = "device",
|
|
-};
|
|
-
|
|
-static ssize_t
|
|
-role_show(struct device *dev, struct device_attribute *attr, char *buf)
|
|
-{
|
|
- struct usb_role_switch *sw = to_role_switch(dev);
|
|
- enum usb_role role = usb_role_switch_get_role(sw);
|
|
-
|
|
- return sprintf(buf, "%s\n", usb_roles[role]);
|
|
-}
|
|
-
|
|
-static ssize_t role_store(struct device *dev, struct device_attribute *attr,
|
|
- const char *buf, size_t size)
|
|
-{
|
|
- struct usb_role_switch *sw = to_role_switch(dev);
|
|
- int ret;
|
|
-
|
|
- ret = sysfs_match_string(usb_roles, buf);
|
|
- if (ret < 0) {
|
|
- bool res;
|
|
-
|
|
- /* Extra check if the user wants to disable the switch */
|
|
- ret = kstrtobool(buf, &res);
|
|
- if (ret || res)
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- ret = usb_role_switch_set_role(sw, ret);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- return size;
|
|
-}
|
|
-static DEVICE_ATTR_RW(role);
|
|
-
|
|
-static struct attribute *usb_role_switch_attrs[] = {
|
|
- &dev_attr_role.attr,
|
|
- NULL,
|
|
-};
|
|
-
|
|
-static const struct attribute_group usb_role_switch_group = {
|
|
- .is_visible = usb_role_switch_is_visible,
|
|
- .attrs = usb_role_switch_attrs,
|
|
-};
|
|
-
|
|
-static const struct attribute_group *usb_role_switch_groups[] = {
|
|
- &usb_role_switch_group,
|
|
- NULL,
|
|
-};
|
|
-
|
|
-static int
|
|
-usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
|
|
-{
|
|
- int ret;
|
|
-
|
|
- ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
|
|
- if (ret)
|
|
- dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static void usb_role_switch_release(struct device *dev)
|
|
-{
|
|
- struct usb_role_switch *sw = to_role_switch(dev);
|
|
-
|
|
- kfree(sw);
|
|
-}
|
|
-
|
|
-static const struct device_type usb_role_dev_type = {
|
|
- .name = "usb_role_switch",
|
|
- .groups = usb_role_switch_groups,
|
|
- .uevent = usb_role_switch_uevent,
|
|
- .release = usb_role_switch_release,
|
|
-};
|
|
-
|
|
-/**
|
|
- * usb_role_switch_register - Register USB Role Switch
|
|
- * @parent: Parent device for the switch
|
|
- * @desc: Description of the switch
|
|
- *
|
|
- * USB Role Switch is a device capable or choosing the role for USB connector.
|
|
- * On platforms where the USB controller is dual-role capable, the controller
|
|
- * driver will need to register the switch. On platforms where the USB host and
|
|
- * USB device controllers behind the connector are separate, there will be a
|
|
- * mux, and the driver for that mux will need to register the switch.
|
|
- *
|
|
- * Returns handle to a new role switch or ERR_PTR. The content of @desc is
|
|
- * copied.
|
|
- */
|
|
-struct usb_role_switch *
|
|
-usb_role_switch_register(struct device *parent,
|
|
- const struct usb_role_switch_desc *desc)
|
|
-{
|
|
- struct usb_role_switch *sw;
|
|
- int ret;
|
|
-
|
|
- if (!desc || !desc->set)
|
|
- return ERR_PTR(-EINVAL);
|
|
-
|
|
- sw = kzalloc(sizeof(*sw), GFP_KERNEL);
|
|
- if (!sw)
|
|
- return ERR_PTR(-ENOMEM);
|
|
-
|
|
- mutex_init(&sw->lock);
|
|
-
|
|
- sw->allow_userspace_control = desc->allow_userspace_control;
|
|
- sw->usb2_port = desc->usb2_port;
|
|
- sw->usb3_port = desc->usb3_port;
|
|
- sw->udc = desc->udc;
|
|
- sw->set = desc->set;
|
|
- sw->get = desc->get;
|
|
-
|
|
- sw->dev.parent = parent;
|
|
- sw->dev.class = role_class;
|
|
- sw->dev.type = &usb_role_dev_type;
|
|
- dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
|
|
-
|
|
- ret = device_register(&sw->dev);
|
|
- if (ret) {
|
|
- put_device(&sw->dev);
|
|
- return ERR_PTR(ret);
|
|
- }
|
|
-
|
|
- /* TODO: Symlinks for the host port and the device controller. */
|
|
-
|
|
- return sw;
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(usb_role_switch_register);
|
|
-
|
|
-/**
|
|
- * usb_role_switch_unregister - Unregsiter USB Role Switch
|
|
- * @sw: USB Role Switch
|
|
- *
|
|
- * Unregister switch that was registered with usb_role_switch_register().
|
|
- */
|
|
-void usb_role_switch_unregister(struct usb_role_switch *sw)
|
|
-{
|
|
- if (!IS_ERR_OR_NULL(sw))
|
|
- device_unregister(&sw->dev);
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
|
|
-
|
|
-static int __init usb_roles_init(void)
|
|
-{
|
|
- role_class = class_create(THIS_MODULE, "usb_role");
|
|
- return PTR_ERR_OR_ZERO(role_class);
|
|
-}
|
|
-subsys_initcall(usb_roles_init);
|
|
-
|
|
-static void __exit usb_roles_exit(void)
|
|
-{
|
|
- class_destroy(role_class);
|
|
-}
|
|
-module_exit(usb_roles_exit);
|
|
-
|
|
-MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
|
|
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
|
|
-MODULE_LICENSE("GPL v2");
|
|
-MODULE_DESCRIPTION("USB Role Class");
|
|
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
|
|
index 356b05c82dbcf..f713cecc1f419 100644
|
|
--- a/drivers/usb/core/generic.c
|
|
+++ b/drivers/usb/core/generic.c
|
|
@@ -143,9 +143,12 @@ int usb_choose_configuration(struct usb_device *udev)
|
|
continue;
|
|
}
|
|
|
|
- if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) {
|
|
- best = c;
|
|
- break;
|
|
+ if (i > 0 && desc && is_audio(desc)) {
|
|
+ if (is_uac3_config(desc)) {
|
|
+ best = c;
|
|
+ break;
|
|
+ }
|
|
+ continue;
|
|
}
|
|
|
|
/* From the remaining configs, choose the first one whose
|
|
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
|
|
index f76b2e0aba9d5..1d1e61e980f38 100644
|
|
--- a/drivers/usb/core/hub.c
|
|
+++ b/drivers/usb/core/hub.c
|
|
@@ -1112,6 +1112,16 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
|
|
USB_PORT_FEAT_ENABLE);
|
|
}
|
|
|
|
+ /*
|
|
+ * Add debounce if USB3 link is in polling/link training state.
|
|
+ * Link will automatically transition to Enabled state after
|
|
+ * link training completes.
|
|
+ */
|
|
+ if (hub_is_superspeed(hdev) &&
|
|
+ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
|
|
+ USB_SS_PORT_LS_POLLING))
|
|
+ need_debounce_delay = true;
|
|
+
|
|
/* Clear status-change flags; we'll debounce later */
|
|
if (portchange & USB_PORT_STAT_C_CONNECTION) {
|
|
need_debounce_delay = true;
|
|
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
|
|
index dc7f7fd71684c..c12ac56606c3f 100644
|
|
--- a/drivers/usb/core/ledtrig-usbport.c
|
|
+++ b/drivers/usb/core/ledtrig-usbport.c
|
|
@@ -119,11 +119,6 @@ static const struct attribute_group ports_group = {
|
|
.attrs = ports_attrs,
|
|
};
|
|
|
|
-static const struct attribute_group *ports_groups[] = {
|
|
- &ports_group,
|
|
- NULL
|
|
-};
|
|
-
|
|
/***************************************
|
|
* Adding & removing ports
|
|
***************************************/
|
|
@@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
|
|
static int usbport_trig_activate(struct led_classdev *led_cdev)
|
|
{
|
|
struct usbport_trig_data *usbport_data;
|
|
+ int err;
|
|
|
|
usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
|
|
if (!usbport_data)
|
|
@@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
|
|
|
|
/* List of ports */
|
|
INIT_LIST_HEAD(&usbport_data->ports);
|
|
+ err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
|
|
+ if (err)
|
|
+ goto err_free;
|
|
usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
|
|
usbport_trig_update_count(usbport_data);
|
|
|
|
@@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
|
|
usbport_data->nb.notifier_call = usbport_trig_notify;
|
|
led_set_trigger_data(led_cdev, usbport_data);
|
|
usb_register_notify(&usbport_data->nb);
|
|
-
|
|
return 0;
|
|
+
|
|
+err_free:
|
|
+ kfree(usbport_data);
|
|
+ return err;
|
|
}
|
|
|
|
static void usbport_trig_deactivate(struct led_classdev *led_cdev)
|
|
@@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev)
|
|
usbport_trig_remove_port(usbport_data, port);
|
|
}
|
|
|
|
+ sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
|
|
+
|
|
usb_unregister_notify(&usbport_data->nb);
|
|
|
|
kfree(usbport_data);
|
|
@@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = {
|
|
.name = "usbport",
|
|
.activate = usbport_trig_activate,
|
|
.deactivate = usbport_trig_deactivate,
|
|
- .groups = ports_groups,
|
|
};
|
|
|
|
static int __init usbport_trig_init(void)
|
|
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
|
|
index 514c5214ddb24..8bc35d53408b5 100644
|
|
--- a/drivers/usb/core/quirks.c
|
|
+++ b/drivers/usb/core/quirks.c
|
|
@@ -394,7 +394,8 @@ static const struct usb_device_id usb_quirk_list[] = {
|
|
{ USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
|
|
|
|
/* Corsair K70 RGB */
|
|
- { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
|
|
+ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
|
|
+ USB_QUIRK_DELAY_CTRL_MSG },
|
|
|
|
/* Corsair Strafe */
|
|
{ USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
|
|
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
|
|
index 2d6d2c8244de6..a00a56b4ae794 100644
|
|
--- a/drivers/usb/dwc2/gadget.c
|
|
+++ b/drivers/usb/dwc2/gadget.c
|
|
@@ -3165,8 +3165,6 @@ static void kill_all_requests(struct dwc2_hsotg *hsotg,
|
|
dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
|
|
}
|
|
|
|
-static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
|
|
-
|
|
/**
|
|
* dwc2_hsotg_disconnect - disconnect service
|
|
* @hsotg: The device state.
|
|
@@ -3188,9 +3186,11 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
|
|
/* all endpoints should be shutdown */
|
|
for (ep = 0; ep < hsotg->num_of_eps; ep++) {
|
|
if (hsotg->eps_in[ep])
|
|
- dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
|
|
+ kill_all_requests(hsotg, hsotg->eps_in[ep],
|
|
+ -ESHUTDOWN);
|
|
if (hsotg->eps_out[ep])
|
|
- dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
|
|
+ kill_all_requests(hsotg, hsotg->eps_out[ep],
|
|
+ -ESHUTDOWN);
|
|
}
|
|
|
|
call_gadget(hsotg, disconnect);
|
|
@@ -3234,6 +3234,7 @@ static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
|
|
GINTSTS_PTXFEMP | \
|
|
GINTSTS_RXFLVL)
|
|
|
|
+static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
|
|
/**
|
|
* dwc2_hsotg_core_init - issue softreset to the core
|
|
* @hsotg: The device state
|
|
@@ -4069,10 +4070,8 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
|
|
struct dwc2_hsotg *hsotg = hs_ep->parent;
|
|
int dir_in = hs_ep->dir_in;
|
|
int index = hs_ep->index;
|
|
- unsigned long flags;
|
|
u32 epctrl_reg;
|
|
u32 ctrl;
|
|
- int locked;
|
|
|
|
dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
|
|
|
|
@@ -4088,10 +4087,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
|
|
|
|
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
|
|
|
|
- locked = spin_is_locked(&hsotg->lock);
|
|
- if (!locked)
|
|
- spin_lock_irqsave(&hsotg->lock, flags);
|
|
-
|
|
ctrl = dwc2_readl(hsotg, epctrl_reg);
|
|
|
|
if (ctrl & DXEPCTL_EPENA)
|
|
@@ -4114,12 +4109,22 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
|
|
hs_ep->fifo_index = 0;
|
|
hs_ep->fifo_size = 0;
|
|
|
|
- if (!locked)
|
|
- spin_unlock_irqrestore(&hsotg->lock, flags);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
+static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
|
|
+{
|
|
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
|
|
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
|
|
+ unsigned long flags;
|
|
+ int ret;
|
|
+
|
|
+ spin_lock_irqsave(&hsotg->lock, flags);
|
|
+ ret = dwc2_hsotg_ep_disable(ep);
|
|
+ spin_unlock_irqrestore(&hsotg->lock, flags);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/**
|
|
* on_list - check request is on the given endpoint
|
|
* @ep: The endpoint to check.
|
|
@@ -4267,7 +4272,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
|
|
|
|
static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
|
|
.enable = dwc2_hsotg_ep_enable,
|
|
- .disable = dwc2_hsotg_ep_disable,
|
|
+ .disable = dwc2_hsotg_ep_disable_lock,
|
|
.alloc_request = dwc2_hsotg_ep_alloc_request,
|
|
.free_request = dwc2_hsotg_ep_free_request,
|
|
.queue = dwc2_hsotg_ep_queue_lock,
|
|
@@ -4407,9 +4412,9 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
|
|
/* all endpoints should be shutdown */
|
|
for (ep = 1; ep < hsotg->num_of_eps; ep++) {
|
|
if (hsotg->eps_in[ep])
|
|
- dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
|
|
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
|
|
if (hsotg->eps_out[ep])
|
|
- dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
|
|
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
|
|
}
|
|
|
|
spin_lock_irqsave(&hsotg->lock, flags);
|
|
@@ -4857,9 +4862,9 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
|
|
|
|
for (ep = 0; ep < hsotg->num_of_eps; ep++) {
|
|
if (hsotg->eps_in[ep])
|
|
- dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
|
|
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
|
|
if (hsotg->eps_out[ep])
|
|
- dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
|
|
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
|
|
index 3f9bccc95add1..c089ffa1f0a8f 100644
|
|
--- a/drivers/usb/dwc2/hcd.h
|
|
+++ b/drivers/usb/dwc2/hcd.h
|
|
@@ -366,7 +366,7 @@ struct dwc2_qh {
|
|
u32 desc_list_sz;
|
|
u32 *n_bytes;
|
|
struct timer_list unreserve_timer;
|
|
- struct timer_list wait_timer;
|
|
+ struct hrtimer wait_timer;
|
|
struct dwc2_tt *dwc_tt;
|
|
int ttport;
|
|
unsigned tt_buffer_dirty:1;
|
|
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
|
|
index 40839591d2ec1..ea3aa640c15c1 100644
|
|
--- a/drivers/usb/dwc2/hcd_queue.c
|
|
+++ b/drivers/usb/dwc2/hcd_queue.c
|
|
@@ -59,7 +59,7 @@
|
|
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
|
|
|
|
/* If we get a NAK, wait this long before retrying */
|
|
-#define DWC2_RETRY_WAIT_DELAY (msecs_to_jiffies(1))
|
|
+#define DWC2_RETRY_WAIT_DELAY 1*1E6L
|
|
|
|
/**
|
|
* dwc2_periodic_channel_available() - Checks that a channel is available for a
|
|
@@ -1464,10 +1464,12 @@ static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
|
|
* qh back to the "inactive" list, then queues transactions.
|
|
*
|
|
* @t: Pointer to wait_timer in a qh.
|
|
+ *
|
|
+ * Return: HRTIMER_NORESTART to not automatically restart this timer.
|
|
*/
|
|
-static void dwc2_wait_timer_fn(struct timer_list *t)
|
|
+static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
|
|
{
|
|
- struct dwc2_qh *qh = from_timer(qh, t, wait_timer);
|
|
+ struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
|
|
struct dwc2_hsotg *hsotg = qh->hsotg;
|
|
unsigned long flags;
|
|
|
|
@@ -1491,6 +1493,7 @@ static void dwc2_wait_timer_fn(struct timer_list *t)
|
|
}
|
|
|
|
spin_unlock_irqrestore(&hsotg->lock, flags);
|
|
+ return HRTIMER_NORESTART;
|
|
}
|
|
|
|
/**
|
|
@@ -1521,7 +1524,8 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
|
|
/* Initialize QH */
|
|
qh->hsotg = hsotg;
|
|
timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
|
|
- timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0);
|
|
+ hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
|
+ qh->wait_timer.function = &dwc2_wait_timer_fn;
|
|
qh->ep_type = ep_type;
|
|
qh->ep_is_in = ep_is_in;
|
|
|
|
@@ -1690,7 +1694,7 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
|
|
* won't do anything anyway, but we want it to finish before we free
|
|
* memory.
|
|
*/
|
|
- del_timer_sync(&qh->wait_timer);
|
|
+ hrtimer_cancel(&qh->wait_timer);
|
|
|
|
dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
|
|
|
|
@@ -1716,6 +1720,7 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
|
|
{
|
|
int status;
|
|
u32 intr_mask;
|
|
+ ktime_t delay;
|
|
|
|
if (dbg_qh(qh))
|
|
dev_vdbg(hsotg->dev, "%s()\n", __func__);
|
|
@@ -1734,8 +1739,8 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
|
|
list_add_tail(&qh->qh_list_entry,
|
|
&hsotg->non_periodic_sched_waiting);
|
|
qh->wait_timer_cancel = false;
|
|
- mod_timer(&qh->wait_timer,
|
|
- jiffies + DWC2_RETRY_WAIT_DELAY + 1);
|
|
+ delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
|
|
+ hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
|
|
} else {
|
|
list_add_tail(&qh->qh_list_entry,
|
|
&hsotg->non_periodic_sched_inactive);
|
|
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
|
|
index 7c1b6938f2128..24ff5f21cb25d 100644
|
|
--- a/drivers/usb/dwc2/params.c
|
|
+++ b/drivers/usb/dwc2/params.c
|
|
@@ -71,6 +71,13 @@ static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
|
|
p->power_down = false;
|
|
}
|
|
|
|
+static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
|
|
+{
|
|
+ struct dwc2_core_params *p = &hsotg->params;
|
|
+
|
|
+ p->power_down = 0;
|
|
+}
|
|
+
|
|
static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
|
|
{
|
|
struct dwc2_core_params *p = &hsotg->params;
|
|
@@ -111,6 +118,7 @@ static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg)
|
|
p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
|
|
p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
|
|
GAHBCFG_HBSTLEN_SHIFT;
|
|
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
|
|
}
|
|
|
|
static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
|
|
@@ -151,7 +159,8 @@ const struct of_device_id dwc2_of_match_table[] = {
|
|
{ .compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params },
|
|
{ .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
|
|
{ .compatible = "snps,dwc2" },
|
|
- { .compatible = "samsung,s3c6400-hsotg" },
|
|
+ { .compatible = "samsung,s3c6400-hsotg",
|
|
+ .data = dwc2_set_s3c6400_params },
|
|
{ .compatible = "amlogic,meson8-usb",
|
|
.data = dwc2_set_amlogic_params },
|
|
{ .compatible = "amlogic,meson8b-usb",
|
|
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
|
|
index 842795856bf49..fdc6e4e403e81 100644
|
|
--- a/drivers/usb/dwc3/dwc3-pci.c
|
|
+++ b/drivers/usb/dwc3/dwc3-pci.c
|
|
@@ -170,20 +170,20 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
|
|
* put the gpio descriptors again here because the phy driver
|
|
* might want to grab them, too.
|
|
*/
|
|
- gpio = devm_gpiod_get_optional(&pdev->dev, "cs",
|
|
- GPIOD_OUT_LOW);
|
|
+ gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
|
|
if (IS_ERR(gpio))
|
|
return PTR_ERR(gpio);
|
|
|
|
gpiod_set_value_cansleep(gpio, 1);
|
|
+ gpiod_put(gpio);
|
|
|
|
- gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
|
|
- GPIOD_OUT_LOW);
|
|
+ gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
|
|
if (IS_ERR(gpio))
|
|
return PTR_ERR(gpio);
|
|
|
|
if (gpio) {
|
|
gpiod_set_value_cansleep(gpio, 1);
|
|
+ gpiod_put(gpio);
|
|
usleep_range(10000, 11000);
|
|
}
|
|
}
|
|
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
|
|
index 9f92ee03dde70..5a5b37e0a140f 100644
|
|
--- a/drivers/usb/dwc3/gadget.c
|
|
+++ b/drivers/usb/dwc3/gadget.c
|
|
@@ -177,6 +177,8 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
|
|
req->started = false;
|
|
list_del(&req->list);
|
|
req->remaining = 0;
|
|
+ req->unaligned = false;
|
|
+ req->zero = false;
|
|
|
|
if (req->request.status == -EINPROGRESS)
|
|
req->request.status = status;
|
|
@@ -919,8 +921,6 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
|
|
struct usb_gadget *gadget = &dwc->gadget;
|
|
enum usb_device_speed speed = gadget->speed;
|
|
|
|
- dwc3_ep_inc_enq(dep);
|
|
-
|
|
trb->size = DWC3_TRB_SIZE_LENGTH(length);
|
|
trb->bpl = lower_32_bits(dma);
|
|
trb->bph = upper_32_bits(dma);
|
|
@@ -990,16 +990,20 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
|
|
usb_endpoint_type(dep->endpoint.desc));
|
|
}
|
|
|
|
- /* always enable Continue on Short Packet */
|
|
+ /*
|
|
+ * Enable Continue on Short Packet
|
|
+ * when endpoint is not a stream capable
|
|
+ */
|
|
if (usb_endpoint_dir_out(dep->endpoint.desc)) {
|
|
- trb->ctrl |= DWC3_TRB_CTRL_CSP;
|
|
+ if (!dep->stream_capable)
|
|
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
|
|
|
|
if (short_not_ok)
|
|
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
|
|
}
|
|
|
|
if ((!no_interrupt && !chain) ||
|
|
- (dwc3_calc_trbs_left(dep) == 0))
|
|
+ (dwc3_calc_trbs_left(dep) == 1))
|
|
trb->ctrl |= DWC3_TRB_CTRL_IOC;
|
|
|
|
if (chain)
|
|
@@ -1010,6 +1014,8 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
|
|
|
|
trb->ctrl |= DWC3_TRB_CTRL_HWO;
|
|
|
|
+ dwc3_ep_inc_enq(dep);
|
|
+
|
|
trace_dwc3_prepare_trb(dep, trb);
|
|
}
|
|
|
|
@@ -1113,7 +1119,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
|
|
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
|
|
unsigned int rem = length % maxp;
|
|
|
|
- if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) {
|
|
+ if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
|
|
struct dwc3 *dwc = dep->dwc;
|
|
struct dwc3_trb *trb;
|
|
|
|
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
|
|
index f22714cce070b..f27c5cbe285c5 100644
|
|
--- a/drivers/usb/dwc3/trace.h
|
|
+++ b/drivers/usb/dwc3/trace.h
|
|
@@ -251,9 +251,11 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
|
|
s = "2x ";
|
|
break;
|
|
case 3:
|
|
+ default:
|
|
s = "3x ";
|
|
break;
|
|
}
|
|
+ break;
|
|
default:
|
|
s = "";
|
|
} s; }),
|
|
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
|
|
index 660878a195055..b77f3126580eb 100644
|
|
--- a/drivers/usb/gadget/udc/net2272.c
|
|
+++ b/drivers/usb/gadget/udc/net2272.c
|
|
@@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
|
|
#if defined(PLX_PCI_RDK2)
|
|
/* see if PCI int for us by checking irqstat */
|
|
intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
|
|
- if (!intcsr & (1 << NET2272_PCI_IRQ)) {
|
|
+ if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
|
|
spin_unlock(&dev->lock);
|
|
return IRQ_NONE;
|
|
}
|
|
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
|
|
index cdffbd1e0316b..6e34f95941597 100644
|
|
--- a/drivers/usb/gadget/udc/renesas_usb3.c
|
|
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
|
|
@@ -358,6 +358,7 @@ struct renesas_usb3 {
|
|
bool extcon_host; /* check id and set EXTCON_USB_HOST */
|
|
bool extcon_usb; /* check vbus and set EXTCON_USB */
|
|
bool forced_b_device;
|
|
+ bool start_to_connect;
|
|
};
|
|
|
|
#define gadget_to_renesas_usb3(_gadget) \
|
|
@@ -476,7 +477,8 @@ static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
|
|
static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
|
|
{
|
|
usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
|
|
- usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
|
|
+ if (!usb3->workaround_for_vbus)
|
|
+ usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
|
|
}
|
|
|
|
static bool usb3_wakeup_usb2_phy(struct renesas_usb3 *usb3)
|
|
@@ -700,8 +702,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
|
|
usb3_set_mode_by_role_sw(usb3, host);
|
|
usb3_vbus_out(usb3, a_dev);
|
|
/* for A-Peripheral or forced B-device mode */
|
|
- if ((!host && a_dev) ||
|
|
- (usb3->workaround_for_vbus && usb3->forced_b_device))
|
|
+ if ((!host && a_dev) || usb3->start_to_connect)
|
|
usb3_connect(usb3);
|
|
spin_unlock_irqrestore(&usb3->lock, flags);
|
|
}
|
|
@@ -2432,7 +2433,11 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
|
|
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
|
|
return -EFAULT;
|
|
|
|
- if (!strncmp(buf, "1", 1))
|
|
+ usb3->start_to_connect = false;
|
|
+ if (usb3->workaround_for_vbus && usb3->forced_b_device &&
|
|
+ !strncmp(buf, "2", 1))
|
|
+ usb3->start_to_connect = true;
|
|
+ else if (!strncmp(buf, "1", 1))
|
|
usb3->forced_b_device = true;
|
|
else
|
|
usb3->forced_b_device = false;
|
|
@@ -2440,7 +2445,7 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
|
|
if (usb3->workaround_for_vbus)
|
|
usb3_disconnect(usb3);
|
|
|
|
- /* Let this driver call usb3_connect() anyway */
|
|
+ /* Let this driver call usb3_connect() if needed */
|
|
usb3_check_id(usb3);
|
|
|
|
return count;
|
|
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
|
|
index f26109eafdbfb..66ec1fdf9fe7d 100644
|
|
--- a/drivers/usb/host/ehci-mv.c
|
|
+++ b/drivers/usb/host/ehci-mv.c
|
|
@@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
|
|
MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>");
|
|
MODULE_ALIAS("mv-ehci");
|
|
MODULE_LICENSE("GPL");
|
|
+MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids);
|
|
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
|
|
index 984892dd72f55..42668aeca57c8 100644
|
|
--- a/drivers/usb/host/r8a66597-hcd.c
|
|
+++ b/drivers/usb/host/r8a66597-hcd.c
|
|
@@ -1979,6 +1979,8 @@ static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
|
|
|
|
static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
|
|
struct usb_host_endpoint *hep)
|
|
+__acquires(r8a66597->lock)
|
|
+__releases(r8a66597->lock)
|
|
{
|
|
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
|
|
struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
|
|
@@ -1991,13 +1993,14 @@ static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
|
|
return;
|
|
pipenum = pipe->info.pipenum;
|
|
|
|
+ spin_lock_irqsave(&r8a66597->lock, flags);
|
|
if (pipenum == 0) {
|
|
kfree(hep->hcpriv);
|
|
hep->hcpriv = NULL;
|
|
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
|
|
return;
|
|
}
|
|
|
|
- spin_lock_irqsave(&r8a66597->lock, flags);
|
|
pipe_stop(r8a66597, pipe);
|
|
pipe_irq_disable(r8a66597, pipenum);
|
|
disable_irq_empty(r8a66597, pipenum);
|
|
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
|
|
index ae70b9bfd7975..8606935201326 100644
|
|
--- a/drivers/usb/mtu3/mtu3_core.c
|
|
+++ b/drivers/usb/mtu3/mtu3_core.c
|
|
@@ -578,8 +578,10 @@ static void mtu3_regs_init(struct mtu3 *mtu)
|
|
if (mtu->is_u3_ip) {
|
|
/* disable LGO_U1/U2 by default */
|
|
mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL,
|
|
- SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE |
|
|
SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE);
|
|
+ /* enable accept LGO_U1/U2 link command from host */
|
|
+ mtu3_setbits(mbase, U3D_LINK_POWER_CONTROL,
|
|
+ SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE);
|
|
/* device responses to u3_exit from host automatically */
|
|
mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
|
|
/* automatically build U2 link when U3 detect fail */
|
|
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
|
|
index 25216e79cd6ee..3c464d8ae023f 100644
|
|
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
|
|
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
|
|
@@ -336,9 +336,9 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu,
|
|
|
|
lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
|
|
if (set)
|
|
- lpc |= SW_U1_ACCEPT_ENABLE;
|
|
+ lpc |= SW_U1_REQUEST_ENABLE;
|
|
else
|
|
- lpc &= ~SW_U1_ACCEPT_ENABLE;
|
|
+ lpc &= ~SW_U1_REQUEST_ENABLE;
|
|
mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
|
|
|
|
mtu->u1_enable = !!set;
|
|
@@ -351,9 +351,9 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu,
|
|
|
|
lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
|
|
if (set)
|
|
- lpc |= SW_U2_ACCEPT_ENABLE;
|
|
+ lpc |= SW_U2_REQUEST_ENABLE;
|
|
else
|
|
- lpc &= ~SW_U2_ACCEPT_ENABLE;
|
|
+ lpc &= ~SW_U2_REQUEST_ENABLE;
|
|
mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
|
|
|
|
mtu->u2_enable = !!set;
|
|
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
|
|
index 23a0df79ef212..403eb97915f8a 100644
|
|
--- a/drivers/usb/musb/musb_dsps.c
|
|
+++ b/drivers/usb/musb/musb_dsps.c
|
|
@@ -181,9 +181,11 @@ static void dsps_musb_enable(struct musb *musb)
|
|
|
|
musb_writel(reg_base, wrp->epintr_set, epmask);
|
|
musb_writel(reg_base, wrp->coreintr_set, coremask);
|
|
- /* start polling for ID change in dual-role idle mode */
|
|
- if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
|
|
- musb->port_mode == MUSB_OTG)
|
|
+ /*
|
|
+ * start polling for runtime PM active and idle,
|
|
+ * and for ID change in dual-role idle mode.
|
|
+ */
|
|
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
|
|
dsps_mod_timer(glue, -1);
|
|
}
|
|
|
|
@@ -227,8 +229,13 @@ static int dsps_check_status(struct musb *musb, void *unused)
|
|
|
|
switch (musb->xceiv->otg->state) {
|
|
case OTG_STATE_A_WAIT_VRISE:
|
|
- dsps_mod_timer_optional(glue);
|
|
- break;
|
|
+ if (musb->port_mode == MUSB_HOST) {
|
|
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
|
|
+ dsps_mod_timer_optional(glue);
|
|
+ break;
|
|
+ }
|
|
+ /* fall through */
|
|
+
|
|
case OTG_STATE_A_WAIT_BCON:
|
|
/* keep VBUS on for host-only mode */
|
|
if (musb->port_mode == MUSB_HOST) {
|
|
@@ -249,6 +256,10 @@ static int dsps_check_status(struct musb *musb, void *unused)
|
|
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
|
|
MUSB_HST_MODE(musb);
|
|
}
|
|
+
|
|
+ if (musb->port_mode == MUSB_PERIPHERAL)
|
|
+ skip_session = 1;
|
|
+
|
|
if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
|
|
musb_writeb(mregs, MUSB_DEVCTL,
|
|
MUSB_DEVCTL_SESSION);
|
|
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
|
|
index eae8b1b1b45b8..ffe462a657b15 100644
|
|
--- a/drivers/usb/musb/musb_gadget.c
|
|
+++ b/drivers/usb/musb/musb_gadget.c
|
|
@@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum)
|
|
}
|
|
|
|
if (request) {
|
|
- u8 is_dma = 0;
|
|
- bool short_packet = false;
|
|
|
|
trace_musb_req_tx(req);
|
|
|
|
if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
|
|
- is_dma = 1;
|
|
csr |= MUSB_TXCSR_P_WZC_BITS;
|
|
csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
|
|
MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
|
|
@@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
|
|
*/
|
|
if ((request->zero && request->length)
|
|
&& (request->length % musb_ep->packet_sz == 0)
|
|
- && (request->actual == request->length))
|
|
- short_packet = true;
|
|
+ && (request->actual == request->length)) {
|
|
|
|
- if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
|
|
- (is_dma && (!dma->desired_mode ||
|
|
- (request->actual &
|
|
- (musb_ep->packet_sz - 1)))))
|
|
- short_packet = true;
|
|
-
|
|
- if (short_packet) {
|
|
/*
|
|
* On DMA completion, FIFO may not be
|
|
* available yet...
|
|
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
|
|
index a688f7f87829f..5fc6825745f21 100644
|
|
--- a/drivers/usb/musb/musbhsdma.c
|
|
+++ b/drivers/usb/musb/musbhsdma.c
|
|
@@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
|
|
channel->status = MUSB_DMA_STATUS_FREE;
|
|
|
|
/* completed */
|
|
- if ((devctl & MUSB_DEVCTL_HM)
|
|
- && (musb_channel->transmit)
|
|
- && ((channel->desired_mode == 0)
|
|
- || (channel->actual_len &
|
|
- (musb_channel->max_packet_sz - 1)))
|
|
- ) {
|
|
+ if (musb_channel->transmit &&
|
|
+ (!channel->desired_mode ||
|
|
+ (channel->actual_len %
|
|
+ musb_channel->max_packet_sz))) {
|
|
u8 epnum = musb_channel->epnum;
|
|
int offset = musb->io.ep_offset(epnum,
|
|
MUSB_TXCSR);
|
|
@@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
|
|
*/
|
|
musb_ep_select(mbase, epnum);
|
|
txcsr = musb_readw(mbase, offset);
|
|
- txcsr &= ~(MUSB_TXCSR_DMAENAB
|
|
+ if (channel->desired_mode == 1) {
|
|
+ txcsr &= ~(MUSB_TXCSR_DMAENAB
|
|
| MUSB_TXCSR_AUTOSET);
|
|
- musb_writew(mbase, offset, txcsr);
|
|
- /* Send out the packet */
|
|
- txcsr &= ~MUSB_TXCSR_DMAMODE;
|
|
+ musb_writew(mbase, offset, txcsr);
|
|
+ /* Send out the packet */
|
|
+ txcsr &= ~MUSB_TXCSR_DMAMODE;
|
|
+ txcsr |= MUSB_TXCSR_DMAENAB;
|
|
+ }
|
|
txcsr |= MUSB_TXCSR_TXPKTRDY;
|
|
musb_writew(mbase, offset, txcsr);
|
|
}
|
|
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
|
|
index 27bdb72225272..f5f0568d8533e 100644
|
|
--- a/drivers/usb/phy/phy-am335x.c
|
|
+++ b/drivers/usb/phy/phy-am335x.c
|
|
@@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
|
|
if (ret)
|
|
return ret;
|
|
|
|
- ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
|
|
- if (ret)
|
|
- return ret;
|
|
am_phy->usb_phy_gen.phy.init = am335x_init;
|
|
am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
|
|
|
|
@@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
|
|
device_set_wakeup_enable(dev, false);
|
|
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
|
|
|
|
- return 0;
|
|
+ return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
|
|
}
|
|
|
|
static int am335x_phy_remove(struct platform_device *pdev)
|
|
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
|
|
index a3e1290d682d3..1de333be93536 100644
|
|
--- a/drivers/usb/renesas_usbhs/common.c
|
|
+++ b/drivers/usb/renesas_usbhs/common.c
|
|
@@ -539,6 +539,10 @@ static int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
|
|
* platform functions
|
|
*/
|
|
static const struct of_device_id usbhs_of_match[] = {
|
|
+ {
|
|
+ .compatible = "renesas,usbhs-r8a774c0",
|
|
+ .data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
|
|
+ },
|
|
{
|
|
.compatible = "renesas,usbhs-r8a7790",
|
|
.data = (void *)USBHS_TYPE_RCAR_GEN2,
|
|
diff --git a/drivers/usb/roles/Kconfig b/drivers/usb/roles/Kconfig
|
|
index f5a5e6f79f1b9..e4194ac945107 100644
|
|
--- a/drivers/usb/roles/Kconfig
|
|
+++ b/drivers/usb/roles/Kconfig
|
|
@@ -1,3 +1,16 @@
|
|
+config USB_ROLE_SWITCH
|
|
+ tristate "USB Role Switch Support"
|
|
+ help
|
|
+ USB Role Switch is a device that can select the USB role - host or
|
|
+ device - for a USB port (connector). In most cases dual-role capable
|
|
+ USB controller will also represent the switch, but on some platforms
|
|
+ multiplexer/demultiplexer switch is used to route the data lines on
|
|
+ the USB connector between separate USB host and device controllers.
|
|
+
|
|
+ Say Y here if your USB connectors support both device and host roles.
|
|
+ To compile the driver as module, choose M here: the module will be
|
|
+ called roles.ko.
|
|
+
|
|
if USB_ROLE_SWITCH
|
|
|
|
config USB_ROLES_INTEL_XHCI
|
|
diff --git a/drivers/usb/roles/Makefile b/drivers/usb/roles/Makefile
|
|
index e44b179ba2751..c02873206fc18 100644
|
|
--- a/drivers/usb/roles/Makefile
|
|
+++ b/drivers/usb/roles/Makefile
|
|
@@ -1 +1,3 @@
|
|
-obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
|
|
+obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
|
|
+roles-y := class.o
|
|
+obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
|
|
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
|
|
new file mode 100644
|
|
index 0000000000000..99116af07f1d9
|
|
--- /dev/null
|
|
+++ b/drivers/usb/roles/class.c
|
|
@@ -0,0 +1,314 @@
|
|
+// SPDX-License-Identifier: GPL-2.0
|
|
+/*
|
|
+ * USB Role Switch Support
|
|
+ *
|
|
+ * Copyright (C) 2018 Intel Corporation
|
|
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
|
|
+ * Hans de Goede <hdegoede@redhat.com>
|
|
+ */
|
|
+
|
|
+#include <linux/usb/role.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/slab.h>
|
|
+
|
|
+static struct class *role_class;
|
|
+
|
|
+struct usb_role_switch {
|
|
+ struct device dev;
|
|
+ struct mutex lock; /* device lock*/
|
|
+ enum usb_role role;
|
|
+
|
|
+ /* From descriptor */
|
|
+ struct device *usb2_port;
|
|
+ struct device *usb3_port;
|
|
+ struct device *udc;
|
|
+ usb_role_switch_set_t set;
|
|
+ usb_role_switch_get_t get;
|
|
+ bool allow_userspace_control;
|
|
+};
|
|
+
|
|
+#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
|
|
+
|
|
+/**
|
|
+ * usb_role_switch_set_role - Set USB role for a switch
|
|
+ * @sw: USB role switch
|
|
+ * @role: USB role to be switched to
|
|
+ *
|
|
+ * Set USB role @role for @sw.
|
|
+ */
|
|
+int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (IS_ERR_OR_NULL(sw))
|
|
+ return 0;
|
|
+
|
|
+ mutex_lock(&sw->lock);
|
|
+
|
|
+ ret = sw->set(sw->dev.parent, role);
|
|
+ if (!ret)
|
|
+ sw->role = role;
|
|
+
|
|
+ mutex_unlock(&sw->lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
|
|
+
|
|
+/**
|
|
+ * usb_role_switch_get_role - Get the USB role for a switch
|
|
+ * @sw: USB role switch
|
|
+ *
|
|
+ * Depending on the role-switch-driver this function returns either a cached
|
|
+ * value of the last set role, or reads back the actual value from the hardware.
|
|
+ */
|
|
+enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
|
|
+{
|
|
+ enum usb_role role;
|
|
+
|
|
+ if (IS_ERR_OR_NULL(sw))
|
|
+ return USB_ROLE_NONE;
|
|
+
|
|
+ mutex_lock(&sw->lock);
|
|
+
|
|
+ if (sw->get)
|
|
+ role = sw->get(sw->dev.parent);
|
|
+ else
|
|
+ role = sw->role;
|
|
+
|
|
+ mutex_unlock(&sw->lock);
|
|
+
|
|
+ return role;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
|
|
+
|
|
+static int __switch_match(struct device *dev, const void *name)
|
|
+{
|
|
+ return !strcmp((const char *)name, dev_name(dev));
|
|
+}
|
|
+
|
|
+static void *usb_role_switch_match(struct device_connection *con, int ep,
|
|
+ void *data)
|
|
+{
|
|
+ struct device *dev;
|
|
+
|
|
+ dev = class_find_device(role_class, NULL, con->endpoint[ep],
|
|
+ __switch_match);
|
|
+
|
|
+ return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * usb_role_switch_get - Find USB role switch linked with the caller
|
|
+ * @dev: The caller device
|
|
+ *
|
|
+ * Finds and returns role switch linked with @dev. The reference count for the
|
|
+ * found switch is incremented.
|
|
+ */
|
|
+struct usb_role_switch *usb_role_switch_get(struct device *dev)
|
|
+{
|
|
+ struct usb_role_switch *sw;
|
|
+
|
|
+ sw = device_connection_find_match(dev, "usb-role-switch", NULL,
|
|
+ usb_role_switch_match);
|
|
+
|
|
+ if (!IS_ERR_OR_NULL(sw))
|
|
+ WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
|
|
+
|
|
+ return sw;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(usb_role_switch_get);
|
|
+
|
|
+/**
|
|
+ * usb_role_switch_put - Release handle to a switch
|
|
+ * @sw: USB Role Switch
|
|
+ *
|
|
+ * Decrement reference count for @sw.
|
|
+ */
|
|
+void usb_role_switch_put(struct usb_role_switch *sw)
|
|
+{
|
|
+ if (!IS_ERR_OR_NULL(sw)) {
|
|
+ put_device(&sw->dev);
|
|
+ module_put(sw->dev.parent->driver->owner);
|
|
+ }
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(usb_role_switch_put);
|
|
+
|
|
+static umode_t
|
|
+usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
|
|
+{
|
|
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
|
|
+ struct usb_role_switch *sw = to_role_switch(dev);
|
|
+
|
|
+ if (sw->allow_userspace_control)
|
|
+ return attr->mode;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const char * const usb_roles[] = {
|
|
+ [USB_ROLE_NONE] = "none",
|
|
+ [USB_ROLE_HOST] = "host",
|
|
+ [USB_ROLE_DEVICE] = "device",
|
|
+};
|
|
+
|
|
+static ssize_t
|
|
+role_show(struct device *dev, struct device_attribute *attr, char *buf)
|
|
+{
|
|
+ struct usb_role_switch *sw = to_role_switch(dev);
|
|
+ enum usb_role role = usb_role_switch_get_role(sw);
|
|
+
|
|
+ return sprintf(buf, "%s\n", usb_roles[role]);
|
|
+}
|
|
+
|
|
+static ssize_t role_store(struct device *dev, struct device_attribute *attr,
|
|
+ const char *buf, size_t size)
|
|
+{
|
|
+ struct usb_role_switch *sw = to_role_switch(dev);
|
|
+ int ret;
|
|
+
|
|
+ ret = sysfs_match_string(usb_roles, buf);
|
|
+ if (ret < 0) {
|
|
+ bool res;
|
|
+
|
|
+ /* Extra check if the user wants to disable the switch */
|
|
+ ret = kstrtobool(buf, &res);
|
|
+ if (ret || res)
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ ret = usb_role_switch_set_role(sw, ret);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return size;
|
|
+}
|
|
+static DEVICE_ATTR_RW(role);
|
|
+
|
|
+static struct attribute *usb_role_switch_attrs[] = {
|
|
+ &dev_attr_role.attr,
|
|
+ NULL,
|
|
+};
|
|
+
|
|
+static const struct attribute_group usb_role_switch_group = {
|
|
+ .is_visible = usb_role_switch_is_visible,
|
|
+ .attrs = usb_role_switch_attrs,
|
|
+};
|
|
+
|
|
+static const struct attribute_group *usb_role_switch_groups[] = {
|
|
+ &usb_role_switch_group,
|
|
+ NULL,
|
|
+};
|
|
+
|
|
+static int
|
|
+usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
|
|
+ if (ret)
|
|
+ dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void usb_role_switch_release(struct device *dev)
|
|
+{
|
|
+ struct usb_role_switch *sw = to_role_switch(dev);
|
|
+
|
|
+ kfree(sw);
|
|
+}
|
|
+
|
|
+static const struct device_type usb_role_dev_type = {
|
|
+ .name = "usb_role_switch",
|
|
+ .groups = usb_role_switch_groups,
|
|
+ .uevent = usb_role_switch_uevent,
|
|
+ .release = usb_role_switch_release,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * usb_role_switch_register - Register USB Role Switch
|
|
+ * @parent: Parent device for the switch
|
|
+ * @desc: Description of the switch
|
|
+ *
|
|
+ * USB Role Switch is a device capable or choosing the role for USB connector.
|
|
+ * On platforms where the USB controller is dual-role capable, the controller
|
|
+ * driver will need to register the switch. On platforms where the USB host and
|
|
+ * USB device controllers behind the connector are separate, there will be a
|
|
+ * mux, and the driver for that mux will need to register the switch.
|
|
+ *
|
|
+ * Returns handle to a new role switch or ERR_PTR. The content of @desc is
|
|
+ * copied.
|
|
+ */
|
|
+struct usb_role_switch *
|
|
+usb_role_switch_register(struct device *parent,
|
|
+ const struct usb_role_switch_desc *desc)
|
|
+{
|
|
+ struct usb_role_switch *sw;
|
|
+ int ret;
|
|
+
|
|
+ if (!desc || !desc->set)
|
|
+ return ERR_PTR(-EINVAL);
|
|
+
|
|
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
|
|
+ if (!sw)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ mutex_init(&sw->lock);
|
|
+
|
|
+ sw->allow_userspace_control = desc->allow_userspace_control;
|
|
+ sw->usb2_port = desc->usb2_port;
|
|
+ sw->usb3_port = desc->usb3_port;
|
|
+ sw->udc = desc->udc;
|
|
+ sw->set = desc->set;
|
|
+ sw->get = desc->get;
|
|
+
|
|
+ sw->dev.parent = parent;
|
|
+ sw->dev.class = role_class;
|
|
+ sw->dev.type = &usb_role_dev_type;
|
|
+ dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
|
|
+
|
|
+ ret = device_register(&sw->dev);
|
|
+ if (ret) {
|
|
+ put_device(&sw->dev);
|
|
+ return ERR_PTR(ret);
|
|
+ }
|
|
+
|
|
+ /* TODO: Symlinks for the host port and the device controller. */
|
|
+
|
|
+ return sw;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(usb_role_switch_register);
|
|
+
|
|
+/**
|
|
+ * usb_role_switch_unregister - Unregsiter USB Role Switch
|
|
+ * @sw: USB Role Switch
|
|
+ *
|
|
+ * Unregister switch that was registered with usb_role_switch_register().
|
|
+ */
|
|
+void usb_role_switch_unregister(struct usb_role_switch *sw)
|
|
+{
|
|
+ if (!IS_ERR_OR_NULL(sw))
|
|
+ device_unregister(&sw->dev);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
|
|
+
|
|
+static int __init usb_roles_init(void)
|
|
+{
|
|
+ role_class = class_create(THIS_MODULE, "usb_role");
|
|
+ return PTR_ERR_OR_ZERO(role_class);
|
|
+}
|
|
+subsys_initcall(usb_roles_init);
|
|
+
|
|
+static void __exit usb_roles_exit(void)
|
|
+{
|
|
+ class_destroy(role_class);
|
|
+}
|
|
+module_exit(usb_roles_exit);
|
|
+
|
|
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
|
|
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
|
|
+MODULE_LICENSE("GPL v2");
|
|
+MODULE_DESCRIPTION("USB Role Class");
|
|
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
|
|
index 609198d9594ca..f459c1a18156e 100644
|
|
--- a/drivers/usb/serial/ftdi_sio.c
|
|
+++ b/drivers/usb/serial/ftdi_sio.c
|
|
@@ -1783,6 +1783,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
|
|
int result;
|
|
u16 val;
|
|
|
|
+ result = usb_autopm_get_interface(serial->interface);
|
|
+ if (result)
|
|
+ return result;
|
|
+
|
|
val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
|
|
result = usb_control_msg(serial->dev,
|
|
usb_sndctrlpipe(serial->dev, 0),
|
|
@@ -1795,6 +1799,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
|
|
val, result);
|
|
}
|
|
|
|
+ usb_autopm_put_interface(serial->interface);
|
|
+
|
|
return result;
|
|
}
|
|
|
|
@@ -1846,9 +1852,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
|
|
unsigned char *buf;
|
|
int result;
|
|
|
|
+ result = usb_autopm_get_interface(serial->interface);
|
|
+ if (result)
|
|
+ return result;
|
|
+
|
|
buf = kmalloc(1, GFP_KERNEL);
|
|
- if (!buf)
|
|
+ if (!buf) {
|
|
+ usb_autopm_put_interface(serial->interface);
|
|
return -ENOMEM;
|
|
+ }
|
|
|
|
result = usb_control_msg(serial->dev,
|
|
usb_rcvctrlpipe(serial->dev, 0),
|
|
@@ -1863,6 +1875,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
|
|
}
|
|
|
|
kfree(buf);
|
|
+ usb_autopm_put_interface(serial->interface);
|
|
|
|
return result;
|
|
}
|
|
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
|
|
index 1ce27f3ff7a78..aef15497ff31f 100644
|
|
--- a/drivers/usb/serial/option.c
|
|
+++ b/drivers/usb/serial/option.c
|
|
@@ -1955,6 +1955,10 @@ static const struct usb_device_id option_ids[] = {
|
|
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
|
|
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
|
|
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
|
|
+ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
|
|
+ .driver_info = RSVD(4) | RSVD(5) },
|
|
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
|
|
+ .driver_info = RSVD(6) },
|
|
{ } /* Terminating entry */
|
|
};
|
|
MODULE_DEVICE_TABLE(usb, option_ids);
|
|
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
|
|
index a4e0d13fc1219..bb3f9aa4a9093 100644
|
|
--- a/drivers/usb/serial/pl2303.c
|
|
+++ b/drivers/usb/serial/pl2303.c
|
|
@@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = {
|
|
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
|
|
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
|
|
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
|
|
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
|
|
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
|
|
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
|
|
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
|
|
@@ -91,9 +92,14 @@ static const struct usb_device_id id_table[] = {
|
|
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
|
|
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
|
|
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
|
|
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
|
|
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
|
|
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
|
|
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
|
|
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
|
|
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
|
|
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
|
|
+ { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
|
|
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
|
|
{ USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
|
|
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
|
|
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
|
|
index 26965cc23c170..559941ca884da 100644
|
|
--- a/drivers/usb/serial/pl2303.h
|
|
+++ b/drivers/usb/serial/pl2303.h
|
|
@@ -8,6 +8,7 @@
|
|
|
|
#define PL2303_VENDOR_ID 0x067b
|
|
#define PL2303_PRODUCT_ID 0x2303
|
|
+#define PL2303_PRODUCT_ID_TB 0x2304
|
|
#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
|
|
#define PL2303_PRODUCT_ID_DCU11 0x1234
|
|
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
|
|
@@ -20,6 +21,7 @@
|
|
#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
|
|
#define PL2303_PRODUCT_ID_ZTEK 0xe1f1
|
|
|
|
+
|
|
#define ATEN_VENDOR_ID 0x0557
|
|
#define ATEN_VENDOR_ID2 0x0547
|
|
#define ATEN_PRODUCT_ID 0x2008
|
|
@@ -119,10 +121,15 @@
|
|
|
|
/* Hewlett-Packard POS Pole Displays */
|
|
#define HP_VENDOR_ID 0x03f0
|
|
+#define HP_LM920_PRODUCT_ID 0x026b
|
|
+#define HP_TD620_PRODUCT_ID 0x0956
|
|
#define HP_LD960_PRODUCT_ID 0x0b39
|
|
#define HP_LCM220_PRODUCT_ID 0x3139
|
|
#define HP_LCM960_PRODUCT_ID 0x3239
|
|
#define HP_LD220_PRODUCT_ID 0x3524
|
|
+#define HP_LD220TA_PRODUCT_ID 0x4349
|
|
+#define HP_LD960TA_PRODUCT_ID 0x4439
|
|
+#define HP_LM940_PRODUCT_ID 0x5039
|
|
|
|
/* Cressi Edy (diving computer) PC interface */
|
|
#define CRESSI_VENDOR_ID 0x04b8
|
|
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
|
|
index 4d0273508043d..edbbb13d6de6e 100644
|
|
--- a/drivers/usb/serial/usb-serial-simple.c
|
|
+++ b/drivers/usb/serial/usb-serial-simple.c
|
|
@@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS);
|
|
/* Motorola Tetra driver */
|
|
#define MOTOROLA_TETRA_IDS() \
|
|
{ USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
|
|
- { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
|
|
+ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
|
|
+ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
|
|
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
|
|
|
|
/* Novatel Wireless GPS driver */
|
|
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
|
|
index e227bb5b794fe..101ebac43c87a 100644
|
|
--- a/drivers/usb/storage/scsiglue.c
|
|
+++ b/drivers/usb/storage/scsiglue.c
|
|
@@ -235,8 +235,12 @@ static int slave_configure(struct scsi_device *sdev)
|
|
if (!(us->fflags & US_FL_NEEDS_CAP16))
|
|
sdev->try_rc_10_first = 1;
|
|
|
|
- /* assume SPC3 or latter devices support sense size > 18 */
|
|
- if (sdev->scsi_level > SCSI_SPC_2)
|
|
+ /*
|
|
+ * assume SPC3 or latter devices support sense size > 18
|
|
+ * unless US_FL_BAD_SENSE quirk is specified.
|
|
+ */
|
|
+ if (sdev->scsi_level > SCSI_SPC_2 &&
|
|
+ !(us->fflags & US_FL_BAD_SENSE))
|
|
us->fflags |= US_FL_SANE_SENSE;
|
|
|
|
/*
|
|
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
|
|
index f7f83b21dc746..ea0d27a94afe0 100644
|
|
--- a/drivers/usb/storage/unusual_devs.h
|
|
+++ b/drivers/usb/storage/unusual_devs.h
|
|
@@ -1265,6 +1265,18 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
|
|
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
US_FL_FIX_CAPACITY ),
|
|
|
|
+/*
|
|
+ * Reported by Icenowy Zheng <icenowy@aosc.io>
|
|
+ * The SMI SM3350 USB-UFS bridge controller will enter a wrong state
|
|
+ * that do not process read/write command if a long sense is requested,
|
|
+ * so force to use 18-byte sense.
|
|
+ */
|
|
+UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff,
|
|
+ "SMI",
|
|
+ "SM3350 UFS-to-USB-Mass-Storage bridge",
|
|
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
|
|
+ US_FL_BAD_SENSE ),
|
|
+
|
|
/*
|
|
* Reported by Paul Hartman <paul.hartman+linux@gmail.com>
|
|
* This card reader returns "Illegal Request, Logical Block Address
|
|
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
|
|
index dbbd71f754d02..ba6e5cdaed2c5 100644
|
|
--- a/drivers/usb/typec/tcpm/tcpm.c
|
|
+++ b/drivers/usb/typec/tcpm/tcpm.c
|
|
@@ -317,6 +317,9 @@ struct tcpm_port {
|
|
/* Deadline in jiffies to exit src_try_wait state */
|
|
unsigned long max_wait;
|
|
|
|
+ /* port belongs to a self powered device */
|
|
+ bool self_powered;
|
|
+
|
|
#ifdef CONFIG_DEBUG_FS
|
|
struct dentry *dentry;
|
|
struct mutex logbuffer_lock; /* log buffer access lock */
|
|
@@ -3254,7 +3257,8 @@ static void run_state_machine(struct tcpm_port *port)
|
|
case SRC_HARD_RESET_VBUS_OFF:
|
|
tcpm_set_vconn(port, true);
|
|
tcpm_set_vbus(port, false);
|
|
- tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
|
|
+ tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
|
|
+ TYPEC_HOST);
|
|
tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
|
|
break;
|
|
case SRC_HARD_RESET_VBUS_ON:
|
|
@@ -3267,7 +3271,8 @@ static void run_state_machine(struct tcpm_port *port)
|
|
memset(&port->pps_data, 0, sizeof(port->pps_data));
|
|
tcpm_set_vconn(port, false);
|
|
tcpm_set_charge(port, false);
|
|
- tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
|
|
+ tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
|
|
+ TYPEC_DEVICE);
|
|
/*
|
|
* VBUS may or may not toggle, depending on the adapter.
|
|
* If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
|
|
@@ -4412,6 +4417,8 @@ sink:
|
|
return -EINVAL;
|
|
port->operating_snk_mw = mw / 1000;
|
|
|
|
+ port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -4720,6 +4727,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
|
|
port->typec_caps.prefer_role = tcfg->default_role;
|
|
port->typec_caps.type = tcfg->type;
|
|
port->typec_caps.data = tcfg->data;
|
|
+ port->self_powered = port->tcpc->config->self_powered;
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
|
|
index d9fd3188615dc..64cbc2d007c9e 100644
|
|
--- a/drivers/vfio/vfio_iommu_type1.c
|
|
+++ b/drivers/vfio/vfio_iommu_type1.c
|
|
@@ -878,7 +878,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
|
|
return -EINVAL;
|
|
if (!unmap->size || unmap->size & mask)
|
|
return -EINVAL;
|
|
- if (unmap->iova + unmap->size < unmap->iova ||
|
|
+ if (unmap->iova + unmap->size - 1 < unmap->iova ||
|
|
unmap->size > SIZE_MAX)
|
|
return -EINVAL;
|
|
|
|
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
|
|
index ad7a6f475a442..4954718b2754b 100644
|
|
--- a/drivers/vhost/net.c
|
|
+++ b/drivers/vhost/net.c
|
|
@@ -1192,7 +1192,8 @@ static void handle_rx(struct vhost_net *net)
|
|
if (nvq->done_idx > VHOST_NET_BATCH)
|
|
vhost_net_signal_used(nvq);
|
|
if (unlikely(vq_log))
|
|
- vhost_log_write(vq, vq_log, log, vhost_len);
|
|
+ vhost_log_write(vq, vq_log, log, vhost_len,
|
|
+ vq->iov, in);
|
|
total_len += vhost_len;
|
|
if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
|
|
vhost_poll_queue(&vq->poll);
|
|
@@ -1292,7 +1293,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
|
|
n->vqs[i].rx_ring = NULL;
|
|
vhost_net_buf_init(&n->vqs[i].rxq);
|
|
}
|
|
- vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
|
|
+ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
|
|
+ UIO_MAXIOV + VHOST_NET_BATCH);
|
|
|
|
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
|
|
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
|
|
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
|
|
index 50dffe83714c6..11bd8b6422ebf 100644
|
|
--- a/drivers/vhost/scsi.c
|
|
+++ b/drivers/vhost/scsi.c
|
|
@@ -1132,16 +1132,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
|
|
struct vhost_virtqueue *vq,
|
|
struct vhost_scsi_ctx *vc)
|
|
{
|
|
- struct virtio_scsi_ctrl_tmf_resp __user *resp;
|
|
struct virtio_scsi_ctrl_tmf_resp rsp;
|
|
+ struct iov_iter iov_iter;
|
|
int ret;
|
|
|
|
pr_debug("%s\n", __func__);
|
|
memset(&rsp, 0, sizeof(rsp));
|
|
rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
|
|
- resp = vq->iov[vc->out].iov_base;
|
|
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
|
|
- if (!ret)
|
|
+
|
|
+ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
|
|
+
|
|
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
|
|
+ if (likely(ret == sizeof(rsp)))
|
|
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
|
|
else
|
|
pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
|
|
@@ -1152,16 +1154,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
|
|
struct vhost_virtqueue *vq,
|
|
struct vhost_scsi_ctx *vc)
|
|
{
|
|
- struct virtio_scsi_ctrl_an_resp __user *resp;
|
|
struct virtio_scsi_ctrl_an_resp rsp;
|
|
+ struct iov_iter iov_iter;
|
|
int ret;
|
|
|
|
pr_debug("%s\n", __func__);
|
|
memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
|
|
rsp.response = VIRTIO_SCSI_S_OK;
|
|
- resp = vq->iov[vc->out].iov_base;
|
|
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
|
|
- if (!ret)
|
|
+
|
|
+ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
|
|
+
|
|
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
|
|
+ if (likely(ret == sizeof(rsp)))
|
|
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
|
|
else
|
|
pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
|
|
@@ -1628,7 +1632,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
|
|
vqs[i] = &vs->vqs[i].vq;
|
|
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
|
|
}
|
|
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
|
|
+ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
|
|
|
|
vhost_scsi_init_inflight(vs, NULL);
|
|
|
|
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
|
|
index 55e5aa662ad59..b214a72d5caad 100644
|
|
--- a/drivers/vhost/vhost.c
|
|
+++ b/drivers/vhost/vhost.c
|
|
@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
|
|
vq->indirect = kmalloc_array(UIO_MAXIOV,
|
|
sizeof(*vq->indirect),
|
|
GFP_KERNEL);
|
|
- vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
|
|
+ vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
|
|
GFP_KERNEL);
|
|
- vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
|
|
+ vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
|
|
GFP_KERNEL);
|
|
if (!vq->indirect || !vq->log || !vq->heads)
|
|
goto err_nomem;
|
|
@@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
|
|
}
|
|
|
|
void vhost_dev_init(struct vhost_dev *dev,
|
|
- struct vhost_virtqueue **vqs, int nvqs)
|
|
+ struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
|
|
{
|
|
struct vhost_virtqueue *vq;
|
|
int i;
|
|
@@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
|
|
dev->iotlb = NULL;
|
|
dev->mm = NULL;
|
|
dev->worker = NULL;
|
|
+ dev->iov_limit = iov_limit;
|
|
init_llist_head(&dev->work_list);
|
|
init_waitqueue_head(&dev->wait);
|
|
INIT_LIST_HEAD(&dev->read_list);
|
|
@@ -1034,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
|
|
int type, ret;
|
|
|
|
ret = copy_from_iter(&type, sizeof(type), from);
|
|
- if (ret != sizeof(type))
|
|
+ if (ret != sizeof(type)) {
|
|
+ ret = -EINVAL;
|
|
goto done;
|
|
+ }
|
|
|
|
switch (type) {
|
|
case VHOST_IOTLB_MSG:
|
|
@@ -1054,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
|
|
|
|
iov_iter_advance(from, offset);
|
|
ret = copy_from_iter(&msg, sizeof(msg), from);
|
|
- if (ret != sizeof(msg))
|
|
+ if (ret != sizeof(msg)) {
|
|
+ ret = -EINVAL;
|
|
goto done;
|
|
+ }
|
|
if (vhost_process_iotlb_msg(dev, &msg)) {
|
|
ret = -EFAULT;
|
|
goto done;
|
|
@@ -1733,13 +1738,87 @@ static int log_write(void __user *log_base,
|
|
return r;
|
|
}
|
|
|
|
+static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
|
|
+{
|
|
+ struct vhost_umem *umem = vq->umem;
|
|
+ struct vhost_umem_node *u;
|
|
+ u64 start, end, l, min;
|
|
+ int r;
|
|
+ bool hit = false;
|
|
+
|
|
+ while (len) {
|
|
+ min = len;
|
|
+ /* More than one GPAs can be mapped into a single HVA. So
|
|
+ * iterate all possible umems here to be safe.
|
|
+ */
|
|
+ list_for_each_entry(u, &umem->umem_list, link) {
|
|
+ if (u->userspace_addr > hva - 1 + len ||
|
|
+ u->userspace_addr - 1 + u->size < hva)
|
|
+ continue;
|
|
+ start = max(u->userspace_addr, hva);
|
|
+ end = min(u->userspace_addr - 1 + u->size,
|
|
+ hva - 1 + len);
|
|
+ l = end - start + 1;
|
|
+ r = log_write(vq->log_base,
|
|
+ u->start + start - u->userspace_addr,
|
|
+ l);
|
|
+ if (r < 0)
|
|
+ return r;
|
|
+ hit = true;
|
|
+ min = min(l, min);
|
|
+ }
|
|
+
|
|
+ if (!hit)
|
|
+ return -EFAULT;
|
|
+
|
|
+ len -= min;
|
|
+ hva += min;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
|
|
+{
|
|
+ struct iovec iov[64];
|
|
+ int i, ret;
|
|
+
|
|
+ if (!vq->iotlb)
|
|
+ return log_write(vq->log_base, vq->log_addr + used_offset, len);
|
|
+
|
|
+ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
|
|
+ len, iov, 64, VHOST_ACCESS_WO);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ for (i = 0; i < ret; i++) {
|
|
+ ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
|
|
+ iov[i].iov_len);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
|
|
- unsigned int log_num, u64 len)
|
|
+ unsigned int log_num, u64 len, struct iovec *iov, int count)
|
|
{
|
|
int i, r;
|
|
|
|
/* Make sure data written is seen before log. */
|
|
smp_wmb();
|
|
+
|
|
+ if (vq->iotlb) {
|
|
+ for (i = 0; i < count; i++) {
|
|
+ r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
|
|
+ iov[i].iov_len);
|
|
+ if (r < 0)
|
|
+ return r;
|
|
+ }
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
for (i = 0; i < log_num; ++i) {
|
|
u64 l = min(log[i].len, len);
|
|
r = log_write(vq->log_base, log[i].addr, l);
|
|
@@ -1769,9 +1848,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
|
|
smp_wmb();
|
|
/* Log used flag write. */
|
|
used = &vq->used->flags;
|
|
- log_write(vq->log_base, vq->log_addr +
|
|
- (used - (void __user *)vq->used),
|
|
- sizeof vq->used->flags);
|
|
+ log_used(vq, (used - (void __user *)vq->used),
|
|
+ sizeof vq->used->flags);
|
|
if (vq->log_ctx)
|
|
eventfd_signal(vq->log_ctx, 1);
|
|
}
|
|
@@ -1789,9 +1867,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
|
|
smp_wmb();
|
|
/* Log avail event write */
|
|
used = vhost_avail_event(vq);
|
|
- log_write(vq->log_base, vq->log_addr +
|
|
- (used - (void __user *)vq->used),
|
|
- sizeof *vhost_avail_event(vq));
|
|
+ log_used(vq, (used - (void __user *)vq->used),
|
|
+ sizeof *vhost_avail_event(vq));
|
|
if (vq->log_ctx)
|
|
eventfd_signal(vq->log_ctx, 1);
|
|
}
|
|
@@ -2191,10 +2268,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
|
|
/* Make sure data is seen before log. */
|
|
smp_wmb();
|
|
/* Log used ring entry write. */
|
|
- log_write(vq->log_base,
|
|
- vq->log_addr +
|
|
- ((void __user *)used - (void __user *)vq->used),
|
|
- count * sizeof *used);
|
|
+ log_used(vq, ((void __user *)used - (void __user *)vq->used),
|
|
+ count * sizeof *used);
|
|
}
|
|
old = vq->last_used_idx;
|
|
new = (vq->last_used_idx += count);
|
|
@@ -2236,9 +2311,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
|
|
/* Make sure used idx is seen before log. */
|
|
smp_wmb();
|
|
/* Log used index update. */
|
|
- log_write(vq->log_base,
|
|
- vq->log_addr + offsetof(struct vring_used, idx),
|
|
- sizeof vq->used->idx);
|
|
+ log_used(vq, offsetof(struct vring_used, idx),
|
|
+ sizeof vq->used->idx);
|
|
if (vq->log_ctx)
|
|
eventfd_signal(vq->log_ctx, 1);
|
|
}
|
|
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
|
|
index 466ef75422916..9490e7ddb3404 100644
|
|
--- a/drivers/vhost/vhost.h
|
|
+++ b/drivers/vhost/vhost.h
|
|
@@ -170,9 +170,11 @@ struct vhost_dev {
|
|
struct list_head read_list;
|
|
struct list_head pending_list;
|
|
wait_queue_head_t wait;
|
|
+ int iov_limit;
|
|
};
|
|
|
|
-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
|
|
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
|
|
+ int nvqs, int iov_limit);
|
|
long vhost_dev_set_owner(struct vhost_dev *dev);
|
|
bool vhost_dev_has_owner(struct vhost_dev *dev);
|
|
long vhost_dev_check_owner(struct vhost_dev *);
|
|
@@ -205,7 +207,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
|
|
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
|
|
|
|
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
|
|
- unsigned int log_num, u64 len);
|
|
+ unsigned int log_num, u64 len,
|
|
+ struct iovec *iov, int count);
|
|
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
|
|
|
|
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
|
|
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
|
|
index 98ed5be132c6a..fa93f6711d8d3 100644
|
|
--- a/drivers/vhost/vsock.c
|
|
+++ b/drivers/vhost/vsock.c
|
|
@@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
|
|
vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
|
|
vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
|
|
|
|
- vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
|
|
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
|
|
|
|
file->private_data = vsock;
|
|
spin_lock_init(&vsock->send_pkt_list_lock);
|
|
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
|
|
index f9ef0673a083c..aded3213bfb51 100644
|
|
--- a/drivers/video/backlight/pwm_bl.c
|
|
+++ b/drivers/video/backlight/pwm_bl.c
|
|
@@ -268,6 +268,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
|
|
|
|
memset(data, 0, sizeof(*data));
|
|
|
|
+ /*
|
|
+ * These values are optional and set as 0 by default, the out values
|
|
+ * are modified only if a valid u32 value can be decoded.
|
|
+ */
|
|
+ of_property_read_u32(node, "post-pwm-on-delay-ms",
|
|
+ &data->post_pwm_on_delay);
|
|
+ of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
|
|
+
|
|
+ data->enable_gpio = -EINVAL;
|
|
+
|
|
/*
|
|
* Determine the number of brightness levels, if this property is not
|
|
* set a default table of brightness levels will be used.
|
|
@@ -380,15 +390,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
|
|
data->max_brightness--;
|
|
}
|
|
|
|
- /*
|
|
- * These values are optional and set as 0 by default, the out values
|
|
- * are modified only if a valid u32 value can be decoded.
|
|
- */
|
|
- of_property_read_u32(node, "post-pwm-on-delay-ms",
|
|
- &data->post_pwm_on_delay);
|
|
- of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
|
|
-
|
|
- data->enable_gpio = -EINVAL;
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
|
|
index 09731b2f6815f..c6b3bdbbdbc9e 100644
|
|
--- a/drivers/video/console/vgacon.c
|
|
+++ b/drivers/video/console/vgacon.c
|
|
@@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
|
|
|
|
static void vgacon_restore_screen(struct vc_data *c)
|
|
{
|
|
+ c->vc_origin = c->vc_visible_origin;
|
|
vgacon_scrollback_cur->save = 0;
|
|
|
|
if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
|
|
@@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
|
|
int start, end, count, soff;
|
|
|
|
if (!lines) {
|
|
- c->vc_visible_origin = c->vc_origin;
|
|
- vga_set_mem_top(c);
|
|
+ vgacon_restore_screen(c);
|
|
return;
|
|
}
|
|
|
|
@@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
|
|
if (!vgacon_scrollback_cur->save) {
|
|
vgacon_cursor(c, CM_ERASE);
|
|
vgacon_save_screen(c);
|
|
+ c->vc_origin = (unsigned long)c->vc_screenbuf;
|
|
vgacon_scrollback_cur->save = 1;
|
|
}
|
|
|
|
@@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
|
|
int copysize;
|
|
|
|
int diff = c->vc_rows - count;
|
|
- void *d = (void *) c->vc_origin;
|
|
+ void *d = (void *) c->vc_visible_origin;
|
|
void *s = (void *) c->vc_screenbuf;
|
|
|
|
count *= c->vc_size_row;
|
|
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
|
|
index ff561073ee4ec..42f909618f049 100644
|
|
--- a/drivers/video/fbdev/clps711x-fb.c
|
|
+++ b/drivers/video/fbdev/clps711x-fb.c
|
|
@@ -287,14 +287,17 @@ static int clps711x_fb_probe(struct platform_device *pdev)
|
|
}
|
|
|
|
ret = of_get_fb_videomode(disp, &cfb->mode, OF_USE_NATIVE_MODE);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ of_node_put(disp);
|
|
goto out_fb_release;
|
|
+ }
|
|
|
|
of_property_read_u32(disp, "ac-prescale", &cfb->ac_prescale);
|
|
cfb->cmap_invert = of_property_read_bool(disp, "cmap-invert");
|
|
|
|
ret = of_property_read_u32(disp, "bits-per-pixel",
|
|
&info->var.bits_per_pixel);
|
|
+ of_node_put(disp);
|
|
if (ret)
|
|
goto out_fb_release;
|
|
|
|
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
|
|
index 8958ccc8b1ac3..8976190b6c1ff 100644
|
|
--- a/drivers/video/fbdev/core/fbcon.c
|
|
+++ b/drivers/video/fbdev/core/fbcon.c
|
|
@@ -3064,7 +3064,7 @@ static int fbcon_fb_unbind(int idx)
|
|
for (i = first_fb_vc; i <= last_fb_vc; i++) {
|
|
if (con2fb_map[i] != idx &&
|
|
con2fb_map[i] != -1) {
|
|
- new_idx = i;
|
|
+ new_idx = con2fb_map[i];
|
|
break;
|
|
}
|
|
}
|
|
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
|
|
index 861bf80816192..7dd6924feaa8a 100644
|
|
--- a/drivers/video/fbdev/core/fbmem.c
|
|
+++ b/drivers/video/fbdev/core/fbmem.c
|
|
@@ -436,7 +436,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
|
|
image->dx += image->width + 8;
|
|
}
|
|
} else if (rotate == FB_ROTATE_UD) {
|
|
- for (x = 0; x < num; x++) {
|
|
+ u32 dx = image->dx;
|
|
+
|
|
+ for (x = 0; x < num && image->dx <= dx; x++) {
|
|
info->fbops->fb_imageblit(info, image);
|
|
image->dx -= image->width + 8;
|
|
}
|
|
@@ -448,7 +450,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
|
|
image->dy += image->height + 8;
|
|
}
|
|
} else if (rotate == FB_ROTATE_CCW) {
|
|
- for (x = 0; x < num; x++) {
|
|
+ u32 dy = image->dy;
|
|
+
|
|
+ for (x = 0; x < num && image->dy <= dy; x++) {
|
|
info->fbops->fb_imageblit(info, image);
|
|
image->dy -= image->height + 8;
|
|
}
|
|
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
|
|
index 31f769d67195b..057d3cdef92e6 100644
|
|
--- a/drivers/video/fbdev/offb.c
|
|
+++ b/drivers/video/fbdev/offb.c
|
|
@@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index,
|
|
}
|
|
|
|
static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp,
|
|
- const char *name, unsigned long address)
|
|
+ unsigned long address)
|
|
{
|
|
struct offb_par *par = (struct offb_par *) info->par;
|
|
|
|
- if (dp && !strncmp(name, "ATY,Rage128", 11)) {
|
|
+ if (of_node_name_prefix(dp, "ATY,Rage128")) {
|
|
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
|
|
if (par->cmap_adr)
|
|
par->cmap_type = cmap_r128;
|
|
- } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12)
|
|
- || !strncmp(name, "ATY,RageM3p12A", 14))) {
|
|
+ } else if (of_node_name_prefix(dp, "ATY,RageM3pA") ||
|
|
+ of_node_name_prefix(dp, "ATY,RageM3p12A")) {
|
|
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
|
|
if (par->cmap_adr)
|
|
par->cmap_type = cmap_M3A;
|
|
- } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) {
|
|
+ } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) {
|
|
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
|
|
if (par->cmap_adr)
|
|
par->cmap_type = cmap_M3B;
|
|
- } else if (dp && !strncmp(name, "ATY,Rage6", 9)) {
|
|
+ } else if (of_node_name_prefix(dp, "ATY,Rage6")) {
|
|
par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff);
|
|
if (par->cmap_adr)
|
|
par->cmap_type = cmap_radeon;
|
|
- } else if (!strncmp(name, "ATY,", 4)) {
|
|
+ } else if (of_node_name_prefix(dp, "ATY,")) {
|
|
unsigned long base = address & 0xff000000UL;
|
|
par->cmap_adr =
|
|
ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
|
|
@@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
|
|
par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
|
|
if (par->cmap_adr)
|
|
par->cmap_type = cmap_gxt2000;
|
|
- } else if (dp && !strncmp(name, "vga,Display-", 12)) {
|
|
+ } else if (of_node_name_prefix(dp, "vga,Display-")) {
|
|
/* Look for AVIVO initialized by SLOF */
|
|
struct device_node *pciparent = of_get_parent(dp);
|
|
const u32 *vid, *did;
|
|
@@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name,
|
|
|
|
par->cmap_type = cmap_unknown;
|
|
if (depth == 8)
|
|
- offb_init_palette_hacks(info, dp, name, address);
|
|
+ offb_init_palette_hacks(info, dp, address);
|
|
else
|
|
fix->visual = FB_VISUAL_TRUECOLOR;
|
|
|
|
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
|
|
index a3edb20ea4c36..a846d32ee653e 100644
|
|
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
|
|
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
|
|
@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
|
|
|
|
int r = 0;
|
|
|
|
+ memset(&p, 0, sizeof(p));
|
|
+
|
|
switch (cmd) {
|
|
case OMAPFB_SYNC_GFX:
|
|
DBG("ioctl SYNC_GFX\n");
|
|
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
|
|
index bbed039617a42..d59c8a59f5827 100644
|
|
--- a/drivers/video/fbdev/pxafb.c
|
|
+++ b/drivers/video/fbdev/pxafb.c
|
|
@@ -2234,10 +2234,8 @@ static struct pxafb_mach_info *of_pxafb_of_mach_info(struct device *dev)
|
|
if (!info)
|
|
return ERR_PTR(-ENOMEM);
|
|
ret = of_get_pxafb_mode_info(dev, info);
|
|
- if (ret) {
|
|
- kfree(info->modes);
|
|
+ if (ret)
|
|
return ERR_PTR(ret);
|
|
- }
|
|
|
|
/*
|
|
* On purpose, neither lccrX registers nor video memory size can be
|
|
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
|
|
index 3093655c7b927..1475ed5ffcdec 100644
|
|
--- a/drivers/virt/vboxguest/vboxguest_core.c
|
|
+++ b/drivers/virt/vboxguest/vboxguest_core.c
|
|
@@ -1312,7 +1312,7 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (f32bit)
|
|
+ if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
|
|
ret = vbg_hgcm_call32(gdev, client_id,
|
|
call->function, call->timeout_ms,
|
|
VBG_IOCTL_HGCM_CALL_PARMS32(call),
|
|
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
|
|
index 728ecd1eea305..fb12fe205f866 100644
|
|
--- a/drivers/virtio/virtio_balloon.c
|
|
+++ b/drivers/virtio/virtio_balloon.c
|
|
@@ -61,6 +61,10 @@ enum virtio_balloon_vq {
|
|
VIRTIO_BALLOON_VQ_MAX
|
|
};
|
|
|
|
+enum virtio_balloon_config_read {
|
|
+ VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
|
|
+};
|
|
+
|
|
struct virtio_balloon {
|
|
struct virtio_device *vdev;
|
|
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
|
|
@@ -77,14 +81,20 @@ struct virtio_balloon {
|
|
/* Prevent updating balloon when it is being canceled. */
|
|
spinlock_t stop_update_lock;
|
|
bool stop_update;
|
|
+ /* Bitmap to indicate if reading the related config fields are needed */
|
|
+ unsigned long config_read_bitmap;
|
|
|
|
/* The list of allocated free pages, waiting to be given back to mm */
|
|
struct list_head free_page_list;
|
|
spinlock_t free_page_list_lock;
|
|
/* The number of free page blocks on the above list */
|
|
unsigned long num_free_page_blocks;
|
|
- /* The cmd id received from host */
|
|
- u32 cmd_id_received;
|
|
+ /*
|
|
+ * The cmd id received from host.
|
|
+ * Read it via virtio_balloon_cmd_id_received to get the latest value
|
|
+ * sent from host.
|
|
+ */
|
|
+ u32 cmd_id_received_cache;
|
|
/* The cmd id that is actively in use */
|
|
__virtio32 cmd_id_active;
|
|
/* Buffer to store the stop sign */
|
|
@@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
|
|
return num_returned;
|
|
}
|
|
|
|
+static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
|
|
+{
|
|
+ if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
|
|
+ return;
|
|
+
|
|
+ /* No need to queue the work if the bit was already set. */
|
|
+ if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
|
|
+ &vb->config_read_bitmap))
|
|
+ return;
|
|
+
|
|
+ queue_work(vb->balloon_wq, &vb->report_free_page_work);
|
|
+}
|
|
+
|
|
static void virtballoon_changed(struct virtio_device *vdev)
|
|
{
|
|
struct virtio_balloon *vb = vdev->priv;
|
|
unsigned long flags;
|
|
- s64 diff = towards_target(vb);
|
|
-
|
|
- if (diff) {
|
|
- spin_lock_irqsave(&vb->stop_update_lock, flags);
|
|
- if (!vb->stop_update)
|
|
- queue_work(system_freezable_wq,
|
|
- &vb->update_balloon_size_work);
|
|
- spin_unlock_irqrestore(&vb->stop_update_lock, flags);
|
|
- }
|
|
|
|
- if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
|
|
- virtio_cread(vdev, struct virtio_balloon_config,
|
|
- free_page_report_cmd_id, &vb->cmd_id_received);
|
|
- if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
|
|
- /* Pass ULONG_MAX to give back all the free pages */
|
|
- return_free_pages_to_mm(vb, ULONG_MAX);
|
|
- } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
|
|
- vb->cmd_id_received !=
|
|
- virtio32_to_cpu(vdev, vb->cmd_id_active)) {
|
|
- spin_lock_irqsave(&vb->stop_update_lock, flags);
|
|
- if (!vb->stop_update) {
|
|
- queue_work(vb->balloon_wq,
|
|
- &vb->report_free_page_work);
|
|
- }
|
|
- spin_unlock_irqrestore(&vb->stop_update_lock, flags);
|
|
- }
|
|
+ spin_lock_irqsave(&vb->stop_update_lock, flags);
|
|
+ if (!vb->stop_update) {
|
|
+ queue_work(system_freezable_wq,
|
|
+ &vb->update_balloon_size_work);
|
|
+ virtio_balloon_queue_free_page_work(vb);
|
|
}
|
|
+ spin_unlock_irqrestore(&vb->stop_update_lock, flags);
|
|
}
|
|
|
|
static void update_balloon_size(struct virtio_balloon *vb)
|
|
@@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb)
|
|
return 0;
|
|
}
|
|
|
|
+static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
|
|
+{
|
|
+ if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
|
|
+ &vb->config_read_bitmap))
|
|
+ virtio_cread(vb->vdev, struct virtio_balloon_config,
|
|
+ free_page_report_cmd_id,
|
|
+ &vb->cmd_id_received_cache);
|
|
+
|
|
+ return vb->cmd_id_received_cache;
|
|
+}
|
|
+
|
|
static int send_cmd_id_start(struct virtio_balloon *vb)
|
|
{
|
|
struct scatterlist sg;
|
|
@@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
|
|
while (virtqueue_get_buf(vq, &unused))
|
|
;
|
|
|
|
- vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received);
|
|
+ vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
|
|
+ virtio_balloon_cmd_id_received(vb));
|
|
sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
|
|
err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
|
|
if (!err)
|
|
@@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb)
|
|
* stop the reporting.
|
|
*/
|
|
cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
|
|
- if (cmd_id_active != vb->cmd_id_received)
|
|
+ if (unlikely(cmd_id_active !=
|
|
+ virtio_balloon_cmd_id_received(vb)))
|
|
break;
|
|
|
|
/*
|
|
@@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb)
|
|
return 0;
|
|
}
|
|
|
|
-static void report_free_page_func(struct work_struct *work)
|
|
+static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
|
|
{
|
|
int err;
|
|
- struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
|
|
- report_free_page_work);
|
|
struct device *dev = &vb->vdev->dev;
|
|
|
|
/* Start by sending the received cmd id to host with an outbuf. */
|
|
@@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work)
|
|
dev_err(dev, "Failed to send a stop id, err = %d\n", err);
|
|
}
|
|
|
|
+static void report_free_page_func(struct work_struct *work)
|
|
+{
|
|
+ struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
|
|
+ report_free_page_work);
|
|
+ u32 cmd_id_received;
|
|
+
|
|
+ cmd_id_received = virtio_balloon_cmd_id_received(vb);
|
|
+ if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
|
|
+ /* Pass ULONG_MAX to give back all the free pages */
|
|
+ return_free_pages_to_mm(vb, ULONG_MAX);
|
|
+ } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
|
|
+ cmd_id_received !=
|
|
+ virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
|
|
+ virtio_balloon_report_free_page(vb);
|
|
+ }
|
|
+}
|
|
+
|
|
#ifdef CONFIG_BALLOON_COMPACTION
|
|
/*
|
|
* virtballoon_migratepage - perform the balloon page migration on behalf of
|
|
@@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
|
|
goto out_del_vqs;
|
|
}
|
|
INIT_WORK(&vb->report_free_page_work, report_free_page_func);
|
|
- vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
|
|
+ vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
|
|
vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
|
|
VIRTIO_BALLOON_CMD_ID_STOP);
|
|
vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
|
|
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
|
|
index 4cd9ea5c75be7..d9dd0f7892791 100644
|
|
--- a/drivers/virtio/virtio_mmio.c
|
|
+++ b/drivers/virtio/virtio_mmio.c
|
|
@@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
|
|
{
|
|
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
|
|
unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
|
|
- int i, err;
|
|
+ int i, err, queue_idx = 0;
|
|
|
|
err = request_irq(irq, vm_interrupt, IRQF_SHARED,
|
|
dev_name(&vdev->dev), vm_dev);
|
|
@@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
|
|
return err;
|
|
|
|
for (i = 0; i < nvqs; ++i) {
|
|
- vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i],
|
|
+ if (!names[i]) {
|
|
+ vqs[i] = NULL;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
|
|
ctx ? ctx[i] : false);
|
|
if (IS_ERR(vqs[i])) {
|
|
vm_del_vqs(vdev);
|
|
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
|
|
index 5c4a764717c4d..81208cd3f4ecb 100644
|
|
--- a/drivers/watchdog/mt7621_wdt.c
|
|
+++ b/drivers/watchdog/mt7621_wdt.c
|
|
@@ -17,6 +17,7 @@
|
|
#include <linux/watchdog.h>
|
|
#include <linux/moduleparam.h>
|
|
#include <linux/platform_device.h>
|
|
+#include <linux/mod_devicetable.h>
|
|
|
|
#include <asm/mach-ralink/ralink_regs.h>
|
|
|
|
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
|
|
index 0d74c3e489797..55c9eb6c6e519 100644
|
|
--- a/drivers/watchdog/renesas_wdt.c
|
|
+++ b/drivers/watchdog/renesas_wdt.c
|
|
@@ -74,12 +74,17 @@ static int rwdt_init_timeout(struct watchdog_device *wdev)
|
|
static int rwdt_start(struct watchdog_device *wdev)
|
|
{
|
|
struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
|
|
+ u8 val;
|
|
|
|
pm_runtime_get_sync(wdev->parent);
|
|
|
|
- rwdt_write(priv, 0, RWTCSRB);
|
|
- rwdt_write(priv, priv->cks, RWTCSRA);
|
|
+ /* Stop the timer before we modify any register */
|
|
+ val = readb_relaxed(priv->base + RWTCSRA) & ~RWTCSRA_TME;
|
|
+ rwdt_write(priv, val, RWTCSRA);
|
|
+
|
|
rwdt_init_timeout(wdev);
|
|
+ rwdt_write(priv, priv->cks, RWTCSRA);
|
|
+ rwdt_write(priv, 0, RWTCSRB);
|
|
|
|
while (readb_relaxed(priv->base + RWTCSRA) & RWTCSRA_WRFLG)
|
|
cpu_relax();
|
|
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
|
|
index 98967f0a7d10e..db7c57d82cfdc 100644
|
|
--- a/drivers/watchdog/rt2880_wdt.c
|
|
+++ b/drivers/watchdog/rt2880_wdt.c
|
|
@@ -18,6 +18,7 @@
|
|
#include <linux/watchdog.h>
|
|
#include <linux/moduleparam.h>
|
|
#include <linux/platform_device.h>
|
|
+#include <linux/mod_devicetable.h>
|
|
|
|
#include <asm/mach-ralink/ralink_regs.h>
|
|
|
|
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
|
|
index 93194f3e75404..117e76b2f9391 100644
|
|
--- a/drivers/xen/events/events_base.c
|
|
+++ b/drivers/xen/events/events_base.c
|
|
@@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
|
|
xen_have_vector_callback = 0;
|
|
return;
|
|
}
|
|
- pr_info("Xen HVM callback vector for event delivery is enabled\n");
|
|
+ pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
|
|
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
|
|
xen_hvm_callback_vector);
|
|
}
|
|
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
|
|
index 2e5d845b50914..7aa64d1b119c2 100644
|
|
--- a/drivers/xen/pvcalls-back.c
|
|
+++ b/drivers/xen/pvcalls-back.c
|
|
@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
|
|
|
|
/* write the data, then modify the indexes */
|
|
virt_wmb();
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ atomic_set(&map->read, 0);
|
|
intf->in_error = ret;
|
|
- else
|
|
+ } else
|
|
intf->in_prod = prod + ret;
|
|
/* update the indexes, then notify the other end */
|
|
virt_wmb();
|
|
@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
|
|
static void pvcalls_sk_state_change(struct sock *sock)
|
|
{
|
|
struct sock_mapping *map = sock->sk_user_data;
|
|
- struct pvcalls_data_intf *intf;
|
|
|
|
if (map == NULL)
|
|
return;
|
|
|
|
- intf = map->ring;
|
|
- intf->in_error = -ENOTCONN;
|
|
+ atomic_inc(&map->read);
|
|
notify_remote_via_irq(map->irq);
|
|
}
|
|
|
|
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
|
|
index 77224d8f3e6fe..91da7e44d5d4f 100644
|
|
--- a/drivers/xen/pvcalls-front.c
|
|
+++ b/drivers/xen/pvcalls-front.c
|
|
@@ -31,6 +31,12 @@
|
|
#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
|
|
#define PVCALLS_FRONT_MAX_SPIN 5000
|
|
|
|
+static struct proto pvcalls_proto = {
|
|
+ .name = "PVCalls",
|
|
+ .owner = THIS_MODULE,
|
|
+ .obj_size = sizeof(struct sock),
|
|
+};
|
|
+
|
|
struct pvcalls_bedata {
|
|
struct xen_pvcalls_front_ring ring;
|
|
grant_ref_t ref;
|
|
@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
|
|
return ret;
|
|
}
|
|
|
|
+static void free_active_ring(struct sock_mapping *map)
|
|
+{
|
|
+ if (!map->active.ring)
|
|
+ return;
|
|
+
|
|
+ free_pages((unsigned long)map->active.data.in,
|
|
+ map->active.ring->ring_order);
|
|
+ free_page((unsigned long)map->active.ring);
|
|
+}
|
|
+
|
|
+static int alloc_active_ring(struct sock_mapping *map)
|
|
+{
|
|
+ void *bytes;
|
|
+
|
|
+ map->active.ring = (struct pvcalls_data_intf *)
|
|
+ get_zeroed_page(GFP_KERNEL);
|
|
+ if (!map->active.ring)
|
|
+ goto out;
|
|
+
|
|
+ map->active.ring->ring_order = PVCALLS_RING_ORDER;
|
|
+ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
|
|
+ PVCALLS_RING_ORDER);
|
|
+ if (!bytes)
|
|
+ goto out;
|
|
+
|
|
+ map->active.data.in = bytes;
|
|
+ map->active.data.out = bytes +
|
|
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+out:
|
|
+ free_active_ring(map);
|
|
+ return -ENOMEM;
|
|
+}
|
|
+
|
|
static int create_active(struct sock_mapping *map, int *evtchn)
|
|
{
|
|
void *bytes;
|
|
@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
|
|
*evtchn = -1;
|
|
init_waitqueue_head(&map->active.inflight_conn_req);
|
|
|
|
- map->active.ring = (struct pvcalls_data_intf *)
|
|
- __get_free_page(GFP_KERNEL | __GFP_ZERO);
|
|
- if (map->active.ring == NULL)
|
|
- goto out_error;
|
|
- map->active.ring->ring_order = PVCALLS_RING_ORDER;
|
|
- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
|
|
- PVCALLS_RING_ORDER);
|
|
- if (bytes == NULL)
|
|
- goto out_error;
|
|
+ bytes = map->active.data.in;
|
|
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
|
|
map->active.ring->ref[i] = gnttab_grant_foreign_access(
|
|
pvcalls_front_dev->otherend_id,
|
|
@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
|
|
pvcalls_front_dev->otherend_id,
|
|
pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
|
|
|
|
- map->active.data.in = bytes;
|
|
- map->active.data.out = bytes +
|
|
- XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
|
|
-
|
|
ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
|
|
if (ret)
|
|
goto out_error;
|
|
@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
|
|
out_error:
|
|
if (*evtchn >= 0)
|
|
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
|
|
- free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
|
|
- free_page((unsigned long)map->active.ring);
|
|
return ret;
|
|
}
|
|
|
|
@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
|
|
return PTR_ERR(map);
|
|
|
|
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
|
|
+ ret = alloc_active_ring(map);
|
|
+ if (ret < 0) {
|
|
+ pvcalls_exit_sock(sock);
|
|
+ return ret;
|
|
+ }
|
|
|
|
spin_lock(&bedata->socket_lock);
|
|
ret = get_request(bedata, &req_id);
|
|
if (ret < 0) {
|
|
spin_unlock(&bedata->socket_lock);
|
|
+ free_active_ring(map);
|
|
pvcalls_exit_sock(sock);
|
|
return ret;
|
|
}
|
|
ret = create_active(map, &evtchn);
|
|
if (ret < 0) {
|
|
spin_unlock(&bedata->socket_lock);
|
|
+ free_active_ring(map);
|
|
pvcalls_exit_sock(sock);
|
|
return ret;
|
|
}
|
|
@@ -560,15 +595,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
|
|
error = intf->in_error;
|
|
/* get pointers before reading from the ring */
|
|
virt_rmb();
|
|
- if (error < 0)
|
|
- return error;
|
|
|
|
size = pvcalls_queued(prod, cons, array_size);
|
|
masked_prod = pvcalls_mask(prod, array_size);
|
|
masked_cons = pvcalls_mask(cons, array_size);
|
|
|
|
if (size == 0)
|
|
- return 0;
|
|
+ return error ?: size;
|
|
|
|
if (len > size)
|
|
len = size;
|
|
@@ -780,25 +813,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
|
|
}
|
|
}
|
|
|
|
- spin_lock(&bedata->socket_lock);
|
|
- ret = get_request(bedata, &req_id);
|
|
- if (ret < 0) {
|
|
+ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
|
|
+ if (map2 == NULL) {
|
|
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
|
|
(void *)&map->passive.flags);
|
|
- spin_unlock(&bedata->socket_lock);
|
|
+ pvcalls_exit_sock(sock);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ ret = alloc_active_ring(map2);
|
|
+ if (ret < 0) {
|
|
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
|
|
+ (void *)&map->passive.flags);
|
|
+ kfree(map2);
|
|
pvcalls_exit_sock(sock);
|
|
return ret;
|
|
}
|
|
- map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
|
|
- if (map2 == NULL) {
|
|
+ spin_lock(&bedata->socket_lock);
|
|
+ ret = get_request(bedata, &req_id);
|
|
+ if (ret < 0) {
|
|
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
|
|
(void *)&map->passive.flags);
|
|
spin_unlock(&bedata->socket_lock);
|
|
+ free_active_ring(map2);
|
|
+ kfree(map2);
|
|
pvcalls_exit_sock(sock);
|
|
- return -ENOMEM;
|
|
+ return ret;
|
|
}
|
|
+
|
|
ret = create_active(map2, &evtchn);
|
|
if (ret < 0) {
|
|
+ free_active_ring(map2);
|
|
kfree(map2);
|
|
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
|
|
(void *)&map->passive.flags);
|
|
@@ -839,7 +883,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
|
|
|
|
received:
|
|
map2->sock = newsock;
|
|
- newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
|
|
+ newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
|
|
if (!newsock->sk) {
|
|
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
|
|
map->passive.inflight_req_id = PVCALLS_INVALID_ID;
|
|
@@ -1032,8 +1076,8 @@ int pvcalls_front_release(struct socket *sock)
|
|
spin_lock(&bedata->socket_lock);
|
|
list_del(&map->list);
|
|
spin_unlock(&bedata->socket_lock);
|
|
- if (READ_ONCE(map->passive.inflight_req_id) !=
|
|
- PVCALLS_INVALID_ID) {
|
|
+ if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
|
|
+ READ_ONCE(map->passive.inflight_req_id) != 0) {
|
|
pvcalls_front_free_map(bedata,
|
|
map->passive.accept_map);
|
|
}
|
|
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
|
|
index 0568fd9868210..e432bd27a2e7b 100644
|
|
--- a/fs/afs/flock.c
|
|
+++ b/fs/afs/flock.c
|
|
@@ -208,7 +208,7 @@ again:
|
|
/* The new front of the queue now owns the state variables. */
|
|
next = list_entry(vnode->pending_locks.next,
|
|
struct file_lock, fl_u.afs.link);
|
|
- vnode->lock_key = afs_file_key(next->fl_file);
|
|
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
|
|
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
|
|
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
|
|
goto again;
|
|
@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
|
|
/* The new front of the queue now owns the state variables. */
|
|
next = list_entry(vnode->pending_locks.next,
|
|
struct file_lock, fl_u.afs.link);
|
|
- vnode->lock_key = afs_file_key(next->fl_file);
|
|
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
|
|
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
|
|
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
|
|
afs_lock_may_be_available(vnode);
|
|
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
|
|
index 6b17d36204142..1a4ce07fb406d 100644
|
|
--- a/fs/afs/inode.c
|
|
+++ b/fs/afs/inode.c
|
|
@@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
|
|
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
|
|
valid = true;
|
|
} else {
|
|
- vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
|
|
vnode->cb_v_break = vnode->volume->cb_v_break;
|
|
valid = false;
|
|
}
|
|
@@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode)
|
|
#endif
|
|
|
|
afs_put_permits(rcu_access_pointer(vnode->permit_cache));
|
|
+ key_put(vnode->lock_key);
|
|
+ vnode->lock_key = NULL;
|
|
_leave("");
|
|
}
|
|
|
|
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
|
|
index 07bc10f076aac..d443e2bfa0946 100644
|
|
--- a/fs/afs/protocol_yfs.h
|
|
+++ b/fs/afs/protocol_yfs.h
|
|
@@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus {
|
|
struct yfs_xdr_u64 max_quota;
|
|
struct yfs_xdr_u64 file_quota;
|
|
} __packed;
|
|
+
|
|
+enum yfs_lock_type {
|
|
+ yfs_LockNone = -1,
|
|
+ yfs_LockRead = 0,
|
|
+ yfs_LockWrite = 1,
|
|
+ yfs_LockExtend = 2,
|
|
+ yfs_LockRelease = 3,
|
|
+ yfs_LockMandatoryRead = 0x100,
|
|
+ yfs_LockMandatoryWrite = 0x101,
|
|
+ yfs_LockMandatoryExtend = 0x102,
|
|
+};
|
|
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
|
|
index a7b44863d502e..2c588f9bbbda2 100644
|
|
--- a/fs/afs/rxrpc.c
|
|
+++ b/fs/afs/rxrpc.c
|
|
@@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls;
|
|
static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
|
|
static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
|
|
static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
|
|
+static void afs_delete_async_call(struct work_struct *);
|
|
static void afs_process_async_call(struct work_struct *);
|
|
static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
|
|
static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
|
|
@@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call)
|
|
}
|
|
}
|
|
|
|
+static struct afs_call *afs_get_call(struct afs_call *call,
|
|
+ enum afs_call_trace why)
|
|
+{
|
|
+ int u = atomic_inc_return(&call->usage);
|
|
+
|
|
+ trace_afs_call(call, why, u,
|
|
+ atomic_read(&call->net->nr_outstanding_calls),
|
|
+ __builtin_return_address(0));
|
|
+ return call;
|
|
+}
|
|
+
|
|
/*
|
|
* Queue the call for actual work.
|
|
*/
|
|
static void afs_queue_call_work(struct afs_call *call)
|
|
{
|
|
if (call->type->work) {
|
|
- int u = atomic_inc_return(&call->usage);
|
|
-
|
|
- trace_afs_call(call, afs_call_trace_work, u,
|
|
- atomic_read(&call->net->nr_outstanding_calls),
|
|
- __builtin_return_address(0));
|
|
-
|
|
INIT_WORK(&call->work, call->type->work);
|
|
|
|
+ afs_get_call(call, afs_call_trace_work);
|
|
if (!queue_work(afs_wq, &call->work))
|
|
afs_put_call(call);
|
|
}
|
|
@@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
|
|
}
|
|
}
|
|
|
|
+ /* If the call is going to be asynchronous, we need an extra ref for
|
|
+ * the call to hold itself so the caller need not hang on to its ref.
|
|
+ */
|
|
+ if (call->async)
|
|
+ afs_get_call(call, afs_call_trace_get);
|
|
+
|
|
/* create a call */
|
|
rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
|
|
(unsigned long)call,
|
|
@@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
|
|
goto error_do_abort;
|
|
}
|
|
|
|
- /* at this point, an async call may no longer exist as it may have
|
|
- * already completed */
|
|
- if (call->async)
|
|
+ /* Note that at this point, we may have received the reply or an abort
|
|
+ * - and an asynchronous call may already have completed.
|
|
+ */
|
|
+ if (call->async) {
|
|
+ afs_put_call(call);
|
|
return -EINPROGRESS;
|
|
+ }
|
|
|
|
return afs_wait_for_call_to_complete(call, ac);
|
|
|
|
error_do_abort:
|
|
- call->state = AFS_CALL_COMPLETE;
|
|
if (ret != -ECONNABORTED) {
|
|
rxrpc_kernel_abort_call(call->net->socket, rxcall,
|
|
RX_USER_ABORT, ret, "KSD");
|
|
@@ -463,8 +478,24 @@ error_do_abort:
|
|
error_kill_call:
|
|
if (call->type->done)
|
|
call->type->done(call);
|
|
- afs_put_call(call);
|
|
+
|
|
+ /* We need to dispose of the extra ref we grabbed for an async call.
|
|
+ * The call, however, might be queued on afs_async_calls and we need to
|
|
+ * make sure we don't get any more notifications that might requeue it.
|
|
+ */
|
|
+ if (call->rxcall) {
|
|
+ rxrpc_kernel_end_call(call->net->socket, call->rxcall);
|
|
+ call->rxcall = NULL;
|
|
+ }
|
|
+ if (call->async) {
|
|
+ if (cancel_work_sync(&call->async_work))
|
|
+ afs_put_call(call);
|
|
+ afs_put_call(call);
|
|
+ }
|
|
+
|
|
ac->error = ret;
|
|
+ call->state = AFS_CALL_COMPLETE;
|
|
+ afs_put_call(call);
|
|
_leave(" = %d", ret);
|
|
return ret;
|
|
}
|
|
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
|
|
index 12658c1363ae4..5aa57929e8c23 100644
|
|
--- a/fs/afs/yfsclient.c
|
|
+++ b/fs/afs/yfsclient.c
|
|
@@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc,
|
|
bp = xdr_encode_YFSFid(bp, &vnode->fid);
|
|
bp = xdr_encode_string(bp, name, namesz);
|
|
bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
|
|
- bp = xdr_encode_u32(bp, 0); /* ViceLockType */
|
|
+ bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
|
|
yfs_check_req(call, bp);
|
|
|
|
afs_use_fs_server(call, fc->cbi);
|
|
diff --git a/fs/block_dev.c b/fs/block_dev.c
|
|
index a80b4f0ee7c4f..5a35ed922c952 100644
|
|
--- a/fs/block_dev.c
|
|
+++ b/fs/block_dev.c
|
|
@@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev)
|
|
}
|
|
EXPORT_SYMBOL(invalidate_bdev);
|
|
|
|
+static void set_init_blocksize(struct block_device *bdev)
|
|
+{
|
|
+ unsigned bsize = bdev_logical_block_size(bdev);
|
|
+ loff_t size = i_size_read(bdev->bd_inode);
|
|
+
|
|
+ while (bsize < PAGE_SIZE) {
|
|
+ if (size & bsize)
|
|
+ break;
|
|
+ bsize <<= 1;
|
|
+ }
|
|
+ bdev->bd_block_size = bsize;
|
|
+ bdev->bd_inode->i_blkbits = blksize_bits(bsize);
|
|
+}
|
|
+
|
|
int set_blocksize(struct block_device *bdev, int size)
|
|
{
|
|
/* Size must be a power of two, and between 512 and PAGE_SIZE */
|
|
@@ -1408,18 +1422,9 @@ EXPORT_SYMBOL(check_disk_change);
|
|
|
|
void bd_set_size(struct block_device *bdev, loff_t size)
|
|
{
|
|
- unsigned bsize = bdev_logical_block_size(bdev);
|
|
-
|
|
inode_lock(bdev->bd_inode);
|
|
i_size_write(bdev->bd_inode, size);
|
|
inode_unlock(bdev->bd_inode);
|
|
- while (bsize < PAGE_SIZE) {
|
|
- if (size & bsize)
|
|
- break;
|
|
- bsize <<= 1;
|
|
- }
|
|
- bdev->bd_block_size = bsize;
|
|
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
|
|
}
|
|
EXPORT_SYMBOL(bd_set_size);
|
|
|
|
@@ -1496,8 +1501,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
|
|
}
|
|
}
|
|
|
|
- if (!ret)
|
|
+ if (!ret) {
|
|
bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
|
|
+ set_init_blocksize(bdev);
|
|
+ }
|
|
|
|
/*
|
|
* If the device is invalidated, rescan partition
|
|
@@ -1532,6 +1539,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
|
|
goto out_clear;
|
|
}
|
|
bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
|
|
+ set_init_blocksize(bdev);
|
|
}
|
|
|
|
if (bdev->bd_bdi == &noop_backing_dev_info)
|
|
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
|
|
index 97d91e55b70a8..20288b49718fb 100644
|
|
--- a/fs/btrfs/btrfs_inode.h
|
|
+++ b/fs/btrfs/btrfs_inode.h
|
|
@@ -29,6 +29,7 @@ enum {
|
|
BTRFS_INODE_IN_DELALLOC_LIST,
|
|
BTRFS_INODE_READDIO_NEED_LOCK,
|
|
BTRFS_INODE_HAS_PROPS,
|
|
+ BTRFS_INODE_SNAPSHOT_FLUSH,
|
|
};
|
|
|
|
/* in memory btrfs inode */
|
|
@@ -146,6 +147,12 @@ struct btrfs_inode {
|
|
*/
|
|
u64 last_unlink_trans;
|
|
|
|
+ /*
|
|
+ * Track the transaction id of the last transaction used to create a
|
|
+ * hard link for the inode. This is used by the log tree (fsync).
|
|
+ */
|
|
+ u64 last_link_trans;
|
|
+
|
|
/*
|
|
* Number of bytes outstanding that are going to need csums. This is
|
|
* used in ENOSPC accounting.
|
|
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
|
|
index 539901fb51650..92b572eb6d308 100644
|
|
--- a/fs/btrfs/ctree.c
|
|
+++ b/fs/btrfs/ctree.c
|
|
@@ -967,6 +967,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
|
|
return 0;
|
|
}
|
|
|
|
+static struct extent_buffer *alloc_tree_block_no_bg_flush(
|
|
+ struct btrfs_trans_handle *trans,
|
|
+ struct btrfs_root *root,
|
|
+ u64 parent_start,
|
|
+ const struct btrfs_disk_key *disk_key,
|
|
+ int level,
|
|
+ u64 hint,
|
|
+ u64 empty_size)
|
|
+{
|
|
+ struct btrfs_fs_info *fs_info = root->fs_info;
|
|
+ struct extent_buffer *ret;
|
|
+
|
|
+ /*
|
|
+ * If we are COWing a node/leaf from the extent, chunk, device or free
|
|
+ * space trees, make sure that we do not finish block group creation of
|
|
+ * pending block groups. We do this to avoid a deadlock.
|
|
+ * COWing can result in allocation of a new chunk, and flushing pending
|
|
+ * block groups (btrfs_create_pending_block_groups()) can be triggered
|
|
+ * when finishing allocation of a new chunk. Creation of a pending block
|
|
+ * group modifies the extent, chunk, device and free space trees,
|
|
+ * therefore we could deadlock with ourselves since we are holding a
|
|
+ * lock on an extent buffer that btrfs_create_pending_block_groups() may
|
|
+ * try to COW later.
|
|
+ * For similar reasons, we also need to delay flushing pending block
|
|
+ * groups when splitting a leaf or node, from one of those trees, since
|
|
+ * we are holding a write lock on it and its parent or when inserting a
|
|
+ * new root node for one of those trees.
|
|
+ */
|
|
+ if (root == fs_info->extent_root ||
|
|
+ root == fs_info->chunk_root ||
|
|
+ root == fs_info->dev_root ||
|
|
+ root == fs_info->free_space_root)
|
|
+ trans->can_flush_pending_bgs = false;
|
|
+
|
|
+ ret = btrfs_alloc_tree_block(trans, root, parent_start,
|
|
+ root->root_key.objectid, disk_key, level,
|
|
+ hint, empty_size);
|
|
+ trans->can_flush_pending_bgs = true;
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
/*
|
|
* does the dirty work in cow of a single block. The parent block (if
|
|
* supplied) is updated to point to the new cow copy. The new buffer is marked
|
|
@@ -1014,26 +1056,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
|
|
if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
|
|
parent_start = parent->start;
|
|
|
|
- /*
|
|
- * If we are COWing a node/leaf from the extent, chunk or device trees,
|
|
- * make sure that we do not finish block group creation of pending block
|
|
- * groups. We do this to avoid a deadlock.
|
|
- * COWing can result in allocation of a new chunk, and flushing pending
|
|
- * block groups (btrfs_create_pending_block_groups()) can be triggered
|
|
- * when finishing allocation of a new chunk. Creation of a pending block
|
|
- * group modifies the extent, chunk and device trees, therefore we could
|
|
- * deadlock with ourselves since we are holding a lock on an extent
|
|
- * buffer that btrfs_create_pending_block_groups() may try to COW later.
|
|
- */
|
|
- if (root == fs_info->extent_root ||
|
|
- root == fs_info->chunk_root ||
|
|
- root == fs_info->dev_root)
|
|
- trans->can_flush_pending_bgs = false;
|
|
-
|
|
- cow = btrfs_alloc_tree_block(trans, root, parent_start,
|
|
- root->root_key.objectid, &disk_key, level,
|
|
- search_start, empty_size);
|
|
- trans->can_flush_pending_bgs = true;
|
|
+ cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
|
|
+ level, search_start, empty_size);
|
|
if (IS_ERR(cow))
|
|
return PTR_ERR(cow);
|
|
|
|
@@ -2584,14 +2608,27 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
|
|
root_lock = BTRFS_READ_LOCK;
|
|
|
|
if (p->search_commit_root) {
|
|
- /* The commit roots are read only so we always do read locks */
|
|
- if (p->need_commit_sem)
|
|
+ /*
|
|
+ * The commit roots are read only so we always do read locks,
|
|
+ * and we always must hold the commit_root_sem when doing
|
|
+ * searches on them, the only exception is send where we don't
|
|
+ * want to block transaction commits for a long time, so
|
|
+ * we need to clone the commit root in order to avoid races
|
|
+ * with transaction commits that create a snapshot of one of
|
|
+ * the roots used by a send operation.
|
|
+ */
|
|
+ if (p->need_commit_sem) {
|
|
down_read(&fs_info->commit_root_sem);
|
|
- b = root->commit_root;
|
|
- extent_buffer_get(b);
|
|
- level = btrfs_header_level(b);
|
|
- if (p->need_commit_sem)
|
|
+ b = btrfs_clone_extent_buffer(root->commit_root);
|
|
up_read(&fs_info->commit_root_sem);
|
|
+ if (!b)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ } else {
|
|
+ b = root->commit_root;
|
|
+ extent_buffer_get(b);
|
|
+ }
|
|
+ level = btrfs_header_level(b);
|
|
/*
|
|
* Ensure that all callers have set skip_locking when
|
|
* p->search_commit_root = 1.
|
|
@@ -2717,6 +2754,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
|
|
again:
|
|
prev_cmp = -1;
|
|
b = btrfs_search_slot_get_root(root, p, write_lock_level);
|
|
+ if (IS_ERR(b)) {
|
|
+ ret = PTR_ERR(b);
|
|
+ goto done;
|
|
+ }
|
|
|
|
while (b) {
|
|
level = btrfs_header_level(b);
|
|
@@ -3323,8 +3364,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
|
|
else
|
|
btrfs_node_key(lower, &lower_key, 0);
|
|
|
|
- c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
|
|
- &lower_key, level, root->node->start, 0);
|
|
+ c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
|
|
+ root->node->start, 0);
|
|
if (IS_ERR(c))
|
|
return PTR_ERR(c);
|
|
|
|
@@ -3453,8 +3494,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
|
|
mid = (c_nritems + 1) / 2;
|
|
btrfs_node_key(c, &disk_key, mid);
|
|
|
|
- split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
|
|
- &disk_key, level, c->start, 0);
|
|
+ split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
|
|
+ c->start, 0);
|
|
if (IS_ERR(split))
|
|
return PTR_ERR(split);
|
|
|
|
@@ -4238,8 +4279,8 @@ again:
|
|
else
|
|
btrfs_item_key(l, &disk_key, mid);
|
|
|
|
- right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
|
|
- &disk_key, 0, l->start, 0);
|
|
+ right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
|
|
+ l->start, 0);
|
|
if (IS_ERR(right))
|
|
return PTR_ERR(right);
|
|
|
|
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
|
|
index 68f322f600a06..131e90aad941e 100644
|
|
--- a/fs/btrfs/ctree.h
|
|
+++ b/fs/btrfs/ctree.h
|
|
@@ -3141,7 +3141,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
|
|
struct inode *inode, u64 new_size,
|
|
u32 min_type);
|
|
|
|
-int btrfs_start_delalloc_inodes(struct btrfs_root *root);
|
|
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
|
|
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr);
|
|
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
|
|
unsigned int extra_bits,
|
|
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
|
|
index 2aa48aecc52b7..65e4b86376386 100644
|
|
--- a/fs/btrfs/dev-replace.c
|
|
+++ b/fs/btrfs/dev-replace.c
|
|
@@ -797,39 +797,58 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
|
|
case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
|
|
result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
|
|
btrfs_dev_replace_write_unlock(dev_replace);
|
|
- goto leave;
|
|
+ break;
|
|
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
|
|
+ result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
|
|
+ tgt_device = dev_replace->tgtdev;
|
|
+ src_device = dev_replace->srcdev;
|
|
+ btrfs_dev_replace_write_unlock(dev_replace);
|
|
+ btrfs_scrub_cancel(fs_info);
|
|
+ /* btrfs_dev_replace_finishing() will handle the cleanup part */
|
|
+ btrfs_info_in_rcu(fs_info,
|
|
+ "dev_replace from %s (devid %llu) to %s canceled",
|
|
+ btrfs_dev_name(src_device), src_device->devid,
|
|
+ btrfs_dev_name(tgt_device));
|
|
+ break;
|
|
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
|
|
+ /*
|
|
+ * Scrub doing the replace isn't running so we need to do the
|
|
+ * cleanup step of btrfs_dev_replace_finishing() here
|
|
+ */
|
|
result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
|
|
tgt_device = dev_replace->tgtdev;
|
|
src_device = dev_replace->srcdev;
|
|
dev_replace->tgtdev = NULL;
|
|
dev_replace->srcdev = NULL;
|
|
- break;
|
|
- }
|
|
- dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
|
|
- dev_replace->time_stopped = ktime_get_real_seconds();
|
|
- dev_replace->item_needs_writeback = 1;
|
|
- btrfs_dev_replace_write_unlock(dev_replace);
|
|
- btrfs_scrub_cancel(fs_info);
|
|
+ dev_replace->replace_state =
|
|
+ BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
|
|
+ dev_replace->time_stopped = ktime_get_real_seconds();
|
|
+ dev_replace->item_needs_writeback = 1;
|
|
|
|
- trans = btrfs_start_transaction(root, 0);
|
|
- if (IS_ERR(trans)) {
|
|
- mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
|
|
- return PTR_ERR(trans);
|
|
- }
|
|
- ret = btrfs_commit_transaction(trans);
|
|
- WARN_ON(ret);
|
|
+ btrfs_dev_replace_write_unlock(dev_replace);
|
|
|
|
- btrfs_info_in_rcu(fs_info,
|
|
- "dev_replace from %s (devid %llu) to %s canceled",
|
|
- btrfs_dev_name(src_device), src_device->devid,
|
|
- btrfs_dev_name(tgt_device));
|
|
+ btrfs_scrub_cancel(fs_info);
|
|
|
|
- if (tgt_device)
|
|
- btrfs_destroy_dev_replace_tgtdev(tgt_device);
|
|
+ trans = btrfs_start_transaction(root, 0);
|
|
+ if (IS_ERR(trans)) {
|
|
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
|
|
+ return PTR_ERR(trans);
|
|
+ }
|
|
+ ret = btrfs_commit_transaction(trans);
|
|
+ WARN_ON(ret);
|
|
+
|
|
+ btrfs_info_in_rcu(fs_info,
|
|
+ "suspended dev_replace from %s (devid %llu) to %s canceled",
|
|
+ btrfs_dev_name(src_device), src_device->devid,
|
|
+ btrfs_dev_name(tgt_device));
|
|
+
|
|
+ if (tgt_device)
|
|
+ btrfs_destroy_dev_replace_tgtdev(tgt_device);
|
|
+ break;
|
|
+ default:
|
|
+ result = -EINVAL;
|
|
+ }
|
|
|
|
-leave:
|
|
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
|
|
return result;
|
|
}
|
|
@@ -884,6 +903,8 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
|
|
"cannot continue dev_replace, tgtdev is missing");
|
|
btrfs_info(fs_info,
|
|
"you may cancel the operation after 'mount -o degraded'");
|
|
+ dev_replace->replace_state =
|
|
+ BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
|
|
btrfs_dev_replace_write_unlock(dev_replace);
|
|
return 0;
|
|
}
|
|
@@ -895,6 +916,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
|
|
* dev-replace to start anyway.
|
|
*/
|
|
if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
|
|
+ btrfs_dev_replace_write_lock(dev_replace);
|
|
+ dev_replace->replace_state =
|
|
+ BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
|
|
+ btrfs_dev_replace_write_unlock(dev_replace);
|
|
btrfs_info(fs_info,
|
|
"cannot resume dev-replace, other exclusive operation running");
|
|
return 0;
|
|
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
|
|
index 6d776717d8b39..f74c9e6b84ce2 100644
|
|
--- a/fs/btrfs/disk-io.c
|
|
+++ b/fs/btrfs/disk-io.c
|
|
@@ -4155,6 +4155,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
|
|
spin_lock(&fs_info->ordered_root_lock);
|
|
}
|
|
spin_unlock(&fs_info->ordered_root_lock);
|
|
+
|
|
+ /*
|
|
+ * We need this here because if we've been flipped read-only we won't
|
|
+ * get sync() from the umount, so we need to make sure any ordered
|
|
+ * extents that haven't had their dirty pages IO start writeout yet
|
|
+ * actually get run and error out properly.
|
|
+ */
|
|
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
|
|
}
|
|
|
|
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
|
|
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
|
|
index a1febf155747e..fe1fef3d7eed1 100644
|
|
--- a/fs/btrfs/extent-tree.c
|
|
+++ b/fs/btrfs/extent-tree.c
|
|
@@ -8944,6 +8944,10 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
|
|
goto out_free;
|
|
}
|
|
|
|
+ err = btrfs_run_delayed_items(trans);
|
|
+ if (err)
|
|
+ goto out_end_trans;
|
|
+
|
|
if (block_rsv)
|
|
trans->block_rsv = block_rsv;
|
|
|
|
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
|
|
index d228f706ff3e6..c8e886caacd77 100644
|
|
--- a/fs/btrfs/extent_io.c
|
|
+++ b/fs/btrfs/extent_io.c
|
|
@@ -3934,12 +3934,25 @@ static int extent_write_cache_pages(struct address_space *mapping,
|
|
range_whole = 1;
|
|
scanned = 1;
|
|
}
|
|
- if (wbc->sync_mode == WB_SYNC_ALL)
|
|
+
|
|
+ /*
|
|
+ * We do the tagged writepage as long as the snapshot flush bit is set
|
|
+ * and we are the first one who do the filemap_flush() on this inode.
|
|
+ *
|
|
+ * The nr_to_write == LONG_MAX is needed to make sure other flushers do
|
|
+ * not race in and drop the bit.
|
|
+ */
|
|
+ if (range_whole && wbc->nr_to_write == LONG_MAX &&
|
|
+ test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
|
|
+ &BTRFS_I(inode)->runtime_flags))
|
|
+ wbc->tagged_writepages = 1;
|
|
+
|
|
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
|
|
tag = PAGECACHE_TAG_TOWRITE;
|
|
else
|
|
tag = PAGECACHE_TAG_DIRTY;
|
|
retry:
|
|
- if (wbc->sync_mode == WB_SYNC_ALL)
|
|
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
|
|
tag_pages_for_writeback(mapping, index, end);
|
|
done_index = index;
|
|
while (!done && !nr_to_write_done && (index <= end) &&
|
|
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
|
|
index 9ea4c6f0352f0..965a64bde6fde 100644
|
|
--- a/fs/btrfs/inode.c
|
|
+++ b/fs/btrfs/inode.c
|
|
@@ -1372,7 +1372,8 @@ next_slot:
|
|
* Do the same check as in btrfs_cross_ref_exist but
|
|
* without the unnecessary search.
|
|
*/
|
|
- if (btrfs_file_extent_generation(leaf, fi) <=
|
|
+ if (!nolock &&
|
|
+ btrfs_file_extent_generation(leaf, fi) <=
|
|
btrfs_root_last_snapshot(&root->root_item))
|
|
goto out_check;
|
|
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
|
|
@@ -3146,9 +3147,6 @@ out:
|
|
/* once for the tree */
|
|
btrfs_put_ordered_extent(ordered_extent);
|
|
|
|
- /* Try to release some metadata so we don't get an OOM but don't wait */
|
|
- btrfs_btree_balance_dirty_nodelay(fs_info);
|
|
-
|
|
return ret;
|
|
}
|
|
|
|
@@ -3686,6 +3684,21 @@ cache_index:
|
|
* inode is not a directory, logging its parent unnecessarily.
|
|
*/
|
|
BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
|
|
+ /*
|
|
+ * Similar reasoning for last_link_trans, needs to be set otherwise
|
|
+ * for a case like the following:
|
|
+ *
|
|
+ * mkdir A
|
|
+ * touch foo
|
|
+ * ln foo A/bar
|
|
+ * echo 2 > /proc/sys/vm/drop_caches
|
|
+ * fsync foo
|
|
+ * <power failure>
|
|
+ *
|
|
+ * Would result in link bar and directory A not existing after the power
|
|
+ * failure.
|
|
+ */
|
|
+ BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
|
|
|
|
path->slots[0]++;
|
|
if (inode->i_nlink != 1 ||
|
|
@@ -6406,14 +6419,19 @@ fail_dir_item:
|
|
err = btrfs_del_root_ref(trans, key.objectid,
|
|
root->root_key.objectid, parent_ino,
|
|
&local_index, name, name_len);
|
|
-
|
|
+ if (err)
|
|
+ btrfs_abort_transaction(trans, err);
|
|
} else if (add_backref) {
|
|
u64 local_index;
|
|
int err;
|
|
|
|
err = btrfs_del_inode_ref(trans, root, name, name_len,
|
|
ino, parent_ino, &local_index);
|
|
+ if (err)
|
|
+ btrfs_abort_transaction(trans, err);
|
|
}
|
|
+
|
|
+ /* Return the original error code */
|
|
return ret;
|
|
}
|
|
|
|
@@ -6625,6 +6643,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
|
|
if (err)
|
|
goto fail;
|
|
}
|
|
+ BTRFS_I(inode)->last_link_trans = trans->transid;
|
|
d_instantiate(dentry, inode);
|
|
ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
|
|
true, NULL);
|
|
@@ -9157,6 +9176,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
|
|
ei->index_cnt = (u64)-1;
|
|
ei->dir_index = 0;
|
|
ei->last_unlink_trans = 0;
|
|
+ ei->last_link_trans = 0;
|
|
ei->last_log_commit = 0;
|
|
|
|
spin_lock_init(&ei->lock);
|
|
@@ -9968,7 +9988,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
|
|
* some fairly slow code that needs optimization. This walks the list
|
|
* of all the inodes with pending delalloc and forces them to disk.
|
|
*/
|
|
-static int start_delalloc_inodes(struct btrfs_root *root, int nr)
|
|
+static int start_delalloc_inodes(struct btrfs_root *root, int nr, bool snapshot)
|
|
{
|
|
struct btrfs_inode *binode;
|
|
struct inode *inode;
|
|
@@ -9996,6 +10016,9 @@ static int start_delalloc_inodes(struct btrfs_root *root, int nr)
|
|
}
|
|
spin_unlock(&root->delalloc_lock);
|
|
|
|
+ if (snapshot)
|
|
+ set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
|
|
+ &binode->runtime_flags);
|
|
work = btrfs_alloc_delalloc_work(inode);
|
|
if (!work) {
|
|
iput(inode);
|
|
@@ -10029,7 +10052,7 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
-int btrfs_start_delalloc_inodes(struct btrfs_root *root)
|
|
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
|
|
{
|
|
struct btrfs_fs_info *fs_info = root->fs_info;
|
|
int ret;
|
|
@@ -10037,7 +10060,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root)
|
|
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
|
|
return -EROFS;
|
|
|
|
- ret = start_delalloc_inodes(root, -1);
|
|
+ ret = start_delalloc_inodes(root, -1, true);
|
|
if (ret > 0)
|
|
ret = 0;
|
|
return ret;
|
|
@@ -10066,7 +10089,7 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr)
|
|
&fs_info->delalloc_roots);
|
|
spin_unlock(&fs_info->delalloc_root_lock);
|
|
|
|
- ret = start_delalloc_inodes(root, nr);
|
|
+ ret = start_delalloc_inodes(root, nr, false);
|
|
btrfs_put_fs_root(root);
|
|
if (ret < 0)
|
|
goto out;
|
|
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
|
|
index 802a628e9f7d7..87f4f0f65dbb4 100644
|
|
--- a/fs/btrfs/ioctl.c
|
|
+++ b/fs/btrfs/ioctl.c
|
|
@@ -777,7 +777,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
|
|
wait_event(root->subv_writers->wait,
|
|
percpu_counter_sum(&root->subv_writers->counter) == 0);
|
|
|
|
- ret = btrfs_start_delalloc_inodes(root);
|
|
+ ret = btrfs_start_delalloc_snapshot(root);
|
|
if (ret)
|
|
goto dec_and_free;
|
|
|
|
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
|
|
index f70825af6438e..9e419f6878c56 100644
|
|
--- a/fs/btrfs/qgroup.c
|
|
+++ b/fs/btrfs/qgroup.c
|
|
@@ -1013,16 +1013,22 @@ out_add_root:
|
|
btrfs_abort_transaction(trans, ret);
|
|
goto out_free_path;
|
|
}
|
|
- spin_lock(&fs_info->qgroup_lock);
|
|
- fs_info->quota_root = quota_root;
|
|
- set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
|
|
- spin_unlock(&fs_info->qgroup_lock);
|
|
|
|
ret = btrfs_commit_transaction(trans);
|
|
trans = NULL;
|
|
if (ret)
|
|
goto out_free_path;
|
|
|
|
+ /*
|
|
+ * Set quota enabled flag after committing the transaction, to avoid
|
|
+ * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
|
|
+ * creation.
|
|
+ */
|
|
+ spin_lock(&fs_info->qgroup_lock);
|
|
+ fs_info->quota_root = quota_root;
|
|
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
|
|
+ spin_unlock(&fs_info->qgroup_lock);
|
|
+
|
|
ret = qgroup_rescan_init(fs_info, 0, 1);
|
|
if (!ret) {
|
|
qgroup_rescan_zero_tracking(fs_info);
|
|
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
|
|
index 902819d3cf41a..bbd1b36f4918f 100644
|
|
--- a/fs/btrfs/scrub.c
|
|
+++ b/fs/btrfs/scrub.c
|
|
@@ -322,6 +322,7 @@ static struct full_stripe_lock *insert_full_stripe_lock(
|
|
struct rb_node *parent = NULL;
|
|
struct full_stripe_lock *entry;
|
|
struct full_stripe_lock *ret;
|
|
+ unsigned int nofs_flag;
|
|
|
|
lockdep_assert_held(&locks_root->lock);
|
|
|
|
@@ -339,8 +340,17 @@ static struct full_stripe_lock *insert_full_stripe_lock(
|
|
}
|
|
}
|
|
|
|
- /* Insert new lock */
|
|
+ /*
|
|
+ * Insert new lock.
|
|
+ *
|
|
+ * We must use GFP_NOFS because the scrub task might be waiting for a
|
|
+ * worker task executing this function and in turn a transaction commit
|
|
+ * might be waiting the scrub task to pause (which needs to wait for all
|
|
+ * the worker tasks to complete before pausing).
|
|
+ */
|
|
+ nofs_flag = memalloc_nofs_save();
|
|
ret = kmalloc(sizeof(*ret), GFP_KERNEL);
|
|
+ memalloc_nofs_restore(nofs_flag);
|
|
if (!ret)
|
|
return ERR_PTR(-ENOMEM);
|
|
ret->logical = fstripe_logical;
|
|
@@ -1620,8 +1630,19 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
|
|
mutex_lock(&sctx->wr_lock);
|
|
again:
|
|
if (!sctx->wr_curr_bio) {
|
|
+ unsigned int nofs_flag;
|
|
+
|
|
+ /*
|
|
+ * We must use GFP_NOFS because the scrub task might be waiting
|
|
+ * for a worker task executing this function and in turn a
|
|
+ * transaction commit might be waiting the scrub task to pause
|
|
+ * (which needs to wait for all the worker tasks to complete
|
|
+ * before pausing).
|
|
+ */
|
|
+ nofs_flag = memalloc_nofs_save();
|
|
sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
|
|
GFP_KERNEL);
|
|
+ memalloc_nofs_restore(nofs_flag);
|
|
if (!sctx->wr_curr_bio) {
|
|
mutex_unlock(&sctx->wr_lock);
|
|
return -ENOMEM;
|
|
@@ -3772,6 +3793,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
|
|
struct scrub_ctx *sctx;
|
|
int ret;
|
|
struct btrfs_device *dev;
|
|
+ unsigned int nofs_flag;
|
|
|
|
if (btrfs_fs_closing(fs_info))
|
|
return -EINVAL;
|
|
@@ -3875,6 +3897,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
|
|
atomic_inc(&fs_info->scrubs_running);
|
|
mutex_unlock(&fs_info->scrub_lock);
|
|
|
|
+ /*
|
|
+ * In order to avoid deadlock with reclaim when there is a transaction
|
|
+ * trying to pause scrub, make sure we use GFP_NOFS for all the
|
|
+ * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
|
|
+ * invoked by our callees. The pausing request is done when the
|
|
+ * transaction commit starts, and it blocks the transaction until scrub
|
|
+ * is paused (done at specific points at scrub_stripe() or right above
|
|
+ * before incrementing fs_info->scrubs_running).
|
|
+ */
|
|
+ nofs_flag = memalloc_nofs_save();
|
|
if (!is_dev_replace) {
|
|
/*
|
|
* by holding device list mutex, we can
|
|
@@ -3887,6 +3919,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
|
|
|
|
if (!ret)
|
|
ret = scrub_enumerate_chunks(sctx, dev, start, end);
|
|
+ memalloc_nofs_restore(nofs_flag);
|
|
|
|
wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
|
|
atomic_dec(&fs_info->scrubs_running);
|
|
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
|
|
index 645fc81e2a948..38f6cb0bc5f6c 100644
|
|
--- a/fs/btrfs/super.c
|
|
+++ b/fs/btrfs/super.c
|
|
@@ -1677,6 +1677,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
|
|
flags | SB_RDONLY, device_name, data);
|
|
if (IS_ERR(mnt_root)) {
|
|
root = ERR_CAST(mnt_root);
|
|
+ kfree(subvol_name);
|
|
goto out;
|
|
}
|
|
|
|
@@ -1686,12 +1687,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
|
|
if (error < 0) {
|
|
root = ERR_PTR(error);
|
|
mntput(mnt_root);
|
|
+ kfree(subvol_name);
|
|
goto out;
|
|
}
|
|
}
|
|
}
|
|
if (IS_ERR(mnt_root)) {
|
|
root = ERR_CAST(mnt_root);
|
|
+ kfree(subvol_name);
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
|
|
index a5ce99a6c9365..15d2914f0a678 100644
|
|
--- a/fs/btrfs/tree-log.c
|
|
+++ b/fs/btrfs/tree-log.c
|
|
@@ -5778,6 +5778,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
|
|
goto end_trans;
|
|
}
|
|
|
|
+ /*
|
|
+ * If a new hard link was added to the inode in the current transaction
|
|
+ * and its link count is now greater than 1, we need to fallback to a
|
|
+ * transaction commit, otherwise we can end up not logging all its new
|
|
+ * parents for all the hard links. Here just from the dentry used to
|
|
+ * fsync, we can not visit the ancestor inodes for all the other hard
|
|
+ * links to figure out if any is new, so we fallback to a transaction
|
|
+ * commit (instead of adding a lot of complexity of scanning a btree,
|
|
+ * since this scenario is not a common use case).
|
|
+ */
|
|
+ if (inode->vfs_inode.i_nlink > 1 &&
|
|
+ inode->last_link_trans > last_committed) {
|
|
+ ret = -EMLINK;
|
|
+ goto end_trans;
|
|
+ }
|
|
+
|
|
while (1) {
|
|
if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
|
|
break;
|
|
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
|
|
index f435d397019ea..6f09f6032db3f 100644
|
|
--- a/fs/btrfs/volumes.c
|
|
+++ b/fs/btrfs/volumes.c
|
|
@@ -850,6 +850,35 @@ static noinline struct btrfs_device *device_list_add(const char *path,
|
|
return ERR_PTR(-EEXIST);
|
|
}
|
|
|
|
+ /*
|
|
+ * We are going to replace the device path for a given devid,
|
|
+ * make sure it's the same device if the device is mounted
|
|
+ */
|
|
+ if (device->bdev) {
|
|
+ struct block_device *path_bdev;
|
|
+
|
|
+ path_bdev = lookup_bdev(path);
|
|
+ if (IS_ERR(path_bdev)) {
|
|
+ mutex_unlock(&fs_devices->device_list_mutex);
|
|
+ return ERR_CAST(path_bdev);
|
|
+ }
|
|
+
|
|
+ if (device->bdev != path_bdev) {
|
|
+ bdput(path_bdev);
|
|
+ mutex_unlock(&fs_devices->device_list_mutex);
|
|
+ btrfs_warn_in_rcu(device->fs_info,
|
|
+ "duplicate device fsid:devid for %pU:%llu old:%s new:%s",
|
|
+ disk_super->fsid, devid,
|
|
+ rcu_str_deref(device->name), path);
|
|
+ return ERR_PTR(-EEXIST);
|
|
+ }
|
|
+ bdput(path_bdev);
|
|
+ btrfs_info_in_rcu(device->fs_info,
|
|
+ "device fsid %pU devid %llu moved old:%s new:%s",
|
|
+ disk_super->fsid, devid,
|
|
+ rcu_str_deref(device->name), path);
|
|
+ }
|
|
+
|
|
name = rcu_string_strdup(path, GFP_NOFS);
|
|
if (!name) {
|
|
mutex_unlock(&fs_devices->device_list_mutex);
|
|
@@ -3724,6 +3753,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
|
|
int ret;
|
|
u64 num_devices;
|
|
unsigned seq;
|
|
+ bool reducing_integrity;
|
|
|
|
if (btrfs_fs_closing(fs_info) ||
|
|
atomic_read(&fs_info->balance_pause_req) ||
|
|
@@ -3803,24 +3833,30 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
|
|
!(bctl->sys.target & allowed)) ||
|
|
((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
|
|
(fs_info->avail_metadata_alloc_bits & allowed) &&
|
|
- !(bctl->meta.target & allowed))) {
|
|
- if (bctl->flags & BTRFS_BALANCE_FORCE) {
|
|
- btrfs_info(fs_info,
|
|
- "balance: force reducing metadata integrity");
|
|
- } else {
|
|
- btrfs_err(fs_info,
|
|
- "balance: reduces metadata integrity, use --force if you want this");
|
|
- ret = -EINVAL;
|
|
- goto out;
|
|
- }
|
|
- }
|
|
+ !(bctl->meta.target & allowed)))
|
|
+ reducing_integrity = true;
|
|
+ else
|
|
+ reducing_integrity = false;
|
|
+
|
|
+ /* if we're not converting, the target field is uninitialized */
|
|
+ meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
|
|
+ bctl->meta.target : fs_info->avail_metadata_alloc_bits;
|
|
+ data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
|
|
+ bctl->data.target : fs_info->avail_data_alloc_bits;
|
|
} while (read_seqretry(&fs_info->profiles_lock, seq));
|
|
|
|
- /* if we're not converting, the target field is uninitialized */
|
|
- meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
|
|
- bctl->meta.target : fs_info->avail_metadata_alloc_bits;
|
|
- data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
|
|
- bctl->data.target : fs_info->avail_data_alloc_bits;
|
|
+ if (reducing_integrity) {
|
|
+ if (bctl->flags & BTRFS_BALANCE_FORCE) {
|
|
+ btrfs_info(fs_info,
|
|
+ "balance: force reducing metadata integrity");
|
|
+ } else {
|
|
+ btrfs_err(fs_info,
|
|
+ "balance: reduces metadata integrity, use --force if you want this");
|
|
+ ret = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
+
|
|
if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
|
|
btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
|
|
int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
|
|
@@ -4768,19 +4804,17 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
|
|
/*
|
|
* Use the number of data stripes to figure out how big this chunk
|
|
* is really going to be in terms of logical address space,
|
|
- * and compare that answer with the max chunk size
|
|
+ * and compare that answer with the max chunk size. If it's higher,
|
|
+ * we try to reduce stripe_size.
|
|
*/
|
|
if (stripe_size * data_stripes > max_chunk_size) {
|
|
- stripe_size = div_u64(max_chunk_size, data_stripes);
|
|
-
|
|
- /* bump the answer up to a 16MB boundary */
|
|
- stripe_size = round_up(stripe_size, SZ_16M);
|
|
-
|
|
/*
|
|
- * But don't go higher than the limits we found while searching
|
|
- * for free extents
|
|
+ * Reduce stripe_size, round it up to a 16MB boundary again and
|
|
+ * then use it, unless it ends up being even bigger than the
|
|
+ * previous value we had already.
|
|
*/
|
|
- stripe_size = min(devices_info[ndevs - 1].max_avail,
|
|
+ stripe_size = min(round_up(div_u64(max_chunk_size,
|
|
+ data_stripes), SZ_16M),
|
|
stripe_size);
|
|
}
|
|
|
|
@@ -7478,6 +7512,8 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
|
|
struct btrfs_path *path;
|
|
struct btrfs_root *root = fs_info->dev_root;
|
|
struct btrfs_key key;
|
|
+ u64 prev_devid = 0;
|
|
+ u64 prev_dev_ext_end = 0;
|
|
int ret = 0;
|
|
|
|
key.objectid = 1;
|
|
@@ -7522,10 +7558,22 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
|
|
chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
|
|
physical_len = btrfs_dev_extent_length(leaf, dext);
|
|
|
|
+ /* Check if this dev extent overlaps with the previous one */
|
|
+ if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
|
|
+ btrfs_err(fs_info,
|
|
+"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
|
|
+ devid, physical_offset, prev_dev_ext_end);
|
|
+ ret = -EUCLEAN;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
|
|
physical_offset, physical_len);
|
|
if (ret < 0)
|
|
goto out;
|
|
+ prev_devid = devid;
|
|
+ prev_dev_ext_end = physical_offset + physical_len;
|
|
+
|
|
ret = btrfs_next_item(root, path);
|
|
if (ret < 0)
|
|
goto out;
|
|
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
|
|
index ea78c3d6dcfce..f141b45ce3498 100644
|
|
--- a/fs/btrfs/xattr.c
|
|
+++ b/fs/btrfs/xattr.c
|
|
@@ -11,6 +11,7 @@
|
|
#include <linux/security.h>
|
|
#include <linux/posix_acl_xattr.h>
|
|
#include <linux/iversion.h>
|
|
+#include <linux/sched/mm.h>
|
|
#include "ctree.h"
|
|
#include "btrfs_inode.h"
|
|
#include "transaction.h"
|
|
@@ -422,9 +423,15 @@ static int btrfs_initxattrs(struct inode *inode,
|
|
{
|
|
const struct xattr *xattr;
|
|
struct btrfs_trans_handle *trans = fs_info;
|
|
+ unsigned int nofs_flag;
|
|
char *name;
|
|
int err = 0;
|
|
|
|
+ /*
|
|
+ * We're holding a transaction handle, so use a NOFS memory allocation
|
|
+ * context to avoid deadlock if reclaim happens.
|
|
+ */
|
|
+ nofs_flag = memalloc_nofs_save();
|
|
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
|
|
name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
|
|
strlen(xattr->name) + 1, GFP_KERNEL);
|
|
@@ -440,6 +447,7 @@ static int btrfs_initxattrs(struct inode *inode,
|
|
if (err < 0)
|
|
break;
|
|
}
|
|
+ memalloc_nofs_restore(nofs_flag);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
|
|
index f3496db4bb3e8..08aaf580fa1c9 100644
|
|
--- a/fs/ceph/caps.c
|
|
+++ b/fs/ceph/caps.c
|
|
@@ -1032,6 +1032,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci)
|
|
list_del_init(&ci->i_snap_realm_item);
|
|
ci->i_snap_realm_counter++;
|
|
ci->i_snap_realm = NULL;
|
|
+ if (realm->ino == ci->i_vino.ino)
|
|
+ realm->inode = NULL;
|
|
spin_unlock(&realm->inodes_with_caps_lock);
|
|
ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
|
|
realm);
|
|
@@ -3569,7 +3571,6 @@ retry:
|
|
tcap->cap_id = t_cap_id;
|
|
tcap->seq = t_seq - 1;
|
|
tcap->issue_seq = t_seq - 1;
|
|
- tcap->mseq = t_mseq;
|
|
tcap->issued |= issued;
|
|
tcap->implemented |= issued;
|
|
if (cap == ci->i_auth_cap)
|
|
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
|
|
index 041c27ea8de15..f74193da0e092 100644
|
|
--- a/fs/ceph/snap.c
|
|
+++ b/fs/ceph/snap.c
|
|
@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
|
|
capsnap->size);
|
|
|
|
spin_lock(&mdsc->snap_flush_lock);
|
|
- list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
|
|
+ if (list_empty(&ci->i_snap_flush_item))
|
|
+ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
|
|
spin_unlock(&mdsc->snap_flush_lock);
|
|
return 1; /* caller may want to ceph_flush_snaps */
|
|
}
|
|
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
|
|
index 38ab0fca49e1d..3736391992917 100644
|
|
--- a/fs/cifs/cifsglob.h
|
|
+++ b/fs/cifs/cifsglob.h
|
|
@@ -1426,6 +1426,7 @@ struct mid_q_entry {
|
|
int mid_state; /* wish this were enum but can not pass to wait_event */
|
|
unsigned int mid_flags;
|
|
__le16 command; /* smb command code */
|
|
+ unsigned int optype; /* operation type */
|
|
bool large_buf:1; /* if valid response, is pointer to large buf */
|
|
bool multiRsp:1; /* multiple trans2 responses for one request */
|
|
bool multiEnd:1; /* both received */
|
|
@@ -1562,6 +1563,25 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
|
|
kfree(param);
|
|
}
|
|
|
|
+static inline bool is_interrupt_error(int error)
|
|
+{
|
|
+ switch (error) {
|
|
+ case -EINTR:
|
|
+ case -ERESTARTSYS:
|
|
+ case -ERESTARTNOHAND:
|
|
+ case -ERESTARTNOINTR:
|
|
+ return true;
|
|
+ }
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static inline bool is_retryable_error(int error)
|
|
+{
|
|
+ if (is_interrupt_error(error) || error == -EAGAIN)
|
|
+ return true;
|
|
+ return false;
|
|
+}
|
|
+
|
|
#define MID_FREE 0
|
|
#define MID_REQUEST_ALLOCATED 1
|
|
#define MID_REQUEST_SUBMITTED 2
|
|
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
|
|
index f82fd342bca56..327a101f7894f 100644
|
|
--- a/fs/cifs/cifssmb.c
|
|
+++ b/fs/cifs/cifssmb.c
|
|
@@ -1458,18 +1458,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
|
|
}
|
|
|
|
static int
|
|
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
|
+__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
|
|
+ bool malformed)
|
|
{
|
|
int length;
|
|
- struct cifs_readdata *rdata = mid->callback_data;
|
|
|
|
length = cifs_discard_remaining_data(server);
|
|
- dequeue_mid(mid, rdata->result);
|
|
+ dequeue_mid(mid, malformed);
|
|
mid->resp_buf = server->smallbuf;
|
|
server->smallbuf = NULL;
|
|
return length;
|
|
}
|
|
|
|
+static int
|
|
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
|
+{
|
|
+ struct cifs_readdata *rdata = mid->callback_data;
|
|
+
|
|
+ return __cifs_readv_discard(server, mid, rdata->result);
|
|
+}
|
|
+
|
|
int
|
|
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
|
{
|
|
@@ -1511,12 +1519,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
|
return -1;
|
|
}
|
|
|
|
+ /* set up first two iov for signature check and to get credits */
|
|
+ rdata->iov[0].iov_base = buf;
|
|
+ rdata->iov[0].iov_len = 4;
|
|
+ rdata->iov[1].iov_base = buf + 4;
|
|
+ rdata->iov[1].iov_len = server->total_read - 4;
|
|
+ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
|
|
+ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
|
|
+ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
|
|
+ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
|
|
+
|
|
/* Was the SMB read successful? */
|
|
rdata->result = server->ops->map_error(buf, false);
|
|
if (rdata->result != 0) {
|
|
cifs_dbg(FYI, "%s: server returned error %d\n",
|
|
__func__, rdata->result);
|
|
- return cifs_readv_discard(server, mid);
|
|
+ /* normal error on read response */
|
|
+ return __cifs_readv_discard(server, mid, false);
|
|
}
|
|
|
|
/* Is there enough to get to the rest of the READ_RSP header? */
|
|
@@ -1560,14 +1579,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
|
server->total_read += length;
|
|
}
|
|
|
|
- /* set up first iov for signature check */
|
|
- rdata->iov[0].iov_base = buf;
|
|
- rdata->iov[0].iov_len = 4;
|
|
- rdata->iov[1].iov_base = buf + 4;
|
|
- rdata->iov[1].iov_len = server->total_read - 4;
|
|
- cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
|
|
- rdata->iov[0].iov_base, server->total_read);
|
|
-
|
|
/* how much data is in the response? */
|
|
#ifdef CONFIG_CIFS_SMB_DIRECT
|
|
use_rdma_mr = rdata->mr;
|
|
@@ -2032,7 +2043,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
|
|
|
|
for (j = 0; j < nr_pages; j++) {
|
|
unlock_page(wdata2->pages[j]);
|
|
- if (rc != 0 && rc != -EAGAIN) {
|
|
+ if (rc != 0 && !is_retryable_error(rc)) {
|
|
SetPageError(wdata2->pages[j]);
|
|
end_page_writeback(wdata2->pages[j]);
|
|
put_page(wdata2->pages[j]);
|
|
@@ -2041,7 +2052,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
|
|
|
|
if (rc) {
|
|
kref_put(&wdata2->refcount, cifs_writedata_release);
|
|
- if (rc == -EAGAIN)
|
|
+ if (is_retryable_error(rc))
|
|
continue;
|
|
break;
|
|
}
|
|
@@ -2050,7 +2061,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
|
|
i += nr_pages;
|
|
} while (i < wdata->nr_pages);
|
|
|
|
- mapping_set_error(inode->i_mapping, rc);
|
|
+ if (rc != 0 && !is_retryable_error(rc))
|
|
+ mapping_set_error(inode->i_mapping, rc);
|
|
kref_put(&wdata->refcount, cifs_writedata_release);
|
|
}
|
|
|
|
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
|
|
index 6f24f129a7513..b14e1abb891c9 100644
|
|
--- a/fs/cifs/connect.c
|
|
+++ b/fs/cifs/connect.c
|
|
@@ -50,6 +50,7 @@
|
|
#include "cifs_unicode.h"
|
|
#include "cifs_debug.h"
|
|
#include "cifs_fs_sb.h"
|
|
+#include "dns_resolve.h"
|
|
#include "ntlmssp.h"
|
|
#include "nterr.h"
|
|
#include "rfc1002pdu.h"
|
|
@@ -318,6 +319,53 @@ static void cifs_prune_tlinks(struct work_struct *work);
|
|
static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
|
|
const char *devname, bool is_smb3);
|
|
|
|
+/*
|
|
+ * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
|
|
+ * get their ip addresses changed at some point.
|
|
+ *
|
|
+ * This should be called with server->srv_mutex held.
|
|
+ */
|
|
+#ifdef CONFIG_CIFS_DFS_UPCALL
|
|
+static int reconn_set_ipaddr(struct TCP_Server_Info *server)
|
|
+{
|
|
+ int rc;
|
|
+ int len;
|
|
+ char *unc, *ipaddr = NULL;
|
|
+
|
|
+ if (!server->hostname)
|
|
+ return -EINVAL;
|
|
+
|
|
+ len = strlen(server->hostname) + 3;
|
|
+
|
|
+ unc = kmalloc(len, GFP_KERNEL);
|
|
+ if (!unc) {
|
|
+ cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ snprintf(unc, len, "\\\\%s", server->hostname);
|
|
+
|
|
+ rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
|
|
+ kfree(unc);
|
|
+
|
|
+ if (rc < 0) {
|
|
+ cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
|
|
+ __func__, server->hostname, rc);
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
|
|
+ strlen(ipaddr));
|
|
+ kfree(ipaddr);
|
|
+
|
|
+ return !rc ? -1 : 0;
|
|
+}
|
|
+#else
|
|
+static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* cifs tcp session reconnection
|
|
*
|
|
@@ -418,6 +466,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
|
|
rc = generic_ip_connect(server);
|
|
if (rc) {
|
|
cifs_dbg(FYI, "reconnect error %d\n", rc);
|
|
+ rc = reconn_set_ipaddr(server);
|
|
+ if (rc) {
|
|
+ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
|
|
+ __func__, rc);
|
|
+ }
|
|
mutex_unlock(&server->srv_mutex);
|
|
msleep(3000);
|
|
} else {
|
|
@@ -534,6 +587,21 @@ server_unresponsive(struct TCP_Server_Info *server)
|
|
return false;
|
|
}
|
|
|
|
+static inline bool
|
|
+zero_credits(struct TCP_Server_Info *server)
|
|
+{
|
|
+ int val;
|
|
+
|
|
+ spin_lock(&server->req_lock);
|
|
+ val = server->credits + server->echo_credits + server->oplock_credits;
|
|
+ if (server->in_flight == 0 && val == 0) {
|
|
+ spin_unlock(&server->req_lock);
|
|
+ return true;
|
|
+ }
|
|
+ spin_unlock(&server->req_lock);
|
|
+ return false;
|
|
+}
|
|
+
|
|
static int
|
|
cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
|
|
{
|
|
@@ -546,6 +614,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
|
|
for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
|
|
try_to_freeze();
|
|
|
|
+ /* reconnect if no credits and no requests in flight */
|
|
+ if (zero_credits(server)) {
|
|
+ cifs_reconnect(server);
|
|
+ return -ECONNABORTED;
|
|
+ }
|
|
+
|
|
if (server_unresponsive(server))
|
|
return -ECONNABORTED;
|
|
if (cifs_rdma_enabled(server) && server->smbd_conn)
|
|
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
|
|
index c9bc56b1baac2..c13effbaadba5 100644
|
|
--- a/fs/cifs/file.c
|
|
+++ b/fs/cifs/file.c
|
|
@@ -732,7 +732,8 @@ reopen_success:
|
|
|
|
if (can_flush) {
|
|
rc = filemap_write_and_wait(inode->i_mapping);
|
|
- mapping_set_error(inode->i_mapping, rc);
|
|
+ if (!is_interrupt_error(rc))
|
|
+ mapping_set_error(inode->i_mapping, rc);
|
|
|
|
if (tcon->unix_ext)
|
|
rc = cifs_get_inode_info_unix(&inode, full_path,
|
|
@@ -1131,14 +1132,18 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
|
|
|
|
/*
|
|
* Accessing maxBuf is racy with cifs_reconnect - need to store value
|
|
- * and check it for zero before using.
|
|
+ * and check it before using.
|
|
*/
|
|
max_buf = tcon->ses->server->maxBuf;
|
|
- if (!max_buf) {
|
|
+ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
|
|
free_xid(xid);
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
|
|
+ PAGE_SIZE);
|
|
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
|
|
+ PAGE_SIZE);
|
|
max_num = (max_buf - sizeof(struct smb_hdr)) /
|
|
sizeof(LOCKING_ANDX_RANGE);
|
|
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
|
|
@@ -1471,12 +1476,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
|
|
|
|
/*
|
|
* Accessing maxBuf is racy with cifs_reconnect - need to store value
|
|
- * and check it for zero before using.
|
|
+ * and check it before using.
|
|
*/
|
|
max_buf = tcon->ses->server->maxBuf;
|
|
- if (!max_buf)
|
|
+ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
|
|
return -EINVAL;
|
|
|
|
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
|
|
+ PAGE_SIZE);
|
|
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
|
|
+ PAGE_SIZE);
|
|
max_num = (max_buf - sizeof(struct smb_hdr)) /
|
|
sizeof(LOCKING_ANDX_RANGE);
|
|
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
|
|
@@ -2109,6 +2118,7 @@ static int cifs_writepages(struct address_space *mapping,
|
|
pgoff_t end, index;
|
|
struct cifs_writedata *wdata;
|
|
int rc = 0;
|
|
+ int saved_rc = 0;
|
|
unsigned int xid;
|
|
|
|
/*
|
|
@@ -2137,8 +2147,10 @@ retry:
|
|
|
|
rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
|
|
&wsize, &credits);
|
|
- if (rc)
|
|
+ if (rc != 0) {
|
|
+ done = true;
|
|
break;
|
|
+ }
|
|
|
|
tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
|
|
|
|
@@ -2146,6 +2158,7 @@ retry:
|
|
&found_pages);
|
|
if (!wdata) {
|
|
rc = -ENOMEM;
|
|
+ done = true;
|
|
add_credits_and_wake_if(server, credits, 0);
|
|
break;
|
|
}
|
|
@@ -2174,7 +2187,7 @@ retry:
|
|
if (rc != 0) {
|
|
add_credits_and_wake_if(server, wdata->credits, 0);
|
|
for (i = 0; i < nr_pages; ++i) {
|
|
- if (rc == -EAGAIN)
|
|
+ if (is_retryable_error(rc))
|
|
redirty_page_for_writepage(wbc,
|
|
wdata->pages[i]);
|
|
else
|
|
@@ -2182,7 +2195,7 @@ retry:
|
|
end_page_writeback(wdata->pages[i]);
|
|
put_page(wdata->pages[i]);
|
|
}
|
|
- if (rc != -EAGAIN)
|
|
+ if (!is_retryable_error(rc))
|
|
mapping_set_error(mapping, rc);
|
|
}
|
|
kref_put(&wdata->refcount, cifs_writedata_release);
|
|
@@ -2192,6 +2205,15 @@ retry:
|
|
continue;
|
|
}
|
|
|
|
+ /* Return immediately if we received a signal during writing */
|
|
+ if (is_interrupt_error(rc)) {
|
|
+ done = true;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (rc != 0 && saved_rc == 0)
|
|
+ saved_rc = rc;
|
|
+
|
|
wbc->nr_to_write -= nr_pages;
|
|
if (wbc->nr_to_write <= 0)
|
|
done = true;
|
|
@@ -2209,6 +2231,9 @@ retry:
|
|
goto retry;
|
|
}
|
|
|
|
+ if (saved_rc != 0)
|
|
+ rc = saved_rc;
|
|
+
|
|
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
|
|
mapping->writeback_index = index;
|
|
|
|
@@ -2241,8 +2266,8 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
|
|
set_page_writeback(page);
|
|
retry_write:
|
|
rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
|
|
- if (rc == -EAGAIN) {
|
|
- if (wbc->sync_mode == WB_SYNC_ALL)
|
|
+ if (is_retryable_error(rc)) {
|
|
+ if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
|
|
goto retry_write;
|
|
redirty_page_for_writepage(wbc, page);
|
|
} else if (rc != 0) {
|
|
@@ -2617,11 +2642,13 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
|
|
if (rc)
|
|
break;
|
|
|
|
+ cur_len = min_t(const size_t, len, wsize);
|
|
+
|
|
if (ctx->direct_io) {
|
|
ssize_t result;
|
|
|
|
result = iov_iter_get_pages_alloc(
|
|
- from, &pagevec, wsize, &start);
|
|
+ from, &pagevec, cur_len, &start);
|
|
if (result < 0) {
|
|
cifs_dbg(VFS,
|
|
"direct_writev couldn't get user pages "
|
|
@@ -2630,6 +2657,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
|
|
result, from->type,
|
|
from->iov_offset, from->count);
|
|
dump_stack();
|
|
+
|
|
+ rc = result;
|
|
+ add_credits_and_wake_if(server, credits, 0);
|
|
break;
|
|
}
|
|
cur_len = (size_t)result;
|
|
@@ -2665,6 +2695,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
|
|
|
|
rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
|
|
if (rc) {
|
|
+ kvfree(wdata->pages);
|
|
kfree(wdata);
|
|
add_credits_and_wake_if(server, credits, 0);
|
|
break;
|
|
@@ -2676,6 +2707,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
|
|
if (rc) {
|
|
for (i = 0; i < nr_pages; i++)
|
|
put_page(wdata->pages[i]);
|
|
+ kvfree(wdata->pages);
|
|
kfree(wdata);
|
|
add_credits_and_wake_if(server, credits, 0);
|
|
break;
|
|
@@ -3313,13 +3345,16 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
|
|
cur_len, &start);
|
|
if (result < 0) {
|
|
cifs_dbg(VFS,
|
|
- "couldn't get user pages (cur_len=%zd)"
|
|
+ "couldn't get user pages (rc=%zd)"
|
|
" iter type %d"
|
|
" iov_offset %zd count %zd\n",
|
|
result, direct_iov.type,
|
|
direct_iov.iov_offset,
|
|
direct_iov.count);
|
|
dump_stack();
|
|
+
|
|
+ rc = result;
|
|
+ add_credits_and_wake_if(server, credits, 0);
|
|
break;
|
|
}
|
|
cur_len = (size_t)result;
|
|
@@ -3352,8 +3387,12 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
|
|
}
|
|
|
|
rc = cifs_read_allocate_pages(rdata, npages);
|
|
- if (rc)
|
|
- goto error;
|
|
+ if (rc) {
|
|
+ kvfree(rdata->pages);
|
|
+ kfree(rdata);
|
|
+ add_credits_and_wake_if(server, credits, 0);
|
|
+ break;
|
|
+ }
|
|
|
|
rdata->tailsz = PAGE_SIZE;
|
|
}
|
|
@@ -3373,7 +3412,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
|
|
if (!rdata->cfile->invalidHandle ||
|
|
!(rc = cifs_reopen_file(rdata->cfile, true)))
|
|
rc = server->ops->async_readv(rdata);
|
|
-error:
|
|
if (rc) {
|
|
add_credits_and_wake_if(server, rdata->credits, 0);
|
|
kref_put(&rdata->refcount,
|
|
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
|
|
index a81a9df997c1c..84d51ca91ef7b 100644
|
|
--- a/fs/cifs/inode.c
|
|
+++ b/fs/cifs/inode.c
|
|
@@ -2261,6 +2261,11 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
|
|
* the flush returns error?
|
|
*/
|
|
rc = filemap_write_and_wait(inode->i_mapping);
|
|
+ if (is_interrupt_error(rc)) {
|
|
+ rc = -ERESTARTSYS;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
mapping_set_error(inode->i_mapping, rc);
|
|
rc = 0;
|
|
|
|
@@ -2404,6 +2409,11 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
|
|
* the flush returns error?
|
|
*/
|
|
rc = filemap_write_and_wait(inode->i_mapping);
|
|
+ if (is_interrupt_error(rc)) {
|
|
+ rc = -ERESTARTSYS;
|
|
+ goto cifs_setattr_exit;
|
|
+ }
|
|
+
|
|
mapping_set_error(inode->i_mapping, rc);
|
|
rc = 0;
|
|
|
|
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
|
|
index 8a41f4eba7264..6f33253938cd0 100644
|
|
--- a/fs/cifs/misc.c
|
|
+++ b/fs/cifs/misc.c
|
|
@@ -111,21 +111,27 @@ struct cifs_tcon *
|
|
tconInfoAlloc(void)
|
|
{
|
|
struct cifs_tcon *ret_buf;
|
|
- ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
|
|
- if (ret_buf) {
|
|
- atomic_inc(&tconInfoAllocCount);
|
|
- ret_buf->tidStatus = CifsNew;
|
|
- ++ret_buf->tc_count;
|
|
- INIT_LIST_HEAD(&ret_buf->openFileList);
|
|
- INIT_LIST_HEAD(&ret_buf->tcon_list);
|
|
- spin_lock_init(&ret_buf->open_file_lock);
|
|
- mutex_init(&ret_buf->crfid.fid_mutex);
|
|
- ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
|
|
- GFP_KERNEL);
|
|
- spin_lock_init(&ret_buf->stat_lock);
|
|
- atomic_set(&ret_buf->num_local_opens, 0);
|
|
- atomic_set(&ret_buf->num_remote_opens, 0);
|
|
+
|
|
+ ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
|
|
+ if (!ret_buf)
|
|
+ return NULL;
|
|
+ ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL);
|
|
+ if (!ret_buf->crfid.fid) {
|
|
+ kfree(ret_buf);
|
|
+ return NULL;
|
|
}
|
|
+
|
|
+ atomic_inc(&tconInfoAllocCount);
|
|
+ ret_buf->tidStatus = CifsNew;
|
|
+ ++ret_buf->tc_count;
|
|
+ INIT_LIST_HEAD(&ret_buf->openFileList);
|
|
+ INIT_LIST_HEAD(&ret_buf->tcon_list);
|
|
+ spin_lock_init(&ret_buf->open_file_lock);
|
|
+ mutex_init(&ret_buf->crfid.fid_mutex);
|
|
+ spin_lock_init(&ret_buf->stat_lock);
|
|
+ atomic_set(&ret_buf->num_local_opens, 0);
|
|
+ atomic_set(&ret_buf->num_remote_opens, 0);
|
|
+
|
|
return ret_buf;
|
|
}
|
|
|
|
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
|
|
index e169e1a5fd358..3925a7bfc74d6 100644
|
|
--- a/fs/cifs/readdir.c
|
|
+++ b/fs/cifs/readdir.c
|
|
@@ -655,7 +655,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
|
|
/* scan and find it */
|
|
int i;
|
|
char *cur_ent;
|
|
- char *end_of_smb = cfile->srch_inf.ntwrk_buf_start +
|
|
+ char *end_of_smb;
|
|
+
|
|
+ if (cfile->srch_inf.ntwrk_buf_start == NULL) {
|
|
+ cifs_dbg(VFS, "ntwrk_buf_start is NULL during readdir\n");
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
+ end_of_smb = cfile->srch_inf.ntwrk_buf_start +
|
|
server->ops->calc_smb_size(
|
|
cfile->srch_inf.ntwrk_buf_start,
|
|
server);
|
|
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
|
|
index 4ed10dd086e6f..b204e84b87fb5 100644
|
|
--- a/fs/cifs/smb2file.c
|
|
+++ b/fs/cifs/smb2file.c
|
|
@@ -122,12 +122,14 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
|
|
|
|
/*
|
|
* Accessing maxBuf is racy with cifs_reconnect - need to store value
|
|
- * and check it for zero before using.
|
|
+ * and check it before using.
|
|
*/
|
|
max_buf = tcon->ses->server->maxBuf;
|
|
- if (!max_buf)
|
|
+ if (max_buf < sizeof(struct smb2_lock_element))
|
|
return -EINVAL;
|
|
|
|
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
|
|
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
|
|
max_num = max_buf / sizeof(struct smb2_lock_element);
|
|
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
|
|
if (!buf)
|
|
@@ -264,6 +266,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
|
|
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
|
|
max_num = max_buf / sizeof(struct smb2_lock_element);
|
|
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
|
|
if (!buf) {
|
|
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
|
|
index a8999f930b224..057d2034209fa 100644
|
|
--- a/fs/cifs/smb2inode.c
|
|
+++ b/fs/cifs/smb2inode.c
|
|
@@ -294,6 +294,8 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
|
|
int rc;
|
|
struct smb2_file_all_info *smb2_data;
|
|
__u32 create_options = 0;
|
|
+ struct cifs_fid fid;
|
|
+ bool no_cached_open = tcon->nohandlecache;
|
|
|
|
*adjust_tz = false;
|
|
*symlink = false;
|
|
@@ -302,6 +304,21 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
|
|
GFP_KERNEL);
|
|
if (smb2_data == NULL)
|
|
return -ENOMEM;
|
|
+
|
|
+ /* If it is a root and its handle is cached then use it */
|
|
+ if (!strlen(full_path) && !no_cached_open) {
|
|
+ rc = open_shroot(xid, tcon, &fid);
|
|
+ if (rc)
|
|
+ goto out;
|
|
+ rc = SMB2_query_info(xid, tcon, fid.persistent_fid,
|
|
+ fid.volatile_fid, smb2_data);
|
|
+ close_shroot(&tcon->crfid);
|
|
+ if (rc)
|
|
+ goto out;
|
|
+ move_smb2_info_to_cifs(data, smb2_data);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
if (backup_cred(cifs_sb))
|
|
create_options |= CREATE_OPEN_BACKUP_INTENT;
|
|
|
|
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
|
|
index d47b7f5dfa6ce..924269cec1352 100644
|
|
--- a/fs/cifs/smb2maperror.c
|
|
+++ b/fs/cifs/smb2maperror.c
|
|
@@ -379,8 +379,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
|
|
{STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
|
|
{STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
|
|
{STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
|
|
- {STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
|
|
- {STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
|
|
+ {STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
|
|
+ {STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
|
|
{STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
|
|
{STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
|
|
"STATUS_CTL_FILE_NOT_SUPPORTED"},
|
|
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
|
|
index 6a9c47541c53d..7b8b58fb4d3fb 100644
|
|
--- a/fs/cifs/smb2misc.c
|
|
+++ b/fs/cifs/smb2misc.c
|
|
@@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
|
|
if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
|
|
return false;
|
|
|
|
+ if (rsp->sync_hdr.CreditRequest) {
|
|
+ spin_lock(&server->req_lock);
|
|
+ server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
|
|
+ spin_unlock(&server->req_lock);
|
|
+ wake_up(&server->request_q);
|
|
+ }
|
|
+
|
|
if (rsp->StructureSize !=
|
|
smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
|
|
if (le16_to_cpu(rsp->StructureSize) == 44)
|
|
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
|
|
index e25c7aade98a4..aa71e620f3cd8 100644
|
|
--- a/fs/cifs/smb2ops.c
|
|
+++ b/fs/cifs/smb2ops.c
|
|
@@ -34,6 +34,7 @@
|
|
#include "cifs_ioctl.h"
|
|
#include "smbdirect.h"
|
|
|
|
+/* Change credits for different ops and return the total number of credits */
|
|
static int
|
|
change_conf(struct TCP_Server_Info *server)
|
|
{
|
|
@@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server)
|
|
server->oplock_credits = server->echo_credits = 0;
|
|
switch (server->credits) {
|
|
case 0:
|
|
- return -1;
|
|
+ return 0;
|
|
case 1:
|
|
server->echoes = false;
|
|
server->oplocks = false;
|
|
- cifs_dbg(VFS, "disabling echoes and oplocks\n");
|
|
break;
|
|
case 2:
|
|
server->echoes = true;
|
|
server->oplocks = false;
|
|
server->echo_credits = 1;
|
|
- cifs_dbg(FYI, "disabling oplocks\n");
|
|
break;
|
|
default:
|
|
server->echoes = true;
|
|
@@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server)
|
|
server->echo_credits = 1;
|
|
}
|
|
server->credits -= server->echo_credits + server->oplock_credits;
|
|
- return 0;
|
|
+ return server->credits + server->echo_credits + server->oplock_credits;
|
|
}
|
|
|
|
static void
|
|
smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
|
|
const int optype)
|
|
{
|
|
- int *val, rc = 0;
|
|
+ int *val, rc = -1;
|
|
+
|
|
spin_lock(&server->req_lock);
|
|
val = server->ops->get_credits_field(server, optype);
|
|
|
|
@@ -101,8 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
|
|
}
|
|
spin_unlock(&server->req_lock);
|
|
wake_up(&server->request_q);
|
|
- if (rc)
|
|
- cifs_reconnect(server);
|
|
+
|
|
+ if (server->tcpStatus == CifsNeedReconnect)
|
|
+ return;
|
|
+
|
|
+ switch (rc) {
|
|
+ case -1:
|
|
+ /* change_conf hasn't been executed */
|
|
+ break;
|
|
+ case 0:
|
|
+ cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
|
|
+ break;
|
|
+ case 1:
|
|
+ cifs_dbg(VFS, "disabling echoes and oplocks\n");
|
|
+ break;
|
|
+ case 2:
|
|
+ cifs_dbg(FYI, "disabling oplocks\n");
|
|
+ break;
|
|
+ default:
|
|
+ cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
|
|
+ }
|
|
}
|
|
|
|
static void
|
|
@@ -136,7 +154,11 @@ smb2_get_credits(struct mid_q_entry *mid)
|
|
{
|
|
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
|
|
|
|
- return le16_to_cpu(shdr->CreditRequest);
|
|
+ if (mid->mid_state == MID_RESPONSE_RECEIVED
|
|
+ || mid->mid_state == MID_RESPONSE_MALFORMED)
|
|
+ return le16_to_cpu(shdr->CreditRequest);
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
static int
|
|
@@ -165,14 +187,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
|
|
|
|
scredits = server->credits;
|
|
/* can deadlock with reopen */
|
|
- if (scredits == 1) {
|
|
+ if (scredits <= 8) {
|
|
*num = SMB2_MAX_BUFFER_SIZE;
|
|
*credits = 0;
|
|
break;
|
|
}
|
|
|
|
- /* leave one credit for a possible reopen */
|
|
- scredits--;
|
|
+ /* leave some credits for reopen and other ops */
|
|
+ scredits -= 8;
|
|
*num = min_t(unsigned int, size,
|
|
scredits * SMB2_MAX_BUFFER_SIZE);
|
|
|
|
@@ -3101,11 +3123,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
|
|
server->ops->is_status_pending(buf, server, 0))
|
|
return -1;
|
|
|
|
- rdata->result = server->ops->map_error(buf, false);
|
|
+ /* set up first two iov to get credits */
|
|
+ rdata->iov[0].iov_base = buf;
|
|
+ rdata->iov[0].iov_len = 4;
|
|
+ rdata->iov[1].iov_base = buf + 4;
|
|
+ rdata->iov[1].iov_len =
|
|
+ min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
|
|
+ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
|
|
+ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
|
|
+ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
|
|
+ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
|
|
+
|
|
+ rdata->result = server->ops->map_error(buf, true);
|
|
if (rdata->result != 0) {
|
|
cifs_dbg(FYI, "%s: server returned error %d\n",
|
|
__func__, rdata->result);
|
|
- dequeue_mid(mid, rdata->result);
|
|
+ /* normal error on read response */
|
|
+ dequeue_mid(mid, false);
|
|
return 0;
|
|
}
|
|
|
|
@@ -3178,14 +3212,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
|
|
return 0;
|
|
}
|
|
|
|
- /* set up first iov for signature check */
|
|
- rdata->iov[0].iov_base = buf;
|
|
- rdata->iov[0].iov_len = 4;
|
|
- rdata->iov[1].iov_base = buf + 4;
|
|
- rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
|
|
- cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
|
|
- rdata->iov[0].iov_base, server->vals->read_rsp_size);
|
|
-
|
|
length = rdata->copy_into_pages(server, rdata, &iter);
|
|
|
|
kfree(bvec);
|
|
@@ -3384,8 +3410,10 @@ smb3_receive_transform(struct TCP_Server_Info *server,
|
|
}
|
|
|
|
/* TODO: add support for compounds containing READ. */
|
|
- if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
|
|
+ if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
|
|
+ *num_mids = 1;
|
|
return receive_encrypted_read(server, &mids[0]);
|
|
+ }
|
|
|
|
return receive_encrypted_standard(server, mids, bufs, num_mids);
|
|
}
|
|
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
|
|
index 27f86537a5d11..28712080add97 100644
|
|
--- a/fs/cifs/smb2pdu.c
|
|
+++ b/fs/cifs/smb2pdu.c
|
|
@@ -2826,9 +2826,10 @@ smb2_echo_callback(struct mid_q_entry *mid)
|
|
{
|
|
struct TCP_Server_Info *server = mid->callback_data;
|
|
struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
|
|
- unsigned int credits_received = 1;
|
|
+ unsigned int credits_received = 0;
|
|
|
|
- if (mid->mid_state == MID_RESPONSE_RECEIVED)
|
|
+ if (mid->mid_state == MID_RESPONSE_RECEIVED
|
|
+ || mid->mid_state == MID_RESPONSE_MALFORMED)
|
|
credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
|
|
|
|
DeleteMidQEntry(mid);
|
|
@@ -3085,7 +3086,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
|
|
struct TCP_Server_Info *server = tcon->ses->server;
|
|
struct smb2_sync_hdr *shdr =
|
|
(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
|
|
- unsigned int credits_received = 1;
|
|
+ unsigned int credits_received = 0;
|
|
struct smb_rqst rqst = { .rq_iov = rdata->iov,
|
|
.rq_nvec = 2,
|
|
.rq_pages = rdata->pages,
|
|
@@ -3124,6 +3125,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
|
|
task_io_account_read(rdata->got_bytes);
|
|
cifs_stats_bytes_read(tcon, rdata->got_bytes);
|
|
break;
|
|
+ case MID_RESPONSE_MALFORMED:
|
|
+ credits_received = le16_to_cpu(shdr->CreditRequest);
|
|
+ /* fall through */
|
|
default:
|
|
if (rdata->result != -ENODATA)
|
|
rdata->result = -EIO;
|
|
@@ -3139,8 +3143,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
|
|
rdata->mr = NULL;
|
|
}
|
|
#endif
|
|
- if (rdata->result)
|
|
+ if (rdata->result && rdata->result != -ENODATA) {
|
|
cifs_stats_fail_inc(tcon, SMB2_READ_HE);
|
|
+ trace_smb3_read_err(0 /* xid */,
|
|
+ rdata->cfile->fid.persistent_fid,
|
|
+ tcon->tid, tcon->ses->Suid, rdata->offset,
|
|
+ rdata->bytes, rdata->result);
|
|
+ } else
|
|
+ trace_smb3_read_done(0 /* xid */,
|
|
+ rdata->cfile->fid.persistent_fid,
|
|
+ tcon->tid, tcon->ses->Suid,
|
|
+ rdata->offset, rdata->got_bytes);
|
|
|
|
queue_work(cifsiod_wq, &rdata->work);
|
|
DeleteMidQEntry(mid);
|
|
@@ -3197,12 +3210,14 @@ smb2_async_readv(struct cifs_readdata *rdata)
|
|
if (rdata->credits) {
|
|
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
|
|
SMB2_MAX_BUFFER_SIZE));
|
|
- shdr->CreditRequest = shdr->CreditCharge;
|
|
+ shdr->CreditRequest =
|
|
+ cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
|
|
spin_lock(&server->req_lock);
|
|
server->credits += rdata->credits -
|
|
le16_to_cpu(shdr->CreditCharge);
|
|
spin_unlock(&server->req_lock);
|
|
wake_up(&server->request_q);
|
|
+ rdata->credits = le16_to_cpu(shdr->CreditCharge);
|
|
flags |= CIFS_HAS_CREDITS;
|
|
}
|
|
|
|
@@ -3213,13 +3228,11 @@ smb2_async_readv(struct cifs_readdata *rdata)
|
|
if (rc) {
|
|
kref_put(&rdata->refcount, cifs_readdata_release);
|
|
cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
|
|
- trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid,
|
|
- io_parms.tcon->tid, io_parms.tcon->ses->Suid,
|
|
- io_parms.offset, io_parms.length);
|
|
- } else
|
|
- trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid,
|
|
- io_parms.tcon->tid, io_parms.tcon->ses->Suid,
|
|
- io_parms.offset, io_parms.length);
|
|
+ trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
|
|
+ io_parms.tcon->tid,
|
|
+ io_parms.tcon->ses->Suid,
|
|
+ io_parms.offset, io_parms.length, rc);
|
|
+ }
|
|
|
|
cifs_small_buf_release(buf);
|
|
return rc;
|
|
@@ -3263,10 +3276,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
|
|
if (rc != -ENODATA) {
|
|
cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
|
|
cifs_dbg(VFS, "Send error in read = %d\n", rc);
|
|
+ trace_smb3_read_err(xid, req->PersistentFileId,
|
|
+ io_parms->tcon->tid, ses->Suid,
|
|
+ io_parms->offset, io_parms->length,
|
|
+ rc);
|
|
}
|
|
- trace_smb3_read_err(rc, xid, req->PersistentFileId,
|
|
- io_parms->tcon->tid, ses->Suid,
|
|
- io_parms->offset, io_parms->length);
|
|
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
|
|
return rc == -ENODATA ? 0 : rc;
|
|
} else
|
|
@@ -3307,7 +3321,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
|
|
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
|
|
unsigned int written;
|
|
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
|
|
- unsigned int credits_received = 1;
|
|
+ unsigned int credits_received = 0;
|
|
|
|
switch (mid->mid_state) {
|
|
case MID_RESPONSE_RECEIVED:
|
|
@@ -3335,6 +3349,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
|
|
case MID_RETRY_NEEDED:
|
|
wdata->result = -EAGAIN;
|
|
break;
|
|
+ case MID_RESPONSE_MALFORMED:
|
|
+ credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
|
|
+ /* fall through */
|
|
default:
|
|
wdata->result = -EIO;
|
|
break;
|
|
@@ -3352,8 +3369,17 @@ smb2_writev_callback(struct mid_q_entry *mid)
|
|
wdata->mr = NULL;
|
|
}
|
|
#endif
|
|
- if (wdata->result)
|
|
+ if (wdata->result) {
|
|
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
|
|
+ trace_smb3_write_err(0 /* no xid */,
|
|
+ wdata->cfile->fid.persistent_fid,
|
|
+ tcon->tid, tcon->ses->Suid, wdata->offset,
|
|
+ wdata->bytes, wdata->result);
|
|
+ } else
|
|
+ trace_smb3_write_done(0 /* no xid */,
|
|
+ wdata->cfile->fid.persistent_fid,
|
|
+ tcon->tid, tcon->ses->Suid,
|
|
+ wdata->offset, wdata->bytes);
|
|
|
|
queue_work(cifsiod_wq, &wdata->work);
|
|
DeleteMidQEntry(mid);
|
|
@@ -3474,12 +3500,14 @@ smb2_async_writev(struct cifs_writedata *wdata,
|
|
if (wdata->credits) {
|
|
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
|
|
SMB2_MAX_BUFFER_SIZE));
|
|
- shdr->CreditRequest = shdr->CreditCharge;
|
|
+ shdr->CreditRequest =
|
|
+ cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
|
|
spin_lock(&server->req_lock);
|
|
server->credits += wdata->credits -
|
|
le16_to_cpu(shdr->CreditCharge);
|
|
spin_unlock(&server->req_lock);
|
|
wake_up(&server->request_q);
|
|
+ wdata->credits = le16_to_cpu(shdr->CreditCharge);
|
|
flags |= CIFS_HAS_CREDITS;
|
|
}
|
|
|
|
@@ -3493,10 +3521,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
|
|
wdata->bytes, rc);
|
|
kref_put(&wdata->refcount, release);
|
|
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
|
|
- } else
|
|
- trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
|
|
- tcon->tid, tcon->ses->Suid, wdata->offset,
|
|
- wdata->bytes);
|
|
+ }
|
|
|
|
async_writev_out:
|
|
cifs_small_buf_release(req);
|
|
@@ -3722,8 +3747,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
|
|
rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
|
|
srch_inf->endOfSearch = true;
|
|
rc = 0;
|
|
- }
|
|
- cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
|
|
+ } else
|
|
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
|
|
goto qdir_exit;
|
|
}
|
|
|
|
@@ -4321,8 +4346,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
|
|
rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
|
|
cifs_small_buf_release(req);
|
|
|
|
- please_key_low = (__u64 *)req->LeaseKey;
|
|
- please_key_high = (__u64 *)(req->LeaseKey+8);
|
|
+ please_key_low = (__u64 *)lease_key;
|
|
+ please_key_high = (__u64 *)(lease_key+8);
|
|
if (rc) {
|
|
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
|
|
trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
|
|
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
|
|
index 83ff0c25710d0..6f937e8269100 100644
|
|
--- a/fs/cifs/transport.c
|
|
+++ b/fs/cifs/transport.c
|
|
@@ -385,7 +385,7 @@ smbd_done:
|
|
if (rc < 0 && rc != -EINTR)
|
|
cifs_dbg(VFS, "Error %d sending data on socket to server\n",
|
|
rc);
|
|
- else
|
|
+ else if (rc > 0)
|
|
rc = 0;
|
|
|
|
return rc;
|
|
@@ -781,8 +781,25 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
|
|
}
|
|
|
|
static void
|
|
-cifs_noop_callback(struct mid_q_entry *mid)
|
|
+cifs_compound_callback(struct mid_q_entry *mid)
|
|
+{
|
|
+ struct TCP_Server_Info *server = mid->server;
|
|
+
|
|
+ add_credits(server, server->ops->get_credits(mid), mid->optype);
|
|
+}
|
|
+
|
|
+static void
|
|
+cifs_compound_last_callback(struct mid_q_entry *mid)
|
|
{
|
|
+ cifs_compound_callback(mid);
|
|
+ cifs_wake_up_task(mid);
|
|
+}
|
|
+
|
|
+static void
|
|
+cifs_cancelled_callback(struct mid_q_entry *mid)
|
|
+{
|
|
+ cifs_compound_callback(mid);
|
|
+ DeleteMidQEntry(mid);
|
|
}
|
|
|
|
int
|
|
@@ -793,7 +810,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
|
int i, j, rc = 0;
|
|
int timeout, optype;
|
|
struct mid_q_entry *midQ[MAX_COMPOUND];
|
|
- unsigned int credits = 0;
|
|
+ bool cancelled_mid[MAX_COMPOUND] = {false};
|
|
+ unsigned int credits[MAX_COMPOUND] = {0};
|
|
char *buf;
|
|
|
|
timeout = flags & CIFS_TIMEOUT_MASK;
|
|
@@ -811,13 +829,31 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
|
return -ENOENT;
|
|
|
|
/*
|
|
- * Ensure that we do not send more than 50 overlapping requests
|
|
- * to the same server. We may make this configurable later or
|
|
- * use ses->maxReq.
|
|
+ * Ensure we obtain 1 credit per request in the compound chain.
|
|
+ * It can be optimized further by waiting for all the credits
|
|
+ * at once but this can wait long enough if we don't have enough
|
|
+ * credits due to some heavy operations in progress or the server
|
|
+ * not granting us much, so a fallback to the current approach is
|
|
+ * needed anyway.
|
|
*/
|
|
- rc = wait_for_free_request(ses->server, timeout, optype);
|
|
- if (rc)
|
|
- return rc;
|
|
+ for (i = 0; i < num_rqst; i++) {
|
|
+ rc = wait_for_free_request(ses->server, timeout, optype);
|
|
+ if (rc) {
|
|
+ /*
|
|
+ * We haven't sent an SMB packet to the server yet but
|
|
+ * we already obtained credits for i requests in the
|
|
+ * compound chain - need to return those credits back
|
|
+ * for future use. Note that we need to call add_credits
|
|
+ * multiple times to match the way we obtained credits
|
|
+ * in the first place and to account for in flight
|
|
+ * requests correctly.
|
|
+ */
|
|
+ for (j = 0; j < i; j++)
|
|
+ add_credits(ses->server, 1, optype);
|
|
+ return rc;
|
|
+ }
|
|
+ credits[i] = 1;
|
|
+ }
|
|
|
|
/*
|
|
* Make sure that we sign in the same order that we send on this socket
|
|
@@ -833,18 +869,24 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
|
for (j = 0; j < i; j++)
|
|
cifs_delete_mid(midQ[j]);
|
|
mutex_unlock(&ses->server->srv_mutex);
|
|
+
|
|
/* Update # of requests on wire to server */
|
|
- add_credits(ses->server, 1, optype);
|
|
+ for (j = 0; j < num_rqst; j++)
|
|
+ add_credits(ses->server, credits[j], optype);
|
|
return PTR_ERR(midQ[i]);
|
|
}
|
|
|
|
midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
|
|
+ midQ[i]->optype = optype;
|
|
/*
|
|
- * We don't invoke the callback compounds unless it is the last
|
|
- * request.
|
|
+ * Invoke callback for every part of the compound chain
|
|
+ * to calculate credits properly. Wake up this thread only when
|
|
+ * the last element is received.
|
|
*/
|
|
if (i < num_rqst - 1)
|
|
- midQ[i]->callback = cifs_noop_callback;
|
|
+ midQ[i]->callback = cifs_compound_callback;
|
|
+ else
|
|
+ midQ[i]->callback = cifs_compound_last_callback;
|
|
}
|
|
cifs_in_send_inc(ses->server);
|
|
rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
|
|
@@ -858,8 +900,20 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
|
|
|
mutex_unlock(&ses->server->srv_mutex);
|
|
|
|
- if (rc < 0)
|
|
+ if (rc < 0) {
|
|
+ /* Sending failed for some reason - return credits back */
|
|
+ for (i = 0; i < num_rqst; i++)
|
|
+ add_credits(ses->server, credits[i], optype);
|
|
goto out;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * At this point the request is passed to the network stack - we assume
|
|
+ * that any credits taken from the server structure on the client have
|
|
+ * been spent and we can't return them back. Once we receive responses
|
|
+ * we will collect credits granted by the server in the mid callbacks
|
|
+ * and add those credits to the server structure.
|
|
+ */
|
|
|
|
/*
|
|
* Compounding is never used during session establish.
|
|
@@ -873,36 +927,34 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
|
|
|
|
for (i = 0; i < num_rqst; i++) {
|
|
rc = wait_for_response(ses->server, midQ[i]);
|
|
- if (rc != 0) {
|
|
+ if (rc != 0)
|
|
+ break;
|
|
+ }
|
|
+ if (rc != 0) {
|
|
+ for (; i < num_rqst; i++) {
|
|
cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
|
|
midQ[i]->mid, le16_to_cpu(midQ[i]->command));
|
|
send_cancel(ses->server, &rqst[i], midQ[i]);
|
|
spin_lock(&GlobalMid_Lock);
|
|
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
|
|
midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
|
|
- midQ[i]->callback = DeleteMidQEntry;
|
|
- spin_unlock(&GlobalMid_Lock);
|
|
- add_credits(ses->server, 1, optype);
|
|
- return rc;
|
|
+ midQ[i]->callback = cifs_cancelled_callback;
|
|
+ cancelled_mid[i] = true;
|
|
+ credits[i] = 0;
|
|
}
|
|
spin_unlock(&GlobalMid_Lock);
|
|
}
|
|
}
|
|
|
|
- for (i = 0; i < num_rqst; i++)
|
|
- if (midQ[i]->resp_buf)
|
|
- credits += ses->server->ops->get_credits(midQ[i]);
|
|
- if (!credits)
|
|
- credits = 1;
|
|
-
|
|
for (i = 0; i < num_rqst; i++) {
|
|
if (rc < 0)
|
|
goto out;
|
|
|
|
rc = cifs_sync_mid_result(midQ[i], ses->server);
|
|
if (rc != 0) {
|
|
- add_credits(ses->server, credits, optype);
|
|
- return rc;
|
|
+ /* mark this mid as cancelled to not free it below */
|
|
+ cancelled_mid[i] = true;
|
|
+ goto out;
|
|
}
|
|
|
|
if (!midQ[i]->resp_buf ||
|
|
@@ -949,9 +1001,10 @@ out:
|
|
* This is prevented above by using a noop callback that will not
|
|
* wake this thread except for the very last PDU.
|
|
*/
|
|
- for (i = 0; i < num_rqst; i++)
|
|
- cifs_delete_mid(midQ[i]);
|
|
- add_credits(ses->server, credits, optype);
|
|
+ for (i = 0; i < num_rqst; i++) {
|
|
+ if (!cancelled_mid[i])
|
|
+ cifs_delete_mid(midQ[i]);
|
|
+ }
|
|
|
|
return rc;
|
|
}
|
|
diff --git a/fs/dcache.c b/fs/dcache.c
|
|
index 2593153471cf7..44e5652b26648 100644
|
|
--- a/fs/dcache.c
|
|
+++ b/fs/dcache.c
|
|
@@ -1188,15 +1188,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
|
|
*/
|
|
void shrink_dcache_sb(struct super_block *sb)
|
|
{
|
|
- long freed;
|
|
-
|
|
do {
|
|
LIST_HEAD(dispose);
|
|
|
|
- freed = list_lru_walk(&sb->s_dentry_lru,
|
|
+ list_lru_walk(&sb->s_dentry_lru,
|
|
dentry_lru_isolate_shrink, &dispose, 1024);
|
|
-
|
|
- this_cpu_sub(nr_dentry_unused, freed);
|
|
shrink_dentry_list(&dispose);
|
|
} while (list_lru_count(&sb->s_dentry_lru) > 0);
|
|
}
|
|
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
|
|
index 13b01351dd1cb..41ef452c1fcfb 100644
|
|
--- a/fs/debugfs/inode.c
|
|
+++ b/fs/debugfs/inode.c
|
|
@@ -787,6 +787,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
|
|
struct dentry *dentry = NULL, *trap;
|
|
struct name_snapshot old_name;
|
|
|
|
+ if (IS_ERR(old_dir))
|
|
+ return old_dir;
|
|
+ if (IS_ERR(new_dir))
|
|
+ return new_dir;
|
|
+ if (IS_ERR_OR_NULL(old_dentry))
|
|
+ return old_dentry;
|
|
+
|
|
trap = lock_rename(new_dir, old_dir);
|
|
/* Source or destination directories don't exist? */
|
|
if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
|
|
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
|
|
index 562fa8c3edff3..47ee66d701098 100644
|
|
--- a/fs/dlm/ast.c
|
|
+++ b/fs/dlm/ast.c
|
|
@@ -292,6 +292,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
|
|
flush_workqueue(ls->ls_callback_wq);
|
|
}
|
|
|
|
+#define MAX_CB_QUEUE 25
|
|
+
|
|
void dlm_callback_resume(struct dlm_ls *ls)
|
|
{
|
|
struct dlm_lkb *lkb, *safe;
|
|
@@ -302,15 +304,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
|
|
if (!ls->ls_callback_wq)
|
|
return;
|
|
|
|
+more:
|
|
mutex_lock(&ls->ls_cb_mutex);
|
|
list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
|
|
list_del_init(&lkb->lkb_cb_list);
|
|
queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
|
|
count++;
|
|
+ if (count == MAX_CB_QUEUE)
|
|
+ break;
|
|
}
|
|
mutex_unlock(&ls->ls_cb_mutex);
|
|
|
|
if (count)
|
|
log_rinfo(ls, "dlm_callback_resume %d", count);
|
|
+ if (count == MAX_CB_QUEUE) {
|
|
+ count = 0;
|
|
+ cond_resched();
|
|
+ goto more;
|
|
+ }
|
|
}
|
|
|
|
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
|
|
index cc91963683ded..a928ba008d7d7 100644
|
|
--- a/fs/dlm/lock.c
|
|
+++ b/fs/dlm/lock.c
|
|
@@ -1209,6 +1209,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
|
|
|
|
if (rv < 0) {
|
|
log_error(ls, "create_lkb idr error %d", rv);
|
|
+ dlm_free_lkb(lkb);
|
|
return rv;
|
|
}
|
|
|
|
@@ -4179,6 +4180,7 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
|
|
(unsigned long long)lkb->lkb_recover_seq,
|
|
ms->m_header.h_nodeid, ms->m_lkid);
|
|
error = -ENOENT;
|
|
+ dlm_put_lkb(lkb);
|
|
goto fail;
|
|
}
|
|
|
|
@@ -4232,6 +4234,7 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
|
|
lkb->lkb_id, lkb->lkb_remid,
|
|
ms->m_header.h_nodeid, ms->m_lkid);
|
|
error = -ENOENT;
|
|
+ dlm_put_lkb(lkb);
|
|
goto fail;
|
|
}
|
|
|
|
@@ -5792,20 +5795,20 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
|
|
goto out;
|
|
}
|
|
}
|
|
-
|
|
- /* After ua is attached to lkb it will be freed by dlm_free_lkb().
|
|
- When DLM_IFL_USER is set, the dlm knows that this is a userspace
|
|
- lock and that lkb_astparam is the dlm_user_args structure. */
|
|
-
|
|
error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
|
|
fake_astfn, ua, fake_bastfn, &args);
|
|
- lkb->lkb_flags |= DLM_IFL_USER;
|
|
-
|
|
if (error) {
|
|
+ kfree(ua->lksb.sb_lvbptr);
|
|
+ ua->lksb.sb_lvbptr = NULL;
|
|
+ kfree(ua);
|
|
__put_lkb(ls, lkb);
|
|
goto out;
|
|
}
|
|
|
|
+ /* After ua is attached to lkb it will be freed by dlm_free_lkb().
|
|
+ When DLM_IFL_USER is set, the dlm knows that this is a userspace
|
|
+ lock and that lkb_astparam is the dlm_user_args structure. */
|
|
+ lkb->lkb_flags |= DLM_IFL_USER;
|
|
error = request_lock(ls, lkb, name, namelen, &args);
|
|
|
|
switch (error) {
|
|
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
|
|
index 5ba94be006eec..6a1529e478f3d 100644
|
|
--- a/fs/dlm/lockspace.c
|
|
+++ b/fs/dlm/lockspace.c
|
|
@@ -680,11 +680,11 @@ static int new_lockspace(const char *name, const char *cluster,
|
|
kfree(ls->ls_recover_buf);
|
|
out_lkbidr:
|
|
idr_destroy(&ls->ls_lkbidr);
|
|
+ out_rsbtbl:
|
|
for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
|
|
if (ls->ls_remove_names[i])
|
|
kfree(ls->ls_remove_names[i]);
|
|
}
|
|
- out_rsbtbl:
|
|
vfree(ls->ls_rsbtbl);
|
|
out_lsfree:
|
|
if (do_unreg)
|
|
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
|
|
index 42bbe6824b4bf..58f48ea0db234 100644
|
|
--- a/fs/eventpoll.c
|
|
+++ b/fs/eventpoll.c
|
|
@@ -1154,7 +1154,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
|
|
* semantics). All the events that happen during that period of time are
|
|
* chained in ep->ovflist and requeued later on.
|
|
*/
|
|
- if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
|
|
+ if (ep->ovflist != EP_UNACTIVE_PTR) {
|
|
if (epi->next == EP_UNACTIVE_PTR) {
|
|
epi->next = ep->ovflist;
|
|
ep->ovflist = epi;
|
|
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
|
|
index 3f89d0ab08fc4..185a05d3257e8 100644
|
|
--- a/fs/ext4/ext4.h
|
|
+++ b/fs/ext4/ext4.h
|
|
@@ -2454,8 +2454,19 @@ int do_journal_get_write_access(handle_t *handle,
|
|
#define FALL_BACK_TO_NONDELALLOC 1
|
|
#define CONVERT_INLINE_DATA 2
|
|
|
|
-extern struct inode *ext4_iget(struct super_block *, unsigned long);
|
|
-extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
|
|
+typedef enum {
|
|
+ EXT4_IGET_NORMAL = 0,
|
|
+ EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */
|
|
+ EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */
|
|
+} ext4_iget_flags;
|
|
+
|
|
+extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
|
|
+ ext4_iget_flags flags, const char *function,
|
|
+ unsigned int line);
|
|
+
|
|
+#define ext4_iget(sb, ino, flags) \
|
|
+ __ext4_iget((sb), (ino), (flags), __func__, __LINE__)
|
|
+
|
|
extern int ext4_write_inode(struct inode *, struct writeback_control *);
|
|
extern int ext4_setattr(struct dentry *, struct iattr *);
|
|
extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
|
|
@@ -2538,6 +2549,8 @@ extern int ext4_group_extend(struct super_block *sb,
|
|
extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
|
|
|
|
/* super.c */
|
|
+extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
|
|
+ sector_t block, int op_flags);
|
|
extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
|
|
extern int ext4_calculate_overhead(struct super_block *sb);
|
|
extern void ext4_superblock_csum_set(struct super_block *sb);
|
|
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
|
|
index 26a7fe5c4fd3c..5508baa11bb66 100644
|
|
--- a/fs/ext4/fsync.c
|
|
+++ b/fs/ext4/fsync.c
|
|
@@ -159,6 +159,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
|
|
ret = err;
|
|
}
|
|
out:
|
|
+ err = file_check_and_advance_wb_err(file);
|
|
+ if (ret == 0)
|
|
+ ret = err;
|
|
trace_ext4_sync_file_exit(inode, ret);
|
|
return ret;
|
|
}
|
|
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
|
|
index 014f6a698cb71..7ff14a1adba3c 100644
|
|
--- a/fs/ext4/ialloc.c
|
|
+++ b/fs/ext4/ialloc.c
|
|
@@ -1225,7 +1225,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
|
|
if (!ext4_test_bit(bit, bitmap_bh->b_data))
|
|
goto bad_orphan;
|
|
|
|
- inode = ext4_iget(sb, ino);
|
|
+ inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
|
|
if (IS_ERR(inode)) {
|
|
err = PTR_ERR(inode);
|
|
ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
|
|
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
|
|
index 9c4bac18cc6c0..56f6e1782d5f7 100644
|
|
--- a/fs/ext4/inline.c
|
|
+++ b/fs/ext4/inline.c
|
|
@@ -705,8 +705,11 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
|
|
|
|
if (!PageUptodate(page)) {
|
|
ret = ext4_read_inline_page(inode, page);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ unlock_page(page);
|
|
+ put_page(page);
|
|
goto out_up_read;
|
|
+ }
|
|
}
|
|
|
|
ret = 1;
|
|
@@ -1887,12 +1890,12 @@ int ext4_inline_data_fiemap(struct inode *inode,
|
|
physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
|
|
physical += offsetof(struct ext4_inode, i_block);
|
|
|
|
- if (physical)
|
|
- error = fiemap_fill_next_extent(fieinfo, start, physical,
|
|
- inline_len, flags);
|
|
brelse(iloc.bh);
|
|
out:
|
|
up_read(&EXT4_I(inode)->xattr_sem);
|
|
+ if (physical)
|
|
+ error = fiemap_fill_next_extent(fieinfo, start, physical,
|
|
+ inline_len, flags);
|
|
return (error < 0 ? error : 0);
|
|
}
|
|
|
|
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
|
|
index 22a9d81597206..34d7e0703cc6f 100644
|
|
--- a/fs/ext4/inode.c
|
|
+++ b/fs/ext4/inode.c
|
|
@@ -2778,7 +2778,8 @@ static int ext4_writepages(struct address_space *mapping,
|
|
* We may need to convert up to one extent per block in
|
|
* the page and we may dirty the inode.
|
|
*/
|
|
- rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
|
|
+ rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
|
|
+ PAGE_SIZE >> inode->i_blkbits);
|
|
}
|
|
|
|
/*
|
|
@@ -4817,7 +4818,9 @@ static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
|
|
return inode_peek_iversion(inode);
|
|
}
|
|
|
|
-struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
|
+struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
|
|
+ ext4_iget_flags flags, const char *function,
|
|
+ unsigned int line)
|
|
{
|
|
struct ext4_iloc iloc;
|
|
struct ext4_inode *raw_inode;
|
|
@@ -4831,6 +4834,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
|
gid_t i_gid;
|
|
projid_t i_projid;
|
|
|
|
+ if ((!(flags & EXT4_IGET_SPECIAL) &&
|
|
+ (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
|
|
+ (ino < EXT4_ROOT_INO) ||
|
|
+ (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
|
|
+ if (flags & EXT4_IGET_HANDLE)
|
|
+ return ERR_PTR(-ESTALE);
|
|
+ __ext4_error(sb, function, line,
|
|
+ "inode #%lu: comm %s: iget: illegal inode #",
|
|
+ ino, current->comm);
|
|
+ return ERR_PTR(-EFSCORRUPTED);
|
|
+ }
|
|
+
|
|
inode = iget_locked(sb, ino);
|
|
if (!inode)
|
|
return ERR_PTR(-ENOMEM);
|
|
@@ -4846,18 +4861,26 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
|
raw_inode = ext4_raw_inode(&iloc);
|
|
|
|
if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
|
|
- EXT4_ERROR_INODE(inode, "root inode unallocated");
|
|
+ ext4_error_inode(inode, function, line, 0,
|
|
+ "iget: root inode unallocated");
|
|
ret = -EFSCORRUPTED;
|
|
goto bad_inode;
|
|
}
|
|
|
|
+ if ((flags & EXT4_IGET_HANDLE) &&
|
|
+ (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
|
|
+ ret = -ESTALE;
|
|
+ goto bad_inode;
|
|
+ }
|
|
+
|
|
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
|
|
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
|
|
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
|
|
EXT4_INODE_SIZE(inode->i_sb) ||
|
|
(ei->i_extra_isize & 3)) {
|
|
- EXT4_ERROR_INODE(inode,
|
|
- "bad extra_isize %u (inode size %u)",
|
|
+ ext4_error_inode(inode, function, line, 0,
|
|
+ "iget: bad extra_isize %u "
|
|
+ "(inode size %u)",
|
|
ei->i_extra_isize,
|
|
EXT4_INODE_SIZE(inode->i_sb));
|
|
ret = -EFSCORRUPTED;
|
|
@@ -4879,7 +4902,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
|
}
|
|
|
|
if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
|
|
- EXT4_ERROR_INODE(inode, "checksum invalid");
|
|
+ ext4_error_inode(inode, function, line, 0,
|
|
+ "iget: checksum invalid");
|
|
ret = -EFSBADCRC;
|
|
goto bad_inode;
|
|
}
|
|
@@ -4936,7 +4960,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
|
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
|
|
inode->i_size = ext4_isize(sb, raw_inode);
|
|
if ((size = i_size_read(inode)) < 0) {
|
|
- EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
|
|
+ ext4_error_inode(inode, function, line, 0,
|
|
+ "iget: bad i_size value: %lld", size);
|
|
ret = -EFSCORRUPTED;
|
|
goto bad_inode;
|
|
}
|
|
@@ -5012,7 +5037,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
|
ret = 0;
|
|
if (ei->i_file_acl &&
|
|
!ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
|
|
- EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
|
|
+ ext4_error_inode(inode, function, line, 0,
|
|
+ "iget: bad extended attribute block %llu",
|
|
ei->i_file_acl);
|
|
ret = -EFSCORRUPTED;
|
|
goto bad_inode;
|
|
@@ -5040,8 +5066,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
|
} else if (S_ISLNK(inode->i_mode)) {
|
|
/* VFS does not allow setting these so must be corruption */
|
|
if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
|
|
- EXT4_ERROR_INODE(inode,
|
|
- "immutable or append flags not allowed on symlinks");
|
|
+ ext4_error_inode(inode, function, line, 0,
|
|
+ "iget: immutable or append flags "
|
|
+ "not allowed on symlinks");
|
|
ret = -EFSCORRUPTED;
|
|
goto bad_inode;
|
|
}
|
|
@@ -5071,7 +5098,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
|
make_bad_inode(inode);
|
|
} else {
|
|
ret = -EFSCORRUPTED;
|
|
- EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
|
|
+ ext4_error_inode(inode, function, line, 0,
|
|
+ "iget: bogus i_mode (%o)", inode->i_mode);
|
|
goto bad_inode;
|
|
}
|
|
brelse(iloc.bh);
|
|
@@ -5085,13 +5113,6 @@ bad_inode:
|
|
return ERR_PTR(ret);
|
|
}
|
|
|
|
-struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
|
|
-{
|
|
- if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
|
|
- return ERR_PTR(-EFSCORRUPTED);
|
|
- return ext4_iget(sb, ino);
|
|
-}
|
|
-
|
|
static int ext4_inode_blocks_set(handle_t *handle,
|
|
struct ext4_inode *raw_inode,
|
|
struct ext4_inode_info *ei)
|
|
@@ -5380,9 +5401,13 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
|
|
{
|
|
int err;
|
|
|
|
- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
|
|
+ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
|
|
+ sb_rdonly(inode->i_sb))
|
|
return 0;
|
|
|
|
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
|
|
+ return -EIO;
|
|
+
|
|
if (EXT4_SB(inode->i_sb)->s_journal) {
|
|
if (ext4_journal_current_handle()) {
|
|
jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
|
|
@@ -5398,7 +5423,8 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
|
|
if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
|
|
return 0;
|
|
|
|
- err = ext4_force_commit(inode->i_sb);
|
|
+ err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
|
|
+ EXT4_I(inode)->i_sync_tid);
|
|
} else {
|
|
struct ext4_iloc iloc;
|
|
|
|
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
|
|
index 0edee31913d1f..d37dafa1d133b 100644
|
|
--- a/fs/ext4/ioctl.c
|
|
+++ b/fs/ext4/ioctl.c
|
|
@@ -125,7 +125,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
|
|
!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
|
|
return -EPERM;
|
|
|
|
- inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
|
|
+ inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
|
|
if (IS_ERR(inode_bl))
|
|
return PTR_ERR(inode_bl);
|
|
ei_bl = EXT4_I(inode_bl);
|
|
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
|
|
index 61a9d19278172..a98bfca9c463c 100644
|
|
--- a/fs/ext4/migrate.c
|
|
+++ b/fs/ext4/migrate.c
|
|
@@ -116,9 +116,9 @@ static int update_ind_extent_range(handle_t *handle, struct inode *inode,
|
|
int i, retval = 0;
|
|
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
|
|
|
|
- bh = sb_bread(inode->i_sb, pblock);
|
|
- if (!bh)
|
|
- return -EIO;
|
|
+ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
|
|
+ if (IS_ERR(bh))
|
|
+ return PTR_ERR(bh);
|
|
|
|
i_data = (__le32 *)bh->b_data;
|
|
for (i = 0; i < max_entries; i++) {
|
|
@@ -145,9 +145,9 @@ static int update_dind_extent_range(handle_t *handle, struct inode *inode,
|
|
int i, retval = 0;
|
|
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
|
|
|
|
- bh = sb_bread(inode->i_sb, pblock);
|
|
- if (!bh)
|
|
- return -EIO;
|
|
+ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
|
|
+ if (IS_ERR(bh))
|
|
+ return PTR_ERR(bh);
|
|
|
|
i_data = (__le32 *)bh->b_data;
|
|
for (i = 0; i < max_entries; i++) {
|
|
@@ -175,9 +175,9 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode,
|
|
int i, retval = 0;
|
|
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
|
|
|
|
- bh = sb_bread(inode->i_sb, pblock);
|
|
- if (!bh)
|
|
- return -EIO;
|
|
+ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
|
|
+ if (IS_ERR(bh))
|
|
+ return PTR_ERR(bh);
|
|
|
|
i_data = (__le32 *)bh->b_data;
|
|
for (i = 0; i < max_entries; i++) {
|
|
@@ -224,9 +224,9 @@ static int free_dind_blocks(handle_t *handle,
|
|
struct buffer_head *bh;
|
|
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
|
|
|
|
- bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
|
|
- if (!bh)
|
|
- return -EIO;
|
|
+ bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
|
|
+ if (IS_ERR(bh))
|
|
+ return PTR_ERR(bh);
|
|
|
|
tmp_idata = (__le32 *)bh->b_data;
|
|
for (i = 0; i < max_entries; i++) {
|
|
@@ -254,9 +254,9 @@ static int free_tind_blocks(handle_t *handle,
|
|
struct buffer_head *bh;
|
|
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
|
|
|
|
- bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
|
|
- if (!bh)
|
|
- return -EIO;
|
|
+ bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
|
|
+ if (IS_ERR(bh))
|
|
+ return PTR_ERR(bh);
|
|
|
|
tmp_idata = (__le32 *)bh->b_data;
|
|
for (i = 0; i < max_entries; i++) {
|
|
@@ -382,9 +382,9 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
|
|
struct ext4_extent_header *eh;
|
|
|
|
block = ext4_idx_pblock(ix);
|
|
- bh = sb_bread(inode->i_sb, block);
|
|
- if (!bh)
|
|
- return -EIO;
|
|
+ bh = ext4_sb_bread(inode->i_sb, block, 0);
|
|
+ if (IS_ERR(bh))
|
|
+ return PTR_ERR(bh);
|
|
|
|
eh = (struct ext4_extent_header *)bh->b_data;
|
|
if (eh->eh_depth != 0) {
|
|
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
|
|
index 437f71fe83ae5..2b928eb07fa24 100644
|
|
--- a/fs/ext4/namei.c
|
|
+++ b/fs/ext4/namei.c
|
|
@@ -1571,7 +1571,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
|
|
dentry);
|
|
return ERR_PTR(-EFSCORRUPTED);
|
|
}
|
|
- inode = ext4_iget_normal(dir->i_sb, ino);
|
|
+ inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL);
|
|
if (inode == ERR_PTR(-ESTALE)) {
|
|
EXT4_ERROR_INODE(dir,
|
|
"deleted inode referenced: %u",
|
|
@@ -1613,7 +1613,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
|
|
return ERR_PTR(-EFSCORRUPTED);
|
|
}
|
|
|
|
- return d_obtain_alias(ext4_iget_normal(child->d_sb, ino));
|
|
+ return d_obtain_alias(ext4_iget(child->d_sb, ino, EXT4_IGET_NORMAL));
|
|
}
|
|
|
|
/*
|
|
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
|
|
index a5efee34415fe..48421de803b7b 100644
|
|
--- a/fs/ext4/resize.c
|
|
+++ b/fs/ext4/resize.c
|
|
@@ -127,10 +127,12 @@ static int verify_group_input(struct super_block *sb,
|
|
else if (free_blocks_count < 0)
|
|
ext4_warning(sb, "Bad blocks count %u",
|
|
input->blocks_count);
|
|
- else if (!(bh = sb_bread(sb, end - 1)))
|
|
+ else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
|
|
+ err = PTR_ERR(bh);
|
|
+ bh = NULL;
|
|
ext4_warning(sb, "Cannot read last block (%llu)",
|
|
end - 1);
|
|
- else if (outside(input->block_bitmap, start, end))
|
|
+ } else if (outside(input->block_bitmap, start, end))
|
|
ext4_warning(sb, "Block bitmap not in group (block %llu)",
|
|
(unsigned long long)input->block_bitmap);
|
|
else if (outside(input->inode_bitmap, start, end))
|
|
@@ -781,11 +783,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
|
|
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
|
|
unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
|
|
ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
|
|
- struct buffer_head **o_group_desc, **n_group_desc;
|
|
- struct buffer_head *dind;
|
|
- struct buffer_head *gdb_bh;
|
|
+ struct buffer_head **o_group_desc, **n_group_desc = NULL;
|
|
+ struct buffer_head *dind = NULL;
|
|
+ struct buffer_head *gdb_bh = NULL;
|
|
int gdbackups;
|
|
- struct ext4_iloc iloc;
|
|
+ struct ext4_iloc iloc = { .bh = NULL };
|
|
__le32 *data;
|
|
int err;
|
|
|
|
@@ -794,21 +796,22 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
|
|
"EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
|
|
gdb_num);
|
|
|
|
- gdb_bh = sb_bread(sb, gdblock);
|
|
- if (!gdb_bh)
|
|
- return -EIO;
|
|
+ gdb_bh = ext4_sb_bread(sb, gdblock, 0);
|
|
+ if (IS_ERR(gdb_bh))
|
|
+ return PTR_ERR(gdb_bh);
|
|
|
|
gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
|
|
if (gdbackups < 0) {
|
|
err = gdbackups;
|
|
- goto exit_bh;
|
|
+ goto errout;
|
|
}
|
|
|
|
data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
|
|
- dind = sb_bread(sb, le32_to_cpu(*data));
|
|
- if (!dind) {
|
|
- err = -EIO;
|
|
- goto exit_bh;
|
|
+ dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
|
|
+ if (IS_ERR(dind)) {
|
|
+ err = PTR_ERR(dind);
|
|
+ dind = NULL;
|
|
+ goto errout;
|
|
}
|
|
|
|
data = (__le32 *)dind->b_data;
|
|
@@ -816,18 +819,18 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
|
|
ext4_warning(sb, "new group %u GDT block %llu not reserved",
|
|
group, gdblock);
|
|
err = -EINVAL;
|
|
- goto exit_dind;
|
|
+ goto errout;
|
|
}
|
|
|
|
BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
|
|
err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
|
|
if (unlikely(err))
|
|
- goto exit_dind;
|
|
+ goto errout;
|
|
|
|
BUFFER_TRACE(gdb_bh, "get_write_access");
|
|
err = ext4_journal_get_write_access(handle, gdb_bh);
|
|
if (unlikely(err))
|
|
- goto exit_dind;
|
|
+ goto errout;
|
|
|
|
BUFFER_TRACE(dind, "get_write_access");
|
|
err = ext4_journal_get_write_access(handle, dind);
|
|
@@ -837,7 +840,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
|
|
/* ext4_reserve_inode_write() gets a reference on the iloc */
|
|
err = ext4_reserve_inode_write(handle, inode, &iloc);
|
|
if (unlikely(err))
|
|
- goto exit_dind;
|
|
+ goto errout;
|
|
|
|
n_group_desc = ext4_kvmalloc((gdb_num + 1) *
|
|
sizeof(struct buffer_head *),
|
|
@@ -846,7 +849,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
|
|
err = -ENOMEM;
|
|
ext4_warning(sb, "not enough memory for %lu groups",
|
|
gdb_num + 1);
|
|
- goto exit_inode;
|
|
+ goto errout;
|
|
}
|
|
|
|
/*
|
|
@@ -862,7 +865,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
|
|
err = ext4_handle_dirty_metadata(handle, NULL, dind);
|
|
if (unlikely(err)) {
|
|
ext4_std_error(sb, err);
|
|
- goto exit_inode;
|
|
+ goto errout;
|
|
}
|
|
inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
|
|
(9 - EXT4_SB(sb)->s_cluster_bits);
|
|
@@ -871,8 +874,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
|
|
err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
|
|
if (unlikely(err)) {
|
|
ext4_std_error(sb, err);
|
|
- iloc.bh = NULL;
|
|
- goto exit_inode;
|
|
+ goto errout;
|
|
}
|
|
brelse(dind);
|
|
|
|
@@ -888,15 +890,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
|
|
err = ext4_handle_dirty_super(handle, sb);
|
|
if (err)
|
|
ext4_std_error(sb, err);
|
|
-
|
|
return err;
|
|
-
|
|
-exit_inode:
|
|
+errout:
|
|
kvfree(n_group_desc);
|
|
brelse(iloc.bh);
|
|
-exit_dind:
|
|
brelse(dind);
|
|
-exit_bh:
|
|
brelse(gdb_bh);
|
|
|
|
ext4_debug("leaving with error %d\n", err);
|
|
@@ -916,9 +914,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
|
|
|
|
gdblock = ext4_meta_bg_first_block_no(sb, group) +
|
|
ext4_bg_has_super(sb, group);
|
|
- gdb_bh = sb_bread(sb, gdblock);
|
|
- if (!gdb_bh)
|
|
- return -EIO;
|
|
+ gdb_bh = ext4_sb_bread(sb, gdblock, 0);
|
|
+ if (IS_ERR(gdb_bh))
|
|
+ return PTR_ERR(gdb_bh);
|
|
n_group_desc = ext4_kvmalloc((gdb_num + 1) *
|
|
sizeof(struct buffer_head *),
|
|
GFP_NOFS);
|
|
@@ -975,9 +973,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
|
|
return -ENOMEM;
|
|
|
|
data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
|
|
- dind = sb_bread(sb, le32_to_cpu(*data));
|
|
- if (!dind) {
|
|
- err = -EIO;
|
|
+ dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
|
|
+ if (IS_ERR(dind)) {
|
|
+ err = PTR_ERR(dind);
|
|
+ dind = NULL;
|
|
goto exit_free;
|
|
}
|
|
|
|
@@ -996,9 +995,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
|
|
err = -EINVAL;
|
|
goto exit_bh;
|
|
}
|
|
- primary[res] = sb_bread(sb, blk);
|
|
- if (!primary[res]) {
|
|
- err = -EIO;
|
|
+ primary[res] = ext4_sb_bread(sb, blk, 0);
|
|
+ if (IS_ERR(primary[res])) {
|
|
+ err = PTR_ERR(primary[res]);
|
|
+ primary[res] = NULL;
|
|
goto exit_bh;
|
|
}
|
|
gdbackups = verify_reserved_gdb(sb, group, primary[res]);
|
|
@@ -1631,13 +1631,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
|
|
}
|
|
|
|
if (reserved_gdb || gdb_off == 0) {
|
|
- if (ext4_has_feature_resize_inode(sb) ||
|
|
+ if (!ext4_has_feature_resize_inode(sb) ||
|
|
!le16_to_cpu(es->s_reserved_gdt_blocks)) {
|
|
ext4_warning(sb,
|
|
"No reserved GDT blocks, can't resize");
|
|
return -EPERM;
|
|
}
|
|
- inode = ext4_iget(sb, EXT4_RESIZE_INO);
|
|
+ inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
|
|
if (IS_ERR(inode)) {
|
|
ext4_warning(sb, "Error opening resize inode");
|
|
return PTR_ERR(inode);
|
|
@@ -1965,7 +1965,8 @@ retry:
|
|
}
|
|
|
|
if (!resize_inode)
|
|
- resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
|
|
+ resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
|
|
+ EXT4_IGET_SPECIAL);
|
|
if (IS_ERR(resize_inode)) {
|
|
ext4_warning(sb, "Error opening resize inode");
|
|
return PTR_ERR(resize_inode);
|
|
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
|
|
index 53ff6c2a26ed9..521320de2017f 100644
|
|
--- a/fs/ext4/super.c
|
|
+++ b/fs/ext4/super.c
|
|
@@ -140,6 +140,29 @@ MODULE_ALIAS_FS("ext3");
|
|
MODULE_ALIAS("ext3");
|
|
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
|
|
|
|
+/*
|
|
+ * This works like sb_bread() except it uses ERR_PTR for error
|
|
+ * returns. Currently with sb_bread it's impossible to distinguish
|
|
+ * between ENOMEM and EIO situations (since both result in a NULL
|
|
+ * return.
|
|
+ */
|
|
+struct buffer_head *
|
|
+ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
|
|
+{
|
|
+ struct buffer_head *bh = sb_getblk(sb, block);
|
|
+
|
|
+ if (bh == NULL)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+ if (buffer_uptodate(bh))
|
|
+ return bh;
|
|
+ ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
|
|
+ wait_on_buffer(bh);
|
|
+ if (buffer_uptodate(bh))
|
|
+ return bh;
|
|
+ put_bh(bh);
|
|
+ return ERR_PTR(-EIO);
|
|
+}
|
|
+
|
|
static int ext4_verify_csum_type(struct super_block *sb,
|
|
struct ext4_super_block *es)
|
|
{
|
|
@@ -1151,20 +1174,11 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
|
|
{
|
|
struct inode *inode;
|
|
|
|
- if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
|
|
- return ERR_PTR(-ESTALE);
|
|
- if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
|
|
- return ERR_PTR(-ESTALE);
|
|
-
|
|
- /* iget isn't really right if the inode is currently unallocated!!
|
|
- *
|
|
- * ext4_read_inode will return a bad_inode if the inode had been
|
|
- * deleted, so we should be safe.
|
|
- *
|
|
+ /*
|
|
* Currently we don't know the generation for parent directory, so
|
|
* a generation of 0 means "accept any"
|
|
*/
|
|
- inode = ext4_iget_normal(sb, ino);
|
|
+ inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
|
|
if (IS_ERR(inode))
|
|
return ERR_CAST(inode);
|
|
if (generation && inode->i_generation != generation) {
|
|
@@ -1189,6 +1203,16 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
|
|
ext4_nfs_get_inode);
|
|
}
|
|
|
|
+static int ext4_nfs_commit_metadata(struct inode *inode)
|
|
+{
|
|
+ struct writeback_control wbc = {
|
|
+ .sync_mode = WB_SYNC_ALL
|
|
+ };
|
|
+
|
|
+ trace_ext4_nfs_commit_metadata(inode);
|
|
+ return ext4_write_inode(inode, &wbc);
|
|
+}
|
|
+
|
|
/*
|
|
* Try to release metadata pages (indirect blocks, directories) which are
|
|
* mapped via the block device. Since these pages could have journal heads
|
|
@@ -1393,6 +1417,7 @@ static const struct export_operations ext4_export_ops = {
|
|
.fh_to_dentry = ext4_fh_to_dentry,
|
|
.fh_to_parent = ext4_fh_to_parent,
|
|
.get_parent = ext4_get_parent,
|
|
+ .commit_metadata = ext4_nfs_commit_metadata,
|
|
};
|
|
|
|
enum {
|
|
@@ -4328,7 +4353,7 @@ no_journal:
|
|
* so we can safely mount the rest of the filesystem now.
|
|
*/
|
|
|
|
- root = ext4_iget(sb, EXT4_ROOT_INO);
|
|
+ root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
|
|
if (IS_ERR(root)) {
|
|
ext4_msg(sb, KERN_ERR, "get root inode failed");
|
|
ret = PTR_ERR(root);
|
|
@@ -4598,7 +4623,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
|
|
* happen if we iget() an unused inode, as the subsequent iput()
|
|
* will try to delete it.
|
|
*/
|
|
- journal_inode = ext4_iget(sb, journal_inum);
|
|
+ journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
|
|
if (IS_ERR(journal_inode)) {
|
|
ext4_msg(sb, KERN_ERR, "no journal found");
|
|
return NULL;
|
|
@@ -4880,7 +4905,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
|
|
ext4_superblock_csum_set(sb);
|
|
if (sync)
|
|
lock_buffer(sbh);
|
|
- if (buffer_write_io_error(sbh)) {
|
|
+ if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
|
|
/*
|
|
* Oh, dear. A previous attempt to write the
|
|
* superblock failed. This could happen because the
|
|
@@ -5680,7 +5705,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
|
|
if (!qf_inums[type])
|
|
return -EPERM;
|
|
|
|
- qf_inode = ext4_iget(sb, qf_inums[type]);
|
|
+ qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
|
|
if (IS_ERR(qf_inode)) {
|
|
ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
|
|
return PTR_ERR(qf_inode);
|
|
@@ -5690,9 +5715,9 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
|
|
qf_inode->i_flags |= S_NOQUOTA;
|
|
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
|
|
err = dquot_enable(qf_inode, type, format_id, flags);
|
|
- iput(qf_inode);
|
|
if (err)
|
|
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
|
|
+ iput(qf_inode);
|
|
|
|
return err;
|
|
}
|
|
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
|
|
index 7643d52c776c6..86ed9c6862493 100644
|
|
--- a/fs/ext4/xattr.c
|
|
+++ b/fs/ext4/xattr.c
|
|
@@ -384,7 +384,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
|
|
struct inode *inode;
|
|
int err;
|
|
|
|
- inode = ext4_iget(parent->i_sb, ea_ino);
|
|
+ inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
|
|
if (IS_ERR(inode)) {
|
|
err = PTR_ERR(inode);
|
|
ext4_error(parent->i_sb,
|
|
@@ -522,14 +522,13 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
|
|
ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
|
|
name_index, name, buffer, (long)buffer_size);
|
|
|
|
- error = -ENODATA;
|
|
if (!EXT4_I(inode)->i_file_acl)
|
|
- goto cleanup;
|
|
+ return -ENODATA;
|
|
ea_idebug(inode, "reading block %llu",
|
|
(unsigned long long)EXT4_I(inode)->i_file_acl);
|
|
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
|
|
- if (!bh)
|
|
- goto cleanup;
|
|
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
|
|
+ if (IS_ERR(bh))
|
|
+ return PTR_ERR(bh);
|
|
ea_bdebug(bh, "b_count=%d, refcount=%d",
|
|
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
|
|
error = ext4_xattr_check_block(inode, bh);
|
|
@@ -696,26 +695,23 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
|
|
ea_idebug(inode, "buffer=%p, buffer_size=%ld",
|
|
buffer, (long)buffer_size);
|
|
|
|
- error = 0;
|
|
if (!EXT4_I(inode)->i_file_acl)
|
|
- goto cleanup;
|
|
+ return 0;
|
|
ea_idebug(inode, "reading block %llu",
|
|
(unsigned long long)EXT4_I(inode)->i_file_acl);
|
|
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
|
|
- error = -EIO;
|
|
- if (!bh)
|
|
- goto cleanup;
|
|
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
|
|
+ if (IS_ERR(bh))
|
|
+ return PTR_ERR(bh);
|
|
ea_bdebug(bh, "b_count=%d, refcount=%d",
|
|
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
|
|
error = ext4_xattr_check_block(inode, bh);
|
|
if (error)
|
|
goto cleanup;
|
|
ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
|
|
- error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
|
|
-
|
|
+ error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
|
|
+ buffer_size);
|
|
cleanup:
|
|
brelse(bh);
|
|
-
|
|
return error;
|
|
}
|
|
|
|
@@ -830,9 +826,9 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
|
|
}
|
|
|
|
if (EXT4_I(inode)->i_file_acl) {
|
|
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
|
|
- if (!bh) {
|
|
- ret = -EIO;
|
|
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
|
|
+ if (IS_ERR(bh)) {
|
|
+ ret = PTR_ERR(bh);
|
|
goto out;
|
|
}
|
|
|
|
@@ -1486,7 +1482,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
|
|
}
|
|
|
|
while (ce) {
|
|
- ea_inode = ext4_iget(inode->i_sb, ce->e_value);
|
|
+ ea_inode = ext4_iget(inode->i_sb, ce->e_value,
|
|
+ EXT4_IGET_NORMAL);
|
|
if (!IS_ERR(ea_inode) &&
|
|
!is_bad_inode(ea_inode) &&
|
|
(EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
|
|
@@ -1821,16 +1818,15 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
|
|
|
|
if (EXT4_I(inode)->i_file_acl) {
|
|
/* The inode already has an extended attribute block. */
|
|
- bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
|
|
- error = -EIO;
|
|
- if (!bs->bh)
|
|
- goto cleanup;
|
|
+ bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
|
|
+ if (IS_ERR(bs->bh))
|
|
+ return PTR_ERR(bs->bh);
|
|
ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
|
|
atomic_read(&(bs->bh->b_count)),
|
|
le32_to_cpu(BHDR(bs->bh)->h_refcount));
|
|
error = ext4_xattr_check_block(inode, bs->bh);
|
|
if (error)
|
|
- goto cleanup;
|
|
+ return error;
|
|
/* Find the named attribute. */
|
|
bs->s.base = BHDR(bs->bh);
|
|
bs->s.first = BFIRST(bs->bh);
|
|
@@ -1839,13 +1835,10 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
|
|
error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
|
|
i->name_index, i->name, 1);
|
|
if (error && error != -ENODATA)
|
|
- goto cleanup;
|
|
+ return error;
|
|
bs->s.not_found = error;
|
|
}
|
|
- error = 0;
|
|
-
|
|
-cleanup:
|
|
- return error;
|
|
+ return 0;
|
|
}
|
|
|
|
static int
|
|
@@ -2274,9 +2267,9 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
|
|
|
|
if (!EXT4_I(inode)->i_file_acl)
|
|
return NULL;
|
|
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
|
|
- if (!bh)
|
|
- return ERR_PTR(-EIO);
|
|
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
|
|
+ if (IS_ERR(bh))
|
|
+ return bh;
|
|
error = ext4_xattr_check_block(inode, bh);
|
|
if (error) {
|
|
brelse(bh);
|
|
@@ -2729,7 +2722,7 @@ retry:
|
|
base = IFIRST(header);
|
|
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
|
|
min_offs = end - base;
|
|
- total_ino = sizeof(struct ext4_xattr_ibody_header);
|
|
+ total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
|
|
|
|
error = xattr_check_inode(inode, header, end);
|
|
if (error)
|
|
@@ -2746,10 +2739,11 @@ retry:
|
|
if (EXT4_I(inode)->i_file_acl) {
|
|
struct buffer_head *bh;
|
|
|
|
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
|
|
- error = -EIO;
|
|
- if (!bh)
|
|
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
|
|
+ if (IS_ERR(bh)) {
|
|
+ error = PTR_ERR(bh);
|
|
goto cleanup;
|
|
+ }
|
|
error = ext4_xattr_check_block(inode, bh);
|
|
if (error) {
|
|
brelse(bh);
|
|
@@ -2903,11 +2897,12 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
|
|
}
|
|
|
|
if (EXT4_I(inode)->i_file_acl) {
|
|
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
|
|
- if (!bh) {
|
|
- EXT4_ERROR_INODE(inode, "block %llu read error",
|
|
- EXT4_I(inode)->i_file_acl);
|
|
- error = -EIO;
|
|
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
|
|
+ if (IS_ERR(bh)) {
|
|
+ error = PTR_ERR(bh);
|
|
+ if (error == -EIO)
|
|
+ EXT4_ERROR_INODE(inode, "block %llu read error",
|
|
+ EXT4_I(inode)->i_file_acl);
|
|
goto cleanup;
|
|
}
|
|
error = ext4_xattr_check_block(inode, bh);
|
|
@@ -3060,8 +3055,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
|
|
while (ce) {
|
|
struct buffer_head *bh;
|
|
|
|
- bh = sb_bread(inode->i_sb, ce->e_value);
|
|
- if (!bh) {
|
|
+ bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
|
|
+ if (IS_ERR(bh)) {
|
|
+ if (PTR_ERR(bh) == -ENOMEM)
|
|
+ return NULL;
|
|
EXT4_ERROR_INODE(inode, "block %lu read error",
|
|
(unsigned long)ce->e_value);
|
|
} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
|
|
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
|
|
index fa707cdd4120d..22f0d17cde433 100644
|
|
--- a/fs/f2fs/acl.c
|
|
+++ b/fs/f2fs/acl.c
|
|
@@ -352,12 +352,14 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
|
|
return PTR_ERR(p);
|
|
|
|
clone = f2fs_acl_clone(p, GFP_NOFS);
|
|
- if (!clone)
|
|
- goto no_mem;
|
|
+ if (!clone) {
|
|
+ ret = -ENOMEM;
|
|
+ goto release_acl;
|
|
+ }
|
|
|
|
ret = f2fs_acl_create_masq(clone, mode);
|
|
if (ret < 0)
|
|
- goto no_mem_clone;
|
|
+ goto release_clone;
|
|
|
|
if (ret == 0)
|
|
posix_acl_release(clone);
|
|
@@ -371,11 +373,11 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
|
|
|
|
return 0;
|
|
|
|
-no_mem_clone:
|
|
+release_clone:
|
|
posix_acl_release(clone);
|
|
-no_mem:
|
|
+release_acl:
|
|
posix_acl_release(p);
|
|
- return -ENOMEM;
|
|
+ return ret;
|
|
}
|
|
|
|
int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
|
|
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
|
|
index 9c28ea439e0bb..e5719fcac47da 100644
|
|
--- a/fs/f2fs/checkpoint.c
|
|
+++ b/fs/f2fs/checkpoint.c
|
|
@@ -1290,11 +1290,12 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,
|
|
struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
|
|
int err;
|
|
|
|
- memcpy(page_address(page), src, PAGE_SIZE);
|
|
- set_page_dirty(page);
|
|
-
|
|
f2fs_wait_on_page_writeback(page, META, true);
|
|
f2fs_bug_on(sbi, PageWriteback(page));
|
|
+
|
|
+ memcpy(page_address(page), src, PAGE_SIZE);
|
|
+
|
|
+ set_page_dirty(page);
|
|
if (unlikely(!clear_page_dirty_for_io(page)))
|
|
f2fs_bug_on(sbi, 1);
|
|
|
|
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
|
|
index b293cb3e27a22..e90ca6aa3a00d 100644
|
|
--- a/fs/f2fs/data.c
|
|
+++ b/fs/f2fs/data.c
|
|
@@ -1102,8 +1102,10 @@ next_block:
|
|
if (test_opt(sbi, LFS) && create &&
|
|
flag == F2FS_GET_BLOCK_DIO) {
|
|
err = __allocate_data_block(&dn, map->m_seg_type);
|
|
- if (!err)
|
|
+ if (!err) {
|
|
+ blkaddr = dn.data_blkaddr;
|
|
set_inode_flag(inode, FI_APPEND_WRITE);
|
|
+ }
|
|
}
|
|
} else {
|
|
if (create) {
|
|
@@ -2324,6 +2326,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
|
|
bool locked = false;
|
|
struct extent_info ei = {0,0,0};
|
|
int err = 0;
|
|
+ int flag;
|
|
|
|
/*
|
|
* we already allocated all the blocks, so we don't need to get
|
|
@@ -2333,9 +2336,15 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
|
|
!is_inode_flag_set(inode, FI_NO_PREALLOC))
|
|
return 0;
|
|
|
|
+ /* f2fs_lock_op avoids race between write CP and convert_inline_page */
|
|
+ if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
|
|
+ flag = F2FS_GET_BLOCK_DEFAULT;
|
|
+ else
|
|
+ flag = F2FS_GET_BLOCK_PRE_AIO;
|
|
+
|
|
if (f2fs_has_inline_data(inode) ||
|
|
(pos & PAGE_MASK) >= i_size_read(inode)) {
|
|
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
|
|
+ __do_map_lock(sbi, flag, true);
|
|
locked = true;
|
|
}
|
|
restart:
|
|
@@ -2373,6 +2382,7 @@ restart:
|
|
f2fs_put_dnode(&dn);
|
|
__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
|
|
true);
|
|
+ WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
|
|
locked = true;
|
|
goto restart;
|
|
}
|
|
@@ -2386,7 +2396,7 @@ out:
|
|
f2fs_put_dnode(&dn);
|
|
unlock_out:
|
|
if (locked)
|
|
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
|
|
+ __do_map_lock(sbi, flag, false);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
|
|
index 1e031971a466c..689c10ad57fe0 100644
|
|
--- a/fs/f2fs/f2fs.h
|
|
+++ b/fs/f2fs/f2fs.h
|
|
@@ -2672,10 +2672,19 @@ static inline bool is_dot_dotdot(const struct qstr *str)
|
|
|
|
static inline bool f2fs_may_extent_tree(struct inode *inode)
|
|
{
|
|
- if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
|
|
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
|
+
|
|
+ if (!test_opt(sbi, EXTENT_CACHE) ||
|
|
is_inode_flag_set(inode, FI_NO_EXTENT))
|
|
return false;
|
|
|
|
+ /*
|
|
+ * for recovered files during mount do not create extents
|
|
+ * if shrinker is not registered.
|
|
+ */
|
|
+ if (list_empty(&sbi->s_list))
|
|
+ return false;
|
|
+
|
|
return S_ISREG(inode->i_mode);
|
|
}
|
|
|
|
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
|
|
index 88b124677189b..9eaf07fd8b4c1 100644
|
|
--- a/fs/f2fs/file.c
|
|
+++ b/fs/f2fs/file.c
|
|
@@ -216,6 +216,9 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
|
|
|
|
trace_f2fs_sync_file_enter(inode);
|
|
|
|
+ if (S_ISDIR(inode->i_mode))
|
|
+ goto go_write;
|
|
+
|
|
/* if fdatasync is triggered, let's do in-place-update */
|
|
if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
|
|
set_inode_flag(inode, FI_NEED_IPU);
|
|
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
|
|
index a07241fb85370..74f72dccab579 100644
|
|
--- a/fs/f2fs/gc.c
|
|
+++ b/fs/f2fs/gc.c
|
|
@@ -658,6 +658,14 @@ got_it:
|
|
fio.page = page;
|
|
fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
|
|
|
|
+ /*
|
|
+ * don't cache encrypted data into meta inode until previous dirty
|
|
+ * data were writebacked to avoid racing between GC and flush.
|
|
+ */
|
|
+ f2fs_wait_on_page_writeback(page, DATA, true);
|
|
+
|
|
+ f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
|
|
+
|
|
fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
|
|
dn.data_blkaddr,
|
|
FGP_LOCK | FGP_CREAT, GFP_NOFS);
|
|
@@ -745,6 +753,8 @@ static int move_data_block(struct inode *inode, block_t bidx,
|
|
*/
|
|
f2fs_wait_on_page_writeback(page, DATA, true);
|
|
|
|
+ f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
|
|
+
|
|
err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
|
|
if (err)
|
|
goto put_out;
|
|
@@ -802,8 +812,8 @@ static int move_data_block(struct inode *inode, block_t bidx,
|
|
}
|
|
|
|
write_page:
|
|
- set_page_dirty(fio.encrypted_page);
|
|
f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
|
|
+ set_page_dirty(fio.encrypted_page);
|
|
if (clear_page_dirty_for_io(fio.encrypted_page))
|
|
dec_page_count(fio.sbi, F2FS_DIRTY_META);
|
|
|
|
@@ -897,8 +907,9 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
|
|
bool is_dirty = PageDirty(page);
|
|
|
|
retry:
|
|
- set_page_dirty(page);
|
|
f2fs_wait_on_page_writeback(page, DATA, true);
|
|
+
|
|
+ set_page_dirty(page);
|
|
if (clear_page_dirty_for_io(page)) {
|
|
inode_dec_dirty_pages(inode);
|
|
f2fs_remove_dirty_inode(inode);
|
|
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
|
|
index d338740d0fdac..33fb3f8aeafa4 100644
|
|
--- a/fs/f2fs/node.c
|
|
+++ b/fs/f2fs/node.c
|
|
@@ -826,6 +826,7 @@ static int truncate_node(struct dnode_of_data *dn)
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
|
|
struct node_info ni;
|
|
int err;
|
|
+ pgoff_t index;
|
|
|
|
err = f2fs_get_node_info(sbi, dn->nid, &ni);
|
|
if (err)
|
|
@@ -845,10 +846,11 @@ static int truncate_node(struct dnode_of_data *dn)
|
|
clear_node_page_dirty(dn->node_page);
|
|
set_sbi_flag(sbi, SBI_IS_DIRTY);
|
|
|
|
+ index = dn->node_page->index;
|
|
f2fs_put_page(dn->node_page, 1);
|
|
|
|
invalidate_mapping_pages(NODE_MAPPING(sbi),
|
|
- dn->node_page->index, dn->node_page->index);
|
|
+ index, index);
|
|
|
|
dn->node_page = NULL;
|
|
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
|
|
@@ -1596,10 +1598,11 @@ int f2fs_move_node_page(struct page *node_page, int gc_type)
|
|
.for_reclaim = 0,
|
|
};
|
|
|
|
- set_page_dirty(node_page);
|
|
f2fs_wait_on_page_writeback(node_page, NODE, true);
|
|
-
|
|
f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
|
|
+
|
|
+ set_page_dirty(node_page);
|
|
+
|
|
if (!clear_page_dirty_for_io(node_page)) {
|
|
err = -EAGAIN;
|
|
goto out_page;
|
|
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
|
|
index 6edcf8391dd3d..8f3578c5230eb 100644
|
|
--- a/fs/f2fs/segment.c
|
|
+++ b/fs/f2fs/segment.c
|
|
@@ -387,8 +387,9 @@ static int __f2fs_commit_inmem_pages(struct inode *inode)
|
|
if (page->mapping == inode->i_mapping) {
|
|
trace_f2fs_commit_inmem_page(page, INMEM);
|
|
|
|
- set_page_dirty(page);
|
|
f2fs_wait_on_page_writeback(page, DATA, true);
|
|
+
|
|
+ set_page_dirty(page);
|
|
if (clear_page_dirty_for_io(page)) {
|
|
inode_dec_dirty_pages(inode);
|
|
f2fs_remove_dirty_inode(inode);
|
|
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
|
|
index 9e13db994fdf4..a467aca29cfef 100644
|
|
--- a/fs/f2fs/shrinker.c
|
|
+++ b/fs/f2fs/shrinker.c
|
|
@@ -135,6 +135,6 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
|
|
f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
|
|
|
|
spin_lock(&f2fs_list_lock);
|
|
- list_del(&sbi->s_list);
|
|
+ list_del_init(&sbi->s_list);
|
|
spin_unlock(&f2fs_list_lock);
|
|
}
|
|
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
|
|
index af58b2cc21b81..a239472f405ae 100644
|
|
--- a/fs/f2fs/super.c
|
|
+++ b/fs/f2fs/super.c
|
|
@@ -1058,9 +1058,6 @@ static void f2fs_put_super(struct super_block *sb)
|
|
f2fs_write_checkpoint(sbi, &cpc);
|
|
}
|
|
|
|
- /* f2fs_write_checkpoint can update stat informaion */
|
|
- f2fs_destroy_stats(sbi);
|
|
-
|
|
/*
|
|
* normally superblock is clean, so we need to release this.
|
|
* In addition, EIO will skip do checkpoint, we need this as well.
|
|
@@ -1080,6 +1077,12 @@ static void f2fs_put_super(struct super_block *sb)
|
|
iput(sbi->node_inode);
|
|
iput(sbi->meta_inode);
|
|
|
|
+ /*
|
|
+ * iput() can update stat information, if f2fs_write_checkpoint()
|
|
+ * above failed with error.
|
|
+ */
|
|
+ f2fs_destroy_stats(sbi);
|
|
+
|
|
/* destroy f2fs internal modules */
|
|
f2fs_destroy_node_manager(sbi);
|
|
f2fs_destroy_segment_manager(sbi);
|
|
@@ -1457,19 +1460,16 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
|
|
|
|
sbi->sb->s_flags |= SB_ACTIVE;
|
|
|
|
- mutex_lock(&sbi->gc_mutex);
|
|
f2fs_update_time(sbi, DISABLE_TIME);
|
|
|
|
while (!f2fs_time_over(sbi, DISABLE_TIME)) {
|
|
+ mutex_lock(&sbi->gc_mutex);
|
|
err = f2fs_gc(sbi, true, false, NULL_SEGNO);
|
|
if (err == -ENODATA)
|
|
break;
|
|
- if (err && err != -EAGAIN) {
|
|
- mutex_unlock(&sbi->gc_mutex);
|
|
+ if (err && err != -EAGAIN)
|
|
return err;
|
|
- }
|
|
}
|
|
- mutex_unlock(&sbi->gc_mutex);
|
|
|
|
err = sync_filesystem(sbi->sb);
|
|
if (err)
|
|
@@ -2496,10 +2496,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
|
|
return 1;
|
|
}
|
|
|
|
- if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
|
|
+ if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
|
|
f2fs_msg(sb, KERN_INFO,
|
|
- "Wrong segment_count / block_count (%u > %u)",
|
|
- segment_count, le32_to_cpu(raw_super->block_count));
|
|
+ "Wrong segment_count / block_count (%u > %llu)",
|
|
+ segment_count, le64_to_cpu(raw_super->block_count));
|
|
return 1;
|
|
}
|
|
|
|
@@ -3259,30 +3259,30 @@ try_onemore:
|
|
|
|
f2fs_build_gc_manager(sbi);
|
|
|
|
+ err = f2fs_build_stats(sbi);
|
|
+ if (err)
|
|
+ goto free_nm;
|
|
+
|
|
/* get an inode for node space */
|
|
sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
|
|
if (IS_ERR(sbi->node_inode)) {
|
|
f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
|
|
err = PTR_ERR(sbi->node_inode);
|
|
- goto free_nm;
|
|
+ goto free_stats;
|
|
}
|
|
|
|
- err = f2fs_build_stats(sbi);
|
|
- if (err)
|
|
- goto free_node_inode;
|
|
-
|
|
/* read root inode and dentry */
|
|
root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
|
|
if (IS_ERR(root)) {
|
|
f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
|
|
err = PTR_ERR(root);
|
|
- goto free_stats;
|
|
+ goto free_node_inode;
|
|
}
|
|
if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
|
|
!root->i_size || !root->i_nlink) {
|
|
iput(root);
|
|
err = -EINVAL;
|
|
- goto free_stats;
|
|
+ goto free_node_inode;
|
|
}
|
|
|
|
sb->s_root = d_make_root(root); /* allocate root dentry */
|
|
@@ -3406,12 +3406,12 @@ free_meta:
|
|
free_root_inode:
|
|
dput(sb->s_root);
|
|
sb->s_root = NULL;
|
|
-free_stats:
|
|
- f2fs_destroy_stats(sbi);
|
|
free_node_inode:
|
|
f2fs_release_ino_entry(sbi, true);
|
|
truncate_inode_pages_final(NODE_MAPPING(sbi));
|
|
iput(sbi->node_inode);
|
|
+free_stats:
|
|
+ f2fs_destroy_stats(sbi);
|
|
free_nm:
|
|
f2fs_destroy_node_manager(sbi);
|
|
free_sm:
|
|
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
|
|
index 7261245c208dc..ecd2cf2fc584d 100644
|
|
--- a/fs/f2fs/xattr.c
|
|
+++ b/fs/f2fs/xattr.c
|
|
@@ -288,7 +288,7 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
|
|
static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
|
|
unsigned int index, unsigned int len,
|
|
const char *name, struct f2fs_xattr_entry **xe,
|
|
- void **base_addr)
|
|
+ void **base_addr, int *base_size)
|
|
{
|
|
void *cur_addr, *txattr_addr, *last_addr = NULL;
|
|
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
|
|
@@ -299,8 +299,8 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
|
|
if (!size && !inline_size)
|
|
return -ENODATA;
|
|
|
|
- txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
|
|
- inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
|
|
+ *base_size = inline_size + size + XATTR_PADDING_SIZE;
|
|
+ txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
|
|
if (!txattr_addr)
|
|
return -ENOMEM;
|
|
|
|
@@ -312,8 +312,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
|
|
|
|
*xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
|
|
index, len, name);
|
|
- if (*xe)
|
|
+ if (*xe) {
|
|
+ *base_size = inline_size;
|
|
goto check;
|
|
+ }
|
|
}
|
|
|
|
/* read from xattr node block */
|
|
@@ -474,6 +476,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
|
|
int error = 0;
|
|
unsigned int size, len;
|
|
void *base_addr = NULL;
|
|
+ int base_size;
|
|
|
|
if (name == NULL)
|
|
return -EINVAL;
|
|
@@ -484,7 +487,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
|
|
|
|
down_read(&F2FS_I(inode)->i_xattr_sem);
|
|
error = lookup_all_xattrs(inode, ipage, index, len, name,
|
|
- &entry, &base_addr);
|
|
+ &entry, &base_addr, &base_size);
|
|
up_read(&F2FS_I(inode)->i_xattr_sem);
|
|
if (error)
|
|
return error;
|
|
@@ -498,6 +501,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
|
|
|
|
if (buffer) {
|
|
char *pval = entry->e_name + entry->e_name_len;
|
|
+
|
|
+ if (base_size - (pval - (char *)base_addr) < size) {
|
|
+ error = -ERANGE;
|
|
+ goto out;
|
|
+ }
|
|
memcpy(buffer, pval, size);
|
|
}
|
|
error = size;
|
|
diff --git a/fs/file.c b/fs/file.c
|
|
index 7ffd6e9d103d6..8d059d8973e9f 100644
|
|
--- a/fs/file.c
|
|
+++ b/fs/file.c
|
|
@@ -640,6 +640,35 @@ out_unlock:
|
|
}
|
|
EXPORT_SYMBOL(__close_fd); /* for ksys_close() */
|
|
|
|
+/*
|
|
+ * variant of __close_fd that gets a ref on the file for later fput
|
|
+ */
|
|
+int __close_fd_get_file(unsigned int fd, struct file **res)
|
|
+{
|
|
+ struct files_struct *files = current->files;
|
|
+ struct file *file;
|
|
+ struct fdtable *fdt;
|
|
+
|
|
+ spin_lock(&files->file_lock);
|
|
+ fdt = files_fdtable(files);
|
|
+ if (fd >= fdt->max_fds)
|
|
+ goto out_unlock;
|
|
+ file = fdt->fd[fd];
|
|
+ if (!file)
|
|
+ goto out_unlock;
|
|
+ rcu_assign_pointer(fdt->fd[fd], NULL);
|
|
+ __put_unused_fd(files, fd);
|
|
+ spin_unlock(&files->file_lock);
|
|
+ get_file(file);
|
|
+ *res = file;
|
|
+ return filp_close(file, files);
|
|
+
|
|
+out_unlock:
|
|
+ spin_unlock(&files->file_lock);
|
|
+ *res = NULL;
|
|
+ return -ENOENT;
|
|
+}
|
|
+
|
|
void do_close_on_exec(struct files_struct *files)
|
|
{
|
|
unsigned i;
|
|
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
|
|
index a5e516a40e7a3..809c0f2f9942e 100644
|
|
--- a/fs/fuse/dev.c
|
|
+++ b/fs/fuse/dev.c
|
|
@@ -1742,7 +1742,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
|
|
req->in.h.nodeid = outarg->nodeid;
|
|
req->in.numargs = 2;
|
|
req->in.argpages = 1;
|
|
- req->page_descs[0].offset = offset;
|
|
req->end = fuse_retrieve_end;
|
|
|
|
index = outarg->offset >> PAGE_SHIFT;
|
|
@@ -1757,6 +1756,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
|
|
|
|
this_num = min_t(unsigned, num, PAGE_SIZE - offset);
|
|
req->pages[req->num_pages] = page;
|
|
+ req->page_descs[req->num_pages].offset = offset;
|
|
req->page_descs[req->num_pages].length = this_num;
|
|
req->num_pages++;
|
|
|
|
@@ -2077,8 +2077,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
|
|
|
|
ret = fuse_dev_do_write(fud, &cs, len);
|
|
|
|
+ pipe_lock(pipe);
|
|
for (idx = 0; idx < nbuf; idx++)
|
|
pipe_buf_release(pipe, &bufs[idx]);
|
|
+ pipe_unlock(pipe);
|
|
|
|
out:
|
|
kvfree(bufs);
|
|
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
|
|
index ffaffe18352a1..a59c16bd90acc 100644
|
|
--- a/fs/fuse/file.c
|
|
+++ b/fs/fuse/file.c
|
|
@@ -1782,7 +1782,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
|
|
spin_unlock(&fc->lock);
|
|
|
|
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
|
|
- dec_node_page_state(page, NR_WRITEBACK_TEMP);
|
|
+ dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
|
|
wb_writeout_inc(&bdi->wb);
|
|
fuse_writepage_free(fc, new_req);
|
|
fuse_request_free(new_req);
|
|
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
|
|
index 568abed20eb20..a7f922a67c697 100644
|
|
--- a/fs/fuse/inode.c
|
|
+++ b/fs/fuse/inode.c
|
|
@@ -628,6 +628,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
|
|
get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
|
|
fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
|
|
fc->user_ns = get_user_ns(user_ns);
|
|
+ fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
|
|
}
|
|
EXPORT_SYMBOL_GPL(fuse_conn_init);
|
|
|
|
@@ -1162,7 +1163,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
|
|
fc->user_id = d.user_id;
|
|
fc->group_id = d.group_id;
|
|
fc->max_read = max_t(unsigned, 4096, d.max_read);
|
|
- fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
|
|
|
|
/* Used by get_root_inode() */
|
|
sb->s_fs_info = fc;
|
|
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
|
|
index 648f0ca1ad57e..998051c4aea78 100644
|
|
--- a/fs/gfs2/inode.c
|
|
+++ b/fs/gfs2/inode.c
|
|
@@ -744,17 +744,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
|
|
the gfs2 structures. */
|
|
if (default_acl) {
|
|
error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
|
|
+ if (error)
|
|
+ goto fail_gunlock3;
|
|
posix_acl_release(default_acl);
|
|
+ default_acl = NULL;
|
|
}
|
|
if (acl) {
|
|
- if (!error)
|
|
- error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
|
|
+ error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
|
|
+ if (error)
|
|
+ goto fail_gunlock3;
|
|
posix_acl_release(acl);
|
|
+ acl = NULL;
|
|
}
|
|
|
|
- if (error)
|
|
- goto fail_gunlock3;
|
|
-
|
|
error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
|
|
&gfs2_initxattrs, NULL);
|
|
if (error)
|
|
@@ -789,10 +791,8 @@ fail_free_inode:
|
|
}
|
|
gfs2_rsqa_delete(ip, NULL);
|
|
fail_free_acls:
|
|
- if (default_acl)
|
|
- posix_acl_release(default_acl);
|
|
- if (acl)
|
|
- posix_acl_release(acl);
|
|
+ posix_acl_release(default_acl);
|
|
+ posix_acl_release(acl);
|
|
fail_gunlock:
|
|
gfs2_dir_no_add(&da);
|
|
gfs2_glock_dq_uninit(ghs);
|
|
diff --git a/fs/inode.c b/fs/inode.c
|
|
index 35d2108d567c2..9e198f00b64c6 100644
|
|
--- a/fs/inode.c
|
|
+++ b/fs/inode.c
|
|
@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
|
|
return LRU_REMOVED;
|
|
}
|
|
|
|
- /*
|
|
- * Recently referenced inodes and inodes with many attached pages
|
|
- * get one more pass.
|
|
- */
|
|
- if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
|
|
+ /* recently referenced inodes get one more pass */
|
|
+ if (inode->i_state & I_REFERENCED) {
|
|
inode->i_state &= ~I_REFERENCED;
|
|
spin_unlock(&inode->i_lock);
|
|
return LRU_ROTATE;
|
|
diff --git a/fs/iomap.c b/fs/iomap.c
|
|
index d6bc98ae8d350..ce837d962d473 100644
|
|
--- a/fs/iomap.c
|
|
+++ b/fs/iomap.c
|
|
@@ -492,16 +492,29 @@ done:
|
|
}
|
|
EXPORT_SYMBOL_GPL(iomap_readpages);
|
|
|
|
+/*
|
|
+ * iomap_is_partially_uptodate checks whether blocks within a page are
|
|
+ * uptodate or not.
|
|
+ *
|
|
+ * Returns true if all blocks which correspond to a file portion
|
|
+ * we want to read within the page are uptodate.
|
|
+ */
|
|
int
|
|
iomap_is_partially_uptodate(struct page *page, unsigned long from,
|
|
unsigned long count)
|
|
{
|
|
struct iomap_page *iop = to_iomap_page(page);
|
|
struct inode *inode = page->mapping->host;
|
|
- unsigned first = from >> inode->i_blkbits;
|
|
- unsigned last = (from + count - 1) >> inode->i_blkbits;
|
|
+ unsigned len, first, last;
|
|
unsigned i;
|
|
|
|
+ /* Limit range to one page */
|
|
+ len = min_t(unsigned, PAGE_SIZE - from, count);
|
|
+
|
|
+ /* First and last blocks in range within page */
|
|
+ first = from >> inode->i_blkbits;
|
|
+ last = (from + len - 1) >> inode->i_blkbits;
|
|
+
|
|
if (iop) {
|
|
for (i = first; i <= last; i++)
|
|
if (!test_bit(i, iop->uptodate))
|
|
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
|
|
index 902a7dd10e5c4..bb6ae387469f4 100644
|
|
--- a/fs/jffs2/super.c
|
|
+++ b/fs/jffs2/super.c
|
|
@@ -101,7 +101,8 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
|
|
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
|
|
|
|
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
|
|
- cancel_delayed_work_sync(&c->wbuf_dwork);
|
|
+ if (jffs2_is_writebuffered(c))
|
|
+ cancel_delayed_work_sync(&c->wbuf_dwork);
|
|
#endif
|
|
|
|
mutex_lock(&c->alloc_sem);
|
|
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
|
|
index d20b92f271c23..0a67dd4250e97 100644
|
|
--- a/fs/lockd/clntproc.c
|
|
+++ b/fs/lockd/clntproc.c
|
|
@@ -442,7 +442,7 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
|
|
fl->fl_start = req->a_res.lock.fl.fl_start;
|
|
fl->fl_end = req->a_res.lock.fl.fl_end;
|
|
fl->fl_type = req->a_res.lock.fl.fl_type;
|
|
- fl->fl_pid = 0;
|
|
+ fl->fl_pid = -req->a_res.lock.fl.fl_pid;
|
|
break;
|
|
default:
|
|
status = nlm_stat_to_errno(req->a_res.status);
|
|
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
|
|
index 7147e4aebecc2..9846f7e952826 100644
|
|
--- a/fs/lockd/xdr.c
|
|
+++ b/fs/lockd/xdr.c
|
|
@@ -127,7 +127,7 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
|
|
|
|
locks_init_lock(fl);
|
|
fl->fl_owner = current->files;
|
|
- fl->fl_pid = (pid_t)lock->svid;
|
|
+ fl->fl_pid = current->tgid;
|
|
fl->fl_flags = FL_POSIX;
|
|
fl->fl_type = F_RDLCK; /* as good as anything else */
|
|
start = ntohl(*p++);
|
|
@@ -269,7 +269,7 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
|
|
memset(lock, 0, sizeof(*lock));
|
|
locks_init_lock(&lock->fl);
|
|
lock->svid = ~(u32) 0;
|
|
- lock->fl.fl_pid = (pid_t)lock->svid;
|
|
+ lock->fl.fl_pid = current->tgid;
|
|
|
|
if (!(p = nlm_decode_cookie(p, &argp->cookie))
|
|
|| !(p = xdr_decode_string_inplace(p, &lock->caller,
|
|
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
|
|
index 7ed9edf9aed4a..70154f3766951 100644
|
|
--- a/fs/lockd/xdr4.c
|
|
+++ b/fs/lockd/xdr4.c
|
|
@@ -119,7 +119,7 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
|
|
|
|
locks_init_lock(fl);
|
|
fl->fl_owner = current->files;
|
|
- fl->fl_pid = (pid_t)lock->svid;
|
|
+ fl->fl_pid = current->tgid;
|
|
fl->fl_flags = FL_POSIX;
|
|
fl->fl_type = F_RDLCK; /* as good as anything else */
|
|
p = xdr_decode_hyper(p, &start);
|
|
@@ -266,7 +266,7 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
|
|
memset(lock, 0, sizeof(*lock));
|
|
locks_init_lock(&lock->fl);
|
|
lock->svid = ~(u32) 0;
|
|
- lock->fl.fl_pid = (pid_t)lock->svid;
|
|
+ lock->fl.fl_pid = current->tgid;
|
|
|
|
if (!(p = nlm4_decode_cookie(p, &argp->cookie))
|
|
|| !(p = xdr_decode_string_inplace(p, &lock->caller,
|
|
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
|
|
index ac4b2f005778c..5ef2c71348bd6 100644
|
|
--- a/fs/nfs/super.c
|
|
+++ b/fs/nfs/super.c
|
|
@@ -2409,8 +2409,7 @@ static int nfs_compare_mount_options(const struct super_block *s, const struct n
|
|
goto Ebusy;
|
|
if (a->acdirmax != b->acdirmax)
|
|
goto Ebusy;
|
|
- if (b->auth_info.flavor_len > 0 &&
|
|
- clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
|
|
+ if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
|
|
goto Ebusy;
|
|
return 1;
|
|
Ebusy:
|
|
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
|
|
index 586726a590d88..d790faff8e47e 100644
|
|
--- a/fs/nfs/write.c
|
|
+++ b/fs/nfs/write.c
|
|
@@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
|
|
nfs_set_page_writeback(page);
|
|
WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
|
|
|
|
- ret = 0;
|
|
+ ret = req->wb_context->error;
|
|
/* If there is a fatal error that covers this write, just exit */
|
|
- if (nfs_error_is_fatal_on_server(req->wb_context->error))
|
|
+ if (nfs_error_is_fatal_on_server(ret))
|
|
goto out_launder;
|
|
|
|
+ ret = 0;
|
|
if (!nfs_pageio_add_request(pgio, req)) {
|
|
ret = pgio->pg_error;
|
|
/*
|
|
@@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
|
|
nfs_context_set_write_error(req->wb_context, ret);
|
|
if (nfs_error_is_fatal_on_server(ret))
|
|
goto out_launder;
|
|
- }
|
|
+ } else
|
|
+ ret = -EAGAIN;
|
|
nfs_redirty_request(req);
|
|
- ret = -EAGAIN;
|
|
} else
|
|
nfs_add_stats(page_file_mapping(page)->host,
|
|
NFSIOS_WRITEPAGES, 1);
|
|
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
|
|
index d505990dac7c9..c364acbb6aba0 100644
|
|
--- a/fs/nfsd/nfs4proc.c
|
|
+++ b/fs/nfsd/nfs4proc.c
|
|
@@ -1016,8 +1016,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
|
|
|
|
nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist,
|
|
&write->wr_head, write->wr_buflen);
|
|
- if (!nvecs)
|
|
- return nfserr_io;
|
|
WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
|
|
|
|
status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp,
|
|
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
|
|
index 6384c9b948988..72a7681f40469 100644
|
|
--- a/fs/nfsd/nfsctl.c
|
|
+++ b/fs/nfsd/nfsctl.c
|
|
@@ -1126,6 +1126,8 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
|
|
case 'Y':
|
|
case 'y':
|
|
case '1':
|
|
+ if (nn->nfsd_serv)
|
|
+ return -EBUSY;
|
|
nfsd4_end_grace(nn);
|
|
break;
|
|
default:
|
|
@@ -1237,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net)
|
|
retval = nfsd_idmap_init(net);
|
|
if (retval)
|
|
goto out_idmap_error;
|
|
- nn->nfsd4_lease = 45; /* default lease time */
|
|
- nn->nfsd4_grace = 45;
|
|
+ nn->nfsd4_lease = 90; /* default lease time */
|
|
+ nn->nfsd4_grace = 90;
|
|
nn->somebody_reclaimed = false;
|
|
nn->clverifier_counter = prandom_u32();
|
|
nn->clientid_counter = prandom_u32();
|
|
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
|
|
index eb67098117b4c..6f424e3a39812 100644
|
|
--- a/fs/nfsd/vfs.c
|
|
+++ b/fs/nfsd/vfs.c
|
|
@@ -544,9 +544,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
|
|
loff_t cloned;
|
|
|
|
cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
|
|
+ if (cloned < 0)
|
|
+ return nfserrno(cloned);
|
|
if (count && cloned != count)
|
|
- cloned = -EINVAL;
|
|
- return nfserrno(cloned < 0 ? cloned : 0);
|
|
+ return nfserrno(-EINVAL);
|
|
+ return 0;
|
|
}
|
|
|
|
ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
|
|
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
|
|
index 105576daca4ab..798f1253141ae 100644
|
|
--- a/fs/notify/inotify/inotify_user.c
|
|
+++ b/fs/notify/inotify/inotify_user.c
|
|
@@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
|
|
return -EBADF;
|
|
|
|
/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
|
|
- if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)))
|
|
- return -EINVAL;
|
|
+ if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
|
|
+ ret = -EINVAL;
|
|
+ goto fput_and_out;
|
|
+ }
|
|
|
|
/* verify that this is indeed an inotify instance */
|
|
if (unlikely(f.file->f_op != &inotify_fops)) {
|
|
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
|
|
index 99ee093182cbe..cc9b32b9db7cf 100644
|
|
--- a/fs/ocfs2/Makefile
|
|
+++ b/fs/ocfs2/Makefile
|
|
@@ -1,5 +1,5 @@
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
-ccflags-y := -Ifs/ocfs2
|
|
+ccflags-y := -I$(src)
|
|
|
|
obj-$(CONFIG_OCFS2_FS) += \
|
|
ocfs2.o \
|
|
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
|
|
index 4ebbd57cbf846..f9b84f7a3e4bb 100644
|
|
--- a/fs/ocfs2/buffer_head_io.c
|
|
+++ b/fs/ocfs2/buffer_head_io.c
|
|
@@ -161,7 +161,6 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
|
|
#endif
|
|
}
|
|
|
|
- clear_buffer_uptodate(bh);
|
|
get_bh(bh); /* for end_buffer_read_sync() */
|
|
bh->b_end_io = end_buffer_read_sync;
|
|
submit_bh(REQ_OP_READ, 0, bh);
|
|
@@ -341,7 +340,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
|
|
continue;
|
|
}
|
|
|
|
- clear_buffer_uptodate(bh);
|
|
get_bh(bh); /* for end_buffer_read_sync() */
|
|
if (validate)
|
|
set_buffer_needs_validate(bh);
|
|
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
|
|
index bd1aab1f49a43..ef2854422a6e1 100644
|
|
--- a/fs/ocfs2/dlm/Makefile
|
|
+++ b/fs/ocfs2/dlm/Makefile
|
|
@@ -1,4 +1,4 @@
|
|
-ccflags-y := -Ifs/ocfs2
|
|
+ccflags-y := -I$(src)/..
|
|
|
|
obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
|
|
|
|
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
|
|
index eed3db8c5b492..33431a0296a32 100644
|
|
--- a/fs/ocfs2/dlmfs/Makefile
|
|
+++ b/fs/ocfs2/dlmfs/Makefile
|
|
@@ -1,4 +1,4 @@
|
|
-ccflags-y := -Ifs/ocfs2
|
|
+ccflags-y := -I$(src)/..
|
|
|
|
obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
|
|
|
|
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
|
|
index 7642b6712c39e..30208233f65ba 100644
|
|
--- a/fs/ocfs2/localalloc.c
|
|
+++ b/fs/ocfs2/localalloc.c
|
|
@@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
|
|
if (num_used
|
|
|| alloc->id1.bitmap1.i_used
|
|
|| alloc->id1.bitmap1.i_total
|
|
- || la->la_bm_off)
|
|
- mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
|
|
+ || la->la_bm_off) {
|
|
+ mlog(ML_ERROR, "inconsistent detected, clean journal with"
|
|
+ " unrecovered local alloc, please run fsck.ocfs2!\n"
|
|
"found = %u, set = %u, taken = %u, off = %u\n",
|
|
num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
|
|
le32_to_cpu(alloc->id1.bitmap1.i_total),
|
|
OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
|
|
|
|
+ status = -EINVAL;
|
|
+ goto bail;
|
|
+ }
|
|
+
|
|
osb->local_alloc_bh = alloc_bh;
|
|
osb->local_alloc_state = OCFS2_LA_ENABLED;
|
|
|
|
diff --git a/fs/proc/base.c b/fs/proc/base.c
|
|
index ce34654794472..bde45ca75ba3e 100644
|
|
--- a/fs/proc/base.c
|
|
+++ b/fs/proc/base.c
|
|
@@ -1084,10 +1084,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
|
|
|
|
task_lock(p);
|
|
if (!p->vfork_done && process_shares_mm(p, mm)) {
|
|
- pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
|
|
- task_pid_nr(p), p->comm,
|
|
- p->signal->oom_score_adj, oom_adj,
|
|
- task_pid_nr(task), task->comm);
|
|
p->signal->oom_score_adj = oom_adj;
|
|
if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
|
|
p->signal->oom_score_adj_min = (short)oom_adj;
|
|
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
|
|
index 47c3764c469b0..7bcf5714ca246 100644
|
|
--- a/fs/proc/task_mmu.c
|
|
+++ b/fs/proc/task_mmu.c
|
|
@@ -423,7 +423,7 @@ struct mem_size_stats {
|
|
};
|
|
|
|
static void smaps_account(struct mem_size_stats *mss, struct page *page,
|
|
- bool compound, bool young, bool dirty)
|
|
+ bool compound, bool young, bool dirty, bool locked)
|
|
{
|
|
int i, nr = compound ? 1 << compound_order(page) : 1;
|
|
unsigned long size = nr * PAGE_SIZE;
|
|
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
|
|
else
|
|
mss->private_clean += size;
|
|
mss->pss += (u64)size << PSS_SHIFT;
|
|
+ if (locked)
|
|
+ mss->pss_locked += (u64)size << PSS_SHIFT;
|
|
return;
|
|
}
|
|
|
|
for (i = 0; i < nr; i++, page++) {
|
|
int mapcount = page_mapcount(page);
|
|
+ unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
|
|
|
|
if (mapcount >= 2) {
|
|
if (dirty || PageDirty(page))
|
|
mss->shared_dirty += PAGE_SIZE;
|
|
else
|
|
mss->shared_clean += PAGE_SIZE;
|
|
- mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
|
|
+ mss->pss += pss / mapcount;
|
|
+ if (locked)
|
|
+ mss->pss_locked += pss / mapcount;
|
|
} else {
|
|
if (dirty || PageDirty(page))
|
|
mss->private_dirty += PAGE_SIZE;
|
|
else
|
|
mss->private_clean += PAGE_SIZE;
|
|
- mss->pss += PAGE_SIZE << PSS_SHIFT;
|
|
+ mss->pss += pss;
|
|
+ if (locked)
|
|
+ mss->pss_locked += pss;
|
|
}
|
|
}
|
|
}
|
|
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
|
|
{
|
|
struct mem_size_stats *mss = walk->private;
|
|
struct vm_area_struct *vma = walk->vma;
|
|
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
|
|
struct page *page = NULL;
|
|
|
|
if (pte_present(*pte)) {
|
|
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
|
|
if (!page)
|
|
return;
|
|
|
|
- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
|
|
+ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
|
|
}
|
|
|
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
|
|
{
|
|
struct mem_size_stats *mss = walk->private;
|
|
struct vm_area_struct *vma = walk->vma;
|
|
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
|
|
struct page *page;
|
|
|
|
/* FOLL_DUMP will return -EFAULT on huge zero page */
|
|
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
|
|
/* pass */;
|
|
else
|
|
VM_BUG_ON_PAGE(1, page);
|
|
- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
|
|
+ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
|
|
}
|
|
#else
|
|
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
|
|
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
|
|
}
|
|
}
|
|
#endif
|
|
-
|
|
/* mmap_sem is held in m_start */
|
|
walk_page_vma(vma, &smaps_walk);
|
|
- if (vma->vm_flags & VM_LOCKED)
|
|
- mss->pss_locked += mss->pss;
|
|
}
|
|
|
|
#define SEQ_PUT_DEC(str, val) \
|
|
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
|
|
index e02a9039b5ea4..67bdbd3da52e5 100644
|
|
--- a/fs/pstore/ram.c
|
|
+++ b/fs/pstore/ram.c
|
|
@@ -723,18 +723,15 @@ static int ramoops_probe(struct platform_device *pdev)
|
|
{
|
|
struct device *dev = &pdev->dev;
|
|
struct ramoops_platform_data *pdata = dev->platform_data;
|
|
+ struct ramoops_platform_data pdata_local;
|
|
struct ramoops_context *cxt = &oops_cxt;
|
|
size_t dump_mem_sz;
|
|
phys_addr_t paddr;
|
|
int err = -EINVAL;
|
|
|
|
if (dev_of_node(dev) && !pdata) {
|
|
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
|
|
- if (!pdata) {
|
|
- pr_err("cannot allocate platform data buffer\n");
|
|
- err = -ENOMEM;
|
|
- goto fail_out;
|
|
- }
|
|
+ pdata = &pdata_local;
|
|
+ memset(pdata, 0, sizeof(*pdata));
|
|
|
|
err = ramoops_parse_dt(pdev, pdata);
|
|
if (err < 0)
|
|
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
|
|
index 12e21f789194b..79f0e183f135a 100644
|
|
--- a/fs/pstore/ram_core.c
|
|
+++ b/fs/pstore/ram_core.c
|
|
@@ -497,6 +497,11 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
|
|
sig ^= PERSISTENT_RAM_SIG;
|
|
|
|
if (prz->buffer->sig == sig) {
|
|
+ if (buffer_size(prz) == 0) {
|
|
+ pr_debug("found existing empty buffer\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
if (buffer_size(prz) > prz->buffer_size ||
|
|
buffer_start(prz) > buffer_size(prz))
|
|
pr_info("found existing invalid buffer, size %zu, start %zu\n",
|
|
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
|
|
index f0cbf58ad4dad..fd5dd806f1b92 100644
|
|
--- a/fs/quota/quota.c
|
|
+++ b/fs/quota/quota.c
|
|
@@ -791,7 +791,8 @@ static int quotactl_cmd_write(int cmd)
|
|
/* Return true if quotactl command is manipulating quota on/off state */
|
|
static bool quotactl_cmd_onoff(int cmd)
|
|
{
|
|
- return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF);
|
|
+ return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
|
|
+ (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
|
|
}
|
|
|
|
/*
|
|
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
|
|
index 5df554a9f9c95..ae796e10f68b2 100644
|
|
--- a/fs/udf/inode.c
|
|
+++ b/fs/udf/inode.c
|
|
@@ -1357,6 +1357,12 @@ reread:
|
|
|
|
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
|
|
ICBTAG_FLAG_AD_MASK;
|
|
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
|
|
+ iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
|
|
+ iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
|
|
+ ret = -EIO;
|
|
+ goto out;
|
|
+ }
|
|
iinfo->i_unique = 0;
|
|
iinfo->i_lenEAttr = 0;
|
|
iinfo->i_lenExtents = 0;
|
|
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
|
|
index 7a85e609fc276..d8b8323e80f41 100644
|
|
--- a/fs/userfaultfd.c
|
|
+++ b/fs/userfaultfd.c
|
|
@@ -736,10 +736,18 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
|
|
struct userfaultfd_ctx *ctx;
|
|
|
|
ctx = vma->vm_userfaultfd_ctx.ctx;
|
|
- if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
|
|
+
|
|
+ if (!ctx)
|
|
+ return;
|
|
+
|
|
+ if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
|
|
vm_ctx->ctx = ctx;
|
|
userfaultfd_ctx_get(ctx);
|
|
WRITE_ONCE(ctx->mmap_changing, true);
|
|
+ } else {
|
|
+ /* Drop uffd context if remap feature not enabled */
|
|
+ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
|
|
+ vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
|
|
}
|
|
}
|
|
|
|
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
|
|
index 338b9d9984e04..d9048bcea49c5 100644
|
|
--- a/fs/xfs/xfs_aops.c
|
|
+++ b/fs/xfs/xfs_aops.c
|
|
@@ -449,6 +449,7 @@ xfs_map_blocks(
|
|
}
|
|
|
|
wpc->imap = imap;
|
|
+ xfs_trim_extent_eof(&wpc->imap, ip);
|
|
trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
|
|
return 0;
|
|
allocate_blocks:
|
|
@@ -459,6 +460,7 @@ allocate_blocks:
|
|
ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
|
|
imap.br_startoff + imap.br_blockcount <= cow_fsb);
|
|
wpc->imap = imap;
|
|
+ xfs_trim_extent_eof(&wpc->imap, ip);
|
|
trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
|
|
return 0;
|
|
}
|
|
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
|
|
index e098cbe27db54..12babe9915944 100644
|
|
--- a/include/keys/user-type.h
|
|
+++ b/include/keys/user-type.h
|
|
@@ -31,7 +31,7 @@
|
|
struct user_key_payload {
|
|
struct rcu_head rcu; /* RCU destructor */
|
|
unsigned short datalen; /* length of this data */
|
|
- char data[0]; /* actual data */
|
|
+ char data[0] __aligned(__alignof__(u64)); /* actual data */
|
|
};
|
|
|
|
extern struct key_type key_type_user;
|
|
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
|
|
index 9a6bc0951cfaf..c311571355981 100644
|
|
--- a/include/linux/backing-dev-defs.h
|
|
+++ b/include/linux/backing-dev-defs.h
|
|
@@ -258,6 +258,14 @@ static inline void wb_get(struct bdi_writeback *wb)
|
|
*/
|
|
static inline void wb_put(struct bdi_writeback *wb)
|
|
{
|
|
+ if (WARN_ON_ONCE(!wb->bdi)) {
|
|
+ /*
|
|
+ * A driver bug might cause a file to be removed before bdi was
|
|
+ * initialized.
|
|
+ */
|
|
+ return;
|
|
+ }
|
|
+
|
|
if (wb != &wb->bdi->wb)
|
|
percpu_ref_put(&wb->refcnt);
|
|
}
|
|
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
|
|
index 7cca5f859a907..f3c43519baa74 100644
|
|
--- a/include/linux/bcma/bcma_soc.h
|
|
+++ b/include/linux/bcma/bcma_soc.h
|
|
@@ -6,6 +6,7 @@
|
|
|
|
struct bcma_soc {
|
|
struct bcma_bus bus;
|
|
+ struct device *dev;
|
|
};
|
|
|
|
int __init bcma_host_soc_register(struct bcma_soc *soc);
|
|
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
|
|
index d93e89761a8b4..a6349a29748cb 100644
|
|
--- a/include/linux/bpf_verifier.h
|
|
+++ b/include/linux/bpf_verifier.h
|
|
@@ -147,6 +147,7 @@ struct bpf_verifier_state {
|
|
/* call stack tracking */
|
|
struct bpf_func_state *frame[MAX_CALL_FRAMES];
|
|
u32 curframe;
|
|
+ bool speculative;
|
|
};
|
|
|
|
#define bpf_get_spilled_reg(slot, frame) \
|
|
@@ -166,15 +167,25 @@ struct bpf_verifier_state_list {
|
|
struct bpf_verifier_state_list *next;
|
|
};
|
|
|
|
+/* Possible states for alu_state member. */
|
|
+#define BPF_ALU_SANITIZE_SRC 1U
|
|
+#define BPF_ALU_SANITIZE_DST 2U
|
|
+#define BPF_ALU_NEG_VALUE (1U << 2)
|
|
+#define BPF_ALU_NON_POINTER (1U << 3)
|
|
+#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
|
|
+ BPF_ALU_SANITIZE_DST)
|
|
+
|
|
struct bpf_insn_aux_data {
|
|
union {
|
|
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
|
|
unsigned long map_state; /* pointer/poison value for maps */
|
|
s32 call_imm; /* saved imm field of call insn */
|
|
+ u32 alu_limit; /* limit for add/sub register with pointer */
|
|
};
|
|
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
|
|
int sanitize_stack_off; /* stack slot to be cleared */
|
|
bool seen; /* this insn was processed by the verifier */
|
|
+ u8 alu_state; /* used in combination with alu_limit */
|
|
};
|
|
|
|
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
|
|
@@ -210,6 +221,8 @@ struct bpf_subprog_info {
|
|
* one verifier_env per bpf_check() call
|
|
*/
|
|
struct bpf_verifier_env {
|
|
+ u32 insn_idx;
|
|
+ u32 prev_insn_idx;
|
|
struct bpf_prog *prog; /* eBPF program being verified */
|
|
const struct bpf_verifier_ops *ops;
|
|
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
|
|
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
|
|
index 3e7dafb3ea809..7ddaeb5182e33 100644
|
|
--- a/include/linux/compiler-clang.h
|
|
+++ b/include/linux/compiler-clang.h
|
|
@@ -3,9 +3,8 @@
|
|
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
|
|
#endif
|
|
|
|
-/* Some compiler specific definitions are overwritten here
|
|
- * for Clang compiler
|
|
- */
|
|
+/* Compiler specific definitions for Clang compiler */
|
|
+
|
|
#define uninitialized_var(x) x = *(&(x))
|
|
|
|
/* same as gcc, this was present in clang-2.6 so we can assume it works
|
|
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
|
|
index 2010493e10408..c61c4bb2bd15f 100644
|
|
--- a/include/linux/compiler-gcc.h
|
|
+++ b/include/linux/compiler-gcc.h
|
|
@@ -58,17 +58,13 @@
|
|
(typeof(ptr)) (__ptr + (off)); \
|
|
})
|
|
|
|
-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
|
|
-#define OPTIMIZER_HIDE_VAR(var) \
|
|
- __asm__ ("" : "=r" (var) : "0" (var))
|
|
-
|
|
/*
|
|
* A trick to suppress uninitialized variable warning without generating any
|
|
* code
|
|
*/
|
|
#define uninitialized_var(x) x = x
|
|
|
|
-#ifdef RETPOLINE
|
|
+#ifdef CONFIG_RETPOLINE
|
|
#define __noretpoline __attribute__((__indirect_branch__("keep")))
|
|
#endif
|
|
|
|
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
|
|
index 517bd14e12224..b17f3cd18334d 100644
|
|
--- a/include/linux/compiler-intel.h
|
|
+++ b/include/linux/compiler-intel.h
|
|
@@ -5,9 +5,7 @@
|
|
|
|
#ifdef __ECC
|
|
|
|
-/* Some compiler specific definitions are overwritten here
|
|
- * for Intel ECC compiler
|
|
- */
|
|
+/* Compiler specific definitions for Intel ECC compiler */
|
|
|
|
#include <asm/intrinsics.h>
|
|
|
|
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
|
|
index fc5004a4b07d7..445348facea97 100644
|
|
--- a/include/linux/compiler.h
|
|
+++ b/include/linux/compiler.h
|
|
@@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
|
|
#endif
|
|
|
|
#ifndef OPTIMIZER_HIDE_VAR
|
|
-#define OPTIMIZER_HIDE_VAR(var) barrier()
|
|
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
|
|
+#define OPTIMIZER_HIDE_VAR(var) \
|
|
+ __asm__ ("" : "=r" (var) : "0" (var))
|
|
#endif
|
|
|
|
/* Not-quite-unique ID. */
|
|
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
|
|
index 218df7f4d3e1d..5041357d0297a 100644
|
|
--- a/include/linux/cpu.h
|
|
+++ b/include/linux/cpu.h
|
|
@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
|
|
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
|
|
extern enum cpuhp_smt_control cpu_smt_control;
|
|
extern void cpu_smt_disable(bool force);
|
|
-extern void cpu_smt_check_topology_early(void);
|
|
extern void cpu_smt_check_topology(void);
|
|
#else
|
|
# define cpu_smt_control (CPU_SMT_ENABLED)
|
|
static inline void cpu_smt_disable(bool force) { }
|
|
-static inline void cpu_smt_check_topology_early(void) { }
|
|
static inline void cpu_smt_check_topology(void) { }
|
|
#endif
|
|
|
|
diff --git a/include/linux/efi.h b/include/linux/efi.h
|
|
index 100ce4a4aff6c..845174e113ce9 100644
|
|
--- a/include/linux/efi.h
|
|
+++ b/include/linux/efi.h
|
|
@@ -1167,8 +1167,6 @@ static inline bool efi_enabled(int feature)
|
|
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
|
|
|
|
extern bool efi_is_table_address(unsigned long phys_addr);
|
|
-
|
|
-extern int efi_apply_persistent_mem_reservations(void);
|
|
#else
|
|
static inline bool efi_enabled(int feature)
|
|
{
|
|
@@ -1187,11 +1185,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
|
|
{
|
|
return false;
|
|
}
|
|
-
|
|
-static inline int efi_apply_persistent_mem_reservations(void)
|
|
-{
|
|
- return 0;
|
|
-}
|
|
#endif
|
|
|
|
extern int efi_status_to_err(efi_status_t status);
|
|
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
|
|
index 41615f38bcff3..f07c55ea0c22f 100644
|
|
--- a/include/linux/fdtable.h
|
|
+++ b/include/linux/fdtable.h
|
|
@@ -121,6 +121,7 @@ extern void __fd_install(struct files_struct *files,
|
|
unsigned int fd, struct file *file);
|
|
extern int __close_fd(struct files_struct *files,
|
|
unsigned int fd);
|
|
+extern int __close_fd_get_file(unsigned int fd, struct file **res);
|
|
|
|
extern struct kmem_cache *files_cachep;
|
|
|
|
diff --git a/include/linux/filter.h b/include/linux/filter.h
|
|
index a8b9d90a80422..b776626aeb849 100644
|
|
--- a/include/linux/filter.h
|
|
+++ b/include/linux/filter.h
|
|
@@ -53,14 +53,10 @@ struct sock_reuseport;
|
|
#define BPF_REG_D BPF_REG_8 /* data, callee-saved */
|
|
#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
|
|
|
|
-/* Kernel hidden auxiliary/helper register for hardening step.
|
|
- * Only used by eBPF JITs. It's nothing more than a temporary
|
|
- * register that JITs use internally, only that here it's part
|
|
- * of eBPF instructions that have been rewritten for blinding
|
|
- * constants. See JIT pre-step in bpf_jit_blind_constants().
|
|
- */
|
|
+/* Kernel hidden auxiliary/helper register. */
|
|
#define BPF_REG_AX MAX_BPF_REG
|
|
-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
|
|
+#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
|
|
+#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
|
|
|
|
/* unused opcode to mark special call to bpf_tail_call() helper */
|
|
#define BPF_TAIL_CALL 0xf0
|
|
@@ -675,24 +671,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size)
|
|
return size;
|
|
}
|
|
|
|
-static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
|
|
- u32 size_default)
|
|
-{
|
|
- size_default = bpf_ctx_off_adjust_machine(size_default);
|
|
- size_access = bpf_ctx_off_adjust_machine(size_access);
|
|
-
|
|
-#ifdef __LITTLE_ENDIAN
|
|
- return (off & (size_default - 1)) == 0;
|
|
-#else
|
|
- return (off & (size_default - 1)) + size_access == size_default;
|
|
-#endif
|
|
-}
|
|
-
|
|
static inline bool
|
|
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
|
|
{
|
|
- return bpf_ctx_narrow_align_ok(off, size, size_default) &&
|
|
- size <= size_default && (size & (size - 1)) == 0;
|
|
+ return size <= size_default && (size & (size - 1)) == 0;
|
|
}
|
|
|
|
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
|
|
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
|
|
index 70fc838e67738..0c5ee17b4d88c 100644
|
|
--- a/include/linux/genhd.h
|
|
+++ b/include/linux/genhd.h
|
|
@@ -129,7 +129,7 @@ struct hd_struct {
|
|
struct disk_stats dkstats;
|
|
#endif
|
|
struct percpu_ref ref;
|
|
- struct rcu_head rcu_head;
|
|
+ struct rcu_work rcu_work;
|
|
};
|
|
|
|
#define GENHD_FL_REMOVABLE 1
|
|
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
|
|
index 5972e4969197a..eeae59d3ceb74 100644
|
|
--- a/include/linux/genl_magic_struct.h
|
|
+++ b/include/linux/genl_magic_struct.h
|
|
@@ -191,6 +191,7 @@ static inline void ct_assert_unique_operations(void)
|
|
{
|
|
switch (0) {
|
|
#include GENL_MAGIC_INCLUDE_FILE
|
|
+ case 0:
|
|
;
|
|
}
|
|
}
|
|
@@ -209,6 +210,7 @@ static inline void ct_assert_unique_top_level_attributes(void)
|
|
{
|
|
switch (0) {
|
|
#include GENL_MAGIC_INCLUDE_FILE
|
|
+ case 0:
|
|
;
|
|
}
|
|
}
|
|
@@ -218,7 +220,8 @@ static inline void ct_assert_unique_top_level_attributes(void)
|
|
static inline void ct_assert_unique_ ## s_name ## _attributes(void) \
|
|
{ \
|
|
switch (0) { \
|
|
- s_fields \
|
|
+ s_fields \
|
|
+ case 0: \
|
|
; \
|
|
} \
|
|
}
|
|
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
|
|
index f2f887795d43e..ed070512b40ee 100644
|
|
--- a/include/linux/gpio/consumer.h
|
|
+++ b/include/linux/gpio/consumer.h
|
|
@@ -162,7 +162,7 @@ int gpiod_is_active_low(const struct gpio_desc *desc);
|
|
int gpiod_cansleep(const struct gpio_desc *desc);
|
|
|
|
int gpiod_to_irq(const struct gpio_desc *desc);
|
|
-void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
|
|
+int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
|
|
|
|
/* Convert between the old gpio_ and new gpiod_ interfaces */
|
|
struct gpio_desc *gpio_to_desc(unsigned gpio);
|
|
@@ -495,10 +495,12 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
|
|
return -EINVAL;
|
|
}
|
|
|
|
-static inline void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
|
|
+static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
|
|
+ const char *name)
|
|
{
|
|
/* GPIO can never have been requested */
|
|
WARN_ON(1);
|
|
+ return -EINVAL;
|
|
}
|
|
|
|
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
|
|
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
|
|
index 8663f216c563e..2d6100edf2049 100644
|
|
--- a/include/linux/hid-debug.h
|
|
+++ b/include/linux/hid-debug.h
|
|
@@ -24,7 +24,10 @@
|
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
|
+#include <linux/kfifo.h>
|
|
+
|
|
#define HID_DEBUG_BUFSIZE 512
|
|
+#define HID_DEBUG_FIFOSIZE 512
|
|
|
|
void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
|
|
void hid_dump_report(struct hid_device *, int , u8 *, int);
|
|
@@ -37,11 +40,8 @@ void hid_debug_init(void);
|
|
void hid_debug_exit(void);
|
|
void hid_debug_event(struct hid_device *, char *);
|
|
|
|
-
|
|
struct hid_debug_list {
|
|
- char *hid_debug_buf;
|
|
- int head;
|
|
- int tail;
|
|
+ DECLARE_KFIFO_PTR(hid_debug_fifo, char);
|
|
struct fasync_struct *fasync;
|
|
struct hid_device *hdev;
|
|
struct list_head node;
|
|
@@ -64,4 +64,3 @@ struct hid_debug_list {
|
|
#endif
|
|
|
|
#endif
|
|
-
|
|
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
|
|
index c6fb869a81c0e..ed89fbc525d2e 100644
|
|
--- a/include/linux/hmm.h
|
|
+++ b/include/linux/hmm.h
|
|
@@ -512,8 +512,7 @@ struct hmm_devmem {
|
|
* enough and allocate struct page for it.
|
|
*
|
|
* The device driver can wrap the hmm_devmem struct inside a private device
|
|
- * driver struct. The device driver must call hmm_devmem_remove() before the
|
|
- * device goes away and before freeing the hmm_devmem struct memory.
|
|
+ * driver struct.
|
|
*/
|
|
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
|
|
struct device *device,
|
|
@@ -521,7 +520,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
|
|
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
|
|
struct device *device,
|
|
struct resource *res);
|
|
-void hmm_devmem_remove(struct hmm_devmem *devmem);
|
|
|
|
/*
|
|
* hmm_devmem_page_set_drvdata - set per-page driver data field
|
|
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
|
|
index 14131b6fae68d..dcb6977afce93 100644
|
|
--- a/include/linux/hyperv.h
|
|
+++ b/include/linux/hyperv.h
|
|
@@ -830,15 +830,6 @@ struct vmbus_channel {
|
|
* All Sub-channels of a primary channel are linked here.
|
|
*/
|
|
struct list_head sc_list;
|
|
- /*
|
|
- * Current number of sub-channels.
|
|
- */
|
|
- int num_sc;
|
|
- /*
|
|
- * Number of a sub-channel (position within sc_list) which is supposed
|
|
- * to be used as the next outgoing channel.
|
|
- */
|
|
- int next_oc;
|
|
/*
|
|
* The primary channel this sub-channel belongs to.
|
|
* This will be NULL for the primary channel.
|
|
@@ -972,14 +963,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
|
|
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
|
|
void (*chn_rescind_cb)(struct vmbus_channel *));
|
|
|
|
-/*
|
|
- * Retrieve the (sub) channel on which to send an outgoing request.
|
|
- * When a primary channel has multiple sub-channels, we choose a
|
|
- * channel whose VCPU binding is closest to the VCPU on which
|
|
- * this call is being made.
|
|
- */
|
|
-struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
|
|
-
|
|
/*
|
|
* Check if sub-channels have already been offerred. This API will be useful
|
|
* when the driver is unloaded after establishing sub-channels. In this case,
|
|
@@ -1176,8 +1159,9 @@ struct hv_ring_buffer_debug_info {
|
|
u32 bytes_avail_towrite;
|
|
};
|
|
|
|
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
|
- struct hv_ring_buffer_debug_info *debug_info);
|
|
+
|
|
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
|
+ struct hv_ring_buffer_debug_info *debug_info);
|
|
|
|
/* Vmbus interface */
|
|
#define vmbus_driver_register(driver) \
|
|
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
|
|
index c926698040e0d..a03d5e264e5e7 100644
|
|
--- a/include/linux/kvm_host.h
|
|
+++ b/include/linux/kvm_host.h
|
|
@@ -694,7 +694,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
|
|
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
|
|
void *data, unsigned long len);
|
|
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
|
|
- void *data, int offset, unsigned long len);
|
|
+ void *data, unsigned int offset,
|
|
+ unsigned long len);
|
|
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
|
|
gpa_t gpa, unsigned long len);
|
|
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
|
|
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
|
|
index aee299a6aa76d..ecff64ff365d4 100644
|
|
--- a/include/linux/memblock.h
|
|
+++ b/include/linux/memblock.h
|
|
@@ -29,9 +29,6 @@ extern unsigned long max_pfn;
|
|
*/
|
|
extern unsigned long long max_possible_pfn;
|
|
|
|
-#define INIT_MEMBLOCK_REGIONS 128
|
|
-#define INIT_PHYSMEM_REGIONS 4
|
|
-
|
|
/**
|
|
* enum memblock_flags - definition of memory region attributes
|
|
* @MEMBLOCK_NONE: no special request
|
|
@@ -320,6 +317,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
|
|
/* Flags for memblock allocation APIs */
|
|
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
|
|
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
|
|
+#define MEMBLOCK_ALLOC_KASAN 1
|
|
|
|
/* We are using top down, so it is safe to use 0 here */
|
|
#define MEMBLOCK_LOW_LIMIT 0
|
|
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
|
|
index 0ac69ddf5fc46..55db66b3716f2 100644
|
|
--- a/include/linux/memremap.h
|
|
+++ b/include/linux/memremap.h
|
|
@@ -111,6 +111,7 @@ typedef void (*dev_page_free_t)(struct page *page, void *data);
|
|
* @altmap: pre-allocated/reserved memory for vmemmap allocations
|
|
* @res: physical address range covered by @ref
|
|
* @ref: reference count that pins the devm_memremap_pages() mapping
|
|
+ * @kill: callback to transition @ref to the dead state
|
|
* @dev: host device of the mapping for debug
|
|
* @data: private data pointer for page_free()
|
|
* @type: memory type: see MEMORY_* in memory_hotplug.h
|
|
@@ -122,6 +123,7 @@ struct dev_pagemap {
|
|
bool altmap_valid;
|
|
struct resource res;
|
|
struct percpu_ref *ref;
|
|
+ void (*kill)(struct percpu_ref *ref);
|
|
struct device *dev;
|
|
void *data;
|
|
enum memory_type type;
|
|
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
|
|
index aa5963b5d38e1..7d4ed995b4ce5 100644
|
|
--- a/include/linux/mlx5/driver.h
|
|
+++ b/include/linux/mlx5/driver.h
|
|
@@ -1309,7 +1309,7 @@ enum {
|
|
static inline const struct cpumask *
|
|
mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
|
|
{
|
|
- return dev->priv.irq_info[vector].mask;
|
|
+ return dev->priv.irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
|
|
}
|
|
|
|
#endif /* MLX5_DRIVER_H */
|
|
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
|
|
index de7377815b6b4..8ef330027b134 100644
|
|
--- a/include/linux/mmc/card.h
|
|
+++ b/include/linux/mmc/card.h
|
|
@@ -308,6 +308,7 @@ struct mmc_card {
|
|
unsigned int nr_parts;
|
|
|
|
unsigned int bouncesz; /* Bounce buffer size */
|
|
+ struct workqueue_struct *complete_wq; /* Private workqueue */
|
|
};
|
|
|
|
static inline bool mmc_large_sector(struct mmc_card *card)
|
|
diff --git a/include/linux/module.h b/include/linux/module.h
|
|
index fce6b4335e367..0c575f51fe573 100644
|
|
--- a/include/linux/module.h
|
|
+++ b/include/linux/module.h
|
|
@@ -817,7 +817,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
|
|
static inline void module_bug_cleanup(struct module *mod) {}
|
|
#endif /* CONFIG_GENERIC_BUG */
|
|
|
|
-#ifdef RETPOLINE
|
|
+#ifdef CONFIG_RETPOLINE
|
|
extern bool retpoline_module_ok(bool has_retpoline);
|
|
#else
|
|
static inline bool retpoline_module_ok(bool has_retpoline)
|
|
diff --git a/include/linux/msi.h b/include/linux/msi.h
|
|
index 0e9c50052ff3c..eb213b87617cc 100644
|
|
--- a/include/linux/msi.h
|
|
+++ b/include/linux/msi.h
|
|
@@ -116,6 +116,8 @@ struct msi_desc {
|
|
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
|
|
#define for_each_msi_entry(desc, dev) \
|
|
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
|
|
+#define for_each_msi_entry_safe(desc, tmp, dev) \
|
|
+ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
|
|
|
|
#ifdef CONFIG_PCI_MSI
|
|
#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
|
|
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
|
|
index 2b2a6dce16301..4c76fe2c84880 100644
|
|
--- a/include/linux/netdev_features.h
|
|
+++ b/include/linux/netdev_features.h
|
|
@@ -11,6 +11,8 @@
|
|
#define _LINUX_NETDEV_FEATURES_H
|
|
|
|
#include <linux/types.h>
|
|
+#include <linux/bitops.h>
|
|
+#include <asm/byteorder.h>
|
|
|
|
typedef u64 netdev_features_t;
|
|
|
|
@@ -154,8 +156,26 @@ enum {
|
|
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
|
|
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
|
|
|
|
-#define for_each_netdev_feature(mask_addr, bit) \
|
|
- for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
|
|
+/* Finds the next feature with the highest number of the range of start till 0.
|
|
+ */
|
|
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
|
|
+{
|
|
+ /* like BITMAP_LAST_WORD_MASK() for u64
|
|
+ * this sets the most significant 64 - start to 0.
|
|
+ */
|
|
+ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
|
|
+
|
|
+ return fls64(feature) - 1;
|
|
+}
|
|
+
|
|
+/* This goes for the MSB to the LSB through the set feature bits,
|
|
+ * mask_addr should be a u64 and bit an int
|
|
+ */
|
|
+#define for_each_netdev_feature(mask_addr, bit) \
|
|
+ for ((bit) = find_next_netdev_feature((mask_addr), \
|
|
+ NETDEV_FEATURE_COUNT); \
|
|
+ (bit) >= 0; \
|
|
+ (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
|
|
|
|
/* Features valid for ethtool to change */
|
|
/* = all defined minus driver/device-class-related */
|
|
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
|
|
index 857f8abf7b91b..d5e38eddfb492 100644
|
|
--- a/include/linux/netdevice.h
|
|
+++ b/include/linux/netdevice.h
|
|
@@ -1487,6 +1487,7 @@ struct net_device_ops {
|
|
* @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
|
|
* @IFF_FAILOVER: device is a failover master device
|
|
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
|
|
+ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
|
|
*/
|
|
enum netdev_priv_flags {
|
|
IFF_802_1Q_VLAN = 1<<0,
|
|
@@ -1518,6 +1519,7 @@ enum netdev_priv_flags {
|
|
IFF_NO_RX_HANDLER = 1<<26,
|
|
IFF_FAILOVER = 1<<27,
|
|
IFF_FAILOVER_SLAVE = 1<<28,
|
|
+ IFF_L3MDEV_RX_HANDLER = 1<<29,
|
|
};
|
|
|
|
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
|
|
@@ -1548,6 +1550,7 @@ enum netdev_priv_flags {
|
|
#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
|
|
#define IFF_FAILOVER IFF_FAILOVER
|
|
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
|
|
+#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
|
|
|
|
/**
|
|
* struct net_device - The DEVICE structure.
|
|
@@ -4523,6 +4526,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
|
|
return dev->priv_flags & IFF_SUPP_NOFCS;
|
|
}
|
|
|
|
+static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
|
|
+{
|
|
+ return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
|
|
+}
|
|
+
|
|
static inline bool netif_is_l3_master(const struct net_device *dev)
|
|
{
|
|
return dev->priv_flags & IFF_L3MDEV_MASTER;
|
|
diff --git a/include/linux/of.h b/include/linux/of.h
|
|
index a5aee3c438ade..664cd5573ae27 100644
|
|
--- a/include/linux/of.h
|
|
+++ b/include/linux/of.h
|
|
@@ -138,11 +138,16 @@ extern struct device_node *of_aliases;
|
|
extern struct device_node *of_stdout;
|
|
extern raw_spinlock_t devtree_lock;
|
|
|
|
-/* flag descriptions (need to be visible even when !CONFIG_OF) */
|
|
-#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
|
|
-#define OF_DETACHED 2 /* node has been detached from the device tree */
|
|
-#define OF_POPULATED 3 /* device already created for the node */
|
|
-#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
|
|
+/*
|
|
+ * struct device_node flag descriptions
|
|
+ * (need to be visible even when !CONFIG_OF)
|
|
+ */
|
|
+#define OF_DYNAMIC 1 /* (and properties) allocated via kmalloc */
|
|
+#define OF_DETACHED 2 /* detached from the device tree */
|
|
+#define OF_POPULATED 3 /* device already created */
|
|
+#define OF_POPULATED_BUS 4 /* platform bus created for children */
|
|
+#define OF_OVERLAY 5 /* allocated for an overlay */
|
|
+#define OF_OVERLAY_FREE_CSET 6 /* in overlay cset being freed */
|
|
|
|
#define OF_BAD_ADDR ((u64)-1)
|
|
|
|
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
|
|
index 53c500f0ca795..c2876e7405148 100644
|
|
--- a/include/linux/perf_event.h
|
|
+++ b/include/linux/perf_event.h
|
|
@@ -447,6 +447,11 @@ struct pmu {
|
|
* Filter events for PMU-specific reasons.
|
|
*/
|
|
int (*filter_match) (struct perf_event *event); /* optional */
|
|
+
|
|
+ /*
|
|
+ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
|
|
+ */
|
|
+ int (*check_period) (struct perf_event *event, u64 value); /* optional */
|
|
};
|
|
|
|
enum perf_addr_filter_action_t {
|
|
diff --git a/include/linux/phy.h b/include/linux/phy.h
|
|
index 3ea87f774a76c..f5d4235e38443 100644
|
|
--- a/include/linux/phy.h
|
|
+++ b/include/linux/phy.h
|
|
@@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
|
|
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
|
|
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
|
|
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
|
|
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
|
|
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
|
|
|
|
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
|
|
@@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
|
|
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
|
|
#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
|
|
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
|
|
+#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
|
|
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
|
|
|
|
/*
|
|
@@ -500,8 +502,8 @@ struct phy_device {
|
|
* only works for PHYs with IDs which match this field
|
|
* name: The friendly name of this PHY type
|
|
* phy_id_mask: Defines the important bits of the phy_id
|
|
- * features: A list of features (speed, duplex, etc) supported
|
|
- * by this PHY
|
|
+ * features: A mandatory list of features (speed, duplex, etc)
|
|
+ * supported by this PHY
|
|
* flags: A bitfield defining certain other features this PHY
|
|
* supports (like interrupts)
|
|
*
|
|
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
|
|
index 5d399eeef1727..f4f8840eab043 100644
|
|
--- a/include/linux/pm_opp.h
|
|
+++ b/include/linux/pm_opp.h
|
|
@@ -108,6 +108,7 @@ void dev_pm_opp_put(struct dev_pm_opp *opp);
|
|
int dev_pm_opp_add(struct device *dev, unsigned long freq,
|
|
unsigned long u_volt);
|
|
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
|
|
+void dev_pm_opp_remove_all_dynamic(struct device *dev);
|
|
|
|
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
|
|
|
|
@@ -214,6 +215,10 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
|
|
{
|
|
}
|
|
|
|
+static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
|
|
+{
|
|
+}
|
|
+
|
|
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
|
|
{
|
|
return 0;
|
|
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
|
|
index 6894976b54e37..186cd8e970c70 100644
|
|
--- a/include/linux/ptr_ring.h
|
|
+++ b/include/linux/ptr_ring.h
|
|
@@ -573,6 +573,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
|
|
else if (destroy)
|
|
destroy(ptr);
|
|
|
|
+ if (producer >= size)
|
|
+ producer = 0;
|
|
__ptr_ring_set_size(r, size);
|
|
r->producer = producer;
|
|
r->consumer_head = 0;
|
|
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
|
|
index 59ddf9af909e4..2dd0a9ed5b361 100644
|
|
--- a/include/linux/qed/qed_chain.h
|
|
+++ b/include/linux/qed/qed_chain.h
|
|
@@ -663,6 +663,37 @@ out:
|
|
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
|
|
u32 prod_idx, void *p_prod_elem)
|
|
{
|
|
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
|
|
+ u32 cur_prod, page_mask, page_cnt, page_diff;
|
|
+
|
|
+ cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
|
|
+ p_chain->u.chain32.prod_idx;
|
|
+
|
|
+ /* Assume that number of elements in a page is power of 2 */
|
|
+ page_mask = ~p_chain->elem_per_page_mask;
|
|
+
|
|
+ /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
|
|
+ * reaches the first element of next page before the page index
|
|
+ * is incremented. See qed_chain_produce().
|
|
+ * Index wrap around is not a problem because the difference
|
|
+ * between current and given producer indices is always
|
|
+ * positive and lower than the chain's capacity.
|
|
+ */
|
|
+ page_diff = (((cur_prod - 1) & page_mask) -
|
|
+ ((prod_idx - 1) & page_mask)) /
|
|
+ p_chain->elem_per_page;
|
|
+
|
|
+ page_cnt = qed_chain_get_page_cnt(p_chain);
|
|
+ if (is_chain_u16(p_chain))
|
|
+ p_chain->pbl.c.u16.prod_page_idx =
|
|
+ (p_chain->pbl.c.u16.prod_page_idx -
|
|
+ page_diff + page_cnt) % page_cnt;
|
|
+ else
|
|
+ p_chain->pbl.c.u32.prod_page_idx =
|
|
+ (p_chain->pbl.c.u32.prod_page_idx -
|
|
+ page_diff + page_cnt) % page_cnt;
|
|
+ }
|
|
+
|
|
if (is_chain_u16(p_chain))
|
|
p_chain->u.chain16.prod_idx = (u16) prod_idx;
|
|
else
|
|
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
|
|
index ec912d01126f4..ecdc6542070f1 100644
|
|
--- a/include/linux/sched/coredump.h
|
|
+++ b/include/linux/sched/coredump.h
|
|
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
|
|
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
|
|
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
|
|
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
|
|
+#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
|
|
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
|
|
|
|
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
|
|
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
|
|
index 0d1b2c3f127b3..8e63c166765ef 100644
|
|
--- a/include/linux/skbuff.h
|
|
+++ b/include/linux/skbuff.h
|
|
@@ -2418,7 +2418,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
|
|
|
|
if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
|
|
skb_set_transport_header(skb, keys.control.thoff);
|
|
- else
|
|
+ else if (offset_hint >= 0)
|
|
skb_set_transport_header(skb, offset_hint);
|
|
}
|
|
|
|
@@ -3204,6 +3204,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
|
|
*
|
|
* This is exactly the same as pskb_trim except that it ensures the
|
|
* checksum of received packets are still valid after the operation.
|
|
+ * It can change skb pointers.
|
|
*/
|
|
|
|
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
|
|
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
|
|
index 2a11e9d91dfa8..36bd858630812 100644
|
|
--- a/include/linux/skmsg.h
|
|
+++ b/include/linux/skmsg.h
|
|
@@ -416,6 +416,14 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
|
|
sk_psock_drop(sk, psock);
|
|
}
|
|
|
|
+static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
|
|
+{
|
|
+ if (psock->parser.enabled)
|
|
+ psock->parser.saved_data_ready(sk);
|
|
+ else
|
|
+ sk->sk_data_ready(sk);
|
|
+}
|
|
+
|
|
static inline void psock_set_prog(struct bpf_prog **pprog,
|
|
struct bpf_prog *prog)
|
|
{
|
|
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
|
|
index 73e130a840ce4..fdb6b317d9747 100644
|
|
--- a/include/linux/sunrpc/svc.h
|
|
+++ b/include/linux/sunrpc/svc.h
|
|
@@ -295,9 +295,12 @@ struct svc_rqst {
|
|
struct svc_cacherep * rq_cacherep; /* cache info */
|
|
struct task_struct *rq_task; /* service thread */
|
|
spinlock_t rq_lock; /* per-request lock */
|
|
+ struct net *rq_bc_net; /* pointer to backchannel's
|
|
+ * net namespace
|
|
+ */
|
|
};
|
|
|
|
-#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
|
|
+#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
|
|
|
|
/*
|
|
* Rigorous type checking on sockaddr type conversions
|
|
diff --git a/include/linux/swap.h b/include/linux/swap.h
|
|
index d8a07a4f171db..3d3630b3f63d7 100644
|
|
--- a/include/linux/swap.h
|
|
+++ b/include/linux/swap.h
|
|
@@ -233,7 +233,6 @@ struct swap_info_struct {
|
|
unsigned long flags; /* SWP_USED etc: see above */
|
|
signed short prio; /* swap priority of this type */
|
|
struct plist_node list; /* entry in swap_active_head */
|
|
- struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
|
|
signed char type; /* strange name for an index */
|
|
unsigned int max; /* extent of the swap_map */
|
|
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
|
|
@@ -274,6 +273,16 @@ struct swap_info_struct {
|
|
*/
|
|
struct work_struct discard_work; /* discard worker */
|
|
struct swap_cluster_list discard_clusters; /* discard clusters list */
|
|
+ struct plist_node avail_lists[0]; /*
|
|
+ * entries in swap_avail_heads, one
|
|
+ * entry per node.
|
|
+ * Must be last as the number of the
|
|
+ * array is nr_node_ids, which is not
|
|
+ * a fixed value so have to allocate
|
|
+ * dynamically.
|
|
+ * And it has to be an array so that
|
|
+ * plist_for_each_* can work.
|
|
+ */
|
|
};
|
|
|
|
#ifdef CONFIG_64BIT
|
|
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
|
|
index 7e7fbfb84e8e3..50c74a77db55c 100644
|
|
--- a/include/linux/usb/tcpm.h
|
|
+++ b/include/linux/usb/tcpm.h
|
|
@@ -89,6 +89,7 @@ struct tcpc_config {
|
|
enum typec_port_data data;
|
|
enum typec_role default_role;
|
|
bool try_role_hw; /* try.{src,snk} implemented in hardware */
|
|
+ bool self_powered; /* port belongs to a self powered device */
|
|
|
|
const struct typec_altmode_desc *alt_modes;
|
|
};
|
|
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
|
|
index cb462f9ab7dd5..e0348cb0a1dd7 100644
|
|
--- a/include/linux/virtio_net.h
|
|
+++ b/include/linux/virtio_net.h
|
|
@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
|
|
|
|
if (!skb_partial_csum_set(skb, start, off))
|
|
return -EINVAL;
|
|
+ } else {
|
|
+ /* gso packets without NEEDS_CSUM do not set transport_offset.
|
|
+ * probe and drop if does not match one of the above types.
|
|
+ */
|
|
+ if (gso_type && skb->network_header) {
|
|
+ if (!skb->protocol)
|
|
+ virtio_net_hdr_set_proto(skb, hdr);
|
|
+retry:
|
|
+ skb_probe_transport_header(skb, -1);
|
|
+ if (!skb_transport_header_was_set(skb)) {
|
|
+ /* UFO does not specify ipv4 or 6: try both */
|
|
+ if (gso_type & SKB_GSO_UDP &&
|
|
+ skb->protocol == htons(ETH_P_IP)) {
|
|
+ skb->protocol = htons(ETH_P_IPV6);
|
|
+ goto retry;
|
|
+ }
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
|
|
diff --git a/include/media/cec.h b/include/media/cec.h
|
|
index 3fe5e5d2bb7e5..707411ef8ba28 100644
|
|
--- a/include/media/cec.h
|
|
+++ b/include/media/cec.h
|
|
@@ -155,6 +155,7 @@ struct cec_adapter {
|
|
unsigned int transmit_queue_sz;
|
|
struct list_head wait_queue;
|
|
struct cec_data *transmitting;
|
|
+ bool transmit_in_progress;
|
|
|
|
struct task_struct *kthread_config;
|
|
struct completion config_completion;
|
|
diff --git a/include/net/ax25.h b/include/net/ax25.h
|
|
index 3f9aea8087e3c..8b7eb46ad72d8 100644
|
|
--- a/include/net/ax25.h
|
|
+++ b/include/net/ax25.h
|
|
@@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
|
|
|
|
void __ax25_put_route(ax25_route *ax25_rt);
|
|
|
|
+extern rwlock_t ax25_route_lock;
|
|
+
|
|
+static inline void ax25_route_lock_use(void)
|
|
+{
|
|
+ read_lock(&ax25_route_lock);
|
|
+}
|
|
+
|
|
+static inline void ax25_route_lock_unuse(void)
|
|
+{
|
|
+ read_unlock(&ax25_route_lock);
|
|
+}
|
|
+
|
|
static inline void ax25_put_route(ax25_route *ax25_rt)
|
|
{
|
|
if (refcount_dec_and_test(&ax25_rt->refcount))
|
|
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
|
|
index 00b5e7825508a..74ff688568a0c 100644
|
|
--- a/include/net/inetpeer.h
|
|
+++ b/include/net/inetpeer.h
|
|
@@ -39,6 +39,7 @@ struct inet_peer {
|
|
|
|
u32 metrics[RTAX_MAX];
|
|
u32 rate_tokens; /* rate limiting for ICMP */
|
|
+ u32 n_redirects;
|
|
unsigned long rate_last;
|
|
/*
|
|
* Once inet_peer is queued for deletion (refcnt == 0), following field
|
|
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
|
|
index c5969762a8f44..9c8214d2116d3 100644
|
|
--- a/include/net/ip_fib.h
|
|
+++ b/include/net/ip_fib.h
|
|
@@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
|
|
struct netlink_ext_ack *extack);
|
|
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
|
|
struct netlink_callback *cb, struct fib_dump_filter *filter);
|
|
-int fib_table_flush(struct net *net, struct fib_table *table);
|
|
+int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
|
|
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
|
|
void fib_table_flush_external(struct fib_table *table);
|
|
void fib_free_table(struct fib_table *tb);
|
|
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
|
|
index 5ce926701bd02..5f67efbb77e80 100644
|
|
--- a/include/net/ip_tunnels.h
|
|
+++ b/include/net/ip_tunnels.h
|
|
@@ -307,6 +307,26 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
|
|
int ip_tunnel_encap_setup(struct ip_tunnel *t,
|
|
struct ip_tunnel_encap *ipencap);
|
|
|
|
+static inline bool pskb_inet_may_pull(struct sk_buff *skb)
|
|
+{
|
|
+ int nhlen;
|
|
+
|
|
+ switch (skb->protocol) {
|
|
+#if IS_ENABLED(CONFIG_IPV6)
|
|
+ case htons(ETH_P_IPV6):
|
|
+ nhlen = sizeof(struct ipv6hdr);
|
|
+ break;
|
|
+#endif
|
|
+ case htons(ETH_P_IP):
|
|
+ nhlen = sizeof(struct iphdr);
|
|
+ break;
|
|
+ default:
|
|
+ nhlen = 0;
|
|
+ }
|
|
+
|
|
+ return pskb_network_may_pull(skb, nhlen);
|
|
+}
|
|
+
|
|
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
|
|
{
|
|
const struct ip_tunnel_encap_ops *ops;
|
|
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
|
|
index 3832099289c5a..128487658ff7c 100644
|
|
--- a/include/net/l3mdev.h
|
|
+++ b/include/net/l3mdev.h
|
|
@@ -142,7 +142,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
|
|
|
|
if (netif_is_l3_slave(skb->dev))
|
|
master = netdev_master_upper_dev_get_rcu(skb->dev);
|
|
- else if (netif_is_l3_master(skb->dev))
|
|
+ else if (netif_is_l3_master(skb->dev) ||
|
|
+ netif_has_l3_rx_handler(skb->dev))
|
|
master = skb->dev;
|
|
|
|
if (master && master->l3mdev_ops->l3mdev_l3_rcv)
|
|
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
|
|
index 4b2b2baf8ab4b..f32fc82894732 100644
|
|
--- a/include/net/netfilter/nf_conntrack_count.h
|
|
+++ b/include/net/netfilter/nf_conntrack_count.h
|
|
@@ -5,17 +5,10 @@
|
|
|
|
struct nf_conncount_data;
|
|
|
|
-enum nf_conncount_list_add {
|
|
- NF_CONNCOUNT_ADDED, /* list add was ok */
|
|
- NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
|
|
- NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
|
|
-};
|
|
-
|
|
struct nf_conncount_list {
|
|
spinlock_t list_lock;
|
|
struct list_head head; /* connections with the same filtering key */
|
|
unsigned int count; /* length of list */
|
|
- bool dead;
|
|
};
|
|
|
|
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
|
|
@@ -29,18 +22,12 @@ unsigned int nf_conncount_count(struct net *net,
|
|
const struct nf_conntrack_tuple *tuple,
|
|
const struct nf_conntrack_zone *zone);
|
|
|
|
-void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
|
|
- const struct nf_conntrack_tuple *tuple,
|
|
- const struct nf_conntrack_zone *zone,
|
|
- bool *addit);
|
|
+int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
|
|
+ const struct nf_conntrack_tuple *tuple,
|
|
+ const struct nf_conntrack_zone *zone);
|
|
|
|
void nf_conncount_list_init(struct nf_conncount_list *list);
|
|
|
|
-enum nf_conncount_list_add
|
|
-nf_conncount_add(struct nf_conncount_list *list,
|
|
- const struct nf_conntrack_tuple *tuple,
|
|
- const struct nf_conntrack_zone *zone);
|
|
-
|
|
bool nf_conncount_gc_list(struct net *net,
|
|
struct nf_conncount_list *list);
|
|
|
|
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
|
|
index 77e2761d4f2f9..ff4eb9869e5ba 100644
|
|
--- a/include/net/netfilter/nf_flow_table.h
|
|
+++ b/include/net/netfilter/nf_flow_table.h
|
|
@@ -84,7 +84,6 @@ struct flow_offload {
|
|
struct nf_flow_route {
|
|
struct {
|
|
struct dst_entry *dst;
|
|
- int ifindex;
|
|
} tuple[FLOW_OFFLOAD_DIR_MAX];
|
|
};
|
|
|
|
diff --git a/include/net/sock.h b/include/net/sock.h
|
|
index 0e3a09380655e..13f11e905a005 100644
|
|
--- a/include/net/sock.h
|
|
+++ b/include/net/sock.h
|
|
@@ -298,6 +298,7 @@ struct sock_common {
|
|
* @sk_filter: socket filtering instructions
|
|
* @sk_timer: sock cleanup timer
|
|
* @sk_stamp: time stamp of last packet received
|
|
+ * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
|
|
* @sk_tsflags: SO_TIMESTAMPING socket options
|
|
* @sk_tskey: counter to disambiguate concurrent tstamp requests
|
|
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
|
|
@@ -474,6 +475,9 @@ struct sock {
|
|
const struct cred *sk_peer_cred;
|
|
long sk_rcvtimeo;
|
|
ktime_t sk_stamp;
|
|
+#if BITS_PER_LONG==32
|
|
+ seqlock_t sk_stamp_seq;
|
|
+#endif
|
|
u16 sk_tsflags;
|
|
u8 sk_shutdown;
|
|
u32 sk_tskey;
|
|
@@ -2287,6 +2291,34 @@ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
|
|
atomic_add(segs, &sk->sk_drops);
|
|
}
|
|
|
|
+static inline ktime_t sock_read_timestamp(struct sock *sk)
|
|
+{
|
|
+#if BITS_PER_LONG==32
|
|
+ unsigned int seq;
|
|
+ ktime_t kt;
|
|
+
|
|
+ do {
|
|
+ seq = read_seqbegin(&sk->sk_stamp_seq);
|
|
+ kt = sk->sk_stamp;
|
|
+ } while (read_seqretry(&sk->sk_stamp_seq, seq));
|
|
+
|
|
+ return kt;
|
|
+#else
|
|
+ return sk->sk_stamp;
|
|
+#endif
|
|
+}
|
|
+
|
|
+static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
|
|
+{
|
|
+#if BITS_PER_LONG==32
|
|
+ write_seqlock(&sk->sk_stamp_seq);
|
|
+ sk->sk_stamp = kt;
|
|
+ write_sequnlock(&sk->sk_stamp_seq);
|
|
+#else
|
|
+ sk->sk_stamp = kt;
|
|
+#endif
|
|
+}
|
|
+
|
|
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
|
|
struct sk_buff *skb);
|
|
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
|
|
@@ -2311,7 +2343,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
|
|
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
|
|
__sock_recv_timestamp(msg, sk, skb);
|
|
else
|
|
- sk->sk_stamp = kt;
|
|
+ sock_write_timestamp(sk, kt);
|
|
|
|
if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
|
|
__sock_recv_wifi_status(msg, sk, skb);
|
|
@@ -2332,9 +2364,9 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
|
|
if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
|
|
__sock_recv_ts_and_drops(msg, sk, skb);
|
|
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
|
|
- sk->sk_stamp = skb->tstamp;
|
|
+ sock_write_timestamp(sk, skb->tstamp);
|
|
else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
|
|
- sk->sk_stamp = 0;
|
|
+ sock_write_timestamp(sk, 0);
|
|
}
|
|
|
|
void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
|
|
diff --git a/include/net/tls.h b/include/net/tls.h
|
|
index 3cbcd12303fd6..9f167e77d8ca1 100644
|
|
--- a/include/net/tls.h
|
|
+++ b/include/net/tls.h
|
|
@@ -120,6 +120,8 @@ struct tls_rec {
|
|
struct scatterlist sg_aead_out[2];
|
|
|
|
char aad_space[TLS_AAD_SPACE_SIZE];
|
|
+ u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE +
|
|
+ TLS_CIPHER_AES_GCM_128_SALT_SIZE];
|
|
struct aead_request aead_req;
|
|
u8 aead_req_ctx[];
|
|
};
|
|
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
|
|
index 9c0c2132a2d68..64626b32107b7 100644
|
|
--- a/include/rdma/ib_verbs.h
|
|
+++ b/include/rdma/ib_verbs.h
|
|
@@ -56,7 +56,7 @@
|
|
#include <linux/string.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/netdevice.h>
|
|
-
|
|
+#include <linux/refcount.h>
|
|
#include <linux/if_link.h>
|
|
#include <linux/atomic.h>
|
|
#include <linux/mmu_notifier.h>
|
|
@@ -2605,6 +2605,12 @@ struct ib_device {
|
|
|
|
const struct uverbs_object_tree_def *const *driver_specs;
|
|
enum rdma_driver_id driver_id;
|
|
+ /*
|
|
+ * Provides synchronization between device unregistration and netlink
|
|
+ * commands on a device. To be used only by core.
|
|
+ */
|
|
+ refcount_t refcount;
|
|
+ struct completion unreg_completion;
|
|
};
|
|
|
|
struct ib_client {
|
|
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
|
|
index ea8c93bbb0e05..e87f2d5b3cc65 100644
|
|
--- a/include/sound/compress_driver.h
|
|
+++ b/include/sound/compress_driver.h
|
|
@@ -171,7 +171,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
|
|
if (snd_BUG_ON(!stream))
|
|
return;
|
|
|
|
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
|
|
+ if (stream->direction == SND_COMPRESS_PLAYBACK)
|
|
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
|
|
+ else
|
|
+ stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
|
|
+
|
|
wake_up(&stream->runtime->sleep);
|
|
}
|
|
|
|
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
|
|
index 0d98bb9068b17..acacc19002658 100644
|
|
--- a/include/sound/hda_codec.h
|
|
+++ b/include/sound/hda_codec.h
|
|
@@ -68,6 +68,7 @@ struct hda_bus {
|
|
unsigned int response_reset:1; /* controller was reset */
|
|
unsigned int in_reset:1; /* during reset operation */
|
|
unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
|
|
+ unsigned int bus_probing :1; /* during probing process */
|
|
|
|
int primary_dig_out_type; /* primary digital out PCM type */
|
|
unsigned int mixer_assigned; /* codec addr for mixer name */
|
|
diff --git a/include/sound/soc.h b/include/sound/soc.h
|
|
index 70c10a8f3e90a..e721082c84a36 100644
|
|
--- a/include/sound/soc.h
|
|
+++ b/include/sound/soc.h
|
|
@@ -553,12 +553,12 @@ static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
|
|
}
|
|
#endif
|
|
|
|
-#ifdef CONFIG_SND_SOC_AC97_BUS
|
|
struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
|
|
struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
|
|
unsigned int id, unsigned int id_mask);
|
|
void snd_soc_free_ac97_component(struct snd_ac97 *ac97);
|
|
|
|
+#ifdef CONFIG_SND_SOC_AC97_BUS
|
|
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
|
|
int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
|
|
struct platform_device *pdev);
|
|
@@ -985,6 +985,12 @@ struct snd_soc_dai_link {
|
|
/* Do not create a PCM for this DAI link (Backend link) */
|
|
unsigned int ignore:1;
|
|
|
|
+ /*
|
|
+ * This driver uses legacy platform naming. Set by the core, machine
|
|
+ * drivers should not modify this value.
|
|
+ */
|
|
+ unsigned int legacy_platform:1;
|
|
+
|
|
struct list_head list; /* DAI link list of the soc card */
|
|
struct snd_soc_dobj dobj; /* For topology */
|
|
};
|
|
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
|
|
index e3bdb0550a596..d9fd4eac58c27 100644
|
|
--- a/include/target/target_core_base.h
|
|
+++ b/include/target/target_core_base.h
|
|
@@ -601,6 +601,7 @@ struct se_session {
|
|
struct se_node_acl *se_node_acl;
|
|
struct se_portal_group *se_tpg;
|
|
void *fabric_sess_ptr;
|
|
+ struct percpu_ref cmd_count;
|
|
struct list_head sess_list;
|
|
struct list_head sess_acl_list;
|
|
struct list_head sess_cmd_list;
|
|
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
|
|
index f4147b398431f..eb9d0923c55c0 100644
|
|
--- a/include/target/target_core_fabric.h
|
|
+++ b/include/target/target_core_fabric.h
|
|
@@ -116,7 +116,7 @@ struct se_session *target_setup_session(struct se_portal_group *,
|
|
struct se_session *, void *));
|
|
void target_remove_session(struct se_session *);
|
|
|
|
-void transport_init_session(struct se_session *);
|
|
+int transport_init_session(struct se_session *se_sess);
|
|
struct se_session *transport_alloc_session(enum target_prot_op);
|
|
int transport_alloc_session_tags(struct se_session *, unsigned int,
|
|
unsigned int);
|
|
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
|
|
index 33d291888ba9c..e3f005eae1f76 100644
|
|
--- a/include/trace/events/afs.h
|
|
+++ b/include/trace/events/afs.h
|
|
@@ -25,6 +25,7 @@
|
|
enum afs_call_trace {
|
|
afs_call_trace_alloc,
|
|
afs_call_trace_free,
|
|
+ afs_call_trace_get,
|
|
afs_call_trace_put,
|
|
afs_call_trace_wake,
|
|
afs_call_trace_work,
|
|
@@ -159,6 +160,7 @@ enum afs_file_error {
|
|
#define afs_call_traces \
|
|
EM(afs_call_trace_alloc, "ALLOC") \
|
|
EM(afs_call_trace_free, "FREE ") \
|
|
+ EM(afs_call_trace_get, "GET ") \
|
|
EM(afs_call_trace_put, "PUT ") \
|
|
EM(afs_call_trace_wake, "WAKE ") \
|
|
E_(afs_call_trace_work, "WORK ")
|
|
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
|
|
index 698e0d8a5ca40..d68e9e536814c 100644
|
|
--- a/include/trace/events/ext4.h
|
|
+++ b/include/trace/events/ext4.h
|
|
@@ -226,6 +226,26 @@ TRACE_EVENT(ext4_drop_inode,
|
|
(unsigned long) __entry->ino, __entry->drop)
|
|
);
|
|
|
|
+TRACE_EVENT(ext4_nfs_commit_metadata,
|
|
+ TP_PROTO(struct inode *inode),
|
|
+
|
|
+ TP_ARGS(inode),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __field( dev_t, dev )
|
|
+ __field( ino_t, ino )
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ __entry->dev = inode->i_sb->s_dev;
|
|
+ __entry->ino = inode->i_ino;
|
|
+ ),
|
|
+
|
|
+ TP_printk("dev %d,%d ino %lu",
|
|
+ MAJOR(__entry->dev), MINOR(__entry->dev),
|
|
+ (unsigned long) __entry->ino)
|
|
+);
|
|
+
|
|
TRACE_EVENT(ext4_mark_inode_dirty,
|
|
TP_PROTO(struct inode *inode, unsigned long IP),
|
|
|
|
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
|
|
index 28e384186c350..8617f4fd6b703 100644
|
|
--- a/include/trace/events/sunrpc.h
|
|
+++ b/include/trace/events/sunrpc.h
|
|
@@ -569,7 +569,8 @@ TRACE_EVENT(svc_process,
|
|
__field(u32, vers)
|
|
__field(u32, proc)
|
|
__string(service, name)
|
|
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
|
|
+ __string(addr, rqst->rq_xprt ?
|
|
+ rqst->rq_xprt->xpt_remotebuf : "(null)")
|
|
),
|
|
|
|
TP_fast_assign(
|
|
@@ -577,7 +578,8 @@ TRACE_EVENT(svc_process,
|
|
__entry->vers = rqst->rq_vers;
|
|
__entry->proc = rqst->rq_proc;
|
|
__assign_str(service, name);
|
|
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
|
|
+ __assign_str(addr, rqst->rq_xprt ?
|
|
+ rqst->rq_xprt->xpt_remotebuf : "(null)");
|
|
),
|
|
|
|
TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u",
|
|
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
|
|
index f6052e70bf403..a55cb8b10165a 100644
|
|
--- a/include/uapi/linux/in.h
|
|
+++ b/include/uapi/linux/in.h
|
|
@@ -268,7 +268,7 @@ struct sockaddr_in {
|
|
#define IN_MULTICAST(a) IN_CLASSD(a)
|
|
#define IN_MULTICAST_NET 0xe0000000
|
|
|
|
-#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff)
|
|
+#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff)
|
|
#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
|
|
|
|
#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
|
|
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
|
|
index 14565d703291b..e8baca85bac6a 100644
|
|
--- a/include/uapi/linux/inet_diag.h
|
|
+++ b/include/uapi/linux/inet_diag.h
|
|
@@ -137,15 +137,21 @@ enum {
|
|
INET_DIAG_TCLASS,
|
|
INET_DIAG_SKMEMINFO,
|
|
INET_DIAG_SHUTDOWN,
|
|
- INET_DIAG_DCTCPINFO,
|
|
- INET_DIAG_PROTOCOL, /* response attribute only */
|
|
+
|
|
+ /*
|
|
+ * Next extenstions cannot be requested in struct inet_diag_req_v2:
|
|
+ * its field idiag_ext has only 8 bits.
|
|
+ */
|
|
+
|
|
+ INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */
|
|
+ INET_DIAG_PROTOCOL, /* response attribute only */
|
|
INET_DIAG_SKV6ONLY,
|
|
INET_DIAG_LOCALS,
|
|
INET_DIAG_PEERS,
|
|
INET_DIAG_PAD,
|
|
- INET_DIAG_MARK,
|
|
- INET_DIAG_BBRINFO,
|
|
- INET_DIAG_CLASS_ID,
|
|
+ INET_DIAG_MARK, /* only with CAP_NET_ADMIN */
|
|
+ INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */
|
|
+ INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
|
|
INET_DIAG_MD5SIG,
|
|
__INET_DIAG_MAX,
|
|
};
|
|
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
|
|
index fb78f6f500f37..f056b2a00d5c7 100644
|
|
--- a/include/uapi/linux/input.h
|
|
+++ b/include/uapi/linux/input.h
|
|
@@ -26,13 +26,17 @@
|
|
*/
|
|
|
|
struct input_event {
|
|
-#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL)
|
|
+#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
|
|
struct timeval time;
|
|
#define input_event_sec time.tv_sec
|
|
#define input_event_usec time.tv_usec
|
|
#else
|
|
__kernel_ulong_t __sec;
|
|
+#if defined(__sparc__) && defined(__arch64__)
|
|
+ unsigned int __usec;
|
|
+#else
|
|
__kernel_ulong_t __usec;
|
|
+#endif
|
|
#define input_event_sec __sec
|
|
#define input_event_usec __usec
|
|
#endif
|
|
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
|
|
index d13fd490b66da..6e73f0274e412 100644
|
|
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
|
|
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
|
|
@@ -78,6 +78,7 @@ enum pvrdma_wr_opcode {
|
|
PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
|
|
PVRDMA_WR_BIND_MW,
|
|
PVRDMA_WR_REG_SIG_MR,
|
|
+ PVRDMA_WR_ERROR,
|
|
};
|
|
|
|
enum pvrdma_wc_status {
|
|
diff --git a/init/Kconfig b/init/Kconfig
|
|
index ed9352513c324..b902f9c89800c 100644
|
|
--- a/init/Kconfig
|
|
+++ b/init/Kconfig
|
|
@@ -1130,6 +1130,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION
|
|
bool "Dead code and data elimination (EXPERIMENTAL)"
|
|
depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
|
|
depends on EXPERT
|
|
+ depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
|
|
depends on $(cc-option,-ffunction-sections -fdata-sections)
|
|
depends on $(ld-option,--gc-sections)
|
|
help
|
|
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
|
|
index b2890c268cb34..ac44653025ad4 100644
|
|
--- a/kernel/bpf/core.c
|
|
+++ b/kernel/bpf/core.c
|
|
@@ -52,6 +52,7 @@
|
|
#define DST regs[insn->dst_reg]
|
|
#define SRC regs[insn->src_reg]
|
|
#define FP regs[BPF_REG_FP]
|
|
+#define AX regs[BPF_REG_AX]
|
|
#define ARG1 regs[BPF_REG_ARG1]
|
|
#define CTX regs[BPF_REG_CTX]
|
|
#define IMM insn->imm
|
|
@@ -726,6 +727,26 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
|
|
BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
|
|
BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
|
|
|
|
+ /* Constraints on AX register:
|
|
+ *
|
|
+ * AX register is inaccessible from user space. It is mapped in
|
|
+ * all JITs, and used here for constant blinding rewrites. It is
|
|
+ * typically "stateless" meaning its contents are only valid within
|
|
+ * the executed instruction, but not across several instructions.
|
|
+ * There are a few exceptions however which are further detailed
|
|
+ * below.
|
|
+ *
|
|
+ * Constant blinding is only used by JITs, not in the interpreter.
|
|
+ * The interpreter uses AX in some occasions as a local temporary
|
|
+ * register e.g. in DIV or MOD instructions.
|
|
+ *
|
|
+ * In restricted circumstances, the verifier can also use the AX
|
|
+ * register for rewrites as long as they do not interfere with
|
|
+ * the above cases!
|
|
+ */
|
|
+ if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
|
|
+ goto out;
|
|
+
|
|
if (from->imm == 0 &&
|
|
(from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
|
|
from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
|
|
@@ -1055,7 +1076,6 @@ bool bpf_opcode_in_insntable(u8 code)
|
|
*/
|
|
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
|
|
{
|
|
- u64 tmp;
|
|
#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
|
|
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
|
|
static const void *jumptable[256] = {
|
|
@@ -1129,36 +1149,36 @@ select_insn:
|
|
(*(s64 *) &DST) >>= IMM;
|
|
CONT;
|
|
ALU64_MOD_X:
|
|
- div64_u64_rem(DST, SRC, &tmp);
|
|
- DST = tmp;
|
|
+ div64_u64_rem(DST, SRC, &AX);
|
|
+ DST = AX;
|
|
CONT;
|
|
ALU_MOD_X:
|
|
- tmp = (u32) DST;
|
|
- DST = do_div(tmp, (u32) SRC);
|
|
+ AX = (u32) DST;
|
|
+ DST = do_div(AX, (u32) SRC);
|
|
CONT;
|
|
ALU64_MOD_K:
|
|
- div64_u64_rem(DST, IMM, &tmp);
|
|
- DST = tmp;
|
|
+ div64_u64_rem(DST, IMM, &AX);
|
|
+ DST = AX;
|
|
CONT;
|
|
ALU_MOD_K:
|
|
- tmp = (u32) DST;
|
|
- DST = do_div(tmp, (u32) IMM);
|
|
+ AX = (u32) DST;
|
|
+ DST = do_div(AX, (u32) IMM);
|
|
CONT;
|
|
ALU64_DIV_X:
|
|
DST = div64_u64(DST, SRC);
|
|
CONT;
|
|
ALU_DIV_X:
|
|
- tmp = (u32) DST;
|
|
- do_div(tmp, (u32) SRC);
|
|
- DST = (u32) tmp;
|
|
+ AX = (u32) DST;
|
|
+ do_div(AX, (u32) SRC);
|
|
+ DST = (u32) AX;
|
|
CONT;
|
|
ALU64_DIV_K:
|
|
DST = div64_u64(DST, IMM);
|
|
CONT;
|
|
ALU_DIV_K:
|
|
- tmp = (u32) DST;
|
|
- do_div(tmp, (u32) IMM);
|
|
- DST = (u32) tmp;
|
|
+ AX = (u32) DST;
|
|
+ do_div(AX, (u32) IMM);
|
|
+ DST = (u32) AX;
|
|
CONT;
|
|
ALU_END_TO_BE:
|
|
switch (IMM) {
|
|
@@ -1414,7 +1434,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
|
|
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
|
|
{ \
|
|
u64 stack[stack_size / sizeof(u64)]; \
|
|
- u64 regs[MAX_BPF_REG]; \
|
|
+ u64 regs[MAX_BPF_EXT_REG]; \
|
|
\
|
|
FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
|
|
ARG1 = (u64) (unsigned long) ctx; \
|
|
@@ -1427,7 +1447,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
|
|
const struct bpf_insn *insn) \
|
|
{ \
|
|
u64 stack[stack_size / sizeof(u64)]; \
|
|
- u64 regs[MAX_BPF_REG]; \
|
|
+ u64 regs[MAX_BPF_EXT_REG]; \
|
|
\
|
|
FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
|
|
BPF_R1 = r1; \
|
|
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
|
|
index 99d243e1ad6e8..52378d3e34b32 100644
|
|
--- a/kernel/bpf/map_in_map.c
|
|
+++ b/kernel/bpf/map_in_map.c
|
|
@@ -12,6 +12,7 @@
|
|
struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
|
|
{
|
|
struct bpf_map *inner_map, *inner_map_meta;
|
|
+ u32 inner_map_meta_size;
|
|
struct fd f;
|
|
|
|
f = fdget(inner_map_ufd);
|
|
@@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
- inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
|
|
+ inner_map_meta_size = sizeof(*inner_map_meta);
|
|
+ /* In some cases verifier needs to access beyond just base map. */
|
|
+ if (inner_map->ops == &array_map_ops)
|
|
+ inner_map_meta_size = sizeof(struct bpf_array);
|
|
+
|
|
+ inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
|
|
if (!inner_map_meta) {
|
|
fdput(f);
|
|
return ERR_PTR(-ENOMEM);
|
|
@@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
|
|
inner_map_meta->key_size = inner_map->key_size;
|
|
inner_map_meta->value_size = inner_map->value_size;
|
|
inner_map_meta->map_flags = inner_map->map_flags;
|
|
- inner_map_meta->ops = inner_map->ops;
|
|
inner_map_meta->max_entries = inner_map->max_entries;
|
|
|
|
+ /* Misc members not needed in bpf_map_meta_equal() check. */
|
|
+ inner_map_meta->ops = inner_map->ops;
|
|
+ if (inner_map->ops == &array_map_ops) {
|
|
+ inner_map_meta->unpriv_array = inner_map->unpriv_array;
|
|
+ container_of(inner_map_meta, struct bpf_array, map)->index_mask =
|
|
+ container_of(inner_map, struct bpf_array, map)->index_mask;
|
|
+ }
|
|
+
|
|
fdput(f);
|
|
return inner_map_meta;
|
|
}
|
|
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
|
|
index 90daf285de032..d43b145358275 100644
|
|
--- a/kernel/bpf/stackmap.c
|
|
+++ b/kernel/bpf/stackmap.c
|
|
@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
|
|
|
|
if (nhdr->n_type == BPF_BUILD_ID &&
|
|
nhdr->n_namesz == sizeof("GNU") &&
|
|
- nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
|
|
+ nhdr->n_descsz > 0 &&
|
|
+ nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
|
|
memcpy(build_id,
|
|
note_start + note_offs +
|
|
ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
|
|
- BPF_BUILD_ID_SIZE);
|
|
+ nhdr->n_descsz);
|
|
+ memset(build_id + nhdr->n_descsz, 0,
|
|
+ BPF_BUILD_ID_SIZE - nhdr->n_descsz);
|
|
return 0;
|
|
}
|
|
new_offs = note_offs + sizeof(Elf32_Nhdr) +
|
|
@@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
|
|
return -EFAULT; /* page not mapped */
|
|
|
|
ret = -EINVAL;
|
|
- page_addr = page_address(page);
|
|
+ page_addr = kmap_atomic(page);
|
|
ehdr = (Elf32_Ehdr *)page_addr;
|
|
|
|
/* compare magic x7f "ELF" */
|
|
@@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
|
|
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
|
|
ret = stack_map_get_build_id_64(page_addr, build_id);
|
|
out:
|
|
+ kunmap_atomic(page_addr);
|
|
put_page(page);
|
|
return ret;
|
|
}
|
|
@@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
|
|
for (i = 0; i < trace_nr; i++) {
|
|
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
|
|
id_offs[i].ip = ips[i];
|
|
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
|
|
}
|
|
return;
|
|
}
|
|
@@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
|
|
/* per entry fall back to ips */
|
|
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
|
|
id_offs[i].ip = ips[i];
|
|
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
|
|
continue;
|
|
}
|
|
id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
|
|
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
|
|
index 51ba84d4d34a0..e4c8262291521 100644
|
|
--- a/kernel/bpf/verifier.c
|
|
+++ b/kernel/bpf/verifier.c
|
|
@@ -648,6 +648,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
|
|
free_func_state(dst_state->frame[i]);
|
|
dst_state->frame[i] = NULL;
|
|
}
|
|
+ dst_state->speculative = src->speculative;
|
|
dst_state->curframe = src->curframe;
|
|
for (i = 0; i <= src->curframe; i++) {
|
|
dst = dst_state->frame[i];
|
|
@@ -692,7 +693,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
|
|
}
|
|
|
|
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
|
|
- int insn_idx, int prev_insn_idx)
|
|
+ int insn_idx, int prev_insn_idx,
|
|
+ bool speculative)
|
|
{
|
|
struct bpf_verifier_state *cur = env->cur_state;
|
|
struct bpf_verifier_stack_elem *elem;
|
|
@@ -710,6 +712,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
|
|
err = copy_verifier_state(&elem->st, cur);
|
|
if (err)
|
|
goto err;
|
|
+ elem->st.speculative |= speculative;
|
|
if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
|
|
verbose(env, "BPF program is too complex\n");
|
|
goto err;
|
|
@@ -1314,6 +1317,31 @@ static int check_stack_read(struct bpf_verifier_env *env,
|
|
}
|
|
}
|
|
|
|
+static int check_stack_access(struct bpf_verifier_env *env,
|
|
+ const struct bpf_reg_state *reg,
|
|
+ int off, int size)
|
|
+{
|
|
+ /* Stack accesses must be at a fixed offset, so that we
|
|
+ * can determine what type of data were returned. See
|
|
+ * check_stack_read().
|
|
+ */
|
|
+ if (!tnum_is_const(reg->var_off)) {
|
|
+ char tn_buf[48];
|
|
+
|
|
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
|
|
+ verbose(env, "variable stack access var_off=%s off=%d size=%d",
|
|
+ tn_buf, off, size);
|
|
+ return -EACCES;
|
|
+ }
|
|
+
|
|
+ if (off >= 0 || off < -MAX_BPF_STACK) {
|
|
+ verbose(env, "invalid stack off=%d size=%d\n", off, size);
|
|
+ return -EACCES;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/* check read/write into map element returned by bpf_map_lookup_elem() */
|
|
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
|
|
int size, bool zero_size_allowed)
|
|
@@ -1345,13 +1373,17 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
|
|
*/
|
|
if (env->log.level)
|
|
print_verifier_state(env, state);
|
|
+
|
|
/* The minimum value is only important with signed
|
|
* comparisons where we can't assume the floor of a
|
|
* value is 0. If we are using signed variables for our
|
|
* index'es we need to make sure that whatever we use
|
|
* will have a set floor within our range.
|
|
*/
|
|
- if (reg->smin_value < 0) {
|
|
+ if (reg->smin_value < 0 &&
|
|
+ (reg->smin_value == S64_MIN ||
|
|
+ (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
|
|
+ reg->smin_value + off < 0)) {
|
|
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
|
|
regno);
|
|
return -EACCES;
|
|
@@ -1870,24 +1902,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
|
}
|
|
|
|
} else if (reg->type == PTR_TO_STACK) {
|
|
- /* stack accesses must be at a fixed offset, so that we can
|
|
- * determine what type of data were returned.
|
|
- * See check_stack_read().
|
|
- */
|
|
- if (!tnum_is_const(reg->var_off)) {
|
|
- char tn_buf[48];
|
|
-
|
|
- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
|
|
- verbose(env, "variable stack access var_off=%s off=%d size=%d",
|
|
- tn_buf, off, size);
|
|
- return -EACCES;
|
|
- }
|
|
off += reg->var_off.value;
|
|
- if (off >= 0 || off < -MAX_BPF_STACK) {
|
|
- verbose(env, "invalid stack off=%d size=%d\n", off,
|
|
- size);
|
|
- return -EACCES;
|
|
- }
|
|
+ err = check_stack_access(env, reg, off, size);
|
|
+ if (err)
|
|
+ return err;
|
|
|
|
state = func(env, reg);
|
|
err = update_stack_depth(env, state, off);
|
|
@@ -2968,6 +2986,125 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
|
|
return true;
|
|
}
|
|
|
|
+static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
|
|
+{
|
|
+ return &env->insn_aux_data[env->insn_idx];
|
|
+}
|
|
+
|
|
+static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
|
|
+ u32 *ptr_limit, u8 opcode, bool off_is_neg)
|
|
+{
|
|
+ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
|
|
+ (opcode == BPF_SUB && !off_is_neg);
|
|
+ u32 off;
|
|
+
|
|
+ switch (ptr_reg->type) {
|
|
+ case PTR_TO_STACK:
|
|
+ off = ptr_reg->off + ptr_reg->var_off.value;
|
|
+ if (mask_to_left)
|
|
+ *ptr_limit = MAX_BPF_STACK + off;
|
|
+ else
|
|
+ *ptr_limit = -off;
|
|
+ return 0;
|
|
+ case PTR_TO_MAP_VALUE:
|
|
+ if (mask_to_left) {
|
|
+ *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
|
|
+ } else {
|
|
+ off = ptr_reg->smin_value + ptr_reg->off;
|
|
+ *ptr_limit = ptr_reg->map_ptr->value_size - off;
|
|
+ }
|
|
+ return 0;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+}
|
|
+
|
|
+static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
|
|
+ const struct bpf_insn *insn)
|
|
+{
|
|
+ return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
|
|
+}
|
|
+
|
|
+static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
|
|
+ u32 alu_state, u32 alu_limit)
|
|
+{
|
|
+ /* If we arrived here from different branches with different
|
|
+ * state or limits to sanitize, then this won't work.
|
|
+ */
|
|
+ if (aux->alu_state &&
|
|
+ (aux->alu_state != alu_state ||
|
|
+ aux->alu_limit != alu_limit))
|
|
+ return -EACCES;
|
|
+
|
|
+ /* Corresponding fixup done in fixup_bpf_calls(). */
|
|
+ aux->alu_state = alu_state;
|
|
+ aux->alu_limit = alu_limit;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int sanitize_val_alu(struct bpf_verifier_env *env,
|
|
+ struct bpf_insn *insn)
|
|
+{
|
|
+ struct bpf_insn_aux_data *aux = cur_aux(env);
|
|
+
|
|
+ if (can_skip_alu_sanitation(env, insn))
|
|
+ return 0;
|
|
+
|
|
+ return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
|
|
+}
|
|
+
|
|
+static int sanitize_ptr_alu(struct bpf_verifier_env *env,
|
|
+ struct bpf_insn *insn,
|
|
+ const struct bpf_reg_state *ptr_reg,
|
|
+ struct bpf_reg_state *dst_reg,
|
|
+ bool off_is_neg)
|
|
+{
|
|
+ struct bpf_verifier_state *vstate = env->cur_state;
|
|
+ struct bpf_insn_aux_data *aux = cur_aux(env);
|
|
+ bool ptr_is_dst_reg = ptr_reg == dst_reg;
|
|
+ u8 opcode = BPF_OP(insn->code);
|
|
+ u32 alu_state, alu_limit;
|
|
+ struct bpf_reg_state tmp;
|
|
+ bool ret;
|
|
+
|
|
+ if (can_skip_alu_sanitation(env, insn))
|
|
+ return 0;
|
|
+
|
|
+ /* We already marked aux for masking from non-speculative
|
|
+ * paths, thus we got here in the first place. We only care
|
|
+ * to explore bad access from here.
|
|
+ */
|
|
+ if (vstate->speculative)
|
|
+ goto do_sim;
|
|
+
|
|
+ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
|
|
+ alu_state |= ptr_is_dst_reg ?
|
|
+ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
|
|
+
|
|
+ if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
|
|
+ return 0;
|
|
+ if (update_alu_sanitation_state(aux, alu_state, alu_limit))
|
|
+ return -EACCES;
|
|
+do_sim:
|
|
+ /* Simulate and find potential out-of-bounds access under
|
|
+ * speculative execution from truncation as a result of
|
|
+ * masking when off was not within expected range. If off
|
|
+ * sits in dst, then we temporarily need to move ptr there
|
|
+ * to simulate dst (== 0) +/-= ptr. Needed, for example,
|
|
+ * for cases where we use K-based arithmetic in one direction
|
|
+ * and truncated reg-based in the other in order to explore
|
|
+ * bad access.
|
|
+ */
|
|
+ if (!ptr_is_dst_reg) {
|
|
+ tmp = *dst_reg;
|
|
+ *dst_reg = *ptr_reg;
|
|
+ }
|
|
+ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
|
|
+ if (!ptr_is_dst_reg)
|
|
+ *dst_reg = tmp;
|
|
+ return !ret ? -EFAULT : 0;
|
|
+}
|
|
+
|
|
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
|
|
* Caller should also handle BPF_MOV case separately.
|
|
* If we return -EACCES, caller may want to try again treating pointer as a
|
|
@@ -2986,8 +3123,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
|
|
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
|
|
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
|
|
+ u32 dst = insn->dst_reg, src = insn->src_reg;
|
|
u8 opcode = BPF_OP(insn->code);
|
|
- u32 dst = insn->dst_reg;
|
|
+ int ret;
|
|
|
|
dst_reg = ®s[dst];
|
|
|
|
@@ -3020,6 +3158,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
|
|
dst, reg_type_str[ptr_reg->type]);
|
|
return -EACCES;
|
|
+ case PTR_TO_MAP_VALUE:
|
|
+ if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
|
|
+ verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
|
|
+ off_reg == dst_reg ? dst : src);
|
|
+ return -EACCES;
|
|
+ }
|
|
+ /* fall-through */
|
|
default:
|
|
break;
|
|
}
|
|
@@ -3036,6 +3181,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
|
|
switch (opcode) {
|
|
case BPF_ADD:
|
|
+ ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
|
|
+ if (ret < 0) {
|
|
+ verbose(env, "R%d tried to add from different maps or paths\n", dst);
|
|
+ return ret;
|
|
+ }
|
|
/* We can take a fixed offset as long as it doesn't overflow
|
|
* the s32 'off' field
|
|
*/
|
|
@@ -3086,6 +3236,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
}
|
|
break;
|
|
case BPF_SUB:
|
|
+ ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
|
|
+ if (ret < 0) {
|
|
+ verbose(env, "R%d tried to sub from different maps or paths\n", dst);
|
|
+ return ret;
|
|
+ }
|
|
if (dst_reg == off_reg) {
|
|
/* scalar -= pointer. Creates an unknown scalar */
|
|
verbose(env, "R%d tried to subtract pointer from scalar\n",
|
|
@@ -3165,6 +3320,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
__update_reg_bounds(dst_reg);
|
|
__reg_deduce_bounds(dst_reg);
|
|
__reg_bound_offset(dst_reg);
|
|
+
|
|
+ /* For unprivileged we require that resulting offset must be in bounds
|
|
+ * in order to be able to sanitize access later on.
|
|
+ */
|
|
+ if (!env->allow_ptr_leaks) {
|
|
+ if (dst_reg->type == PTR_TO_MAP_VALUE &&
|
|
+ check_map_access(env, dst, dst_reg->off, 1, false)) {
|
|
+ verbose(env, "R%d pointer arithmetic of map value goes out of range, "
|
|
+ "prohibited for !root\n", dst);
|
|
+ return -EACCES;
|
|
+ } else if (dst_reg->type == PTR_TO_STACK &&
|
|
+ check_stack_access(env, dst_reg, dst_reg->off +
|
|
+ dst_reg->var_off.value, 1)) {
|
|
+ verbose(env, "R%d stack pointer arithmetic goes out of range, "
|
|
+ "prohibited for !root\n", dst);
|
|
+ return -EACCES;
|
|
+ }
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -3183,6 +3357,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
|
|
s64 smin_val, smax_val;
|
|
u64 umin_val, umax_val;
|
|
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
|
|
+ u32 dst = insn->dst_reg;
|
|
+ int ret;
|
|
|
|
if (insn_bitness == 32) {
|
|
/* Relevant for 32-bit RSH: Information can propagate towards
|
|
@@ -3217,6 +3393,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
|
|
|
|
switch (opcode) {
|
|
case BPF_ADD:
|
|
+ ret = sanitize_val_alu(env, insn);
|
|
+ if (ret < 0) {
|
|
+ verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
|
|
+ return ret;
|
|
+ }
|
|
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
|
|
signed_add_overflows(dst_reg->smax_value, smax_val)) {
|
|
dst_reg->smin_value = S64_MIN;
|
|
@@ -3236,6 +3417,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
|
|
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
|
|
break;
|
|
case BPF_SUB:
|
|
+ ret = sanitize_val_alu(env, insn);
|
|
+ if (ret < 0) {
|
|
+ verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
|
|
+ return ret;
|
|
+ }
|
|
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
|
|
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
|
|
/* Overflow possible, we know nothing */
|
|
@@ -3571,12 +3757,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
|
|
return err;
|
|
|
|
if (BPF_SRC(insn->code) == BPF_X) {
|
|
+ struct bpf_reg_state *src_reg = regs + insn->src_reg;
|
|
+ struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
|
|
+
|
|
if (BPF_CLASS(insn->code) == BPF_ALU64) {
|
|
/* case: R1 = R2
|
|
* copy register state to dest reg
|
|
*/
|
|
- regs[insn->dst_reg] = regs[insn->src_reg];
|
|
- regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
|
|
+ *dst_reg = *src_reg;
|
|
+ dst_reg->live |= REG_LIVE_WRITTEN;
|
|
} else {
|
|
/* R1 = (u32) R2 */
|
|
if (is_pointer_value(env, insn->src_reg)) {
|
|
@@ -3584,9 +3773,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
|
|
"R%d partial copy of pointer\n",
|
|
insn->src_reg);
|
|
return -EACCES;
|
|
+ } else if (src_reg->type == SCALAR_VALUE) {
|
|
+ *dst_reg = *src_reg;
|
|
+ dst_reg->live |= REG_LIVE_WRITTEN;
|
|
+ } else {
|
|
+ mark_reg_unknown(env, regs,
|
|
+ insn->dst_reg);
|
|
}
|
|
- mark_reg_unknown(env, regs, insn->dst_reg);
|
|
- coerce_reg_to_size(®s[insn->dst_reg], 4);
|
|
+ coerce_reg_to_size(dst_reg, 4);
|
|
}
|
|
} else {
|
|
/* case: R = imm
|
|
@@ -4241,7 +4435,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
|
|
}
|
|
}
|
|
|
|
- other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
|
|
+ other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
|
|
+ false);
|
|
if (!other_branch)
|
|
return -EFAULT;
|
|
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
|
|
@@ -4982,6 +5177,12 @@ static bool states_equal(struct bpf_verifier_env *env,
|
|
if (old->curframe != cur->curframe)
|
|
return false;
|
|
|
|
+ /* Verification state from speculative execution simulation
|
|
+ * must never prune a non-speculative execution one.
|
|
+ */
|
|
+ if (old->speculative && !cur->speculative)
|
|
+ return false;
|
|
+
|
|
/* for states to be equal callsites have to be the same
|
|
* and all frame states need to be equivalent
|
|
*/
|
|
@@ -5172,7 +5373,6 @@ static int do_check(struct bpf_verifier_env *env)
|
|
struct bpf_insn *insns = env->prog->insnsi;
|
|
struct bpf_reg_state *regs;
|
|
int insn_cnt = env->prog->len, i;
|
|
- int insn_idx, prev_insn_idx = 0;
|
|
int insn_processed = 0;
|
|
bool do_print_state = false;
|
|
|
|
@@ -5180,6 +5380,7 @@ static int do_check(struct bpf_verifier_env *env)
|
|
if (!state)
|
|
return -ENOMEM;
|
|
state->curframe = 0;
|
|
+ state->speculative = false;
|
|
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
|
|
if (!state->frame[0]) {
|
|
kfree(state);
|
|
@@ -5190,19 +5391,19 @@ static int do_check(struct bpf_verifier_env *env)
|
|
BPF_MAIN_FUNC /* callsite */,
|
|
0 /* frameno */,
|
|
0 /* subprogno, zero == main subprog */);
|
|
- insn_idx = 0;
|
|
+
|
|
for (;;) {
|
|
struct bpf_insn *insn;
|
|
u8 class;
|
|
int err;
|
|
|
|
- if (insn_idx >= insn_cnt) {
|
|
+ if (env->insn_idx >= insn_cnt) {
|
|
verbose(env, "invalid insn idx %d insn_cnt %d\n",
|
|
- insn_idx, insn_cnt);
|
|
+ env->insn_idx, insn_cnt);
|
|
return -EFAULT;
|
|
}
|
|
|
|
- insn = &insns[insn_idx];
|
|
+ insn = &insns[env->insn_idx];
|
|
class = BPF_CLASS(insn->code);
|
|
|
|
if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
|
|
@@ -5212,17 +5413,19 @@ static int do_check(struct bpf_verifier_env *env)
|
|
return -E2BIG;
|
|
}
|
|
|
|
- err = is_state_visited(env, insn_idx);
|
|
+ err = is_state_visited(env, env->insn_idx);
|
|
if (err < 0)
|
|
return err;
|
|
if (err == 1) {
|
|
/* found equivalent state, can prune the search */
|
|
if (env->log.level) {
|
|
if (do_print_state)
|
|
- verbose(env, "\nfrom %d to %d: safe\n",
|
|
- prev_insn_idx, insn_idx);
|
|
+ verbose(env, "\nfrom %d to %d%s: safe\n",
|
|
+ env->prev_insn_idx, env->insn_idx,
|
|
+ env->cur_state->speculative ?
|
|
+ " (speculative execution)" : "");
|
|
else
|
|
- verbose(env, "%d: safe\n", insn_idx);
|
|
+ verbose(env, "%d: safe\n", env->insn_idx);
|
|
}
|
|
goto process_bpf_exit;
|
|
}
|
|
@@ -5235,10 +5438,12 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
if (env->log.level > 1 || (env->log.level && do_print_state)) {
|
|
if (env->log.level > 1)
|
|
- verbose(env, "%d:", insn_idx);
|
|
+ verbose(env, "%d:", env->insn_idx);
|
|
else
|
|
- verbose(env, "\nfrom %d to %d:",
|
|
- prev_insn_idx, insn_idx);
|
|
+ verbose(env, "\nfrom %d to %d%s:",
|
|
+ env->prev_insn_idx, env->insn_idx,
|
|
+ env->cur_state->speculative ?
|
|
+ " (speculative execution)" : "");
|
|
print_verifier_state(env, state->frame[state->curframe]);
|
|
do_print_state = false;
|
|
}
|
|
@@ -5249,19 +5454,19 @@ static int do_check(struct bpf_verifier_env *env)
|
|
.private_data = env,
|
|
};
|
|
|
|
- verbose(env, "%d: ", insn_idx);
|
|
+ verbose(env, "%d: ", env->insn_idx);
|
|
print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
|
|
}
|
|
|
|
if (bpf_prog_is_dev_bound(env->prog->aux)) {
|
|
- err = bpf_prog_offload_verify_insn(env, insn_idx,
|
|
- prev_insn_idx);
|
|
+ err = bpf_prog_offload_verify_insn(env, env->insn_idx,
|
|
+ env->prev_insn_idx);
|
|
if (err)
|
|
return err;
|
|
}
|
|
|
|
regs = cur_regs(env);
|
|
- env->insn_aux_data[insn_idx].seen = true;
|
|
+ env->insn_aux_data[env->insn_idx].seen = true;
|
|
|
|
if (class == BPF_ALU || class == BPF_ALU64) {
|
|
err = check_alu_op(env, insn);
|
|
@@ -5287,13 +5492,13 @@ static int do_check(struct bpf_verifier_env *env)
|
|
/* check that memory (src_reg + off) is readable,
|
|
* the state of dst_reg will be updated by this func
|
|
*/
|
|
- err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
|
|
- BPF_SIZE(insn->code), BPF_READ,
|
|
- insn->dst_reg, false);
|
|
+ err = check_mem_access(env, env->insn_idx, insn->src_reg,
|
|
+ insn->off, BPF_SIZE(insn->code),
|
|
+ BPF_READ, insn->dst_reg, false);
|
|
if (err)
|
|
return err;
|
|
|
|
- prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
|
|
+ prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
|
|
|
|
if (*prev_src_type == NOT_INIT) {
|
|
/* saw a valid insn
|
|
@@ -5318,10 +5523,10 @@ static int do_check(struct bpf_verifier_env *env)
|
|
enum bpf_reg_type *prev_dst_type, dst_reg_type;
|
|
|
|
if (BPF_MODE(insn->code) == BPF_XADD) {
|
|
- err = check_xadd(env, insn_idx, insn);
|
|
+ err = check_xadd(env, env->insn_idx, insn);
|
|
if (err)
|
|
return err;
|
|
- insn_idx++;
|
|
+ env->insn_idx++;
|
|
continue;
|
|
}
|
|
|
|
@@ -5337,13 +5542,13 @@ static int do_check(struct bpf_verifier_env *env)
|
|
dst_reg_type = regs[insn->dst_reg].type;
|
|
|
|
/* check that memory (dst_reg + off) is writeable */
|
|
- err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
|
|
- BPF_SIZE(insn->code), BPF_WRITE,
|
|
- insn->src_reg, false);
|
|
+ err = check_mem_access(env, env->insn_idx, insn->dst_reg,
|
|
+ insn->off, BPF_SIZE(insn->code),
|
|
+ BPF_WRITE, insn->src_reg, false);
|
|
if (err)
|
|
return err;
|
|
|
|
- prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
|
|
+ prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
|
|
|
|
if (*prev_dst_type == NOT_INIT) {
|
|
*prev_dst_type = dst_reg_type;
|
|
@@ -5371,9 +5576,9 @@ static int do_check(struct bpf_verifier_env *env)
|
|
}
|
|
|
|
/* check that memory (dst_reg + off) is writeable */
|
|
- err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
|
|
- BPF_SIZE(insn->code), BPF_WRITE,
|
|
- -1, false);
|
|
+ err = check_mem_access(env, env->insn_idx, insn->dst_reg,
|
|
+ insn->off, BPF_SIZE(insn->code),
|
|
+ BPF_WRITE, -1, false);
|
|
if (err)
|
|
return err;
|
|
|
|
@@ -5391,9 +5596,9 @@ static int do_check(struct bpf_verifier_env *env)
|
|
}
|
|
|
|
if (insn->src_reg == BPF_PSEUDO_CALL)
|
|
- err = check_func_call(env, insn, &insn_idx);
|
|
+ err = check_func_call(env, insn, &env->insn_idx);
|
|
else
|
|
- err = check_helper_call(env, insn->imm, insn_idx);
|
|
+ err = check_helper_call(env, insn->imm, env->insn_idx);
|
|
if (err)
|
|
return err;
|
|
|
|
@@ -5406,7 +5611,7 @@ static int do_check(struct bpf_verifier_env *env)
|
|
return -EINVAL;
|
|
}
|
|
|
|
- insn_idx += insn->off + 1;
|
|
+ env->insn_idx += insn->off + 1;
|
|
continue;
|
|
|
|
} else if (opcode == BPF_EXIT) {
|
|
@@ -5420,8 +5625,8 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
if (state->curframe) {
|
|
/* exit from nested function */
|
|
- prev_insn_idx = insn_idx;
|
|
- err = prepare_func_exit(env, &insn_idx);
|
|
+ env->prev_insn_idx = env->insn_idx;
|
|
+ err = prepare_func_exit(env, &env->insn_idx);
|
|
if (err)
|
|
return err;
|
|
do_print_state = true;
|
|
@@ -5451,7 +5656,8 @@ static int do_check(struct bpf_verifier_env *env)
|
|
if (err)
|
|
return err;
|
|
process_bpf_exit:
|
|
- err = pop_stack(env, &prev_insn_idx, &insn_idx);
|
|
+ err = pop_stack(env, &env->prev_insn_idx,
|
|
+ &env->insn_idx);
|
|
if (err < 0) {
|
|
if (err != -ENOENT)
|
|
return err;
|
|
@@ -5461,7 +5667,7 @@ process_bpf_exit:
|
|
continue;
|
|
}
|
|
} else {
|
|
- err = check_cond_jmp_op(env, insn, &insn_idx);
|
|
+ err = check_cond_jmp_op(env, insn, &env->insn_idx);
|
|
if (err)
|
|
return err;
|
|
}
|
|
@@ -5478,8 +5684,8 @@ process_bpf_exit:
|
|
if (err)
|
|
return err;
|
|
|
|
- insn_idx++;
|
|
- env->insn_aux_data[insn_idx].seen = true;
|
|
+ env->insn_idx++;
|
|
+ env->insn_aux_data[env->insn_idx].seen = true;
|
|
} else {
|
|
verbose(env, "invalid BPF_LD mode\n");
|
|
return -EINVAL;
|
|
@@ -5489,7 +5695,7 @@ process_bpf_exit:
|
|
return -EINVAL;
|
|
}
|
|
|
|
- insn_idx++;
|
|
+ env->insn_idx++;
|
|
}
|
|
|
|
verbose(env, "processed %d insns (limit %d), stack depth ",
|
|
@@ -5789,10 +5995,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
|
|
int i, cnt, size, ctx_field_size, delta = 0;
|
|
const int insn_cnt = env->prog->len;
|
|
struct bpf_insn insn_buf[16], *insn;
|
|
+ u32 target_size, size_default, off;
|
|
struct bpf_prog *new_prog;
|
|
enum bpf_access_type type;
|
|
bool is_narrower_load;
|
|
- u32 target_size;
|
|
|
|
if (ops->gen_prologue || env->seen_direct_write) {
|
|
if (!ops->gen_prologue) {
|
|
@@ -5885,9 +6091,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
|
|
* we will apply proper mask to the result.
|
|
*/
|
|
is_narrower_load = size < ctx_field_size;
|
|
+ size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
|
|
+ off = insn->off;
|
|
if (is_narrower_load) {
|
|
- u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
|
|
- u32 off = insn->off;
|
|
u8 size_code;
|
|
|
|
if (type == BPF_WRITE) {
|
|
@@ -5915,12 +6121,23 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
|
|
}
|
|
|
|
if (is_narrower_load && size < target_size) {
|
|
- if (ctx_field_size <= 4)
|
|
+ u8 shift = (off & (size_default - 1)) * 8;
|
|
+
|
|
+ if (ctx_field_size <= 4) {
|
|
+ if (shift)
|
|
+ insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
|
|
+ insn->dst_reg,
|
|
+ shift);
|
|
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
|
|
(1 << size * 8) - 1);
|
|
- else
|
|
+ } else {
|
|
+ if (shift)
|
|
+ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
|
|
+ insn->dst_reg,
|
|
+ shift);
|
|
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
|
|
(1 << size * 8) - 1);
|
|
+ }
|
|
}
|
|
|
|
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
|
|
@@ -6201,6 +6418,57 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
|
|
continue;
|
|
}
|
|
|
|
+ if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
|
|
+ insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
|
|
+ const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
|
|
+ const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
|
|
+ struct bpf_insn insn_buf[16];
|
|
+ struct bpf_insn *patch = &insn_buf[0];
|
|
+ bool issrc, isneg;
|
|
+ u32 off_reg;
|
|
+
|
|
+ aux = &env->insn_aux_data[i + delta];
|
|
+ if (!aux->alu_state)
|
|
+ continue;
|
|
+
|
|
+ isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
|
|
+ issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
|
|
+ BPF_ALU_SANITIZE_SRC;
|
|
+
|
|
+ off_reg = issrc ? insn->src_reg : insn->dst_reg;
|
|
+ if (isneg)
|
|
+ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
|
|
+ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
|
|
+ *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
|
|
+ *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
|
|
+ *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
|
|
+ *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
|
|
+ if (issrc) {
|
|
+ *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
|
|
+ off_reg);
|
|
+ insn->src_reg = BPF_REG_AX;
|
|
+ } else {
|
|
+ *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
|
|
+ BPF_REG_AX);
|
|
+ }
|
|
+ if (isneg)
|
|
+ insn->code = insn->code == code_add ?
|
|
+ code_sub : code_add;
|
|
+ *patch++ = *insn;
|
|
+ if (issrc && isneg)
|
|
+ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
|
|
+ cnt = patch - insn_buf;
|
|
+
|
|
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
|
|
+ if (!new_prog)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ delta += cnt - 1;
|
|
+ env->prog = prog = new_prog;
|
|
+ insn = new_prog->insnsi + i + delta;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
if (insn->code != (BPF_JMP | BPF_CALL))
|
|
continue;
|
|
if (insn->src_reg == BPF_PSEUDO_CALL)
|
|
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
|
|
index 6aaf5dd5383bb..498c6bcf992a4 100644
|
|
--- a/kernel/cgroup/cgroup.c
|
|
+++ b/kernel/cgroup/cgroup.c
|
|
@@ -1744,7 +1744,7 @@ static int parse_cgroup_root_flags(char *data, unsigned int *root_flags)
|
|
|
|
*root_flags = 0;
|
|
|
|
- if (!data)
|
|
+ if (!data || *data == '\0')
|
|
return 0;
|
|
|
|
while ((token = strsep(&data, ",")) != NULL) {
|
|
@@ -4202,20 +4202,25 @@ static void css_task_iter_advance(struct css_task_iter *it)
|
|
|
|
lockdep_assert_held(&css_set_lock);
|
|
repeat:
|
|
- /*
|
|
- * Advance iterator to find next entry. cset->tasks is consumed
|
|
- * first and then ->mg_tasks. After ->mg_tasks, we move onto the
|
|
- * next cset.
|
|
- */
|
|
- next = it->task_pos->next;
|
|
+ if (it->task_pos) {
|
|
+ /*
|
|
+ * Advance iterator to find next entry. cset->tasks is
|
|
+ * consumed first and then ->mg_tasks. After ->mg_tasks,
|
|
+ * we move onto the next cset.
|
|
+ */
|
|
+ next = it->task_pos->next;
|
|
|
|
- if (next == it->tasks_head)
|
|
- next = it->mg_tasks_head->next;
|
|
+ if (next == it->tasks_head)
|
|
+ next = it->mg_tasks_head->next;
|
|
|
|
- if (next == it->mg_tasks_head)
|
|
+ if (next == it->mg_tasks_head)
|
|
+ css_task_iter_advance_css_set(it);
|
|
+ else
|
|
+ it->task_pos = next;
|
|
+ } else {
|
|
+ /* called from start, proceed to the first cset */
|
|
css_task_iter_advance_css_set(it);
|
|
- else
|
|
- it->task_pos = next;
|
|
+ }
|
|
|
|
/* if PROCS, skip over tasks which aren't group leaders */
|
|
if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
|
|
@@ -4255,7 +4260,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
|
|
|
|
it->cset_head = it->cset_pos;
|
|
|
|
- css_task_iter_advance_css_set(it);
|
|
+ css_task_iter_advance(it);
|
|
|
|
spin_unlock_irq(&css_set_lock);
|
|
}
|
|
diff --git a/kernel/cpu.c b/kernel/cpu.c
|
|
index 91d5c38eb7e5b..f3b25d945727a 100644
|
|
--- a/kernel/cpu.c
|
|
+++ b/kernel/cpu.c
|
|
@@ -376,9 +376,6 @@ void __weak arch_smt_update(void) { }
|
|
|
|
#ifdef CONFIG_HOTPLUG_SMT
|
|
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
|
|
-EXPORT_SYMBOL_GPL(cpu_smt_control);
|
|
-
|
|
-static bool cpu_smt_available __read_mostly;
|
|
|
|
void __init cpu_smt_disable(bool force)
|
|
{
|
|
@@ -397,25 +394,11 @@ void __init cpu_smt_disable(bool force)
|
|
|
|
/*
|
|
* The decision whether SMT is supported can only be done after the full
|
|
- * CPU identification. Called from architecture code before non boot CPUs
|
|
- * are brought up.
|
|
- */
|
|
-void __init cpu_smt_check_topology_early(void)
|
|
-{
|
|
- if (!topology_smt_supported())
|
|
- cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
|
|
-}
|
|
-
|
|
-/*
|
|
- * If SMT was disabled by BIOS, detect it here, after the CPUs have been
|
|
- * brought online. This ensures the smt/l1tf sysfs entries are consistent
|
|
- * with reality. cpu_smt_available is set to true during the bringup of non
|
|
- * boot CPUs when a SMT sibling is detected. Note, this may overwrite
|
|
- * cpu_smt_control's previous setting.
|
|
+ * CPU identification. Called from architecture code.
|
|
*/
|
|
void __init cpu_smt_check_topology(void)
|
|
{
|
|
- if (!cpu_smt_available)
|
|
+ if (!topology_smt_supported())
|
|
cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
|
|
}
|
|
|
|
@@ -428,18 +411,10 @@ early_param("nosmt", smt_cmdline_disable);
|
|
|
|
static inline bool cpu_smt_allowed(unsigned int cpu)
|
|
{
|
|
- if (topology_is_primary_thread(cpu))
|
|
+ if (cpu_smt_control == CPU_SMT_ENABLED)
|
|
return true;
|
|
|
|
- /*
|
|
- * If the CPU is not a 'primary' thread and the booted_once bit is
|
|
- * set then the processor has SMT support. Store this information
|
|
- * for the late check of SMT support in cpu_smt_check_topology().
|
|
- */
|
|
- if (per_cpu(cpuhp_state, cpu).booted_once)
|
|
- cpu_smt_available = true;
|
|
-
|
|
- if (cpu_smt_control == CPU_SMT_ENABLED)
|
|
+ if (topology_is_primary_thread(cpu))
|
|
return true;
|
|
|
|
/*
|
|
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
|
|
index 65c0f13637882..94aa9ae0007ac 100644
|
|
--- a/kernel/debug/debug_core.c
|
|
+++ b/kernel/debug/debug_core.c
|
|
@@ -535,6 +535,8 @@ return_normal:
|
|
arch_kgdb_ops.correct_hw_break();
|
|
if (trace_on)
|
|
tracing_on();
|
|
+ kgdb_info[cpu].debuggerinfo = NULL;
|
|
+ kgdb_info[cpu].task = NULL;
|
|
kgdb_info[cpu].exception_state &=
|
|
~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
|
|
kgdb_info[cpu].enter_kgdb--;
|
|
@@ -667,6 +669,8 @@ kgdb_restore:
|
|
if (trace_on)
|
|
tracing_on();
|
|
|
|
+ kgdb_info[cpu].debuggerinfo = NULL;
|
|
+ kgdb_info[cpu].task = NULL;
|
|
kgdb_info[cpu].exception_state &=
|
|
~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
|
|
kgdb_info[cpu].enter_kgdb--;
|
|
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
|
|
index 7921ae4fca8de..7e2379aa0a1e7 100644
|
|
--- a/kernel/debug/kdb/kdb_bt.c
|
|
+++ b/kernel/debug/kdb/kdb_bt.c
|
|
@@ -186,7 +186,16 @@ kdb_bt(int argc, const char **argv)
|
|
kdb_printf("btc: cpu status: ");
|
|
kdb_parse("cpu\n");
|
|
for_each_online_cpu(cpu) {
|
|
- sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
|
|
+ void *kdb_tsk = KDB_TSK(cpu);
|
|
+
|
|
+ /* If a CPU failed to round up we could be here */
|
|
+ if (!kdb_tsk) {
|
|
+ kdb_printf("WARNING: no task for cpu %ld\n",
|
|
+ cpu);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ sprintf(buf, "btt 0x%px\n", kdb_tsk);
|
|
kdb_parse(buf);
|
|
touch_nmi_watchdog();
|
|
}
|
|
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
|
|
index 15e1a7af5dd03..53a0df6e4d92c 100644
|
|
--- a/kernel/debug/kdb/kdb_debugger.c
|
|
+++ b/kernel/debug/kdb/kdb_debugger.c
|
|
@@ -118,13 +118,6 @@ int kdb_stub(struct kgdb_state *ks)
|
|
kdb_bp_remove();
|
|
KDB_STATE_CLEAR(DOING_SS);
|
|
KDB_STATE_SET(PAGER);
|
|
- /* zero out any offline cpu data */
|
|
- for_each_present_cpu(i) {
|
|
- if (!cpu_online(i)) {
|
|
- kgdb_info[i].debuggerinfo = NULL;
|
|
- kgdb_info[i].task = NULL;
|
|
- }
|
|
- }
|
|
if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
|
|
ks->pass_exception = 1;
|
|
KDB_FLAG_SET(CATASTROPHIC);
|
|
diff --git a/kernel/events/core.c b/kernel/events/core.c
|
|
index 84530ab358c37..699bc25d6204b 100644
|
|
--- a/kernel/events/core.c
|
|
+++ b/kernel/events/core.c
|
|
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
|
|
}
|
|
}
|
|
|
|
+static int perf_event_check_period(struct perf_event *event, u64 value)
|
|
+{
|
|
+ return event->pmu->check_period(event, value);
|
|
+}
|
|
+
|
|
static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
|
{
|
|
u64 value;
|
|
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
|
if (event->attr.freq && value > sysctl_perf_event_sample_rate)
|
|
return -EINVAL;
|
|
|
|
+ if (perf_event_check_period(event, value))
|
|
+ return -EINVAL;
|
|
+
|
|
event_function_call(event, __perf_event_period, &value);
|
|
|
|
return 0;
|
|
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
|
|
return 0;
|
|
}
|
|
|
|
+static int perf_event_nop_int(struct perf_event *event, u64 value)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
|
|
|
|
static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
|
|
@@ -9691,6 +9704,9 @@ got_cpu_context:
|
|
pmu->pmu_disable = perf_pmu_nop_void;
|
|
}
|
|
|
|
+ if (!pmu->check_period)
|
|
+ pmu->check_period = perf_event_nop_int;
|
|
+
|
|
if (!pmu->event_idx)
|
|
pmu->event_idx = perf_event_idx_default;
|
|
|
|
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
|
|
index 4a99370763319..5ab4fe3b1dcc0 100644
|
|
--- a/kernel/events/ring_buffer.c
|
|
+++ b/kernel/events/ring_buffer.c
|
|
@@ -734,6 +734,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
|
|
size = sizeof(struct ring_buffer);
|
|
size += nr_pages * sizeof(void *);
|
|
|
|
+ if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
|
|
+ goto fail;
|
|
+
|
|
rb = kzalloc(size, GFP_KERNEL);
|
|
if (!rb)
|
|
goto fail;
|
|
diff --git a/kernel/exit.c b/kernel/exit.c
|
|
index 0e21e6d21f35f..55b4fa6d01ebd 100644
|
|
--- a/kernel/exit.c
|
|
+++ b/kernel/exit.c
|
|
@@ -558,12 +558,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
|
|
return NULL;
|
|
}
|
|
|
|
-static struct task_struct *find_child_reaper(struct task_struct *father)
|
|
+static struct task_struct *find_child_reaper(struct task_struct *father,
|
|
+ struct list_head *dead)
|
|
__releases(&tasklist_lock)
|
|
__acquires(&tasklist_lock)
|
|
{
|
|
struct pid_namespace *pid_ns = task_active_pid_ns(father);
|
|
struct task_struct *reaper = pid_ns->child_reaper;
|
|
+ struct task_struct *p, *n;
|
|
|
|
if (likely(reaper != father))
|
|
return reaper;
|
|
@@ -579,6 +581,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
|
|
panic("Attempted to kill init! exitcode=0x%08x\n",
|
|
father->signal->group_exit_code ?: father->exit_code);
|
|
}
|
|
+
|
|
+ list_for_each_entry_safe(p, n, dead, ptrace_entry) {
|
|
+ list_del_init(&p->ptrace_entry);
|
|
+ release_task(p);
|
|
+ }
|
|
+
|
|
zap_pid_ns_processes(pid_ns);
|
|
write_lock_irq(&tasklist_lock);
|
|
|
|
@@ -668,7 +676,7 @@ static void forget_original_parent(struct task_struct *father,
|
|
exit_ptrace(father, dead);
|
|
|
|
/* Can drop and reacquire tasklist_lock */
|
|
- reaper = find_child_reaper(father);
|
|
+ reaper = find_child_reaper(father, dead);
|
|
if (list_empty(&father->children))
|
|
return;
|
|
|
|
diff --git a/kernel/fork.c b/kernel/fork.c
|
|
index e2a5156bc9c33..906cd0c13d15c 100644
|
|
--- a/kernel/fork.c
|
|
+++ b/kernel/fork.c
|
|
@@ -221,6 +221,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
|
|
memset(s->addr, 0, THREAD_SIZE);
|
|
|
|
tsk->stack_vm_area = s;
|
|
+ tsk->stack = s->addr;
|
|
return s->addr;
|
|
}
|
|
|
|
@@ -1837,8 +1838,6 @@ static __latent_entropy struct task_struct *copy_process(
|
|
|
|
posix_cpu_timers_init(p);
|
|
|
|
- p->start_time = ktime_get_ns();
|
|
- p->real_start_time = ktime_get_boot_ns();
|
|
p->io_context = NULL;
|
|
audit_set_context(p, NULL);
|
|
cgroup_fork(p);
|
|
@@ -2004,6 +2003,17 @@ static __latent_entropy struct task_struct *copy_process(
|
|
if (retval)
|
|
goto bad_fork_free_pid;
|
|
|
|
+ /*
|
|
+ * From this point on we must avoid any synchronous user-space
|
|
+ * communication until we take the tasklist-lock. In particular, we do
|
|
+ * not want user-space to be able to predict the process start-time by
|
|
+ * stalling fork(2) after we recorded the start_time but before it is
|
|
+ * visible to the system.
|
|
+ */
|
|
+
|
|
+ p->start_time = ktime_get_ns();
|
|
+ p->real_start_time = ktime_get_boot_ns();
|
|
+
|
|
/*
|
|
* Make it visible to the rest of the system, but dont wake it up yet.
|
|
* Need tasklist lock for parent etc handling!
|
|
diff --git a/kernel/futex.c b/kernel/futex.c
|
|
index 5cc8083a4c890..4d1b7db04e105 100644
|
|
--- a/kernel/futex.c
|
|
+++ b/kernel/futex.c
|
|
@@ -2850,35 +2850,39 @@ retry_private:
|
|
* and BUG when futex_unlock_pi() interleaves with this.
|
|
*
|
|
* Therefore acquire wait_lock while holding hb->lock, but drop the
|
|
- * latter before calling rt_mutex_start_proxy_lock(). This still fully
|
|
- * serializes against futex_unlock_pi() as that does the exact same
|
|
- * lock handoff sequence.
|
|
+ * latter before calling __rt_mutex_start_proxy_lock(). This
|
|
+ * interleaves with futex_unlock_pi() -- which does a similar lock
|
|
+ * handoff -- such that the latter can observe the futex_q::pi_state
|
|
+ * before __rt_mutex_start_proxy_lock() is done.
|
|
*/
|
|
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
|
|
spin_unlock(q.lock_ptr);
|
|
+ /*
|
|
+ * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
|
|
+ * such that futex_unlock_pi() is guaranteed to observe the waiter when
|
|
+ * it sees the futex_q::pi_state.
|
|
+ */
|
|
ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
|
|
raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
|
|
|
|
if (ret) {
|
|
if (ret == 1)
|
|
ret = 0;
|
|
-
|
|
- spin_lock(q.lock_ptr);
|
|
- goto no_block;
|
|
+ goto cleanup;
|
|
}
|
|
|
|
-
|
|
if (unlikely(to))
|
|
hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
|
|
|
|
ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
|
|
|
|
+cleanup:
|
|
spin_lock(q.lock_ptr);
|
|
/*
|
|
- * If we failed to acquire the lock (signal/timeout), we must
|
|
+ * If we failed to acquire the lock (deadlock/signal/timeout), we must
|
|
* first acquire the hb->lock before removing the lock from the
|
|
- * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
|
|
- * wait lists consistent.
|
|
+ * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
|
|
+ * lists consistent.
|
|
*
|
|
* In particular; it is important that futex_unlock_pi() can not
|
|
* observe this inconsistency.
|
|
@@ -3002,6 +3006,10 @@ retry:
|
|
* there is no point where we hold neither; and therefore
|
|
* wake_futex_pi() must observe a state consistent with what we
|
|
* observed.
|
|
+ *
|
|
+ * In particular; this forces __rt_mutex_start_proxy() to
|
|
+ * complete such that we're guaranteed to observe the
|
|
+ * rt_waiter. Also see the WARN in wake_futex_pi().
|
|
*/
|
|
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
|
|
spin_unlock(&hb->lock);
|
|
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
|
|
index cb8e3e8ac7b90..4a9191617076f 100644
|
|
--- a/kernel/hung_task.c
|
|
+++ b/kernel/hung_task.c
|
|
@@ -34,7 +34,7 @@ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
|
|
* is disabled during the critical section. It also controls the size of
|
|
* the RCU grace period. So it needs to be upper-bound.
|
|
*/
|
|
-#define HUNG_TASK_BATCHING 1024
|
|
+#define HUNG_TASK_LOCK_BREAK (HZ / 10)
|
|
|
|
/*
|
|
* Zero means infinite timeout - no checking done:
|
|
@@ -112,8 +112,11 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
|
|
|
|
trace_sched_process_hang(t);
|
|
|
|
- if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
|
|
- return;
|
|
+ if (sysctl_hung_task_panic) {
|
|
+ console_verbose();
|
|
+ hung_task_show_lock = true;
|
|
+ hung_task_call_panic = true;
|
|
+ }
|
|
|
|
/*
|
|
* Ok, the task did not get scheduled for more than 2 minutes,
|
|
@@ -135,11 +138,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
|
|
}
|
|
|
|
touch_nmi_watchdog();
|
|
-
|
|
- if (sysctl_hung_task_panic) {
|
|
- hung_task_show_lock = true;
|
|
- hung_task_call_panic = true;
|
|
- }
|
|
}
|
|
|
|
/*
|
|
@@ -173,7 +171,7 @@ static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
|
|
static void check_hung_uninterruptible_tasks(unsigned long timeout)
|
|
{
|
|
int max_count = sysctl_hung_task_check_count;
|
|
- int batch_count = HUNG_TASK_BATCHING;
|
|
+ unsigned long last_break = jiffies;
|
|
struct task_struct *g, *t;
|
|
|
|
/*
|
|
@@ -188,10 +186,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
|
|
for_each_process_thread(g, t) {
|
|
if (!max_count--)
|
|
goto unlock;
|
|
- if (!--batch_count) {
|
|
- batch_count = HUNG_TASK_BATCHING;
|
|
+ if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
|
|
if (!rcu_lock_break(g, t))
|
|
goto unlock;
|
|
+ last_break = jiffies;
|
|
}
|
|
/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
|
|
if (t->state == TASK_UNINTERRUPTIBLE)
|
|
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
|
|
index f4f29b9d90ee7..e12cdf637c71b 100644
|
|
--- a/kernel/irq/affinity.c
|
|
+++ b/kernel/irq/affinity.c
|
|
@@ -117,12 +117,11 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
|
|
*/
|
|
if (numvecs <= nodes) {
|
|
for_each_node_mask(n, nodemsk) {
|
|
- cpumask_copy(masks + curvec, node_to_cpumask[n]);
|
|
- if (++done == numvecs)
|
|
- break;
|
|
+ cpumask_or(masks + curvec, masks + curvec, node_to_cpumask[n]);
|
|
if (++curvec == last_affv)
|
|
curvec = affd->pre_vectors;
|
|
}
|
|
+ done = numvecs;
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/kernel/kcov.c b/kernel/kcov.c
|
|
index 97959d7b77e2a..c2277dbdbfb14 100644
|
|
--- a/kernel/kcov.c
|
|
+++ b/kernel/kcov.c
|
|
@@ -112,7 +112,7 @@ void notrace __sanitizer_cov_trace_pc(void)
|
|
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
|
|
|
|
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
|
|
-static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
|
|
+static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
|
|
{
|
|
struct task_struct *t;
|
|
u64 *area;
|
|
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
|
|
index 581edcc63c268..978d63a8261c2 100644
|
|
--- a/kernel/locking/rtmutex.c
|
|
+++ b/kernel/locking/rtmutex.c
|
|
@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
|
|
rt_mutex_set_owner(lock, NULL);
|
|
}
|
|
|
|
+/**
|
|
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
|
|
+ * @lock: the rt_mutex to take
|
|
+ * @waiter: the pre-initialized rt_mutex_waiter
|
|
+ * @task: the task to prepare
|
|
+ *
|
|
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
|
|
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
|
|
+ *
|
|
+ * NOTE: does _NOT_ remove the @waiter on failure; must either call
|
|
+ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
|
|
+ *
|
|
+ * Returns:
|
|
+ * 0 - task blocked on lock
|
|
+ * 1 - acquired the lock for task, caller should wake it up
|
|
+ * <0 - error
|
|
+ *
|
|
+ * Special API call for PI-futex support.
|
|
+ */
|
|
int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
|
|
struct rt_mutex_waiter *waiter,
|
|
struct task_struct *task)
|
|
{
|
|
int ret;
|
|
|
|
+ lockdep_assert_held(&lock->wait_lock);
|
|
+
|
|
if (try_to_take_rt_mutex(lock, task, NULL))
|
|
return 1;
|
|
|
|
@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
|
|
ret = 0;
|
|
}
|
|
|
|
- if (unlikely(ret))
|
|
- remove_waiter(lock, waiter);
|
|
-
|
|
debug_rt_mutex_print_deadlock(waiter);
|
|
|
|
return ret;
|
|
@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
|
|
* @waiter: the pre-initialized rt_mutex_waiter
|
|
* @task: the task to prepare
|
|
*
|
|
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
|
|
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
|
|
+ *
|
|
+ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
|
|
+ * on failure.
|
|
+ *
|
|
* Returns:
|
|
* 0 - task blocked on lock
|
|
* 1 - acquired the lock for task, caller should wake it up
|
|
* <0 - error
|
|
*
|
|
- * Special API call for FUTEX_REQUEUE_PI support.
|
|
+ * Special API call for PI-futex support.
|
|
*/
|
|
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
|
|
struct rt_mutex_waiter *waiter,
|
|
@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
|
|
|
|
raw_spin_lock_irq(&lock->wait_lock);
|
|
ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
|
|
+ if (unlikely(ret))
|
|
+ remove_waiter(lock, waiter);
|
|
raw_spin_unlock_irq(&lock->wait_lock);
|
|
|
|
return ret;
|
|
@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
|
|
* @lock: the rt_mutex we were woken on
|
|
* @waiter: the pre-initialized rt_mutex_waiter
|
|
*
|
|
- * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
|
|
+ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
|
|
+ * rt_mutex_wait_proxy_lock().
|
|
*
|
|
* Unless we acquired the lock; we're still enqueued on the wait-list and can
|
|
* in fact still be granted ownership until we're removed. Therefore we can
|
|
diff --git a/kernel/memremap.c b/kernel/memremap.c
|
|
index 9eced2cc9f94d..3eef989ef0352 100644
|
|
--- a/kernel/memremap.c
|
|
+++ b/kernel/memremap.c
|
|
@@ -88,23 +88,25 @@ static void devm_memremap_pages_release(void *data)
|
|
resource_size_t align_start, align_size;
|
|
unsigned long pfn;
|
|
|
|
+ pgmap->kill(pgmap->ref);
|
|
for_each_device_pfn(pfn, pgmap)
|
|
put_page(pfn_to_page(pfn));
|
|
|
|
- if (percpu_ref_tryget_live(pgmap->ref)) {
|
|
- dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
|
|
- percpu_ref_put(pgmap->ref);
|
|
- }
|
|
-
|
|
/* pages are dead and unused, undo the arch mapping */
|
|
align_start = res->start & ~(SECTION_SIZE - 1);
|
|
align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
|
|
- align_start;
|
|
|
|
mem_hotplug_begin();
|
|
- arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
|
|
- &pgmap->altmap : NULL);
|
|
- kasan_remove_zero_shadow(__va(align_start), align_size);
|
|
+ if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
|
|
+ pfn = align_start >> PAGE_SHIFT;
|
|
+ __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
|
|
+ align_size >> PAGE_SHIFT, NULL);
|
|
+ } else {
|
|
+ arch_remove_memory(align_start, align_size,
|
|
+ pgmap->altmap_valid ? &pgmap->altmap : NULL);
|
|
+ kasan_remove_zero_shadow(__va(align_start), align_size);
|
|
+ }
|
|
mem_hotplug_done();
|
|
|
|
untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
|
|
@@ -116,7 +118,7 @@ static void devm_memremap_pages_release(void *data)
|
|
/**
|
|
* devm_memremap_pages - remap and provide memmap backing for the given resource
|
|
* @dev: hosting device for @res
|
|
- * @pgmap: pointer to a struct dev_pgmap
|
|
+ * @pgmap: pointer to a struct dev_pagemap
|
|
*
|
|
* Notes:
|
|
* 1/ At a minimum the res, ref and type members of @pgmap must be initialized
|
|
@@ -125,11 +127,8 @@ static void devm_memremap_pages_release(void *data)
|
|
* 2/ The altmap field may optionally be initialized, in which case altmap_valid
|
|
* must be set to true
|
|
*
|
|
- * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
|
|
- * time (or devm release event). The expected order of events is that ref has
|
|
- * been through percpu_ref_kill() before devm_memremap_pages_release(). The
|
|
- * wait for the completion of all references being dropped and
|
|
- * percpu_ref_exit() must occur after devm_memremap_pages_release().
|
|
+ * 3/ pgmap->ref must be 'live' on entry and will be killed at
|
|
+ * devm_memremap_pages_release() time, or if this routine fails.
|
|
*
|
|
* 4/ res is expected to be a host memory range that could feasibly be
|
|
* treated as a "System RAM" range, i.e. not a device mmio range, but
|
|
@@ -145,6 +144,9 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
|
|
pgprot_t pgprot = PAGE_KERNEL;
|
|
int error, nid, is_ram;
|
|
|
|
+ if (!pgmap->ref || !pgmap->kill)
|
|
+ return ERR_PTR(-EINVAL);
|
|
+
|
|
align_start = res->start & ~(SECTION_SIZE - 1);
|
|
align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
|
|
- align_start;
|
|
@@ -167,18 +169,13 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
|
|
is_ram = region_intersects(align_start, align_size,
|
|
IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
|
|
|
|
- if (is_ram == REGION_MIXED) {
|
|
- WARN_ONCE(1, "%s attempted on mixed region %pr\n",
|
|
- __func__, res);
|
|
- return ERR_PTR(-ENXIO);
|
|
+ if (is_ram != REGION_DISJOINT) {
|
|
+ WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
|
|
+ is_ram == REGION_MIXED ? "mixed" : "ram", res);
|
|
+ error = -ENXIO;
|
|
+ goto err_array;
|
|
}
|
|
|
|
- if (is_ram == REGION_INTERSECTS)
|
|
- return __va(res->start);
|
|
-
|
|
- if (!pgmap->ref)
|
|
- return ERR_PTR(-EINVAL);
|
|
-
|
|
pgmap->dev = dev;
|
|
|
|
error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
|
|
@@ -196,17 +193,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
|
|
goto err_pfn_remap;
|
|
|
|
mem_hotplug_begin();
|
|
- error = kasan_add_zero_shadow(__va(align_start), align_size);
|
|
- if (error) {
|
|
- mem_hotplug_done();
|
|
- goto err_kasan;
|
|
+
|
|
+ /*
|
|
+ * For device private memory we call add_pages() as we only need to
|
|
+ * allocate and initialize struct page for the device memory. More-
|
|
+ * over the device memory is un-accessible thus we do not want to
|
|
+ * create a linear mapping for the memory like arch_add_memory()
|
|
+ * would do.
|
|
+ *
|
|
+ * For all other device memory types, which are accessible by
|
|
+ * the CPU, we do want the linear mapping and thus use
|
|
+ * arch_add_memory().
|
|
+ */
|
|
+ if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
|
|
+ error = add_pages(nid, align_start >> PAGE_SHIFT,
|
|
+ align_size >> PAGE_SHIFT, NULL, false);
|
|
+ } else {
|
|
+ error = kasan_add_zero_shadow(__va(align_start), align_size);
|
|
+ if (error) {
|
|
+ mem_hotplug_done();
|
|
+ goto err_kasan;
|
|
+ }
|
|
+
|
|
+ error = arch_add_memory(nid, align_start, align_size, altmap,
|
|
+ false);
|
|
+ }
|
|
+
|
|
+ if (!error) {
|
|
+ struct zone *zone;
|
|
+
|
|
+ zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
|
|
+ move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
|
|
+ align_size >> PAGE_SHIFT, altmap);
|
|
}
|
|
|
|
- error = arch_add_memory(nid, align_start, align_size, altmap, false);
|
|
- if (!error)
|
|
- move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
|
|
- align_start >> PAGE_SHIFT,
|
|
- align_size >> PAGE_SHIFT, altmap);
|
|
mem_hotplug_done();
|
|
if (error)
|
|
goto err_add_memory;
|
|
@@ -220,7 +240,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
|
|
align_size >> PAGE_SHIFT, pgmap);
|
|
percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
|
|
|
|
- devm_add_action(dev, devm_memremap_pages_release, pgmap);
|
|
+ error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
|
|
+ pgmap);
|
|
+ if (error)
|
|
+ return ERR_PTR(error);
|
|
|
|
return __va(res->start);
|
|
|
|
@@ -231,9 +254,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
|
|
err_pfn_remap:
|
|
pgmap_array_delete(res);
|
|
err_array:
|
|
+ pgmap->kill(pgmap->ref);
|
|
return ERR_PTR(error);
|
|
}
|
|
-EXPORT_SYMBOL(devm_memremap_pages);
|
|
+EXPORT_SYMBOL_GPL(devm_memremap_pages);
|
|
|
|
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
|
|
{
|
|
diff --git a/kernel/module.c b/kernel/module.c
|
|
index 49a4058915870..0812a7f80fa74 100644
|
|
--- a/kernel/module.c
|
|
+++ b/kernel/module.c
|
|
@@ -1207,8 +1207,10 @@ static ssize_t store_uevent(struct module_attribute *mattr,
|
|
struct module_kobject *mk,
|
|
const char *buffer, size_t count)
|
|
{
|
|
- kobject_synth_uevent(&mk->kobj, buffer, count);
|
|
- return count;
|
|
+ int rc;
|
|
+
|
|
+ rc = kobject_synth_uevent(&mk->kobj, buffer, count);
|
|
+ return rc ? rc : count;
|
|
}
|
|
|
|
struct module_attribute module_uevent =
|
|
diff --git a/kernel/panic.c b/kernel/panic.c
|
|
index f6d549a29a5c8..d10c340c43b0e 100644
|
|
--- a/kernel/panic.c
|
|
+++ b/kernel/panic.c
|
|
@@ -14,6 +14,7 @@
|
|
#include <linux/kmsg_dump.h>
|
|
#include <linux/kallsyms.h>
|
|
#include <linux/notifier.h>
|
|
+#include <linux/vt_kern.h>
|
|
#include <linux/module.h>
|
|
#include <linux/random.h>
|
|
#include <linux/ftrace.h>
|
|
@@ -237,7 +238,10 @@ void panic(const char *fmt, ...)
|
|
if (_crash_kexec_post_notifiers)
|
|
__crash_kexec(NULL);
|
|
|
|
- bust_spinlocks(0);
|
|
+#ifdef CONFIG_VT
|
|
+ unblank_screen();
|
|
+#endif
|
|
+ console_unblank();
|
|
|
|
/*
|
|
* We may have ended up stopping the CPU holding the lock (in
|
|
diff --git a/kernel/pid.c b/kernel/pid.c
|
|
index b2f6c506035da..20881598bdfac 100644
|
|
--- a/kernel/pid.c
|
|
+++ b/kernel/pid.c
|
|
@@ -233,8 +233,10 @@ out_unlock:
|
|
|
|
out_free:
|
|
spin_lock_irq(&pidmap_lock);
|
|
- while (++i <= ns->level)
|
|
- idr_remove(&ns->idr, (pid->numbers + i)->nr);
|
|
+ while (++i <= ns->level) {
|
|
+ upid = pid->numbers + i;
|
|
+ idr_remove(&upid->ns->idr, upid->nr);
|
|
+ }
|
|
|
|
/* On failure to allocate the first pid, reset the state */
|
|
if (ns->pid_allocated == PIDNS_ADDING)
|
|
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
|
|
index a8846ed7f3529..a180abc8c9259 100644
|
|
--- a/kernel/rcu/srcutree.c
|
|
+++ b/kernel/rcu/srcutree.c
|
|
@@ -451,10 +451,12 @@ static void srcu_gp_start(struct srcu_struct *sp)
|
|
|
|
lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
|
|
WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
|
|
+ spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
|
|
rcu_segcblist_advance(&sdp->srcu_cblist,
|
|
rcu_seq_current(&sp->srcu_gp_seq));
|
|
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
|
|
rcu_seq_snap(&sp->srcu_gp_seq));
|
|
+ spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
|
|
smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
|
|
rcu_seq_start(&sp->srcu_gp_seq);
|
|
state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
|
|
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
|
|
index ac855b2f47746..852c60a69e367 100644
|
|
--- a/kernel/sched/fair.c
|
|
+++ b/kernel/sched/fair.c
|
|
@@ -352,10 +352,9 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
|
|
}
|
|
}
|
|
|
|
-/* Iterate thr' all leaf cfs_rq's on a runqueue */
|
|
-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
|
|
- list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
|
|
- leaf_cfs_rq_list)
|
|
+/* Iterate through all leaf cfs_rq's on a runqueue: */
|
|
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
|
|
+ list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
|
|
|
|
/* Do the two (enqueued) entities belong to the same group ? */
|
|
static inline struct cfs_rq *
|
|
@@ -447,8 +446,8 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
|
|
{
|
|
}
|
|
|
|
-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
|
|
- for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
|
|
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
|
|
+ for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
|
|
|
|
static inline struct sched_entity *parent_entity(struct sched_entity *se)
|
|
{
|
|
@@ -5935,6 +5934,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
|
|
|
|
#ifdef CONFIG_SCHED_SMT
|
|
DEFINE_STATIC_KEY_FALSE(sched_smt_present);
|
|
+EXPORT_SYMBOL_GPL(sched_smt_present);
|
|
|
|
static inline void set_idle_cores(int cpu, int val)
|
|
{
|
|
@@ -7387,27 +7387,10 @@ static inline bool others_have_blocked(struct rq *rq)
|
|
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
|
|
-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
|
|
-{
|
|
- if (cfs_rq->load.weight)
|
|
- return false;
|
|
-
|
|
- if (cfs_rq->avg.load_sum)
|
|
- return false;
|
|
-
|
|
- if (cfs_rq->avg.util_sum)
|
|
- return false;
|
|
-
|
|
- if (cfs_rq->avg.runnable_load_sum)
|
|
- return false;
|
|
-
|
|
- return true;
|
|
-}
|
|
-
|
|
static void update_blocked_averages(int cpu)
|
|
{
|
|
struct rq *rq = cpu_rq(cpu);
|
|
- struct cfs_rq *cfs_rq, *pos;
|
|
+ struct cfs_rq *cfs_rq;
|
|
const struct sched_class *curr_class;
|
|
struct rq_flags rf;
|
|
bool done = true;
|
|
@@ -7419,7 +7402,7 @@ static void update_blocked_averages(int cpu)
|
|
* Iterates the task_group tree in a bottom up fashion, see
|
|
* list_add_leaf_cfs_rq() for details.
|
|
*/
|
|
- for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
|
|
+ for_each_leaf_cfs_rq(rq, cfs_rq) {
|
|
struct sched_entity *se;
|
|
|
|
/* throttled entities do not contribute to load */
|
|
@@ -7434,13 +7417,6 @@ static void update_blocked_averages(int cpu)
|
|
if (se && !skip_blocked_update(se))
|
|
update_load_avg(cfs_rq_of(se), se, 0);
|
|
|
|
- /*
|
|
- * There can be a lot of idle CPU cgroups. Don't let fully
|
|
- * decayed cfs_rqs linger on the list.
|
|
- */
|
|
- if (cfs_rq_is_decayed(cfs_rq))
|
|
- list_del_leaf_cfs_rq(cfs_rq);
|
|
-
|
|
/* Don't need periodic decay once load/util_avg are null */
|
|
if (cfs_rq_has_blocked(cfs_rq))
|
|
done = false;
|
|
@@ -10289,10 +10265,10 @@ const struct sched_class fair_sched_class = {
|
|
#ifdef CONFIG_SCHED_DEBUG
|
|
void print_cfs_stats(struct seq_file *m, int cpu)
|
|
{
|
|
- struct cfs_rq *cfs_rq, *pos;
|
|
+ struct cfs_rq *cfs_rq;
|
|
|
|
rcu_read_lock();
|
|
- for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
|
|
+ for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
|
|
print_cfs_rq(m, cpu, cfs_rq);
|
|
rcu_read_unlock();
|
|
}
|
|
diff --git a/kernel/signal.c b/kernel/signal.c
|
|
index 9a32bc2088c90..ac969af3e9a0b 100644
|
|
--- a/kernel/signal.c
|
|
+++ b/kernel/signal.c
|
|
@@ -688,6 +688,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
|
|
}
|
|
EXPORT_SYMBOL_GPL(dequeue_signal);
|
|
|
|
+static int dequeue_synchronous_signal(kernel_siginfo_t *info)
|
|
+{
|
|
+ struct task_struct *tsk = current;
|
|
+ struct sigpending *pending = &tsk->pending;
|
|
+ struct sigqueue *q, *sync = NULL;
|
|
+
|
|
+ /*
|
|
+ * Might a synchronous signal be in the queue?
|
|
+ */
|
|
+ if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
|
|
+ return 0;
|
|
+
|
|
+ /*
|
|
+ * Return the first synchronous signal in the queue.
|
|
+ */
|
|
+ list_for_each_entry(q, &pending->list, list) {
|
|
+ /* Synchronous signals have a postive si_code */
|
|
+ if ((q->info.si_code > SI_USER) &&
|
|
+ (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
|
|
+ sync = q;
|
|
+ goto next;
|
|
+ }
|
|
+ }
|
|
+ return 0;
|
|
+next:
|
|
+ /*
|
|
+ * Check if there is another siginfo for the same signal.
|
|
+ */
|
|
+ list_for_each_entry_continue(q, &pending->list, list) {
|
|
+ if (q->info.si_signo == sync->info.si_signo)
|
|
+ goto still_pending;
|
|
+ }
|
|
+
|
|
+ sigdelset(&pending->signal, sync->info.si_signo);
|
|
+ recalc_sigpending();
|
|
+still_pending:
|
|
+ list_del_init(&sync->list);
|
|
+ copy_siginfo(info, &sync->info);
|
|
+ __sigqueue_free(sync);
|
|
+ return info->si_signo;
|
|
+}
|
|
+
|
|
/*
|
|
* Tell a process that it has a new active signal..
|
|
*
|
|
@@ -1057,10 +1099,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
|
|
|
|
result = TRACE_SIGNAL_DELIVERED;
|
|
/*
|
|
- * Skip useless siginfo allocation for SIGKILL SIGSTOP,
|
|
- * and kernel threads.
|
|
+ * Skip useless siginfo allocation for SIGKILL and kernel threads.
|
|
*/
|
|
- if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD))
|
|
+ if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
|
|
goto out_set;
|
|
|
|
/*
|
|
@@ -2394,6 +2435,14 @@ relock:
|
|
goto relock;
|
|
}
|
|
|
|
+ /* Has this task already been marked for death? */
|
|
+ if (signal_group_exit(signal)) {
|
|
+ ksig->info.si_signo = signr = SIGKILL;
|
|
+ sigdelset(¤t->pending.signal, SIGKILL);
|
|
+ recalc_sigpending();
|
|
+ goto fatal;
|
|
+ }
|
|
+
|
|
for (;;) {
|
|
struct k_sigaction *ka;
|
|
|
|
@@ -2407,7 +2456,15 @@ relock:
|
|
goto relock;
|
|
}
|
|
|
|
- signr = dequeue_signal(current, ¤t->blocked, &ksig->info);
|
|
+ /*
|
|
+ * Signals generated by the execution of an instruction
|
|
+ * need to be delivered before any other pending signals
|
|
+ * so that the instruction pointer in the signal stack
|
|
+ * frame points to the faulting instruction.
|
|
+ */
|
|
+ signr = dequeue_synchronous_signal(&ksig->info);
|
|
+ if (!signr)
|
|
+ signr = dequeue_signal(current, ¤t->blocked, &ksig->info);
|
|
|
|
if (!signr)
|
|
break; /* will return 0 */
|
|
@@ -2489,6 +2546,7 @@ relock:
|
|
continue;
|
|
}
|
|
|
|
+ fatal:
|
|
spin_unlock_irq(&sighand->siglock);
|
|
|
|
/*
|
|
diff --git a/kernel/smp.c b/kernel/smp.c
|
|
index 163c451af42e4..f4cf1b0bb3b86 100644
|
|
--- a/kernel/smp.c
|
|
+++ b/kernel/smp.c
|
|
@@ -584,8 +584,6 @@ void __init smp_init(void)
|
|
num_nodes, (num_nodes > 1 ? "s" : ""),
|
|
num_cpus, (num_cpus > 1 ? "s" : ""));
|
|
|
|
- /* Final decision about SMT support */
|
|
- cpu_smt_check_topology();
|
|
/* Any cleanup work */
|
|
smp_cpus_done(setup_max_cpus);
|
|
}
|
|
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
|
|
index 5fc724e4e454c..9ee261fce89ec 100644
|
|
--- a/kernel/sysctl.c
|
|
+++ b/kernel/sysctl.c
|
|
@@ -2779,6 +2779,8 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
|
|
bool neg;
|
|
|
|
left -= proc_skip_spaces(&p);
|
|
+ if (!left)
|
|
+ break;
|
|
|
|
err = proc_get_long(&p, &left, &val, &neg,
|
|
proc_wspace_sep,
|
|
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
|
|
index 8f0644af40be7..80f955210861a 100644
|
|
--- a/kernel/time/posix-cpu-timers.c
|
|
+++ b/kernel/time/posix-cpu-timers.c
|
|
@@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
|
|
* set up the signal and overrun bookkeeping.
|
|
*/
|
|
timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
|
|
+ timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
|
|
|
|
/*
|
|
* This acts as a modification timestamp for the timer,
|
|
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
|
|
index 2d110c9488055..6c94934955383 100644
|
|
--- a/kernel/time/timekeeping.c
|
|
+++ b/kernel/time/timekeeping.c
|
|
@@ -50,7 +50,9 @@ enum timekeeping_adv_mode {
|
|
static struct {
|
|
seqcount_t seq;
|
|
struct timekeeper timekeeper;
|
|
-} tk_core ____cacheline_aligned;
|
|
+} tk_core ____cacheline_aligned = {
|
|
+ .seq = SEQCNT_ZERO(tk_core.seq),
|
|
+};
|
|
|
|
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
|
|
static struct timekeeper shadow_timekeeper;
|
|
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
|
|
index ff1c4b20cd0a6..b331562989bd2 100644
|
|
--- a/kernel/trace/trace.c
|
|
+++ b/kernel/trace/trace.c
|
|
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
|
|
const char tgid_space[] = " ";
|
|
const char space[] = " ";
|
|
|
|
+ print_event_info(buf, m);
|
|
+
|
|
seq_printf(m, "# %s _-----=> irqs-off\n",
|
|
tgid ? tgid_space : space);
|
|
seq_printf(m, "# %s / _----=> need-resched\n",
|
|
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
|
|
index fec67188c4d28..3387408a15c27 100644
|
|
--- a/kernel/trace/trace_kprobe.c
|
|
+++ b/kernel/trace/trace_kprobe.c
|
|
@@ -878,22 +878,14 @@ static const struct file_operations kprobe_profile_ops = {
|
|
static nokprobe_inline int
|
|
fetch_store_strlen(unsigned long addr)
|
|
{
|
|
- mm_segment_t old_fs;
|
|
int ret, len = 0;
|
|
u8 c;
|
|
|
|
- old_fs = get_fs();
|
|
- set_fs(KERNEL_DS);
|
|
- pagefault_disable();
|
|
-
|
|
do {
|
|
- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
|
|
+ ret = probe_mem_read(&c, (u8 *)addr + len, 1);
|
|
len++;
|
|
} while (c && ret == 0 && len < MAX_STRING_SIZE);
|
|
|
|
- pagefault_enable();
|
|
- set_fs(old_fs);
|
|
-
|
|
return (ret < 0) ? ret : len;
|
|
}
|
|
|
|
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
|
|
index 5c56afc17cf86..4737bb8c07a38 100644
|
|
--- a/kernel/trace/trace_probe_tmpl.h
|
|
+++ b/kernel/trace/trace_probe_tmpl.h
|
|
@@ -180,10 +180,12 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
|
|
if (unlikely(arg->dynamic))
|
|
*dl = make_data_loc(maxlen, dyndata - base);
|
|
ret = process_fetch_insn(arg->code, regs, dl, base);
|
|
- if (unlikely(ret < 0 && arg->dynamic))
|
|
+ if (unlikely(ret < 0 && arg->dynamic)) {
|
|
*dl = make_data_loc(0, dyndata - base);
|
|
- else
|
|
+ } else {
|
|
dyndata += ret;
|
|
+ maxlen -= ret;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
|
|
index 31ea48eceda18..ec8332c5056a6 100644
|
|
--- a/kernel/trace/trace_uprobe.c
|
|
+++ b/kernel/trace/trace_uprobe.c
|
|
@@ -5,7 +5,7 @@
|
|
* Copyright (C) IBM Corporation, 2010-2012
|
|
* Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
|
|
*/
|
|
-#define pr_fmt(fmt) "trace_kprobe: " fmt
|
|
+#define pr_fmt(fmt) "trace_uprobe: " fmt
|
|
|
|
#include <linux/module.h>
|
|
#include <linux/uaccess.h>
|
|
@@ -127,6 +127,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
|
|
if (ret >= 0) {
|
|
if (ret == maxlen)
|
|
dst[ret - 1] = '\0';
|
|
+ else
|
|
+ /*
|
|
+ * Include the terminating null byte. In this case it
|
|
+ * was copied by strncpy_from_user but not accounted
|
|
+ * for in ret.
|
|
+ */
|
|
+ ret++;
|
|
*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
|
|
}
|
|
|
|
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
|
|
index 14436f4ca6bd7..30e0f9770f88c 100644
|
|
--- a/lib/int_sqrt.c
|
|
+++ b/lib/int_sqrt.c
|
|
@@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x)
|
|
if (x <= ULONG_MAX)
|
|
return int_sqrt((unsigned long) x);
|
|
|
|
- m = 1ULL << (fls64(x) & ~1ULL);
|
|
+ m = 1ULL << ((fls64(x) - 1) & ~1ULL);
|
|
while (m != 0) {
|
|
b = y + m;
|
|
y >>= 1;
|
|
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
|
|
index 11f2ae0f90996..6aabb609dd871 100644
|
|
--- a/lib/seq_buf.c
|
|
+++ b/lib/seq_buf.c
|
|
@@ -144,9 +144,13 @@ int seq_buf_puts(struct seq_buf *s, const char *str)
|
|
|
|
WARN_ON(s->size == 0);
|
|
|
|
+ /* Add 1 to len for the trailing null byte which must be there */
|
|
+ len += 1;
|
|
+
|
|
if (seq_buf_can_fit(s, len)) {
|
|
memcpy(s->buffer + s->len, str, len);
|
|
- s->len += len;
|
|
+ /* Don't count the trailing null byte against the capacity */
|
|
+ s->len += len - 1;
|
|
return 0;
|
|
}
|
|
seq_buf_set_overflow(s);
|
|
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
|
|
index d5a06addeb271..bf864c73e462a 100644
|
|
--- a/lib/test_debug_virtual.c
|
|
+++ b/lib/test_debug_virtual.c
|
|
@@ -5,6 +5,7 @@
|
|
#include <linux/vmalloc.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/sizes.h>
|
|
+#include <linux/io.h>
|
|
|
|
#include <asm/page.h>
|
|
#ifdef CONFIG_MIPS
|
|
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
|
|
index 82ac39ce53105..aecc0996628f8 100644
|
|
--- a/lib/test_rhashtable.c
|
|
+++ b/lib/test_rhashtable.c
|
|
@@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
|
|
static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
|
|
int cnt, bool slow)
|
|
{
|
|
- struct rhltable rhlt;
|
|
+ struct rhltable *rhlt;
|
|
unsigned int i, ret;
|
|
const char *key;
|
|
int err = 0;
|
|
|
|
- err = rhltable_init(&rhlt, &test_rht_params_dup);
|
|
- if (WARN_ON(err))
|
|
+ rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
|
|
+ if (WARN_ON(!rhlt))
|
|
+ return -EINVAL;
|
|
+
|
|
+ err = rhltable_init(rhlt, &test_rht_params_dup);
|
|
+ if (WARN_ON(err)) {
|
|
+ kfree(rhlt);
|
|
return err;
|
|
+ }
|
|
|
|
for (i = 0; i < cnt; i++) {
|
|
rhl_test_objects[i].value.tid = i;
|
|
- key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
|
|
+ key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
|
|
key += test_rht_params_dup.key_offset;
|
|
|
|
if (slow) {
|
|
- err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
|
|
+ err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
|
|
&rhl_test_objects[i].list_node.rhead));
|
|
if (err == -EAGAIN)
|
|
err = 0;
|
|
} else
|
|
- err = rhltable_insert(&rhlt,
|
|
+ err = rhltable_insert(rhlt,
|
|
&rhl_test_objects[i].list_node,
|
|
test_rht_params_dup);
|
|
if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
|
|
goto skip_print;
|
|
}
|
|
|
|
- ret = print_ht(&rhlt);
|
|
+ ret = print_ht(rhlt);
|
|
WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
|
|
|
|
skip_print:
|
|
- rhltable_destroy(&rhlt);
|
|
+ rhltable_destroy(rhlt);
|
|
+ kfree(rhlt);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/mm/hmm.c b/mm/hmm.c
|
|
index 90c34f3d1243a..50fbaf80f95e8 100644
|
|
--- a/mm/hmm.c
|
|
+++ b/mm/hmm.c
|
|
@@ -986,19 +986,16 @@ static void hmm_devmem_ref_exit(void *data)
|
|
struct hmm_devmem *devmem;
|
|
|
|
devmem = container_of(ref, struct hmm_devmem, ref);
|
|
+ wait_for_completion(&devmem->completion);
|
|
percpu_ref_exit(ref);
|
|
- devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
|
|
}
|
|
|
|
-static void hmm_devmem_ref_kill(void *data)
|
|
+static void hmm_devmem_ref_kill(struct percpu_ref *ref)
|
|
{
|
|
- struct percpu_ref *ref = data;
|
|
struct hmm_devmem *devmem;
|
|
|
|
devmem = container_of(ref, struct hmm_devmem, ref);
|
|
percpu_ref_kill(ref);
|
|
- wait_for_completion(&devmem->completion);
|
|
- devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
|
|
}
|
|
|
|
static int hmm_devmem_fault(struct vm_area_struct *vma,
|
|
@@ -1021,172 +1018,6 @@ static void hmm_devmem_free(struct page *page, void *data)
|
|
devmem->ops->free(devmem, page);
|
|
}
|
|
|
|
-static DEFINE_MUTEX(hmm_devmem_lock);
|
|
-static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);
|
|
-
|
|
-static void hmm_devmem_radix_release(struct resource *resource)
|
|
-{
|
|
- resource_size_t key;
|
|
-
|
|
- mutex_lock(&hmm_devmem_lock);
|
|
- for (key = resource->start;
|
|
- key <= resource->end;
|
|
- key += PA_SECTION_SIZE)
|
|
- radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
|
|
- mutex_unlock(&hmm_devmem_lock);
|
|
-}
|
|
-
|
|
-static void hmm_devmem_release(struct device *dev, void *data)
|
|
-{
|
|
- struct hmm_devmem *devmem = data;
|
|
- struct resource *resource = devmem->resource;
|
|
- unsigned long start_pfn, npages;
|
|
- struct zone *zone;
|
|
- struct page *page;
|
|
-
|
|
- if (percpu_ref_tryget_live(&devmem->ref)) {
|
|
- dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
|
|
- percpu_ref_put(&devmem->ref);
|
|
- }
|
|
-
|
|
- /* pages are dead and unused, undo the arch mapping */
|
|
- start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
|
|
- npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
|
|
-
|
|
- page = pfn_to_page(start_pfn);
|
|
- zone = page_zone(page);
|
|
-
|
|
- mem_hotplug_begin();
|
|
- if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
|
|
- __remove_pages(zone, start_pfn, npages, NULL);
|
|
- else
|
|
- arch_remove_memory(start_pfn << PAGE_SHIFT,
|
|
- npages << PAGE_SHIFT, NULL);
|
|
- mem_hotplug_done();
|
|
-
|
|
- hmm_devmem_radix_release(resource);
|
|
-}
|
|
-
|
|
-static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
|
|
-{
|
|
- resource_size_t key, align_start, align_size, align_end;
|
|
- struct device *device = devmem->device;
|
|
- int ret, nid, is_ram;
|
|
-
|
|
- align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
|
|
- align_size = ALIGN(devmem->resource->start +
|
|
- resource_size(devmem->resource),
|
|
- PA_SECTION_SIZE) - align_start;
|
|
-
|
|
- is_ram = region_intersects(align_start, align_size,
|
|
- IORESOURCE_SYSTEM_RAM,
|
|
- IORES_DESC_NONE);
|
|
- if (is_ram == REGION_MIXED) {
|
|
- WARN_ONCE(1, "%s attempted on mixed region %pr\n",
|
|
- __func__, devmem->resource);
|
|
- return -ENXIO;
|
|
- }
|
|
- if (is_ram == REGION_INTERSECTS)
|
|
- return -ENXIO;
|
|
-
|
|
- if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
|
|
- devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
|
|
- else
|
|
- devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
|
|
-
|
|
- devmem->pagemap.res = *devmem->resource;
|
|
- devmem->pagemap.page_fault = hmm_devmem_fault;
|
|
- devmem->pagemap.page_free = hmm_devmem_free;
|
|
- devmem->pagemap.dev = devmem->device;
|
|
- devmem->pagemap.ref = &devmem->ref;
|
|
- devmem->pagemap.data = devmem;
|
|
-
|
|
- mutex_lock(&hmm_devmem_lock);
|
|
- align_end = align_start + align_size - 1;
|
|
- for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
|
|
- struct hmm_devmem *dup;
|
|
-
|
|
- dup = radix_tree_lookup(&hmm_devmem_radix,
|
|
- key >> PA_SECTION_SHIFT);
|
|
- if (dup) {
|
|
- dev_err(device, "%s: collides with mapping for %s\n",
|
|
- __func__, dev_name(dup->device));
|
|
- mutex_unlock(&hmm_devmem_lock);
|
|
- ret = -EBUSY;
|
|
- goto error;
|
|
- }
|
|
- ret = radix_tree_insert(&hmm_devmem_radix,
|
|
- key >> PA_SECTION_SHIFT,
|
|
- devmem);
|
|
- if (ret) {
|
|
- dev_err(device, "%s: failed: %d\n", __func__, ret);
|
|
- mutex_unlock(&hmm_devmem_lock);
|
|
- goto error_radix;
|
|
- }
|
|
- }
|
|
- mutex_unlock(&hmm_devmem_lock);
|
|
-
|
|
- nid = dev_to_node(device);
|
|
- if (nid < 0)
|
|
- nid = numa_mem_id();
|
|
-
|
|
- mem_hotplug_begin();
|
|
- /*
|
|
- * For device private memory we call add_pages() as we only need to
|
|
- * allocate and initialize struct page for the device memory. More-
|
|
- * over the device memory is un-accessible thus we do not want to
|
|
- * create a linear mapping for the memory like arch_add_memory()
|
|
- * would do.
|
|
- *
|
|
- * For device public memory, which is accesible by the CPU, we do
|
|
- * want the linear mapping and thus use arch_add_memory().
|
|
- */
|
|
- if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
|
|
- ret = arch_add_memory(nid, align_start, align_size, NULL,
|
|
- false);
|
|
- else
|
|
- ret = add_pages(nid, align_start >> PAGE_SHIFT,
|
|
- align_size >> PAGE_SHIFT, NULL, false);
|
|
- if (ret) {
|
|
- mem_hotplug_done();
|
|
- goto error_add_memory;
|
|
- }
|
|
- move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
|
|
- align_start >> PAGE_SHIFT,
|
|
- align_size >> PAGE_SHIFT, NULL);
|
|
- mem_hotplug_done();
|
|
-
|
|
- /*
|
|
- * Initialization of the pages has been deferred until now in order
|
|
- * to allow us to do the work while not holding the hotplug lock.
|
|
- */
|
|
- memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
|
|
- align_start >> PAGE_SHIFT,
|
|
- align_size >> PAGE_SHIFT, &devmem->pagemap);
|
|
-
|
|
- return 0;
|
|
-
|
|
-error_add_memory:
|
|
- untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
|
|
-error_radix:
|
|
- hmm_devmem_radix_release(devmem->resource);
|
|
-error:
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
|
|
-{
|
|
- struct hmm_devmem *devmem = data;
|
|
-
|
|
- return devmem->resource == match_data;
|
|
-}
|
|
-
|
|
-static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
|
|
-{
|
|
- devres_release(devmem->device, &hmm_devmem_release,
|
|
- &hmm_devmem_match, devmem->resource);
|
|
-}
|
|
-
|
|
/*
|
|
* hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
|
|
*
|
|
@@ -1210,12 +1041,12 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
|
|
{
|
|
struct hmm_devmem *devmem;
|
|
resource_size_t addr;
|
|
+ void *result;
|
|
int ret;
|
|
|
|
dev_pagemap_get_ops();
|
|
|
|
- devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
|
|
- GFP_KERNEL, dev_to_node(device));
|
|
+ devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
|
|
if (!devmem)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
@@ -1229,11 +1060,11 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
|
|
ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
|
|
0, GFP_KERNEL);
|
|
if (ret)
|
|
- goto error_percpu_ref;
|
|
+ return ERR_PTR(ret);
|
|
|
|
- ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
|
|
+ ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
|
|
if (ret)
|
|
- goto error_devm_add_action;
|
|
+ return ERR_PTR(ret);
|
|
|
|
size = ALIGN(size, PA_SECTION_SIZE);
|
|
addr = min((unsigned long)iomem_resource.end,
|
|
@@ -1253,54 +1084,40 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
|
|
|
|
devmem->resource = devm_request_mem_region(device, addr, size,
|
|
dev_name(device));
|
|
- if (!devmem->resource) {
|
|
- ret = -ENOMEM;
|
|
- goto error_no_resource;
|
|
- }
|
|
+ if (!devmem->resource)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
break;
|
|
}
|
|
- if (!devmem->resource) {
|
|
- ret = -ERANGE;
|
|
- goto error_no_resource;
|
|
- }
|
|
+ if (!devmem->resource)
|
|
+ return ERR_PTR(-ERANGE);
|
|
|
|
devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
|
|
devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
|
|
devmem->pfn_last = devmem->pfn_first +
|
|
(resource_size(devmem->resource) >> PAGE_SHIFT);
|
|
|
|
- ret = hmm_devmem_pages_create(devmem);
|
|
- if (ret)
|
|
- goto error_pages;
|
|
-
|
|
- devres_add(device, devmem);
|
|
-
|
|
- ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
|
|
- if (ret) {
|
|
- hmm_devmem_remove(devmem);
|
|
- return ERR_PTR(ret);
|
|
- }
|
|
+ devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
|
|
+ devmem->pagemap.res = *devmem->resource;
|
|
+ devmem->pagemap.page_fault = hmm_devmem_fault;
|
|
+ devmem->pagemap.page_free = hmm_devmem_free;
|
|
+ devmem->pagemap.altmap_valid = false;
|
|
+ devmem->pagemap.ref = &devmem->ref;
|
|
+ devmem->pagemap.data = devmem;
|
|
+ devmem->pagemap.kill = hmm_devmem_ref_kill;
|
|
|
|
+ result = devm_memremap_pages(devmem->device, &devmem->pagemap);
|
|
+ if (IS_ERR(result))
|
|
+ return result;
|
|
return devmem;
|
|
-
|
|
-error_pages:
|
|
- devm_release_mem_region(device, devmem->resource->start,
|
|
- resource_size(devmem->resource));
|
|
-error_no_resource:
|
|
-error_devm_add_action:
|
|
- hmm_devmem_ref_kill(&devmem->ref);
|
|
- hmm_devmem_ref_exit(&devmem->ref);
|
|
-error_percpu_ref:
|
|
- devres_free(devmem);
|
|
- return ERR_PTR(ret);
|
|
}
|
|
-EXPORT_SYMBOL(hmm_devmem_add);
|
|
+EXPORT_SYMBOL_GPL(hmm_devmem_add);
|
|
|
|
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
|
|
struct device *device,
|
|
struct resource *res)
|
|
{
|
|
struct hmm_devmem *devmem;
|
|
+ void *result;
|
|
int ret;
|
|
|
|
if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
|
|
@@ -1308,8 +1125,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
|
|
|
|
dev_pagemap_get_ops();
|
|
|
|
- devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
|
|
- GFP_KERNEL, dev_to_node(device));
|
|
+ devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
|
|
if (!devmem)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
@@ -1323,71 +1139,32 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
|
|
ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
|
|
0, GFP_KERNEL);
|
|
if (ret)
|
|
- goto error_percpu_ref;
|
|
+ return ERR_PTR(ret);
|
|
|
|
- ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
|
|
+ ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
|
|
+ &devmem->ref);
|
|
if (ret)
|
|
- goto error_devm_add_action;
|
|
-
|
|
+ return ERR_PTR(ret);
|
|
|
|
devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
|
|
devmem->pfn_last = devmem->pfn_first +
|
|
(resource_size(devmem->resource) >> PAGE_SHIFT);
|
|
|
|
- ret = hmm_devmem_pages_create(devmem);
|
|
- if (ret)
|
|
- goto error_devm_add_action;
|
|
-
|
|
- devres_add(device, devmem);
|
|
-
|
|
- ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
|
|
- if (ret) {
|
|
- hmm_devmem_remove(devmem);
|
|
- return ERR_PTR(ret);
|
|
- }
|
|
+ devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
|
|
+ devmem->pagemap.res = *devmem->resource;
|
|
+ devmem->pagemap.page_fault = hmm_devmem_fault;
|
|
+ devmem->pagemap.page_free = hmm_devmem_free;
|
|
+ devmem->pagemap.altmap_valid = false;
|
|
+ devmem->pagemap.ref = &devmem->ref;
|
|
+ devmem->pagemap.data = devmem;
|
|
+ devmem->pagemap.kill = hmm_devmem_ref_kill;
|
|
|
|
+ result = devm_memremap_pages(devmem->device, &devmem->pagemap);
|
|
+ if (IS_ERR(result))
|
|
+ return result;
|
|
return devmem;
|
|
-
|
|
-error_devm_add_action:
|
|
- hmm_devmem_ref_kill(&devmem->ref);
|
|
- hmm_devmem_ref_exit(&devmem->ref);
|
|
-error_percpu_ref:
|
|
- devres_free(devmem);
|
|
- return ERR_PTR(ret);
|
|
-}
|
|
-EXPORT_SYMBOL(hmm_devmem_add_resource);
|
|
-
|
|
-/*
|
|
- * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
|
|
- *
|
|
- * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory
|
|
- *
|
|
- * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
|
|
- * of the device driver. It will free struct page and remove the resource that
|
|
- * reserved the physical address range for this device memory.
|
|
- */
|
|
-void hmm_devmem_remove(struct hmm_devmem *devmem)
|
|
-{
|
|
- resource_size_t start, size;
|
|
- struct device *device;
|
|
- bool cdm = false;
|
|
-
|
|
- if (!devmem)
|
|
- return;
|
|
-
|
|
- device = devmem->device;
|
|
- start = devmem->resource->start;
|
|
- size = resource_size(devmem->resource);
|
|
-
|
|
- cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
|
|
- hmm_devmem_ref_kill(&devmem->ref);
|
|
- hmm_devmem_ref_exit(&devmem->ref);
|
|
- hmm_devmem_pages_remove(devmem);
|
|
-
|
|
- if (!cdm)
|
|
- devm_release_mem_region(device, start, size);
|
|
}
|
|
-EXPORT_SYMBOL(hmm_devmem_remove);
|
|
+EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
|
|
|
|
/*
|
|
* A device driver that wants to handle multiple devices memory through a
|
|
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
|
|
index a80832487981b..c5c708c83af09 100644
|
|
--- a/mm/hugetlb.c
|
|
+++ b/mm/hugetlb.c
|
|
@@ -4270,7 +4270,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
break;
|
|
}
|
|
if (ret & VM_FAULT_RETRY) {
|
|
- if (nonblocking)
|
|
+ if (nonblocking &&
|
|
+ !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
|
|
*nonblocking = 0;
|
|
*nr_pages = 0;
|
|
/*
|
|
diff --git a/mm/memblock.c b/mm/memblock.c
|
|
index 81ae63ca78d0b..74ac4f89018ab 100644
|
|
--- a/mm/memblock.c
|
|
+++ b/mm/memblock.c
|
|
@@ -26,6 +26,13 @@
|
|
|
|
#include "internal.h"
|
|
|
|
+#define INIT_MEMBLOCK_REGIONS 128
|
|
+#define INIT_PHYSMEM_REGIONS 4
|
|
+
|
|
+#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
|
|
+# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
|
|
+#endif
|
|
+
|
|
/**
|
|
* DOC: memblock overview
|
|
*
|
|
@@ -92,7 +99,7 @@ unsigned long max_pfn;
|
|
unsigned long long max_possible_pfn;
|
|
|
|
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
|
|
-static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
|
|
+static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
|
|
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
|
|
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
|
|
#endif
|
|
@@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = {
|
|
|
|
.reserved.regions = memblock_reserved_init_regions,
|
|
.reserved.cnt = 1, /* empty dummy entry */
|
|
- .reserved.max = INIT_MEMBLOCK_REGIONS,
|
|
+ .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
|
|
.reserved.name = "reserved",
|
|
|
|
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
|
|
@@ -262,7 +269,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
|
|
phys_addr_t kernel_end, ret;
|
|
|
|
/* pump up @end */
|
|
- if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
|
|
+ if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
|
|
+ end == MEMBLOCK_ALLOC_KASAN)
|
|
end = memblock.current_limit;
|
|
|
|
/* avoid allocating the first page */
|
|
@@ -1412,13 +1420,15 @@ again:
|
|
done:
|
|
ptr = phys_to_virt(alloc);
|
|
|
|
- /*
|
|
- * The min_count is set to 0 so that bootmem allocated blocks
|
|
- * are never reported as leaks. This is because many of these blocks
|
|
- * are only referred via the physical address which is not
|
|
- * looked up by kmemleak.
|
|
- */
|
|
- kmemleak_alloc(ptr, size, 0, 0);
|
|
+ /* Skip kmemleak for kasan_init() due to high volume. */
|
|
+ if (max_addr != MEMBLOCK_ALLOC_KASAN)
|
|
+ /*
|
|
+ * The min_count is set to 0 so that bootmem allocated
|
|
+ * blocks are never reported as leaks. This is because many
|
|
+ * of these blocks are only referred via the physical
|
|
+ * address which is not looked up by kmemleak.
|
|
+ */
|
|
+ kmemleak_alloc(ptr, size, 0, 0);
|
|
|
|
return ptr;
|
|
}
|
|
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
|
|
index 6e1469b80cb7d..7e6bf74ddb1ef 100644
|
|
--- a/mm/memcontrol.c
|
|
+++ b/mm/memcontrol.c
|
|
@@ -1666,6 +1666,9 @@ enum oom_status {
|
|
|
|
static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
|
|
{
|
|
+ enum oom_status ret;
|
|
+ bool locked;
|
|
+
|
|
if (order > PAGE_ALLOC_COSTLY_ORDER)
|
|
return OOM_SKIPPED;
|
|
|
|
@@ -1700,10 +1703,23 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
|
|
return OOM_ASYNC;
|
|
}
|
|
|
|
+ mem_cgroup_mark_under_oom(memcg);
|
|
+
|
|
+ locked = mem_cgroup_oom_trylock(memcg);
|
|
+
|
|
+ if (locked)
|
|
+ mem_cgroup_oom_notify(memcg);
|
|
+
|
|
+ mem_cgroup_unmark_under_oom(memcg);
|
|
if (mem_cgroup_out_of_memory(memcg, mask, order))
|
|
- return OOM_SUCCESS;
|
|
+ ret = OOM_SUCCESS;
|
|
+ else
|
|
+ ret = OOM_FAILED;
|
|
|
|
- return OOM_FAILED;
|
|
+ if (locked)
|
|
+ mem_cgroup_oom_unlock(memcg);
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
/**
|
|
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
|
|
index 7c72f2a95785e..831be5ff5f4df 100644
|
|
--- a/mm/memory-failure.c
|
|
+++ b/mm/memory-failure.c
|
|
@@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
|
|
if (fail || tk->addr_valid == 0) {
|
|
pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
|
|
pfn, tk->tsk->comm, tk->tsk->pid);
|
|
- force_sig(SIGKILL, tk->tsk);
|
|
+ do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
|
|
+ tk->tsk, PIDTYPE_PID);
|
|
}
|
|
|
|
/*
|
|
diff --git a/mm/memory.c b/mm/memory.c
|
|
index 4ad2d293ddc26..59c00ae6b9289 100644
|
|
--- a/mm/memory.c
|
|
+++ b/mm/memory.c
|
|
@@ -2993,6 +2993,29 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
|
|
struct vm_area_struct *vma = vmf->vma;
|
|
vm_fault_t ret;
|
|
|
|
+ /*
|
|
+ * Preallocate pte before we take page_lock because this might lead to
|
|
+ * deadlocks for memcg reclaim which waits for pages under writeback:
|
|
+ * lock_page(A)
|
|
+ * SetPageWriteback(A)
|
|
+ * unlock_page(A)
|
|
+ * lock_page(B)
|
|
+ * lock_page(B)
|
|
+ * pte_alloc_pne
|
|
+ * shrink_page_list
|
|
+ * wait_on_page_writeback(A)
|
|
+ * SetPageWriteback(B)
|
|
+ * unlock_page(B)
|
|
+ * # flush A, B to clear the writeback
|
|
+ */
|
|
+ if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
|
|
+ vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
|
|
+ vmf->address);
|
|
+ if (!vmf->prealloc_pte)
|
|
+ return VM_FAULT_OOM;
|
|
+ smp_wmb(); /* See comment in __pte_alloc() */
|
|
+ }
|
|
+
|
|
ret = vma->vm_ops->fault(vmf);
|
|
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
|
|
VM_FAULT_DONE_COW)))
|
|
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
|
|
index 2b2b3ccbbfb57..21d94b5677e81 100644
|
|
--- a/mm/memory_hotplug.c
|
|
+++ b/mm/memory_hotplug.c
|
|
@@ -34,6 +34,7 @@
|
|
#include <linux/hugetlb.h>
|
|
#include <linux/memblock.h>
|
|
#include <linux/compaction.h>
|
|
+#include <linux/rmap.h>
|
|
|
|
#include <asm/tlbflush.h>
|
|
|
|
@@ -1301,23 +1302,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
|
|
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
|
|
{
|
|
unsigned long pfn;
|
|
- struct page *page;
|
|
+
|
|
for (pfn = start; pfn < end; pfn++) {
|
|
- if (pfn_valid(pfn)) {
|
|
- page = pfn_to_page(pfn);
|
|
- if (PageLRU(page))
|
|
- return pfn;
|
|
- if (__PageMovable(page))
|
|
- return pfn;
|
|
- if (PageHuge(page)) {
|
|
- if (hugepage_migration_supported(page_hstate(page)) &&
|
|
- page_huge_active(page))
|
|
- return pfn;
|
|
- else
|
|
- pfn = round_up(pfn + 1,
|
|
- 1 << compound_order(page)) - 1;
|
|
- }
|
|
- }
|
|
+ struct page *page, *head;
|
|
+ unsigned long skip;
|
|
+
|
|
+ if (!pfn_valid(pfn))
|
|
+ continue;
|
|
+ page = pfn_to_page(pfn);
|
|
+ if (PageLRU(page))
|
|
+ return pfn;
|
|
+ if (__PageMovable(page))
|
|
+ return pfn;
|
|
+
|
|
+ if (!PageHuge(page))
|
|
+ continue;
|
|
+ head = compound_head(page);
|
|
+ if (hugepage_migration_supported(page_hstate(head)) &&
|
|
+ page_huge_active(head))
|
|
+ return pfn;
|
|
+ skip = (1 << compound_order(head)) - (page - head);
|
|
+ pfn += skip - 1;
|
|
}
|
|
return 0;
|
|
}
|
|
@@ -1369,6 +1374,21 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
|
|
pfn = page_to_pfn(compound_head(page))
|
|
+ hpage_nr_pages(page) - 1;
|
|
|
|
+ /*
|
|
+ * HWPoison pages have elevated reference counts so the migration would
|
|
+ * fail on them. It also doesn't make any sense to migrate them in the
|
|
+ * first place. Still try to unmap such a page in case it is still mapped
|
|
+ * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
|
|
+ * the unmap as the catch all safety net).
|
|
+ */
|
|
+ if (PageHWPoison(page)) {
|
|
+ if (WARN_ON(PageLRU(page)))
|
|
+ isolate_lru_page(page);
|
|
+ if (page_mapped(page))
|
|
+ try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
if (!get_page_unless_zero(page))
|
|
continue;
|
|
/*
|
|
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
|
|
index d4496d9d34f53..ee2bce59d2bff 100644
|
|
--- a/mm/mempolicy.c
|
|
+++ b/mm/mempolicy.c
|
|
@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
|
|
nodemask_t *nodes)
|
|
{
|
|
unsigned long copy = ALIGN(maxnode-1, 64) / 8;
|
|
- const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
|
|
+ unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
|
|
|
|
if (copy > nbytes) {
|
|
if (copy > PAGE_SIZE)
|
|
@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
|
|
int uninitialized_var(pval);
|
|
nodemask_t nodes;
|
|
|
|
- if (nmask != NULL && maxnode < MAX_NUMNODES)
|
|
+ if (nmask != NULL && maxnode < nr_node_ids)
|
|
return -EINVAL;
|
|
|
|
err = do_get_mempolicy(&pval, &nodes, addr, flags);
|
|
@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
|
|
unsigned long nr_bits, alloc_size;
|
|
DECLARE_BITMAP(bm, MAX_NUMNODES);
|
|
|
|
- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
|
|
+ nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
|
|
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
|
|
|
|
if (nmask)
|
|
diff --git a/mm/migrate.c b/mm/migrate.c
|
|
index f7e4bfdc13b78..9638cd59fef11 100644
|
|
--- a/mm/migrate.c
|
|
+++ b/mm/migrate.c
|
|
@@ -1108,10 +1108,13 @@ out:
|
|
* If migration is successful, decrease refcount of the newpage
|
|
* which will not free the page because new page owner increased
|
|
* refcounter. As well, if it is LRU page, add the page to LRU
|
|
- * list in here.
|
|
+ * list in here. Use the old state of the isolated source page to
|
|
+ * determine if we migrated a LRU page. newpage was already unlocked
|
|
+ * and possibly modified by its owner - don't rely on the page
|
|
+ * state.
|
|
*/
|
|
if (rc == MIGRATEPAGE_SUCCESS) {
|
|
- if (unlikely(__PageMovable(newpage)))
|
|
+ if (unlikely(!is_lru))
|
|
put_page(newpage);
|
|
else
|
|
putback_lru_page(newpage);
|
|
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
|
|
index 6589f60d50186..cc317efdcb43d 100644
|
|
--- a/mm/oom_kill.c
|
|
+++ b/mm/oom_kill.c
|
|
@@ -634,8 +634,8 @@ static int oom_reaper(void *unused)
|
|
|
|
static void wake_oom_reaper(struct task_struct *tsk)
|
|
{
|
|
- /* tsk is already queued? */
|
|
- if (tsk == oom_reaper_list || tsk->oom_reaper_list)
|
|
+ /* mm is already queued? */
|
|
+ if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
|
|
return;
|
|
|
|
get_task_struct(tsk);
|
|
@@ -962,6 +962,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
|
|
* still freeing memory.
|
|
*/
|
|
read_lock(&tasklist_lock);
|
|
+
|
|
+ /*
|
|
+ * The task 'p' might have already exited before reaching here. The
|
|
+ * put_task_struct() will free task_struct 'p' while the loop still try
|
|
+ * to access the field of 'p', so, get an extra reference.
|
|
+ */
|
|
+ get_task_struct(p);
|
|
for_each_thread(p, t) {
|
|
list_for_each_entry(child, &t->children, sibling) {
|
|
unsigned int child_points;
|
|
@@ -981,6 +988,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
|
|
}
|
|
}
|
|
}
|
|
+ put_task_struct(p);
|
|
read_unlock(&tasklist_lock);
|
|
|
|
/*
|
|
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
|
|
index 3f690bae6b780..7d1010453fb95 100644
|
|
--- a/mm/page-writeback.c
|
|
+++ b/mm/page-writeback.c
|
|
@@ -2154,6 +2154,7 @@ int write_cache_pages(struct address_space *mapping,
|
|
{
|
|
int ret = 0;
|
|
int done = 0;
|
|
+ int error;
|
|
struct pagevec pvec;
|
|
int nr_pages;
|
|
pgoff_t uninitialized_var(writeback_index);
|
|
@@ -2227,25 +2228,31 @@ continue_unlock:
|
|
goto continue_unlock;
|
|
|
|
trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
|
|
- ret = (*writepage)(page, wbc, data);
|
|
- if (unlikely(ret)) {
|
|
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
|
|
+ error = (*writepage)(page, wbc, data);
|
|
+ if (unlikely(error)) {
|
|
+ /*
|
|
+ * Handle errors according to the type of
|
|
+ * writeback. There's no need to continue for
|
|
+ * background writeback. Just push done_index
|
|
+ * past this page so media errors won't choke
|
|
+ * writeout for the entire file. For integrity
|
|
+ * writeback, we must process the entire dirty
|
|
+ * set regardless of errors because the fs may
|
|
+ * still have state to clear for each page. In
|
|
+ * that case we continue processing and return
|
|
+ * the first error.
|
|
+ */
|
|
+ if (error == AOP_WRITEPAGE_ACTIVATE) {
|
|
unlock_page(page);
|
|
- ret = 0;
|
|
- } else {
|
|
- /*
|
|
- * done_index is set past this page,
|
|
- * so media errors will not choke
|
|
- * background writeout for the entire
|
|
- * file. This has consequences for
|
|
- * range_cyclic semantics (ie. it may
|
|
- * not be suitable for data integrity
|
|
- * writeout).
|
|
- */
|
|
+ error = 0;
|
|
+ } else if (wbc->sync_mode != WB_SYNC_ALL) {
|
|
+ ret = error;
|
|
done_index = page->index + 1;
|
|
done = 1;
|
|
break;
|
|
}
|
|
+ if (!ret)
|
|
+ ret = error;
|
|
}
|
|
|
|
/*
|
|
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
|
|
index e95b5b7c9c3d6..a29043ea92128 100644
|
|
--- a/mm/page_alloc.c
|
|
+++ b/mm/page_alloc.c
|
|
@@ -294,6 +294,32 @@ EXPORT_SYMBOL(nr_online_nodes);
|
|
int page_group_by_mobility_disabled __read_mostly;
|
|
|
|
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
|
|
+/*
|
|
+ * During boot we initialize deferred pages on-demand, as needed, but once
|
|
+ * page_alloc_init_late() has finished, the deferred pages are all initialized,
|
|
+ * and we can permanently disable that path.
|
|
+ */
|
|
+static DEFINE_STATIC_KEY_TRUE(deferred_pages);
|
|
+
|
|
+/*
|
|
+ * Calling kasan_free_pages() only after deferred memory initialization
|
|
+ * has completed. Poisoning pages during deferred memory init will greatly
|
|
+ * lengthen the process and cause problem in large memory systems as the
|
|
+ * deferred pages initialization is done with interrupt disabled.
|
|
+ *
|
|
+ * Assuming that there will be no reference to those newly initialized
|
|
+ * pages before they are ever allocated, this should have no effect on
|
|
+ * KASAN memory tracking as the poison will be properly inserted at page
|
|
+ * allocation time. The only corner case is when pages are allocated by
|
|
+ * on-demand allocation and then freed again before the deferred pages
|
|
+ * initialization is done, but this is not likely to happen.
|
|
+ */
|
|
+static inline void kasan_free_nondeferred_pages(struct page *page, int order)
|
|
+{
|
|
+ if (!static_branch_unlikely(&deferred_pages))
|
|
+ kasan_free_pages(page, order);
|
|
+}
|
|
+
|
|
/* Returns true if the struct page for the pfn is uninitialised */
|
|
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
|
|
{
|
|
@@ -335,6 +361,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
|
|
return false;
|
|
}
|
|
#else
|
|
+#define kasan_free_nondeferred_pages(p, o) kasan_free_pages(p, o)
|
|
+
|
|
static inline bool early_page_uninitialised(unsigned long pfn)
|
|
{
|
|
return false;
|
|
@@ -1037,7 +1065,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
|
|
arch_free_page(page, order);
|
|
kernel_poison_pages(page, 1 << order, 0);
|
|
kernel_map_pages(page, 1 << order, 0);
|
|
- kasan_free_pages(page, order);
|
|
+ kasan_free_nondeferred_pages(page, order);
|
|
|
|
return true;
|
|
}
|
|
@@ -1605,13 +1633,6 @@ static int __init deferred_init_memmap(void *data)
|
|
return 0;
|
|
}
|
|
|
|
-/*
|
|
- * During boot we initialize deferred pages on-demand, as needed, but once
|
|
- * page_alloc_init_late() has finished, the deferred pages are all initialized,
|
|
- * and we can permanently disable that path.
|
|
- */
|
|
-static DEFINE_STATIC_KEY_TRUE(deferred_pages);
|
|
-
|
|
/*
|
|
* If this zone has deferred pages, try to grow it by initializing enough
|
|
* deferred pages to satisfy the allocation specified by order, rounded up to
|
|
@@ -5542,18 +5563,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
|
|
cond_resched();
|
|
}
|
|
}
|
|
-#ifdef CONFIG_SPARSEMEM
|
|
- /*
|
|
- * If the zone does not span the rest of the section then
|
|
- * we should at least initialize those pages. Otherwise we
|
|
- * could blow up on a poisoned page in some paths which depend
|
|
- * on full sections being initialized (e.g. memory hotplug).
|
|
- */
|
|
- while (end_pfn % PAGES_PER_SECTION) {
|
|
- __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
|
|
- end_pfn++;
|
|
- }
|
|
-#endif
|
|
}
|
|
|
|
#ifdef CONFIG_ZONE_DEVICE
|
|
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
|
|
index 38de70ab1a0d6..0f643dc2dc658 100644
|
|
--- a/mm/percpu-km.c
|
|
+++ b/mm/percpu-km.c
|
|
@@ -50,6 +50,7 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
|
|
const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
|
|
struct pcpu_chunk *chunk;
|
|
struct page *pages;
|
|
+ unsigned long flags;
|
|
int i;
|
|
|
|
chunk = pcpu_alloc_chunk(gfp);
|
|
@@ -68,9 +69,9 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
|
|
chunk->data = pages;
|
|
chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
|
|
|
|
- spin_lock_irq(&pcpu_lock);
|
|
+ spin_lock_irqsave(&pcpu_lock, flags);
|
|
pcpu_chunk_populated(chunk, 0, nr_pages, false);
|
|
- spin_unlock_irq(&pcpu_lock);
|
|
+ spin_unlock_irqrestore(&pcpu_lock, flags);
|
|
|
|
pcpu_stats_chunk_alloc();
|
|
trace_percpu_create_chunk(chunk->base_addr);
|
|
diff --git a/mm/slab.c b/mm/slab.c
|
|
index 2a5654bb3b3ff..9d5de959d9d9d 100644
|
|
--- a/mm/slab.c
|
|
+++ b/mm/slab.c
|
|
@@ -679,8 +679,10 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
|
|
struct alien_cache *alc = NULL;
|
|
|
|
alc = kmalloc_node(memsize, gfp, node);
|
|
- init_arraycache(&alc->ac, entries, batch);
|
|
- spin_lock_init(&alc->lock);
|
|
+ if (alc) {
|
|
+ init_arraycache(&alc->ac, entries, batch);
|
|
+ spin_lock_init(&alc->lock);
|
|
+ }
|
|
return alc;
|
|
}
|
|
|
|
diff --git a/mm/swapfile.c b/mm/swapfile.c
|
|
index 8688ae65ef58a..dbac1d49469d4 100644
|
|
--- a/mm/swapfile.c
|
|
+++ b/mm/swapfile.c
|
|
@@ -2197,7 +2197,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
|
|
*/
|
|
if (PageSwapCache(page) &&
|
|
likely(page_private(page) == entry.val) &&
|
|
- !page_swapped(page))
|
|
+ (!PageTransCompound(page) ||
|
|
+ !swap_page_trans_huge_swapped(si, entry)))
|
|
delete_from_swap_cache(compound_head(page));
|
|
|
|
/*
|
|
@@ -2812,8 +2813,9 @@ static struct swap_info_struct *alloc_swap_info(void)
|
|
struct swap_info_struct *p;
|
|
unsigned int type;
|
|
int i;
|
|
+ int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
|
|
|
|
- p = kvzalloc(sizeof(*p), GFP_KERNEL);
|
|
+ p = kvzalloc(size, GFP_KERNEL);
|
|
if (!p)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
diff --git a/mm/usercopy.c b/mm/usercopy.c
|
|
index 852eb4e53f06e..14faadcedd06c 100644
|
|
--- a/mm/usercopy.c
|
|
+++ b/mm/usercopy.c
|
|
@@ -247,7 +247,8 @@ static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
|
|
/*
|
|
* Validates that the given object is:
|
|
* - not bogus address
|
|
- * - known-safe heap or stack object
|
|
+ * - fully contained by stack (or stack frame, when available)
|
|
+ * - fully within SLAB object (or object whitelist area, when available)
|
|
* - not in kernel text
|
|
*/
|
|
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
|
|
@@ -262,9 +263,6 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
|
|
/* Check for invalid addresses. */
|
|
check_bogus_address((const unsigned long)ptr, n, to_user);
|
|
|
|
- /* Check for bad heap object. */
|
|
- check_heap_object(ptr, n, to_user);
|
|
-
|
|
/* Check for bad stack object. */
|
|
switch (check_stack_object(ptr, n)) {
|
|
case NOT_STACK:
|
|
@@ -282,6 +280,9 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
|
|
usercopy_abort("process stack", NULL, to_user, 0, n);
|
|
}
|
|
|
|
+ /* Check for bad heap object. */
|
|
+ check_heap_object(ptr, n, to_user);
|
|
+
|
|
/* Check for object in kernel to avoid text exposure. */
|
|
check_kernel_text_object((const unsigned long)ptr, n, to_user);
|
|
}
|
|
diff --git a/mm/util.c b/mm/util.c
|
|
index 8bf08b5b57606..5c9c7359ee8ab 100644
|
|
--- a/mm/util.c
|
|
+++ b/mm/util.c
|
|
@@ -478,7 +478,7 @@ bool page_mapped(struct page *page)
|
|
return true;
|
|
if (PageHuge(page))
|
|
return false;
|
|
- for (i = 0; i < hpage_nr_pages(page); i++) {
|
|
+ for (i = 0; i < (1 << compound_order(page)); i++) {
|
|
if (atomic_read(&page[i]._mapcount) >= 0)
|
|
return true;
|
|
}
|
|
diff --git a/mm/vmscan.c b/mm/vmscan.c
|
|
index 62ac0c488624f..8e377bbac3a6c 100644
|
|
--- a/mm/vmscan.c
|
|
+++ b/mm/vmscan.c
|
|
@@ -487,16 +487,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
|
|
delta = freeable / 2;
|
|
}
|
|
|
|
- /*
|
|
- * Make sure we apply some minimal pressure on default priority
|
|
- * even on small cgroups. Stale objects are not only consuming memory
|
|
- * by themselves, but can also hold a reference to a dying cgroup,
|
|
- * preventing it from being reclaimed. A dying cgroup with all
|
|
- * corresponding structures like per-cpu stats and kmem caches
|
|
- * can be really big, so it may lead to a significant waste of memory.
|
|
- */
|
|
- delta = max_t(unsigned long long, delta, min(freeable, batch_size));
|
|
-
|
|
total_scan += delta;
|
|
if (total_scan < 0) {
|
|
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
|
|
diff --git a/net/9p/client.c b/net/9p/client.c
|
|
index 2c9a17b9b46bb..357214a51f138 100644
|
|
--- a/net/9p/client.c
|
|
+++ b/net/9p/client.c
|
|
@@ -181,6 +181,12 @@ static int parse_opts(char *opts, struct p9_client *clnt)
|
|
ret = r;
|
|
continue;
|
|
}
|
|
+ if (option < 4096) {
|
|
+ p9_debug(P9_DEBUG_ERROR,
|
|
+ "msize should be at least 4k\n");
|
|
+ ret = -EINVAL;
|
|
+ continue;
|
|
+ }
|
|
clnt->msize = option;
|
|
break;
|
|
case Opt_trans:
|
|
@@ -983,10 +989,18 @@ static int p9_client_version(struct p9_client *c)
|
|
else if (!strncmp(version, "9P2000", 6))
|
|
c->proto_version = p9_proto_legacy;
|
|
else {
|
|
+ p9_debug(P9_DEBUG_ERROR,
|
|
+ "server returned an unknown version: %s\n", version);
|
|
err = -EREMOTEIO;
|
|
goto error;
|
|
}
|
|
|
|
+ if (msize < 4096) {
|
|
+ p9_debug(P9_DEBUG_ERROR,
|
|
+ "server returned a msize < 4096: %d\n", msize);
|
|
+ err = -EREMOTEIO;
|
|
+ goto error;
|
|
+ }
|
|
if (msize < c->msize)
|
|
c->msize = msize;
|
|
|
|
@@ -1043,6 +1057,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
|
|
if (clnt->msize > clnt->trans_mod->maxsize)
|
|
clnt->msize = clnt->trans_mod->maxsize;
|
|
|
|
+ if (clnt->msize < 4096) {
|
|
+ p9_debug(P9_DEBUG_ERROR,
|
|
+ "Please specify a msize of at least 4k\n");
|
|
+ err = -EINVAL;
|
|
+ goto free_client;
|
|
+ }
|
|
+
|
|
err = p9_client_version(clnt);
|
|
if (err)
|
|
goto close_trans;
|
|
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
|
|
index c603d33d54108..5d01edf8d819e 100644
|
|
--- a/net/ax25/af_ax25.c
|
|
+++ b/net/ax25/af_ax25.c
|
|
@@ -653,15 +653,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
|
|
break;
|
|
}
|
|
|
|
- dev = dev_get_by_name(&init_net, devname);
|
|
+ rtnl_lock();
|
|
+ dev = __dev_get_by_name(&init_net, devname);
|
|
if (!dev) {
|
|
+ rtnl_unlock();
|
|
res = -ENODEV;
|
|
break;
|
|
}
|
|
|
|
ax25->ax25_dev = ax25_dev_ax25dev(dev);
|
|
+ if (!ax25->ax25_dev) {
|
|
+ rtnl_unlock();
|
|
+ res = -ENODEV;
|
|
+ break;
|
|
+ }
|
|
ax25_fillin_cb(ax25, ax25->ax25_dev);
|
|
- dev_put(dev);
|
|
+ rtnl_unlock();
|
|
break;
|
|
|
|
default:
|
|
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
|
|
index 9a3a301e1e2f0..d92195cd78349 100644
|
|
--- a/net/ax25/ax25_dev.c
|
|
+++ b/net/ax25/ax25_dev.c
|
|
@@ -116,6 +116,7 @@ void ax25_dev_device_down(struct net_device *dev)
|
|
if ((s = ax25_dev_list) == ax25_dev) {
|
|
ax25_dev_list = s->next;
|
|
spin_unlock_bh(&ax25_dev_lock);
|
|
+ dev->ax25_ptr = NULL;
|
|
dev_put(dev);
|
|
kfree(ax25_dev);
|
|
return;
|
|
@@ -125,6 +126,7 @@ void ax25_dev_device_down(struct net_device *dev)
|
|
if (s->next == ax25_dev) {
|
|
s->next = ax25_dev->next;
|
|
spin_unlock_bh(&ax25_dev_lock);
|
|
+ dev->ax25_ptr = NULL;
|
|
dev_put(dev);
|
|
kfree(ax25_dev);
|
|
return;
|
|
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
|
|
index 70417e9b932dd..314bbc8010fbe 100644
|
|
--- a/net/ax25/ax25_ip.c
|
|
+++ b/net/ax25/ax25_ip.c
|
|
@@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
|
|
dst = (ax25_address *)(bp + 1);
|
|
src = (ax25_address *)(bp + 8);
|
|
|
|
+ ax25_route_lock_use();
|
|
route = ax25_get_route(dst, NULL);
|
|
if (route) {
|
|
digipeat = route->digipeat;
|
|
@@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
|
|
ax25_queue_xmit(skb, dev);
|
|
|
|
put:
|
|
- if (route)
|
|
- ax25_put_route(route);
|
|
|
|
+ ax25_route_lock_unuse();
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
|
|
index a0eff323af12c..66f74c85cf6bd 100644
|
|
--- a/net/ax25/ax25_route.c
|
|
+++ b/net/ax25/ax25_route.c
|
|
@@ -40,7 +40,7 @@
|
|
#include <linux/export.h>
|
|
|
|
static ax25_route *ax25_route_list;
|
|
-static DEFINE_RWLOCK(ax25_route_lock);
|
|
+DEFINE_RWLOCK(ax25_route_lock);
|
|
|
|
void ax25_rt_device_down(struct net_device *dev)
|
|
{
|
|
@@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = {
|
|
* Find AX.25 route
|
|
*
|
|
* Only routes with a reference count of zero can be destroyed.
|
|
+ * Must be called with ax25_route_lock read locked.
|
|
*/
|
|
ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
|
|
{
|
|
@@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
|
|
ax25_route *ax25_def_rt = NULL;
|
|
ax25_route *ax25_rt;
|
|
|
|
- read_lock(&ax25_route_lock);
|
|
/*
|
|
* Bind to the physical interface we heard them on, or the default
|
|
* route if none is found;
|
|
@@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
|
|
if (ax25_spe_rt != NULL)
|
|
ax25_rt = ax25_spe_rt;
|
|
|
|
- if (ax25_rt != NULL)
|
|
- ax25_hold_route(ax25_rt);
|
|
-
|
|
- read_unlock(&ax25_route_lock);
|
|
-
|
|
return ax25_rt;
|
|
}
|
|
|
|
@@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
|
|
ax25_route *ax25_rt;
|
|
int err = 0;
|
|
|
|
- if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
|
|
+ ax25_route_lock_use();
|
|
+ ax25_rt = ax25_get_route(addr, NULL);
|
|
+ if (!ax25_rt) {
|
|
+ ax25_route_lock_unuse();
|
|
return -EHOSTUNREACH;
|
|
-
|
|
+ }
|
|
if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
|
|
err = -EHOSTUNREACH;
|
|
goto put;
|
|
@@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
|
|
}
|
|
|
|
put:
|
|
- ax25_put_route(ax25_rt);
|
|
-
|
|
+ ax25_route_lock_unuse();
|
|
return err;
|
|
}
|
|
|
|
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
|
|
index 781c5b6e6e8ed..41be60d54001b 100644
|
|
--- a/net/batman-adv/hard-interface.c
|
|
+++ b/net/batman-adv/hard-interface.c
|
|
@@ -20,7 +20,6 @@
|
|
#include "main.h"
|
|
|
|
#include <linux/atomic.h>
|
|
-#include <linux/bug.h>
|
|
#include <linux/byteorder/generic.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/gfp.h>
|
|
@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
|
|
parent_dev = __dev_get_by_index((struct net *)parent_net,
|
|
dev_get_iflink(net_dev));
|
|
/* if we got a NULL parent_dev there is something broken.. */
|
|
- if (WARN(!parent_dev, "Cannot find parent device"))
|
|
+ if (!parent_dev) {
|
|
+ pr_err("Cannot find parent device\n");
|
|
return false;
|
|
+ }
|
|
|
|
if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
|
|
return false;
|
|
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
|
|
index 5db5a0a4c959b..ffc83bebfe403 100644
|
|
--- a/net/batman-adv/soft-interface.c
|
|
+++ b/net/batman-adv/soft-interface.c
|
|
@@ -221,10 +221,14 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
|
|
|
|
netif_trans_update(soft_iface);
|
|
vid = batadv_get_vid(skb, 0);
|
|
+
|
|
+ skb_reset_mac_header(skb);
|
|
ethhdr = eth_hdr(skb);
|
|
|
|
switch (ntohs(ethhdr->h_proto)) {
|
|
case ETH_P_8021Q:
|
|
+ if (!pskb_may_pull(skb, sizeof(*vhdr)))
|
|
+ goto dropped;
|
|
vhdr = vlan_eth_hdr(skb);
|
|
|
|
/* drop batman-in-batman packets to prevent loops */
|
|
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
|
|
index ef9928d7b4fb5..ac2826ce162b9 100644
|
|
--- a/net/bluetooth/hci_event.c
|
|
+++ b/net/bluetooth/hci_event.c
|
|
@@ -5711,6 +5711,12 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
|
|
return true;
|
|
}
|
|
|
|
+ /* Check if request ended in Command Status - no way to retreive
|
|
+ * any extra parameters in this case.
|
|
+ */
|
|
+ if (hdr->evt == HCI_EV_CMD_STATUS)
|
|
+ return false;
|
|
+
|
|
if (hdr->evt != HCI_EV_CMD_COMPLETE) {
|
|
bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
|
|
hdr->evt);
|
|
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
|
|
index e56ba3912a905..8b8abf88befbd 100644
|
|
--- a/net/bridge/br_fdb.c
|
|
+++ b/net/bridge/br_fdb.c
|
|
@@ -1102,6 +1102,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
|
|
err = -ENOMEM;
|
|
goto err_unlock;
|
|
}
|
|
+ if (swdev_notify)
|
|
+ fdb->added_by_user = 1;
|
|
fdb->added_by_external_learn = 1;
|
|
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
|
|
} else {
|
|
@@ -1121,6 +1123,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
|
|
modified = true;
|
|
}
|
|
|
|
+ if (swdev_notify)
|
|
+ fdb->added_by_user = 1;
|
|
+
|
|
if (modified)
|
|
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
|
|
}
|
|
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
|
|
index 5372e2042adfe..48ddc60b4fbde 100644
|
|
--- a/net/bridge/br_forward.c
|
|
+++ b/net/bridge/br_forward.c
|
|
@@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
|
|
|
|
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
|
|
{
|
|
+ skb_push(skb, ETH_HLEN);
|
|
if (!is_skb_forwardable(skb->dev, skb))
|
|
goto drop;
|
|
|
|
- skb_push(skb, ETH_HLEN);
|
|
br_drop_fake_rtable(skb);
|
|
|
|
if (skb->ip_summed == CHECKSUM_PARTIAL &&
|
|
@@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
|
|
|
|
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
|
|
{
|
|
+ skb->tstamp = 0;
|
|
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
|
|
net, sk, skb, NULL, skb->dev,
|
|
br_dev_queue_push_xmit);
|
|
@@ -97,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to,
|
|
net = dev_net(indev);
|
|
} else {
|
|
if (unlikely(netpoll_tx_running(to->br->dev))) {
|
|
- if (!is_skb_forwardable(skb->dev, skb)) {
|
|
+ skb_push(skb, ETH_HLEN);
|
|
+ if (!is_skb_forwardable(skb->dev, skb))
|
|
kfree_skb(skb);
|
|
- } else {
|
|
- skb_push(skb, ETH_HLEN);
|
|
+ else
|
|
br_netpoll_send_skb(to, skb);
|
|
- }
|
|
return;
|
|
}
|
|
br_hook = NF_BR_LOCAL_OUT;
|
|
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
|
|
index 6bac0d6b7b941..024139b51d3a5 100644
|
|
--- a/net/bridge/br_multicast.c
|
|
+++ b/net/bridge/br_multicast.c
|
|
@@ -1422,14 +1422,7 @@ static void br_multicast_query_received(struct net_bridge *br,
|
|
return;
|
|
|
|
br_multicast_update_query_timer(br, query, max_delay);
|
|
-
|
|
- /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
|
|
- * the arrival port for IGMP Queries where the source address
|
|
- * is 0.0.0.0 should not be added to router port list.
|
|
- */
|
|
- if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
|
|
- saddr->proto == htons(ETH_P_IPV6))
|
|
- br_multicast_mark_router(br, port);
|
|
+ br_multicast_mark_router(br, port);
|
|
}
|
|
|
|
static void br_ip4_multicast_query(struct net_bridge *br,
|
|
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
|
|
index b1b5e8516724a..ed683e5b73bac 100644
|
|
--- a/net/bridge/br_netfilter_hooks.c
|
|
+++ b/net/bridge/br_netfilter_hooks.c
|
|
@@ -278,7 +278,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
|
|
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
|
|
int ret;
|
|
|
|
- if (neigh->hh.hh_len) {
|
|
+ if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
|
|
neigh_hh_bridge(&neigh->hh, skb);
|
|
skb->dev = nf_bridge->physindev;
|
|
ret = br_handle_frame_finish(net, sk, skb);
|
|
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
|
|
index 96c072e71ea2e..5811208863b73 100644
|
|
--- a/net/bridge/br_netfilter_ipv6.c
|
|
+++ b/net/bridge/br_netfilter_ipv6.c
|
|
@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
|
|
IPSTATS_MIB_INDISCARDS);
|
|
goto drop;
|
|
}
|
|
+ hdr = ipv6_hdr(skb);
|
|
}
|
|
if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
|
|
goto drop;
|
|
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
|
|
index 491828713e0bd..5e55cef0cec39 100644
|
|
--- a/net/bridge/netfilter/ebtables.c
|
|
+++ b/net/bridge/netfilter/ebtables.c
|
|
@@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user,
|
|
tmp.name[sizeof(tmp.name) - 1] = 0;
|
|
|
|
countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
|
|
- newinfo = vmalloc(sizeof(*newinfo) + countersize);
|
|
+ newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
|
|
+ PAGE_KERNEL);
|
|
if (!newinfo)
|
|
return -ENOMEM;
|
|
|
|
if (countersize)
|
|
memset(newinfo->counters, 0, countersize);
|
|
|
|
- newinfo->entries = vmalloc(tmp.entries_size);
|
|
+ newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
|
|
+ PAGE_KERNEL);
|
|
if (!newinfo->entries) {
|
|
ret = -ENOMEM;
|
|
goto free_newinfo;
|
|
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
|
|
index 08cbed7d940e3..419e8edf23ba8 100644
|
|
--- a/net/bridge/netfilter/nft_reject_bridge.c
|
|
+++ b/net/bridge/netfilter/nft_reject_bridge.c
|
|
@@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
|
|
pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
|
|
return false;
|
|
|
|
+ ip6h = ipv6_hdr(skb);
|
|
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
|
|
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
|
|
return false;
|
|
diff --git a/net/can/bcm.c b/net/can/bcm.c
|
|
index 0af8f0db892a3..79bb8afa9c0c0 100644
|
|
--- a/net/can/bcm.c
|
|
+++ b/net/can/bcm.c
|
|
@@ -67,6 +67,9 @@
|
|
*/
|
|
#define MAX_NFRAMES 256
|
|
|
|
+/* limit timers to 400 days for sending/timeouts */
|
|
+#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
|
|
+
|
|
/* use of last_frames[index].flags */
|
|
#define RX_RECV 0x40 /* received data for this element */
|
|
#define RX_THR 0x80 /* element not been sent due to throttle feature */
|
|
@@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
|
|
return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
|
|
}
|
|
|
|
+/* check limitations for timeval provided by user */
|
|
+static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
|
|
+{
|
|
+ if ((msg_head->ival1.tv_sec < 0) ||
|
|
+ (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
|
|
+ (msg_head->ival1.tv_usec < 0) ||
|
|
+ (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
|
|
+ (msg_head->ival2.tv_sec < 0) ||
|
|
+ (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
|
|
+ (msg_head->ival2.tv_usec < 0) ||
|
|
+ (msg_head->ival2.tv_usec >= USEC_PER_SEC))
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
|
|
#define OPSIZ sizeof(struct bcm_op)
|
|
#define MHSIZ sizeof(struct bcm_msg_head)
|
|
@@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
|
|
if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
|
|
return -EINVAL;
|
|
|
|
+ /* check timeval limitations */
|
|
+ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
|
|
+ return -EINVAL;
|
|
+
|
|
/* check the given can_id */
|
|
op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
|
|
if (op) {
|
|
@@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
|
|
(!(msg_head->can_id & CAN_RTR_FLAG))))
|
|
return -EINVAL;
|
|
|
|
+ /* check timeval limitations */
|
|
+ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
|
|
+ return -EINVAL;
|
|
+
|
|
/* check the given can_id */
|
|
op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
|
|
if (op) {
|
|
diff --git a/net/can/gw.c b/net/can/gw.c
|
|
index faa3da88a1277..53859346dc9a9 100644
|
|
--- a/net/can/gw.c
|
|
+++ b/net/can/gw.c
|
|
@@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
|
|
while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
|
|
(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
|
|
|
|
- /* check for checksum updates when the CAN frame has been modified */
|
|
+ /* Has the CAN frame been modified? */
|
|
if (modidx) {
|
|
- if (gwj->mod.csumfunc.crc8)
|
|
+ /* get available space for the processed CAN frame type */
|
|
+ int max_len = nskb->len - offsetof(struct can_frame, data);
|
|
+
|
|
+ /* dlc may have changed, make sure it fits to the CAN frame */
|
|
+ if (cf->can_dlc > max_len)
|
|
+ goto out_delete;
|
|
+
|
|
+ /* check for checksum updates in classic CAN length only */
|
|
+ if (gwj->mod.csumfunc.crc8) {
|
|
+ if (cf->can_dlc > 8)
|
|
+ goto out_delete;
|
|
+
|
|
(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
|
|
+ }
|
|
+
|
|
+ if (gwj->mod.csumfunc.xor) {
|
|
+ if (cf->can_dlc > 8)
|
|
+ goto out_delete;
|
|
|
|
- if (gwj->mod.csumfunc.xor)
|
|
(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
|
|
+ }
|
|
}
|
|
|
|
/* clear the skb timestamp if not configured the other way */
|
|
@@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
|
|
gwj->dropped_frames++;
|
|
else
|
|
gwj->handled_frames++;
|
|
+
|
|
+ return;
|
|
+
|
|
+ out_delete:
|
|
+ /* delete frame due to misconfiguration */
|
|
+ gwj->deleted_frames++;
|
|
+ kfree_skb(nskb);
|
|
+ return;
|
|
}
|
|
|
|
static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
|
|
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
|
|
index 2f126eff275d5..b59b81fc1ab66 100644
|
|
--- a/net/ceph/messenger.c
|
|
+++ b/net/ceph/messenger.c
|
|
@@ -2071,6 +2071,8 @@ static int process_connect(struct ceph_connection *con)
|
|
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
|
|
|
|
if (con->auth) {
|
|
+ int len = le32_to_cpu(con->in_reply.authorizer_len);
|
|
+
|
|
/*
|
|
* Any connection that defines ->get_authorizer()
|
|
* should also define ->add_authorizer_challenge() and
|
|
@@ -2080,8 +2082,7 @@ static int process_connect(struct ceph_connection *con)
|
|
*/
|
|
if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
|
|
ret = con->ops->add_authorizer_challenge(
|
|
- con, con->auth->authorizer_reply_buf,
|
|
- le32_to_cpu(con->in_reply.authorizer_len));
|
|
+ con, con->auth->authorizer_reply_buf, len);
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
@@ -2091,10 +2092,12 @@ static int process_connect(struct ceph_connection *con)
|
|
return 0;
|
|
}
|
|
|
|
- ret = con->ops->verify_authorizer_reply(con);
|
|
- if (ret < 0) {
|
|
- con->error_msg = "bad authorize reply";
|
|
- return ret;
|
|
+ if (len) {
|
|
+ ret = con->ops->verify_authorizer_reply(con);
|
|
+ if (ret < 0) {
|
|
+ con->error_msg = "bad authorize reply";
|
|
+ return ret;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
@@ -3219,9 +3222,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
|
|
dout("con_keepalive %p\n", con);
|
|
mutex_lock(&con->mutex);
|
|
clear_standby(con);
|
|
+ con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
|
|
mutex_unlock(&con->mutex);
|
|
- if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
|
|
- con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
|
|
+
|
|
+ if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
|
|
queue_con(con);
|
|
}
|
|
EXPORT_SYMBOL(ceph_con_keepalive);
|
|
diff --git a/net/compat.c b/net/compat.c
|
|
index 47a614b370cd3..d1f3a8a0b3efe 100644
|
|
--- a/net/compat.c
|
|
+++ b/net/compat.c
|
|
@@ -467,12 +467,14 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
|
|
ctv = (struct compat_timeval __user *) userstamp;
|
|
err = -ENOENT;
|
|
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
|
|
- tv = ktime_to_timeval(sk->sk_stamp);
|
|
+ tv = ktime_to_timeval(sock_read_timestamp(sk));
|
|
+
|
|
if (tv.tv_sec == -1)
|
|
return err;
|
|
if (tv.tv_sec == 0) {
|
|
- sk->sk_stamp = ktime_get_real();
|
|
- tv = ktime_to_timeval(sk->sk_stamp);
|
|
+ ktime_t kt = ktime_get_real();
|
|
+ sock_write_timestamp(sk, kt);
|
|
+ tv = ktime_to_timeval(kt);
|
|
}
|
|
err = 0;
|
|
if (put_user(tv.tv_sec, &ctv->tv_sec) ||
|
|
@@ -494,12 +496,13 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
|
|
ctv = (struct compat_timespec __user *) userstamp;
|
|
err = -ENOENT;
|
|
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
|
|
- ts = ktime_to_timespec(sk->sk_stamp);
|
|
+ ts = ktime_to_timespec(sock_read_timestamp(sk));
|
|
if (ts.tv_sec == -1)
|
|
return err;
|
|
if (ts.tv_sec == 0) {
|
|
- sk->sk_stamp = ktime_get_real();
|
|
- ts = ktime_to_timespec(sk->sk_stamp);
|
|
+ ktime_t kt = ktime_get_real();
|
|
+ sock_write_timestamp(sk, kt);
|
|
+ ts = ktime_to_timespec(kt);
|
|
}
|
|
err = 0;
|
|
if (put_user(ts.tv_sec, &ctv->tv_sec) ||
|
|
diff --git a/net/core/dev.c b/net/core/dev.c
|
|
index 722d50dbf8a45..de0690e5b3dfa 100644
|
|
--- a/net/core/dev.c
|
|
+++ b/net/core/dev.c
|
|
@@ -8064,7 +8064,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
|
|
netdev_features_t feature;
|
|
int feature_bit;
|
|
|
|
- for_each_netdev_feature(&upper_disables, feature_bit) {
|
|
+ for_each_netdev_feature(upper_disables, feature_bit) {
|
|
feature = __NETIF_F_BIT(feature_bit);
|
|
if (!(upper->wanted_features & feature)
|
|
&& (features & feature)) {
|
|
@@ -8084,7 +8084,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
|
|
netdev_features_t feature;
|
|
int feature_bit;
|
|
|
|
- for_each_netdev_feature(&upper_disables, feature_bit) {
|
|
+ for_each_netdev_feature(upper_disables, feature_bit) {
|
|
feature = __NETIF_F_BIT(feature_bit);
|
|
if (!(features & feature) && (lower->features & feature)) {
|
|
netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
|
|
@@ -8624,6 +8624,9 @@ int init_dummy_netdev(struct net_device *dev)
|
|
set_bit(__LINK_STATE_PRESENT, &dev->state);
|
|
set_bit(__LINK_STATE_START, &dev->state);
|
|
|
|
+ /* napi_busy_loop stats accounting wants this */
|
|
+ dev_net_set(dev, &init_net);
|
|
+
|
|
/* Note : We dont allocate pcpu_refcnt for dummy devices,
|
|
* because users of this 'device' dont need to change
|
|
* its refcount.
|
|
diff --git a/net/core/filter.c b/net/core/filter.c
|
|
index 8d2c629501e2d..16350f8c8815a 100644
|
|
--- a/net/core/filter.c
|
|
+++ b/net/core/filter.c
|
|
@@ -2023,18 +2023,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
|
|
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
|
|
u32 flags)
|
|
{
|
|
- /* skb->mac_len is not set on normal egress */
|
|
- unsigned int mlen = skb->network_header - skb->mac_header;
|
|
+ unsigned int mlen = skb_network_offset(skb);
|
|
|
|
- __skb_pull(skb, mlen);
|
|
+ if (mlen) {
|
|
+ __skb_pull(skb, mlen);
|
|
|
|
- /* At ingress, the mac header has already been pulled once.
|
|
- * At egress, skb_pospull_rcsum has to be done in case that
|
|
- * the skb is originated from ingress (i.e. a forwarded skb)
|
|
- * to ensure that rcsum starts at net header.
|
|
- */
|
|
- if (!skb_at_tc_ingress(skb))
|
|
- skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
|
|
+ /* At ingress, the mac header has already been pulled once.
|
|
+ * At egress, skb_pospull_rcsum has to be done in case that
|
|
+ * the skb is originated from ingress (i.e. a forwarded skb)
|
|
+ * to ensure that rcsum starts at net header.
|
|
+ */
|
|
+ if (!skb_at_tc_ingress(skb))
|
|
+ skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
|
|
+ }
|
|
skb_pop_mac_header(skb);
|
|
skb_reset_mac_len(skb);
|
|
return flags & BPF_F_INGRESS ?
|
|
@@ -3934,6 +3935,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
|
|
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
|
|
break;
|
|
case SO_MAX_PACING_RATE: /* 32bit version */
|
|
+ if (val != ~0U)
|
|
+ cmpxchg(&sk->sk_pacing_status,
|
|
+ SK_PACING_NONE,
|
|
+ SK_PACING_NEEDED);
|
|
sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
|
|
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
|
|
sk->sk_max_pacing_rate);
|
|
@@ -3947,7 +3952,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
|
|
sk->sk_rcvlowat = val ? : 1;
|
|
break;
|
|
case SO_MARK:
|
|
- sk->sk_mark = val;
|
|
+ if (sk->sk_mark != val) {
|
|
+ sk->sk_mark = val;
|
|
+ sk_dst_reset(sk);
|
|
+ }
|
|
break;
|
|
default:
|
|
ret = -EINVAL;
|
|
@@ -4018,7 +4026,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
|
|
/* Only some options are supported */
|
|
switch (optname) {
|
|
case TCP_BPF_IW:
|
|
- if (val <= 0 || tp->data_segs_out > 0)
|
|
+ if (val <= 0 || tp->data_segs_out > tp->syn_data)
|
|
ret = -EINVAL;
|
|
else
|
|
tp->snd_cwnd = val;
|
|
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
|
|
index 3e85437f71060..a648568c5e8fe 100644
|
|
--- a/net/core/lwt_bpf.c
|
|
+++ b/net/core/lwt_bpf.c
|
|
@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
|
|
lwt->name ? : "<unknown>");
|
|
ret = BPF_OK;
|
|
} else {
|
|
+ skb_reset_mac_header(skb);
|
|
ret = skb_do_redirect(skb);
|
|
if (ret == 0)
|
|
ret = BPF_REDIRECT;
|
|
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
|
|
index a8217e221e195..fc3d652a2de0c 100644
|
|
--- a/net/core/skbuff.c
|
|
+++ b/net/core/skbuff.c
|
|
@@ -353,6 +353,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
|
|
*/
|
|
void *netdev_alloc_frag(unsigned int fragsz)
|
|
{
|
|
+ fragsz = SKB_DATA_ALIGN(fragsz);
|
|
+
|
|
return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
|
|
}
|
|
EXPORT_SYMBOL(netdev_alloc_frag);
|
|
@@ -366,6 +368,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
|
|
|
|
void *napi_alloc_frag(unsigned int fragsz)
|
|
{
|
|
+ fragsz = SKB_DATA_ALIGN(fragsz);
|
|
+
|
|
return __napi_alloc_frag(fragsz, GFP_ATOMIC);
|
|
}
|
|
EXPORT_SYMBOL(napi_alloc_frag);
|
|
@@ -5202,7 +5206,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
|
|
unsigned long chunk;
|
|
struct sk_buff *skb;
|
|
struct page *page;
|
|
- gfp_t gfp_head;
|
|
int i;
|
|
|
|
*errcode = -EMSGSIZE;
|
|
@@ -5212,12 +5215,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
|
|
if (npages > MAX_SKB_FRAGS)
|
|
return NULL;
|
|
|
|
- gfp_head = gfp_mask;
|
|
- if (gfp_head & __GFP_DIRECT_RECLAIM)
|
|
- gfp_head |= __GFP_RETRY_MAYFAIL;
|
|
-
|
|
*errcode = -ENOBUFS;
|
|
- skb = alloc_skb(header_len, gfp_head);
|
|
+ skb = alloc_skb(header_len, gfp_mask);
|
|
if (!skb)
|
|
return NULL;
|
|
|
|
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
|
|
index b7dbb3c976cd2..54d8548076306 100644
|
|
--- a/net/core/skmsg.c
|
|
+++ b/net/core/skmsg.c
|
|
@@ -406,7 +406,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
|
|
msg->skb = skb;
|
|
|
|
sk_psock_queue_msg(psock, msg);
|
|
- sk->sk_data_ready(sk);
|
|
+ sk_psock_data_ready(sk, psock);
|
|
return copied;
|
|
}
|
|
|
|
@@ -575,6 +575,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
|
|
{
|
|
rcu_assign_sk_user_data(sk, NULL);
|
|
sk_psock_cork_free(psock);
|
|
+ sk_psock_zap_ingress(psock);
|
|
sk_psock_restore_proto(sk, psock);
|
|
|
|
write_lock_bh(&sk->sk_callback_lock);
|
|
@@ -738,7 +739,7 @@ static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
|
|
}
|
|
|
|
/* Called with socket lock held. */
|
|
-static void sk_psock_data_ready(struct sock *sk)
|
|
+static void sk_psock_strp_data_ready(struct sock *sk)
|
|
{
|
|
struct sk_psock *psock;
|
|
|
|
@@ -786,7 +787,7 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
|
|
return;
|
|
|
|
parser->saved_data_ready = sk->sk_data_ready;
|
|
- sk->sk_data_ready = sk_psock_data_ready;
|
|
+ sk->sk_data_ready = sk_psock_strp_data_ready;
|
|
sk->sk_write_space = sk_psock_write_space;
|
|
parser->enabled = true;
|
|
}
|
|
diff --git a/net/core/sock.c b/net/core/sock.c
|
|
index 080a880a1761b..530583ae92bfc 100644
|
|
--- a/net/core/sock.c
|
|
+++ b/net/core/sock.c
|
|
@@ -698,6 +698,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
|
|
break;
|
|
case SO_DONTROUTE:
|
|
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
|
|
+ sk_dst_reset(sk);
|
|
break;
|
|
case SO_BROADCAST:
|
|
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
|
|
@@ -2743,6 +2744,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
|
|
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
|
|
|
|
sk->sk_stamp = SK_DEFAULT_STAMP;
|
|
+#if BITS_PER_LONG==32
|
|
+ seqlock_init(&sk->sk_stamp_seq);
|
|
+#endif
|
|
atomic_set(&sk->sk_zckey, 0);
|
|
|
|
#ifdef CONFIG_NET_RX_BUSY_POLL
|
|
@@ -2842,12 +2846,13 @@ int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
|
|
struct timeval tv;
|
|
|
|
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
|
|
- tv = ktime_to_timeval(sk->sk_stamp);
|
|
+ tv = ktime_to_timeval(sock_read_timestamp(sk));
|
|
if (tv.tv_sec == -1)
|
|
return -ENOENT;
|
|
if (tv.tv_sec == 0) {
|
|
- sk->sk_stamp = ktime_get_real();
|
|
- tv = ktime_to_timeval(sk->sk_stamp);
|
|
+ ktime_t kt = ktime_get_real();
|
|
+ sock_write_timestamp(sk, kt);
|
|
+ tv = ktime_to_timeval(kt);
|
|
}
|
|
return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
|
|
}
|
|
@@ -2858,11 +2863,12 @@ int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
|
|
struct timespec ts;
|
|
|
|
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
|
|
- ts = ktime_to_timespec(sk->sk_stamp);
|
|
+ ts = ktime_to_timespec(sock_read_timestamp(sk));
|
|
if (ts.tv_sec == -1)
|
|
return -ENOENT;
|
|
if (ts.tv_sec == 0) {
|
|
- sk->sk_stamp = ktime_get_real();
|
|
+ ktime_t kt = ktime_get_real();
|
|
+ sock_write_timestamp(sk, kt);
|
|
ts = ktime_to_timespec(sk->sk_stamp);
|
|
}
|
|
return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
|
|
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
|
|
index 6eb837a47b5c4..baaaeb2b2c423 100644
|
|
--- a/net/dccp/ccid.h
|
|
+++ b/net/dccp/ccid.h
|
|
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
|
|
static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
|
|
u8 pkt, u8 opt, u8 *val, u8 len)
|
|
{
|
|
- if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
|
|
+ if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
|
|
return 0;
|
|
return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
|
|
}
|
|
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
|
|
static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
|
|
u8 pkt, u8 opt, u8 *val, u8 len)
|
|
{
|
|
- if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
|
|
+ if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
|
|
return 0;
|
|
return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
|
|
}
|
|
diff --git a/net/dsa/master.c b/net/dsa/master.c
|
|
index 5e8c9bef78bd2..dbfdda5fe8ed2 100644
|
|
--- a/net/dsa/master.c
|
|
+++ b/net/dsa/master.c
|
|
@@ -179,6 +179,8 @@ static const struct attribute_group dsa_group = {
|
|
.attrs = dsa_slave_attrs,
|
|
};
|
|
|
|
+static struct lock_class_key dsa_master_addr_list_lock_key;
|
|
+
|
|
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
|
|
{
|
|
int ret;
|
|
@@ -190,6 +192,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
|
|
wmb();
|
|
|
|
dev->dsa_ptr = cpu_dp;
|
|
+ lockdep_set_class(&dev->addr_list_lock,
|
|
+ &dsa_master_addr_list_lock_key);
|
|
|
|
ret = dsa_master_ethtool_setup(dev);
|
|
if (ret)
|
|
diff --git a/net/dsa/port.c b/net/dsa/port.c
|
|
index ed0595459df13..792a13068c50b 100644
|
|
--- a/net/dsa/port.c
|
|
+++ b/net/dsa/port.c
|
|
@@ -255,7 +255,7 @@ int dsa_port_vlan_add(struct dsa_port *dp,
|
|
if (netif_is_bridge_master(vlan->obj.orig_dev))
|
|
return -EOPNOTSUPP;
|
|
|
|
- if (br_vlan_enabled(dp->bridge_dev))
|
|
+ if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
|
|
return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
|
|
|
|
return 0;
|
|
@@ -273,7 +273,7 @@ int dsa_port_vlan_del(struct dsa_port *dp,
|
|
if (netif_is_bridge_master(vlan->obj.orig_dev))
|
|
return -EOPNOTSUPP;
|
|
|
|
- if (br_vlan_enabled(dp->bridge_dev))
|
|
+ if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
|
|
return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
|
|
|
|
return 0;
|
|
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
|
|
index aec78f5aca72d..a3ddc225f1039 100644
|
|
--- a/net/dsa/slave.c
|
|
+++ b/net/dsa/slave.c
|
|
@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev)
|
|
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
|
|
{
|
|
struct net_device *master = dsa_slave_to_master(dev);
|
|
-
|
|
- if (change & IFF_ALLMULTI)
|
|
- dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
|
|
- if (change & IFF_PROMISC)
|
|
- dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
|
|
+ if (dev->flags & IFF_UP) {
|
|
+ if (change & IFF_ALLMULTI)
|
|
+ dev_set_allmulti(master,
|
|
+ dev->flags & IFF_ALLMULTI ? 1 : -1);
|
|
+ if (change & IFF_PROMISC)
|
|
+ dev_set_promiscuity(master,
|
|
+ dev->flags & IFF_PROMISC ? 1 : -1);
|
|
+ }
|
|
}
|
|
|
|
static void dsa_slave_set_rx_mode(struct net_device *dev)
|
|
@@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
|
|
int ret;
|
|
|
|
/* Port's PHY and MAC both need to be EEE capable */
|
|
- if (!dev->phydev && !dp->pl)
|
|
+ if (!dev->phydev || !dp->pl)
|
|
return -ENODEV;
|
|
|
|
if (!ds->ops->set_mac_eee)
|
|
@@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
|
|
int ret;
|
|
|
|
/* Port's PHY and MAC both need to be EEE capable */
|
|
- if (!dev->phydev && !dp->pl)
|
|
+ if (!dev->phydev || !dp->pl)
|
|
return -ENODEV;
|
|
|
|
if (!ds->ops->get_mac_eee)
|
|
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
|
|
index ca53efa17be15..8bec827081cd9 100644
|
|
--- a/net/ieee802154/6lowpan/tx.c
|
|
+++ b/net/ieee802154/6lowpan/tx.c
|
|
@@ -48,6 +48,9 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
|
|
const struct ipv6hdr *hdr = ipv6_hdr(skb);
|
|
struct neighbour *n;
|
|
|
|
+ if (!daddr)
|
|
+ return -EINVAL;
|
|
+
|
|
/* TODO:
|
|
* if this package isn't ipv6 one, where should it be routed?
|
|
*/
|
|
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
|
|
index 608a6f4223fb9..fecd0e7672b5d 100644
|
|
--- a/net/ipv4/devinet.c
|
|
+++ b/net/ipv4/devinet.c
|
|
@@ -1826,7 +1826,7 @@ put_tgt_net:
|
|
if (fillargs.netnsid >= 0)
|
|
put_net(tgt_net);
|
|
|
|
- return err < 0 ? err : skb->len;
|
|
+ return skb->len ? : err;
|
|
}
|
|
|
|
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
|
|
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
|
|
index 6df95be963116..fe4f6a6242383 100644
|
|
--- a/net/ipv4/fib_frontend.c
|
|
+++ b/net/ipv4/fib_frontend.c
|
|
@@ -203,7 +203,7 @@ static void fib_flush(struct net *net)
|
|
struct fib_table *tb;
|
|
|
|
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
|
|
- flushed += fib_table_flush(net, tb);
|
|
+ flushed += fib_table_flush(net, tb, false);
|
|
}
|
|
|
|
if (flushed)
|
|
@@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net)
|
|
|
|
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
|
|
hlist_del(&tb->tb_hlist);
|
|
- fib_table_flush(net, tb);
|
|
+ fib_table_flush(net, tb, true);
|
|
fib_free_table(tb);
|
|
}
|
|
}
|
|
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
|
|
index 237c9f72b2658..a573e37e0615b 100644
|
|
--- a/net/ipv4/fib_trie.c
|
|
+++ b/net/ipv4/fib_trie.c
|
|
@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb)
|
|
}
|
|
|
|
/* Caller must hold RTNL. */
|
|
-int fib_table_flush(struct net *net, struct fib_table *tb)
|
|
+int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
|
|
{
|
|
struct trie *t = (struct trie *)tb->tb_data;
|
|
struct key_vector *pn = t->kv;
|
|
@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
|
|
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
|
|
struct fib_info *fi = fa->fa_info;
|
|
|
|
- if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
|
|
- tb->tb_id != fa->tb_id) {
|
|
+ if (!fi || tb->tb_id != fa->tb_id ||
|
|
+ (!(fi->fib_flags & RTNH_F_DEAD) &&
|
|
+ !fib_props[fa->fa_type].error)) {
|
|
+ slen = fa->fa_slen;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Do not flush error routes if network namespace is
|
|
+ * not being dismantled
|
|
+ */
|
|
+ if (!flush_all && fib_props[fa->fa_type].error) {
|
|
slen = fa->fa_slen;
|
|
continue;
|
|
}
|
|
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
|
|
index 7efe740c06ebf..511b32ea25331 100644
|
|
--- a/net/ipv4/gre_demux.c
|
|
+++ b/net/ipv4/gre_demux.c
|
|
@@ -25,6 +25,7 @@
|
|
#include <linux/spinlock.h>
|
|
#include <net/protocol.h>
|
|
#include <net/gre.h>
|
|
+#include <net/erspan.h>
|
|
|
|
#include <net/icmp.h>
|
|
#include <net/route.h>
|
|
@@ -119,6 +120,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
|
|
hdr_len += 4;
|
|
}
|
|
tpi->hdr_len = hdr_len;
|
|
+
|
|
+ /* ERSPAN ver 1 and 2 protocol sets GRE key field
|
|
+ * to 0 and sets the configured key in the
|
|
+ * inner erspan header field
|
|
+ */
|
|
+ if (greh->protocol == htons(ETH_P_ERSPAN) ||
|
|
+ greh->protocol == htons(ETH_P_ERSPAN2)) {
|
|
+ struct erspan_base_hdr *ershdr;
|
|
+
|
|
+ if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
|
|
+ return -EINVAL;
|
|
+
|
|
+ ershdr = (struct erspan_base_hdr *)options;
|
|
+ tpi->key = cpu_to_be32(get_session_id(ershdr));
|
|
+ }
|
|
+
|
|
return hdr_len;
|
|
}
|
|
EXPORT_SYMBOL(gre_parse_header);
|
|
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
|
|
index 1a4e9ff02762e..5731670c560b0 100644
|
|
--- a/net/ipv4/inet_diag.c
|
|
+++ b/net/ipv4/inet_diag.c
|
|
@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
|
|
+ nla_total_size(1) /* INET_DIAG_TOS */
|
|
+ nla_total_size(1) /* INET_DIAG_TCLASS */
|
|
+ nla_total_size(4) /* INET_DIAG_MARK */
|
|
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
|
|
+ nla_total_size(sizeof(struct inet_diag_meminfo))
|
|
+ nla_total_size(sizeof(struct inet_diag_msg))
|
|
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
|
|
@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
|
|
goto errout;
|
|
}
|
|
|
|
- if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
|
|
+ if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
|
|
+ ext & (1 << (INET_DIAG_TCLASS - 1))) {
|
|
u32 classid = 0;
|
|
|
|
#ifdef CONFIG_SOCK_CGROUP_DATA
|
|
classid = sock_cgroup_classid(&sk->sk_cgrp_data);
|
|
#endif
|
|
+ /* Fallback to socket priority if class id isn't set.
|
|
+ * Classful qdiscs use it as direct reference to class.
|
|
+ * For cgroup2 classid is always zero.
|
|
+ */
|
|
+ if (!classid)
|
|
+ classid = sk->sk_priority;
|
|
|
|
if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
|
|
goto errout;
|
|
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
|
|
index d757b9642d0d1..be778599bfedf 100644
|
|
--- a/net/ipv4/inetpeer.c
|
|
+++ b/net/ipv4/inetpeer.c
|
|
@@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
|
|
atomic_set(&p->rid, 0);
|
|
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
|
|
p->rate_tokens = 0;
|
|
+ p->n_redirects = 0;
|
|
/* 60*HZ is arbitrary, but chosen enough high so that the first
|
|
* calculation of tokens is at its maximum.
|
|
*/
|
|
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
|
|
index 38befe829caf5..b5488c5197bcb 100644
|
|
--- a/net/ipv4/ip_gre.c
|
|
+++ b/net/ipv4/ip_gre.c
|
|
@@ -266,20 +266,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
|
|
int len;
|
|
|
|
itn = net_generic(net, erspan_net_id);
|
|
- len = gre_hdr_len + sizeof(*ershdr);
|
|
-
|
|
- /* Check based hdr len */
|
|
- if (unlikely(!pskb_may_pull(skb, len)))
|
|
- return PACKET_REJECT;
|
|
|
|
iph = ip_hdr(skb);
|
|
ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
|
|
ver = ershdr->ver;
|
|
|
|
- /* The original GRE header does not have key field,
|
|
- * Use ERSPAN 10-bit session ID as key.
|
|
- */
|
|
- tpi->key = cpu_to_be32(get_session_id(ershdr));
|
|
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
|
|
tpi->flags | TUNNEL_KEY,
|
|
iph->saddr, iph->daddr, tpi->key);
|
|
@@ -567,8 +558,7 @@ err_free_skb:
|
|
dev->stats.tx_dropped++;
|
|
}
|
|
|
|
-static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
|
|
- __be16 proto)
|
|
+static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
{
|
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
|
struct ip_tunnel_info *tun_info;
|
|
@@ -576,10 +566,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
|
|
struct erspan_metadata *md;
|
|
struct rtable *rt = NULL;
|
|
bool truncate = false;
|
|
+ __be16 df, proto;
|
|
struct flowi4 fl;
|
|
int tunnel_hlen;
|
|
int version;
|
|
- __be16 df;
|
|
int nhoff;
|
|
int thoff;
|
|
|
|
@@ -624,18 +614,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
|
|
if (version == 1) {
|
|
erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
|
|
ntohl(md->u.index), truncate, true);
|
|
+ proto = htons(ETH_P_ERSPAN);
|
|
} else if (version == 2) {
|
|
erspan_build_header_v2(skb,
|
|
ntohl(tunnel_id_to_key32(key->tun_id)),
|
|
md->u.md2.dir,
|
|
get_hwid(&md->u.md2),
|
|
truncate, true);
|
|
+ proto = htons(ETH_P_ERSPAN2);
|
|
} else {
|
|
goto err_free_rt;
|
|
}
|
|
|
|
gre_build_header(skb, 8, TUNNEL_SEQ,
|
|
- htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
|
|
+ proto, 0, htonl(tunnel->o_seqno++));
|
|
|
|
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
|
|
|
|
@@ -674,6 +666,9 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
|
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
|
const struct iphdr *tnl_params;
|
|
|
|
+ if (!pskb_inet_may_pull(skb))
|
|
+ goto free_skb;
|
|
+
|
|
if (tunnel->collect_md) {
|
|
gre_fb_xmit(skb, dev, skb->protocol);
|
|
return NETDEV_TX_OK;
|
|
@@ -716,9 +711,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
|
|
{
|
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
|
bool truncate = false;
|
|
+ __be16 proto;
|
|
+
|
|
+ if (!pskb_inet_may_pull(skb))
|
|
+ goto free_skb;
|
|
|
|
if (tunnel->collect_md) {
|
|
- erspan_fb_xmit(skb, dev, skb->protocol);
|
|
+ erspan_fb_xmit(skb, dev);
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
@@ -734,19 +733,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
|
|
}
|
|
|
|
/* Push ERSPAN header */
|
|
- if (tunnel->erspan_ver == 1)
|
|
+ if (tunnel->erspan_ver == 1) {
|
|
erspan_build_header(skb, ntohl(tunnel->parms.o_key),
|
|
tunnel->index,
|
|
truncate, true);
|
|
- else if (tunnel->erspan_ver == 2)
|
|
+ proto = htons(ETH_P_ERSPAN);
|
|
+ } else if (tunnel->erspan_ver == 2) {
|
|
erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
|
|
tunnel->dir, tunnel->hwid,
|
|
truncate, true);
|
|
- else
|
|
+ proto = htons(ETH_P_ERSPAN2);
|
|
+ } else {
|
|
goto free_skb;
|
|
+ }
|
|
|
|
tunnel->parms.o_flags &= ~TUNNEL_KEY;
|
|
- __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
|
|
+ __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
|
|
return NETDEV_TX_OK;
|
|
|
|
free_skb:
|
|
@@ -760,6 +762,9 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
|
|
{
|
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
|
|
|
+ if (!pskb_inet_may_pull(skb))
|
|
+ goto free_skb;
|
|
+
|
|
if (tunnel->collect_md) {
|
|
gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
|
|
return NETDEV_TX_OK;
|
|
@@ -1454,12 +1459,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
|
|
{
|
|
struct ip_tunnel *t = netdev_priv(dev);
|
|
struct ip_tunnel_parm *p = &t->parms;
|
|
+ __be16 o_flags = p->o_flags;
|
|
+
|
|
+ if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
|
|
+ !t->collect_md)
|
|
+ o_flags |= TUNNEL_KEY;
|
|
|
|
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
|
|
nla_put_be16(skb, IFLA_GRE_IFLAGS,
|
|
gre_tnl_flags_to_gre_flags(p->i_flags)) ||
|
|
nla_put_be16(skb, IFLA_GRE_OFLAGS,
|
|
- gre_tnl_flags_to_gre_flags(p->o_flags)) ||
|
|
+ gre_tnl_flags_to_gre_flags(o_flags)) ||
|
|
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
|
|
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
|
|
nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
|
|
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
|
|
index e609b08c9df4f..3163428219cd5 100644
|
|
--- a/net/ipv4/ip_input.c
|
|
+++ b/net/ipv4/ip_input.c
|
|
@@ -489,6 +489,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
|
|
goto drop;
|
|
}
|
|
|
|
+ iph = ip_hdr(skb);
|
|
skb->transport_header = skb->network_header + iph->ihl*4;
|
|
|
|
/* Remove any debris in the socket control block */
|
|
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
|
|
index fffcc130900e5..82f341e84faec 100644
|
|
--- a/net/ipv4/ip_sockglue.c
|
|
+++ b/net/ipv4/ip_sockglue.c
|
|
@@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
|
|
|
|
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
|
|
{
|
|
+ __be16 _ports[2], *ports;
|
|
struct sockaddr_in sin;
|
|
- __be16 *ports;
|
|
- int end;
|
|
-
|
|
- end = skb_transport_offset(skb) + 4;
|
|
- if (end > 0 && !pskb_may_pull(skb, end))
|
|
- return;
|
|
|
|
/* All current transport protocols have the port numbers in the
|
|
* first four bytes of the transport header and this function is
|
|
* written with this assumption in mind.
|
|
*/
|
|
- ports = (__be16 *)skb_transport_header(skb);
|
|
+ ports = skb_header_pointer(skb, skb_transport_offset(skb),
|
|
+ sizeof(_ports), &_ports);
|
|
+ if (!ports)
|
|
+ return;
|
|
|
|
sin.sin_family = AF_INET;
|
|
sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
|
|
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
|
|
index 284a22154b4e6..c4f5602308edc 100644
|
|
--- a/net/ipv4/ip_tunnel.c
|
|
+++ b/net/ipv4/ip_tunnel.c
|
|
@@ -627,7 +627,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
|
|
const struct iphdr *tnl_params, u8 protocol)
|
|
{
|
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
|
- unsigned int inner_nhdr_len = 0;
|
|
const struct iphdr *inner_iph;
|
|
struct flowi4 fl4;
|
|
u8 tos, ttl;
|
|
@@ -637,14 +636,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
|
|
__be32 dst;
|
|
bool connected;
|
|
|
|
- /* ensure we can access the inner net header, for several users below */
|
|
- if (skb->protocol == htons(ETH_P_IP))
|
|
- inner_nhdr_len = sizeof(struct iphdr);
|
|
- else if (skb->protocol == htons(ETH_P_IPV6))
|
|
- inner_nhdr_len = sizeof(struct ipv6hdr);
|
|
- if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
|
|
- goto tx_error;
|
|
-
|
|
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
|
|
connected = (tunnel->parms.iph.daddr != 0);
|
|
|
|
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
|
|
index de31b302d69c6..d7b43e700023a 100644
|
|
--- a/net/ipv4/ip_vti.c
|
|
+++ b/net/ipv4/ip_vti.c
|
|
@@ -241,6 +241,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
|
struct flowi fl;
|
|
|
|
+ if (!pskb_inet_may_pull(skb))
|
|
+ goto tx_err;
|
|
+
|
|
memset(&fl, 0, sizeof(fl));
|
|
|
|
switch (skb->protocol) {
|
|
@@ -253,15 +256,18 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
|
|
break;
|
|
default:
|
|
- dev->stats.tx_errors++;
|
|
- dev_kfree_skb(skb);
|
|
- return NETDEV_TX_OK;
|
|
+ goto tx_err;
|
|
}
|
|
|
|
/* override mark with tunnel output key */
|
|
fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
|
|
|
|
return vti_xmit(skb, dev, &fl);
|
|
+
|
|
+tx_err:
|
|
+ dev->stats.tx_errors++;
|
|
+ kfree_skb(skb);
|
|
+ return NETDEV_TX_OK;
|
|
}
|
|
|
|
static int vti4_err(struct sk_buff *skb, u32 info)
|
|
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
|
|
index 2c8d313ae2169..3cd237b42f446 100644
|
|
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
|
|
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
|
|
@@ -56,18 +56,15 @@ struct clusterip_config {
|
|
#endif
|
|
enum clusterip_hashmode hash_mode; /* which hashing mode */
|
|
u_int32_t hash_initval; /* hash initialization */
|
|
- struct rcu_head rcu;
|
|
-
|
|
+ struct rcu_head rcu; /* for call_rcu_bh */
|
|
+ struct net *net; /* netns for pernet list */
|
|
char ifname[IFNAMSIZ]; /* device ifname */
|
|
- struct notifier_block notifier; /* refresh c->ifindex in it */
|
|
};
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
static const struct file_operations clusterip_proc_fops;
|
|
#endif
|
|
|
|
-static unsigned int clusterip_net_id __read_mostly;
|
|
-
|
|
struct clusterip_net {
|
|
struct list_head configs;
|
|
/* lock protects the configs list */
|
|
@@ -75,19 +72,35 @@ struct clusterip_net {
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
struct proc_dir_entry *procdir;
|
|
+ /* mutex protects the config->pde*/
|
|
+ struct mutex mutex;
|
|
#endif
|
|
};
|
|
|
|
+static unsigned int clusterip_net_id __read_mostly;
|
|
+static inline struct clusterip_net *clusterip_pernet(struct net *net)
|
|
+{
|
|
+ return net_generic(net, clusterip_net_id);
|
|
+}
|
|
+
|
|
static inline void
|
|
clusterip_config_get(struct clusterip_config *c)
|
|
{
|
|
refcount_inc(&c->refcount);
|
|
}
|
|
|
|
-
|
|
static void clusterip_config_rcu_free(struct rcu_head *head)
|
|
{
|
|
- kfree(container_of(head, struct clusterip_config, rcu));
|
|
+ struct clusterip_config *config;
|
|
+ struct net_device *dev;
|
|
+
|
|
+ config = container_of(head, struct clusterip_config, rcu);
|
|
+ dev = dev_get_by_name(config->net, config->ifname);
|
|
+ if (dev) {
|
|
+ dev_mc_del(dev, config->clustermac);
|
|
+ dev_put(dev);
|
|
+ }
|
|
+ kfree(config);
|
|
}
|
|
|
|
static inline void
|
|
@@ -101,25 +114,24 @@ clusterip_config_put(struct clusterip_config *c)
|
|
* entry(rule) is removed, remove the config from lists, but don't free it
|
|
* yet, since proc-files could still be holding references */
|
|
static inline void
|
|
-clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
|
|
+clusterip_config_entry_put(struct clusterip_config *c)
|
|
{
|
|
- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
|
|
+ struct clusterip_net *cn = clusterip_pernet(c->net);
|
|
|
|
local_bh_disable();
|
|
if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
|
|
+ list_del_rcu(&c->list);
|
|
+ spin_unlock(&cn->lock);
|
|
+ local_bh_enable();
|
|
/* In case anyone still accesses the file, the open/close
|
|
* functions are also incrementing the refcount on their own,
|
|
* so it's safe to remove the entry even if it's in use. */
|
|
#ifdef CONFIG_PROC_FS
|
|
+ mutex_lock(&cn->mutex);
|
|
if (cn->procdir)
|
|
proc_remove(c->pde);
|
|
+ mutex_unlock(&cn->mutex);
|
|
#endif
|
|
- list_del_rcu(&c->list);
|
|
- spin_unlock(&cn->lock);
|
|
- local_bh_enable();
|
|
-
|
|
- unregister_netdevice_notifier(&c->notifier);
|
|
-
|
|
return;
|
|
}
|
|
local_bh_enable();
|
|
@@ -129,7 +141,7 @@ static struct clusterip_config *
|
|
__clusterip_config_find(struct net *net, __be32 clusterip)
|
|
{
|
|
struct clusterip_config *c;
|
|
- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
|
|
+ struct clusterip_net *cn = clusterip_pernet(net);
|
|
|
|
list_for_each_entry_rcu(c, &cn->configs, list) {
|
|
if (c->clusterip == clusterip)
|
|
@@ -181,32 +193,37 @@ clusterip_netdev_event(struct notifier_block *this, unsigned long event,
|
|
void *ptr)
|
|
{
|
|
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
|
+ struct net *net = dev_net(dev);
|
|
+ struct clusterip_net *cn = clusterip_pernet(net);
|
|
struct clusterip_config *c;
|
|
|
|
- c = container_of(this, struct clusterip_config, notifier);
|
|
- switch (event) {
|
|
- case NETDEV_REGISTER:
|
|
- if (!strcmp(dev->name, c->ifname)) {
|
|
- c->ifindex = dev->ifindex;
|
|
- dev_mc_add(dev, c->clustermac);
|
|
- }
|
|
- break;
|
|
- case NETDEV_UNREGISTER:
|
|
- if (dev->ifindex == c->ifindex) {
|
|
- dev_mc_del(dev, c->clustermac);
|
|
- c->ifindex = -1;
|
|
- }
|
|
- break;
|
|
- case NETDEV_CHANGENAME:
|
|
- if (!strcmp(dev->name, c->ifname)) {
|
|
- c->ifindex = dev->ifindex;
|
|
- dev_mc_add(dev, c->clustermac);
|
|
- } else if (dev->ifindex == c->ifindex) {
|
|
- dev_mc_del(dev, c->clustermac);
|
|
- c->ifindex = -1;
|
|
+ spin_lock_bh(&cn->lock);
|
|
+ list_for_each_entry_rcu(c, &cn->configs, list) {
|
|
+ switch (event) {
|
|
+ case NETDEV_REGISTER:
|
|
+ if (!strcmp(dev->name, c->ifname)) {
|
|
+ c->ifindex = dev->ifindex;
|
|
+ dev_mc_add(dev, c->clustermac);
|
|
+ }
|
|
+ break;
|
|
+ case NETDEV_UNREGISTER:
|
|
+ if (dev->ifindex == c->ifindex) {
|
|
+ dev_mc_del(dev, c->clustermac);
|
|
+ c->ifindex = -1;
|
|
+ }
|
|
+ break;
|
|
+ case NETDEV_CHANGENAME:
|
|
+ if (!strcmp(dev->name, c->ifname)) {
|
|
+ c->ifindex = dev->ifindex;
|
|
+ dev_mc_add(dev, c->clustermac);
|
|
+ } else if (dev->ifindex == c->ifindex) {
|
|
+ dev_mc_del(dev, c->clustermac);
|
|
+ c->ifindex = -1;
|
|
+ }
|
|
+ break;
|
|
}
|
|
- break;
|
|
}
|
|
+ spin_unlock_bh(&cn->lock);
|
|
|
|
return NOTIFY_DONE;
|
|
}
|
|
@@ -215,30 +232,44 @@ static struct clusterip_config *
|
|
clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
|
|
__be32 ip, const char *iniface)
|
|
{
|
|
- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
|
|
+ struct clusterip_net *cn = clusterip_pernet(net);
|
|
struct clusterip_config *c;
|
|
+ struct net_device *dev;
|
|
int err;
|
|
|
|
+ if (iniface[0] == '\0') {
|
|
+ pr_info("Please specify an interface name\n");
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
+
|
|
c = kzalloc(sizeof(*c), GFP_ATOMIC);
|
|
if (!c)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
- strcpy(c->ifname, iniface);
|
|
- c->ifindex = -1;
|
|
- c->clusterip = ip;
|
|
+ dev = dev_get_by_name(net, iniface);
|
|
+ if (!dev) {
|
|
+ pr_info("no such interface %s\n", iniface);
|
|
+ kfree(c);
|
|
+ return ERR_PTR(-ENOENT);
|
|
+ }
|
|
+ c->ifindex = dev->ifindex;
|
|
+ strcpy(c->ifname, dev->name);
|
|
memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
|
|
+ dev_mc_add(dev, c->clustermac);
|
|
+ dev_put(dev);
|
|
+
|
|
+ c->clusterip = ip;
|
|
c->num_total_nodes = i->num_total_nodes;
|
|
clusterip_config_init_nodelist(c, i);
|
|
c->hash_mode = i->hash_mode;
|
|
c->hash_initval = i->hash_initval;
|
|
+ c->net = net;
|
|
refcount_set(&c->refcount, 1);
|
|
|
|
spin_lock_bh(&cn->lock);
|
|
if (__clusterip_config_find(net, ip)) {
|
|
- spin_unlock_bh(&cn->lock);
|
|
- kfree(c);
|
|
-
|
|
- return ERR_PTR(-EBUSY);
|
|
+ err = -EBUSY;
|
|
+ goto out_config_put;
|
|
}
|
|
|
|
list_add_rcu(&c->list, &cn->configs);
|
|
@@ -250,9 +281,11 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
|
|
|
|
/* create proc dir entry */
|
|
sprintf(buffer, "%pI4", &ip);
|
|
+ mutex_lock(&cn->mutex);
|
|
c->pde = proc_create_data(buffer, 0600,
|
|
cn->procdir,
|
|
&clusterip_proc_fops, c);
|
|
+ mutex_unlock(&cn->mutex);
|
|
if (!c->pde) {
|
|
err = -ENOMEM;
|
|
goto err;
|
|
@@ -260,22 +293,17 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
|
|
}
|
|
#endif
|
|
|
|
- c->notifier.notifier_call = clusterip_netdev_event;
|
|
- err = register_netdevice_notifier(&c->notifier);
|
|
- if (!err) {
|
|
- refcount_set(&c->entries, 1);
|
|
- return c;
|
|
- }
|
|
+ refcount_set(&c->entries, 1);
|
|
+ return c;
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
- proc_remove(c->pde);
|
|
err:
|
|
#endif
|
|
spin_lock_bh(&cn->lock);
|
|
list_del_rcu(&c->list);
|
|
+out_config_put:
|
|
spin_unlock_bh(&cn->lock);
|
|
clusterip_config_put(c);
|
|
-
|
|
return ERR_PTR(err);
|
|
}
|
|
|
|
@@ -475,34 +503,20 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
|
|
&e->ip.dst.s_addr);
|
|
return -EINVAL;
|
|
} else {
|
|
- struct net_device *dev;
|
|
-
|
|
- if (e->ip.iniface[0] == '\0') {
|
|
- pr_info("Please specify an interface name\n");
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- dev = dev_get_by_name(par->net, e->ip.iniface);
|
|
- if (!dev) {
|
|
- pr_info("no such interface %s\n",
|
|
- e->ip.iniface);
|
|
- return -ENOENT;
|
|
- }
|
|
- dev_put(dev);
|
|
-
|
|
config = clusterip_config_init(par->net, cipinfo,
|
|
e->ip.dst.s_addr,
|
|
e->ip.iniface);
|
|
if (IS_ERR(config))
|
|
return PTR_ERR(config);
|
|
}
|
|
- }
|
|
+ } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN))
|
|
+ return -EINVAL;
|
|
|
|
ret = nf_ct_netns_get(par->net, par->family);
|
|
if (ret < 0) {
|
|
pr_info("cannot load conntrack support for proto=%u\n",
|
|
par->family);
|
|
- clusterip_config_entry_put(par->net, config);
|
|
+ clusterip_config_entry_put(config);
|
|
clusterip_config_put(config);
|
|
return ret;
|
|
}
|
|
@@ -524,7 +538,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
|
|
|
|
/* if no more entries are referencing the config, remove it
|
|
* from the list and destroy the proc entry */
|
|
- clusterip_config_entry_put(par->net, cipinfo->config);
|
|
+ clusterip_config_entry_put(cipinfo->config);
|
|
|
|
clusterip_config_put(cipinfo->config);
|
|
|
|
@@ -806,7 +820,7 @@ static const struct file_operations clusterip_proc_fops = {
|
|
|
|
static int clusterip_net_init(struct net *net)
|
|
{
|
|
- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
|
|
+ struct clusterip_net *cn = clusterip_pernet(net);
|
|
int ret;
|
|
|
|
INIT_LIST_HEAD(&cn->configs);
|
|
@@ -824,6 +838,7 @@ static int clusterip_net_init(struct net *net)
|
|
pr_err("Unable to proc dir entry\n");
|
|
return -ENOMEM;
|
|
}
|
|
+ mutex_init(&cn->mutex);
|
|
#endif /* CONFIG_PROC_FS */
|
|
|
|
return 0;
|
|
@@ -831,13 +846,15 @@ static int clusterip_net_init(struct net *net)
|
|
|
|
static void clusterip_net_exit(struct net *net)
|
|
{
|
|
- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
|
|
+ struct clusterip_net *cn = clusterip_pernet(net);
|
|
+
|
|
#ifdef CONFIG_PROC_FS
|
|
+ mutex_lock(&cn->mutex);
|
|
proc_remove(cn->procdir);
|
|
cn->procdir = NULL;
|
|
+ mutex_unlock(&cn->mutex);
|
|
#endif
|
|
nf_unregister_net_hook(net, &cip_arp_ops);
|
|
- WARN_ON_ONCE(!list_empty(&cn->configs));
|
|
}
|
|
|
|
static struct pernet_operations clusterip_net_ops = {
|
|
@@ -847,6 +864,10 @@ static struct pernet_operations clusterip_net_ops = {
|
|
.size = sizeof(struct clusterip_net),
|
|
};
|
|
|
|
+struct notifier_block cip_netdev_notifier = {
|
|
+ .notifier_call = clusterip_netdev_event
|
|
+};
|
|
+
|
|
static int __init clusterip_tg_init(void)
|
|
{
|
|
int ret;
|
|
@@ -859,11 +880,17 @@ static int __init clusterip_tg_init(void)
|
|
if (ret < 0)
|
|
goto cleanup_subsys;
|
|
|
|
+ ret = register_netdevice_notifier(&cip_netdev_notifier);
|
|
+ if (ret < 0)
|
|
+ goto unregister_target;
|
|
+
|
|
pr_info("ClusterIP Version %s loaded successfully\n",
|
|
CLUSTERIP_VERSION);
|
|
|
|
return 0;
|
|
|
|
+unregister_target:
|
|
+ xt_unregister_target(&clusterip_tg_reg);
|
|
cleanup_subsys:
|
|
unregister_pernet_subsys(&clusterip_net_ops);
|
|
return ret;
|
|
@@ -873,6 +900,7 @@ static void __exit clusterip_tg_exit(void)
|
|
{
|
|
pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
|
|
|
|
+ unregister_netdevice_notifier(&cip_netdev_notifier);
|
|
xt_unregister_target(&clusterip_tg_reg);
|
|
unregister_pernet_subsys(&clusterip_net_ops);
|
|
|
|
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
|
|
index a0aa13bcabda0..0a8a60c1bf9af 100644
|
|
--- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
|
|
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
|
|
@@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
|
|
int snmp_version(void *context, size_t hdrlen, unsigned char tag,
|
|
const void *data, size_t datalen)
|
|
{
|
|
+ if (datalen != 1)
|
|
+ return -EINVAL;
|
|
if (*(unsigned char *)data > 1)
|
|
return -ENOTSUPP;
|
|
return 1;
|
|
@@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
|
|
const void *data, size_t datalen)
|
|
{
|
|
struct snmp_ctx *ctx = (struct snmp_ctx *)context;
|
|
- __be32 *pdata = (__be32 *)data;
|
|
+ __be32 *pdata;
|
|
|
|
+ if (datalen != 4)
|
|
+ return -EINVAL;
|
|
+ pdata = (__be32 *)data;
|
|
if (*pdata == ctx->from) {
|
|
pr_debug("%s: %pI4 to %pI4\n", __func__,
|
|
(void *)&ctx->from, (void *)&ctx->to);
|
|
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
|
|
index c0a9d26c06ceb..d1ddf1d037215 100644
|
|
--- a/net/ipv4/route.c
|
|
+++ b/net/ipv4/route.c
|
|
@@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
|
|
/* No redirected packets during ip_rt_redirect_silence;
|
|
* reset the algorithm.
|
|
*/
|
|
- if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
|
|
+ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
|
|
peer->rate_tokens = 0;
|
|
+ peer->n_redirects = 0;
|
|
+ }
|
|
|
|
/* Too many ignored redirects; do not send anything
|
|
* set dst.rate_last to the last seen redirected packet.
|
|
*/
|
|
- if (peer->rate_tokens >= ip_rt_redirect_number) {
|
|
+ if (peer->n_redirects >= ip_rt_redirect_number) {
|
|
peer->rate_last = jiffies;
|
|
goto out_put_peer;
|
|
}
|
|
@@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
|
|
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
|
|
peer->rate_last = jiffies;
|
|
++peer->rate_tokens;
|
|
+ ++peer->n_redirects;
|
|
#ifdef CONFIG_IP_ROUTE_VERBOSE
|
|
if (log_martians &&
|
|
peer->rate_tokens == ip_rt_redirect_number)
|
|
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
|
|
index 9e6bc4d6daa75..b102973102b9e 100644
|
|
--- a/net/ipv4/tcp.c
|
|
+++ b/net/ipv4/tcp.c
|
|
@@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
|
|
flags = msg->msg_flags;
|
|
|
|
if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
|
|
- if (sk->sk_state != TCP_ESTABLISHED) {
|
|
+ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
|
|
err = -EINVAL;
|
|
goto out_err;
|
|
}
|
|
@@ -2532,6 +2532,7 @@ void tcp_write_queue_purge(struct sock *sk)
|
|
sk_mem_reclaim(sk);
|
|
tcp_clear_all_retrans_hints(tcp_sk(sk));
|
|
tcp_sk(sk)->packets_out = 0;
|
|
+ inet_csk(sk)->icsk_backoff = 0;
|
|
}
|
|
|
|
int tcp_disconnect(struct sock *sk, int flags)
|
|
@@ -2580,7 +2581,6 @@ int tcp_disconnect(struct sock *sk, int flags)
|
|
tp->write_seq += tp->max_window + 2;
|
|
if (tp->write_seq == 0)
|
|
tp->write_seq = 1;
|
|
- icsk->icsk_backoff = 0;
|
|
tp->snd_cwnd = 2;
|
|
icsk->icsk_probes_out = 0;
|
|
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
|
|
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
|
|
index 3b45fe530f91e..2d6bd7a59b2cd 100644
|
|
--- a/net/ipv4/tcp_bpf.c
|
|
+++ b/net/ipv4/tcp_bpf.c
|
|
@@ -198,7 +198,7 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
|
|
msg->sg.start = i;
|
|
msg->sg.size -= apply_bytes;
|
|
sk_psock_queue_msg(psock, tmp);
|
|
- sk->sk_data_ready(sk);
|
|
+ sk_psock_data_ready(sk, psock);
|
|
} else {
|
|
sk_msg_free(sk, tmp);
|
|
kfree(tmp);
|
|
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
|
|
index de47038afdf02..b654f21064bba 100644
|
|
--- a/net/ipv4/tcp_ipv4.c
|
|
+++ b/net/ipv4/tcp_ipv4.c
|
|
@@ -535,14 +535,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
|
|
if (sock_owned_by_user(sk))
|
|
break;
|
|
|
|
+ skb = tcp_rtx_queue_head(sk);
|
|
+ if (WARN_ON_ONCE(!skb))
|
|
+ break;
|
|
+
|
|
icsk->icsk_backoff--;
|
|
icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
|
|
TCP_TIMEOUT_INIT;
|
|
icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
|
|
|
|
- skb = tcp_rtx_queue_head(sk);
|
|
- BUG_ON(!skb);
|
|
-
|
|
tcp_mstamp_refresh(tp);
|
|
delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
|
|
remaining = icsk->icsk_rto -
|
|
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
|
|
index f87dbc78b6bcb..71a29e9c06206 100644
|
|
--- a/net/ipv4/tcp_timer.c
|
|
+++ b/net/ipv4/tcp_timer.c
|
|
@@ -226,7 +226,7 @@ static int tcp_write_timeout(struct sock *sk)
|
|
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
|
|
if (icsk->icsk_retransmits) {
|
|
dst_negative_advice(sk);
|
|
- } else if (!tp->syn_data && !tp->syn_fastopen) {
|
|
+ } else {
|
|
sk_rethink_txhash(sk);
|
|
}
|
|
retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
|
|
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
|
|
index 1976fddb9e005..ce125f4dc810e 100644
|
|
--- a/net/ipv4/udp.c
|
|
+++ b/net/ipv4/udp.c
|
|
@@ -785,15 +785,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
|
|
const int hlen = skb_network_header_len(skb) +
|
|
sizeof(struct udphdr);
|
|
|
|
- if (hlen + cork->gso_size > cork->fragsize)
|
|
+ if (hlen + cork->gso_size > cork->fragsize) {
|
|
+ kfree_skb(skb);
|
|
return -EINVAL;
|
|
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
|
|
+ }
|
|
+ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
|
|
+ kfree_skb(skb);
|
|
return -EINVAL;
|
|
- if (sk->sk_no_check_tx)
|
|
+ }
|
|
+ if (sk->sk_no_check_tx) {
|
|
+ kfree_skb(skb);
|
|
return -EINVAL;
|
|
+ }
|
|
if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
|
|
- dst_xfrm(skb_dst(skb)))
|
|
+ dst_xfrm(skb_dst(skb))) {
|
|
+ kfree_skb(skb);
|
|
return -EIO;
|
|
+ }
|
|
|
|
skb_shinfo(skb)->gso_size = cork->gso_size;
|
|
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
|
|
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
|
|
index 045597b9a7c05..9fa51ab01ac40 100644
|
|
--- a/net/ipv6/addrconf.c
|
|
+++ b/net/ipv6/addrconf.c
|
|
@@ -1165,7 +1165,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
|
|
list_for_each_entry(ifa, &idev->addr_list, if_list) {
|
|
if (ifa == ifp)
|
|
continue;
|
|
- if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
|
|
+ if (ifa->prefix_len != ifp->prefix_len ||
|
|
+ !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
|
|
ifp->prefix_len))
|
|
continue;
|
|
if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
|
|
@@ -5120,6 +5121,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
|
|
if (idev) {
|
|
err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
|
|
&fillargs);
|
|
+ if (err > 0)
|
|
+ err = 0;
|
|
}
|
|
goto put_tgt_net;
|
|
}
|
|
@@ -5154,7 +5157,7 @@ put_tgt_net:
|
|
if (fillargs.netnsid >= 0)
|
|
put_net(tgt_net);
|
|
|
|
- return err < 0 ? err : skb->len;
|
|
+ return skb->len ? : err;
|
|
}
|
|
|
|
static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
|
|
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
|
|
index f0cd291034f0f..d99753b5e39b2 100644
|
|
--- a/net/ipv6/af_inet6.c
|
|
+++ b/net/ipv6/af_inet6.c
|
|
@@ -310,6 +310,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
|
|
|
|
/* Check if the address belongs to the host. */
|
|
if (addr_type == IPV6_ADDR_MAPPED) {
|
|
+ struct net_device *dev = NULL;
|
|
int chk_addr_ret;
|
|
|
|
/* Binding to v4-mapped address on a v6-only socket
|
|
@@ -320,9 +321,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
|
|
goto out;
|
|
}
|
|
|
|
+ rcu_read_lock();
|
|
+ if (sk->sk_bound_dev_if) {
|
|
+ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
|
|
+ if (!dev) {
|
|
+ err = -ENODEV;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+ }
|
|
+
|
|
/* Reproduce AF_INET checks to make the bindings consistent */
|
|
v4addr = addr->sin6_addr.s6_addr32[3];
|
|
- chk_addr_ret = inet_addr_type(net, v4addr);
|
|
+ chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
|
|
+ rcu_read_unlock();
|
|
+
|
|
if (!inet_can_nonlocal_bind(net, inet) &&
|
|
v4addr != htonl(INADDR_ANY) &&
|
|
chk_addr_ret != RTN_LOCAL &&
|
|
@@ -350,6 +362,9 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
|
|
err = -EINVAL;
|
|
goto out_unlock;
|
|
}
|
|
+ }
|
|
+
|
|
+ if (sk->sk_bound_dev_if) {
|
|
dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
|
|
if (!dev) {
|
|
err = -ENODEV;
|
|
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
|
|
index 1ede7a16a0bec..cb24850d2c7f8 100644
|
|
--- a/net/ipv6/datagram.c
|
|
+++ b/net/ipv6/datagram.c
|
|
@@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
|
|
skb_reset_network_header(skb);
|
|
iph = ipv6_hdr(skb);
|
|
iph->daddr = fl6->daddr;
|
|
+ ip6_flow_hdr(iph, 0, 0);
|
|
|
|
serr = SKB_EXT_ERR(skb);
|
|
serr->ee.ee_errno = err;
|
|
@@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
|
|
}
|
|
if (np->rxopt.bits.rxorigdstaddr) {
|
|
struct sockaddr_in6 sin6;
|
|
- __be16 *ports;
|
|
- int end;
|
|
+ __be16 _ports[2], *ports;
|
|
|
|
- end = skb_transport_offset(skb) + 4;
|
|
- if (end <= 0 || pskb_may_pull(skb, end)) {
|
|
+ ports = skb_header_pointer(skb, skb_transport_offset(skb),
|
|
+ sizeof(_ports), &_ports);
|
|
+ if (ports) {
|
|
/* All current transport protocols have the port numbers in the
|
|
* first four bytes of the transport header and this function is
|
|
* written with this assumption in mind.
|
|
*/
|
|
- ports = (__be16 *)skb_transport_header(skb);
|
|
-
|
|
sin6.sin6_family = AF_INET6;
|
|
sin6.sin6_addr = ipv6_hdr(skb)->daddr;
|
|
sin6.sin6_port = ports[1];
|
|
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
|
|
index c9c53ade55c3c..6d14cbe443f82 100644
|
|
--- a/net/ipv6/icmp.c
|
|
+++ b/net/ipv6/icmp.c
|
|
@@ -421,10 +421,10 @@ static int icmp6_iif(const struct sk_buff *skb)
|
|
static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
|
|
const struct in6_addr *force_saddr)
|
|
{
|
|
- struct net *net = dev_net(skb->dev);
|
|
struct inet6_dev *idev = NULL;
|
|
struct ipv6hdr *hdr = ipv6_hdr(skb);
|
|
struct sock *sk;
|
|
+ struct net *net;
|
|
struct ipv6_pinfo *np;
|
|
const struct in6_addr *saddr = NULL;
|
|
struct dst_entry *dst;
|
|
@@ -435,12 +435,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
|
|
int iif = 0;
|
|
int addr_type = 0;
|
|
int len;
|
|
- u32 mark = IP6_REPLY_MARK(net, skb->mark);
|
|
+ u32 mark;
|
|
|
|
if ((u8 *)hdr < skb->head ||
|
|
(skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
|
|
return;
|
|
|
|
+ if (!skb->dev)
|
|
+ return;
|
|
+ net = dev_net(skb->dev);
|
|
+ mark = IP6_REPLY_MARK(net, skb->mark);
|
|
/*
|
|
* Make sure we respect the rules
|
|
* i.e. RFC 1885 2.4(e)
|
|
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
|
|
index ae3786132c236..6613d8dbb0e5a 100644
|
|
--- a/net/ipv6/ip6_fib.c
|
|
+++ b/net/ipv6/ip6_fib.c
|
|
@@ -627,7 +627,11 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
|
|
return -ENOENT;
|
|
}
|
|
|
|
- res = fib6_dump_table(tb, skb, cb);
|
|
+ if (!cb->args[0]) {
|
|
+ res = fib6_dump_table(tb, skb, cb);
|
|
+ if (!res)
|
|
+ cb->args[0] = 1;
|
|
+ }
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
|
|
index 515adbdba1d27..e83c41c53f4a4 100644
|
|
--- a/net/ipv6/ip6_gre.c
|
|
+++ b/net/ipv6/ip6_gre.c
|
|
@@ -532,13 +532,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
|
|
struct ip6_tnl *tunnel;
|
|
u8 ver;
|
|
|
|
- if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
|
|
- return PACKET_REJECT;
|
|
-
|
|
ipv6h = ipv6_hdr(skb);
|
|
ershdr = (struct erspan_base_hdr *)skb->data;
|
|
ver = ershdr->ver;
|
|
- tpi->key = cpu_to_be32(get_session_id(ershdr));
|
|
|
|
tunnel = ip6gre_tunnel_lookup(skb->dev,
|
|
&ipv6h->saddr, &ipv6h->daddr, tpi->key,
|
|
@@ -879,6 +875,9 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
|
|
struct net_device_stats *stats = &t->dev->stats;
|
|
int ret;
|
|
|
|
+ if (!pskb_inet_may_pull(skb))
|
|
+ goto tx_err;
|
|
+
|
|
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
|
|
goto tx_err;
|
|
|
|
@@ -917,10 +916,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
|
__u8 dsfield = false;
|
|
struct flowi6 fl6;
|
|
int err = -EINVAL;
|
|
+ __be16 proto;
|
|
__u32 mtu;
|
|
int nhoff;
|
|
int thoff;
|
|
|
|
+ if (!pskb_inet_may_pull(skb))
|
|
+ goto tx_err;
|
|
+
|
|
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
|
|
goto tx_err;
|
|
|
|
@@ -993,8 +996,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
|
goto tx_err;
|
|
}
|
|
} else {
|
|
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
|
|
-
|
|
switch (skb->protocol) {
|
|
case htons(ETH_P_IP):
|
|
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
|
|
@@ -1002,7 +1003,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
|
&dsfield, &encap_limit);
|
|
break;
|
|
case htons(ETH_P_IPV6):
|
|
- if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
|
|
+ if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
|
|
goto tx_err;
|
|
if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
|
|
&dsfield, &encap_limit))
|
|
@@ -1029,8 +1030,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
|
}
|
|
|
|
/* Push GRE header. */
|
|
- gre_build_header(skb, 8, TUNNEL_SEQ,
|
|
- htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
|
|
+ proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
|
|
+ : htons(ETH_P_ERSPAN2);
|
|
+ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
|
|
|
|
/* TooBig packet may have updated dst->dev's mtu */
|
|
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
|
|
@@ -1163,6 +1165,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
|
|
t->parms.i_flags = p->i_flags;
|
|
t->parms.o_flags = p->o_flags;
|
|
t->parms.fwmark = p->fwmark;
|
|
+ t->parms.erspan_ver = p->erspan_ver;
|
|
+ t->parms.index = p->index;
|
|
+ t->parms.dir = p->dir;
|
|
+ t->parms.hwid = p->hwid;
|
|
dst_cache_reset(&t->dst_cache);
|
|
}
|
|
|
|
@@ -1711,6 +1717,24 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
|
|
return 0;
|
|
}
|
|
|
|
+static void ip6erspan_set_version(struct nlattr *data[],
|
|
+ struct __ip6_tnl_parm *parms)
|
|
+{
|
|
+ parms->erspan_ver = 1;
|
|
+ if (data[IFLA_GRE_ERSPAN_VER])
|
|
+ parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
|
|
+
|
|
+ if (parms->erspan_ver == 1) {
|
|
+ if (data[IFLA_GRE_ERSPAN_INDEX])
|
|
+ parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
|
|
+ } else if (parms->erspan_ver == 2) {
|
|
+ if (data[IFLA_GRE_ERSPAN_DIR])
|
|
+ parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
|
|
+ if (data[IFLA_GRE_ERSPAN_HWID])
|
|
+ parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
|
|
+ }
|
|
+}
|
|
+
|
|
static void ip6gre_netlink_parms(struct nlattr *data[],
|
|
struct __ip6_tnl_parm *parms)
|
|
{
|
|
@@ -1759,20 +1783,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
|
|
|
|
if (data[IFLA_GRE_COLLECT_METADATA])
|
|
parms->collect_md = true;
|
|
-
|
|
- parms->erspan_ver = 1;
|
|
- if (data[IFLA_GRE_ERSPAN_VER])
|
|
- parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
|
|
-
|
|
- if (parms->erspan_ver == 1) {
|
|
- if (data[IFLA_GRE_ERSPAN_INDEX])
|
|
- parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
|
|
- } else if (parms->erspan_ver == 2) {
|
|
- if (data[IFLA_GRE_ERSPAN_DIR])
|
|
- parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
|
|
- if (data[IFLA_GRE_ERSPAN_HWID])
|
|
- parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
|
|
- }
|
|
}
|
|
|
|
static int ip6gre_tap_init(struct net_device *dev)
|
|
@@ -2025,9 +2035,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
|
|
struct nlattr *data[],
|
|
struct netlink_ext_ack *extack)
|
|
{
|
|
- struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
|
|
+ struct ip6_tnl *t = netdev_priv(dev);
|
|
+ struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
|
|
struct __ip6_tnl_parm p;
|
|
- struct ip6_tnl *t;
|
|
|
|
t = ip6gre_changelink_common(dev, tb, data, &p, extack);
|
|
if (IS_ERR(t))
|
|
@@ -2096,12 +2106,17 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
|
|
{
|
|
struct ip6_tnl *t = netdev_priv(dev);
|
|
struct __ip6_tnl_parm *p = &t->parms;
|
|
+ __be16 o_flags = p->o_flags;
|
|
+
|
|
+ if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
|
|
+ !p->collect_md)
|
|
+ o_flags |= TUNNEL_KEY;
|
|
|
|
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
|
|
nla_put_be16(skb, IFLA_GRE_IFLAGS,
|
|
gre_tnl_flags_to_gre_flags(p->i_flags)) ||
|
|
nla_put_be16(skb, IFLA_GRE_OFLAGS,
|
|
- gre_tnl_flags_to_gre_flags(p->o_flags)) ||
|
|
+ gre_tnl_flags_to_gre_flags(o_flags)) ||
|
|
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
|
|
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
|
|
nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
|
|
@@ -2196,6 +2211,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
|
|
int err;
|
|
|
|
ip6gre_netlink_parms(data, &nt->parms);
|
|
+ ip6erspan_set_version(data, &nt->parms);
|
|
ign = net_generic(net, ip6gre_net_id);
|
|
|
|
if (nt->parms.collect_md) {
|
|
@@ -2241,6 +2257,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
|
|
if (IS_ERR(t))
|
|
return PTR_ERR(t);
|
|
|
|
+ ip6erspan_set_version(data, &p);
|
|
ip6gre_tunnel_unlink_md(ign, t);
|
|
ip6gre_tunnel_unlink(ign, t);
|
|
ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
|
|
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
|
|
index 99179b9c83840..0c6403cf8b522 100644
|
|
--- a/net/ipv6/ip6_tunnel.c
|
|
+++ b/net/ipv6/ip6_tunnel.c
|
|
@@ -1243,10 +1243,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
u8 tproto;
|
|
int err;
|
|
|
|
- /* ensure we can access the full inner ip header */
|
|
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
|
|
- return -1;
|
|
-
|
|
iph = ip_hdr(skb);
|
|
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
|
|
|
|
@@ -1321,9 +1317,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
u8 tproto;
|
|
int err;
|
|
|
|
- if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
|
|
- return -1;
|
|
-
|
|
ipv6h = ipv6_hdr(skb);
|
|
tproto = READ_ONCE(t->parms.proto);
|
|
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
|
|
@@ -1405,6 +1398,9 @@ ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
struct net_device_stats *stats = &t->dev->stats;
|
|
int ret;
|
|
|
|
+ if (!pskb_inet_may_pull(skb))
|
|
+ goto tx_err;
|
|
+
|
|
switch (skb->protocol) {
|
|
case htons(ETH_P_IP):
|
|
ret = ip4ip6_tnl_xmit(skb, dev);
|
|
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
|
|
index 706fe42e49289..8b6eefff2f7ea 100644
|
|
--- a/net/ipv6/ip6_vti.c
|
|
+++ b/net/ipv6/ip6_vti.c
|
|
@@ -522,18 +522,18 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
|
|
{
|
|
struct ip6_tnl *t = netdev_priv(dev);
|
|
struct net_device_stats *stats = &t->dev->stats;
|
|
- struct ipv6hdr *ipv6h;
|
|
struct flowi fl;
|
|
int ret;
|
|
|
|
+ if (!pskb_inet_may_pull(skb))
|
|
+ goto tx_err;
|
|
+
|
|
memset(&fl, 0, sizeof(fl));
|
|
|
|
switch (skb->protocol) {
|
|
case htons(ETH_P_IPV6):
|
|
- ipv6h = ipv6_hdr(skb);
|
|
-
|
|
if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
|
|
- vti6_addr_conflict(t, ipv6h))
|
|
+ vti6_addr_conflict(t, ipv6_hdr(skb)))
|
|
goto tx_err;
|
|
|
|
xfrm_decode_session(skb, &fl, AF_INET6);
|
|
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
|
|
index 377a2ee5d9ad8..edda3f9daab9a 100644
|
|
--- a/net/ipv6/ip6mr.c
|
|
+++ b/net/ipv6/ip6mr.c
|
|
@@ -51,6 +51,7 @@
|
|
#include <linux/export.h>
|
|
#include <net/ip6_checksum.h>
|
|
#include <linux/netconf.h>
|
|
+#include <net/ip_tunnels.h>
|
|
|
|
#include <linux/nospec.h>
|
|
|
|
@@ -599,13 +600,12 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
|
|
.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
|
|
.flowi6_mark = skb->mark,
|
|
};
|
|
- int err;
|
|
|
|
- err = ip6mr_fib_lookup(net, &fl6, &mrt);
|
|
- if (err < 0) {
|
|
- kfree_skb(skb);
|
|
- return err;
|
|
- }
|
|
+ if (!pskb_inet_may_pull(skb))
|
|
+ goto tx_err;
|
|
+
|
|
+ if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
|
|
+ goto tx_err;
|
|
|
|
read_lock(&mrt_lock);
|
|
dev->stats.tx_bytes += skb->len;
|
|
@@ -614,6 +614,11 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
|
|
read_unlock(&mrt_lock);
|
|
kfree_skb(skb);
|
|
return NETDEV_TX_OK;
|
|
+
|
|
+tx_err:
|
|
+ dev->stats.tx_errors++;
|
|
+ kfree_skb(skb);
|
|
+ return NETDEV_TX_OK;
|
|
}
|
|
|
|
static int reg_vif_get_iflink(const struct net_device *dev)
|
|
@@ -1511,6 +1516,9 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
|
|
continue;
|
|
rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
|
|
list_del_rcu(&c->list);
|
|
+ call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
|
|
+ FIB_EVENT_ENTRY_DEL,
|
|
+ (struct mfc6_cache *)c, mrt->id);
|
|
mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
|
|
mr_cache_put(c);
|
|
}
|
|
@@ -1519,10 +1527,6 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
|
|
spin_lock_bh(&mfc_unres_lock);
|
|
list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
|
|
list_del(&c->list);
|
|
- call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
|
|
- FIB_EVENT_ENTRY_DEL,
|
|
- (struct mfc6_cache *)c,
|
|
- mrt->id);
|
|
mr6_netlink_event(mrt, (struct mfc6_cache *)c,
|
|
RTM_DELROUTE);
|
|
ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
|
|
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
|
|
index 8b075f0bc3516..6d0b1f3e927bd 100644
|
|
--- a/net/ipv6/netfilter.c
|
|
+++ b/net/ipv6/netfilter.c
|
|
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
|
|
struct sock *sk = sk_to_full_sk(skb->sk);
|
|
unsigned int hh_len;
|
|
struct dst_entry *dst;
|
|
+ int strict = (ipv6_addr_type(&iph->daddr) &
|
|
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
|
|
struct flowi6 fl6 = {
|
|
.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
|
|
- rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
|
|
+ strict ? skb_dst(skb)->dev->ifindex : 0,
|
|
.flowi6_mark = skb->mark,
|
|
.flowi6_uid = sock_net_uid(net, sk),
|
|
.daddr = iph->daddr,
|
|
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
|
|
index 8d0ba757a46ce..9b2f272ca1649 100644
|
|
--- a/net/ipv6/seg6.c
|
|
+++ b/net/ipv6/seg6.c
|
|
@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
|
|
rcu_read_unlock();
|
|
|
|
genlmsg_end(msg, hdr);
|
|
- genlmsg_reply(msg, info);
|
|
-
|
|
- return 0;
|
|
+ return genlmsg_reply(msg, info);
|
|
|
|
nla_put_failure:
|
|
rcu_read_unlock();
|
|
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
|
|
index 8181ee7e1e270..ee5403cbe655e 100644
|
|
--- a/net/ipv6/seg6_iptunnel.c
|
|
+++ b/net/ipv6/seg6_iptunnel.c
|
|
@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
|
|
} else {
|
|
ip6_flow_hdr(hdr, 0, flowlabel);
|
|
hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
|
|
+
|
|
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
|
|
}
|
|
|
|
hdr->nexthdr = NEXTHDR_ROUTING;
|
|
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
|
|
index 51c9f75f34b9b..e8a1dabef803e 100644
|
|
--- a/net/ipv6/sit.c
|
|
+++ b/net/ipv6/sit.c
|
|
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
|
|
}
|
|
|
|
err = 0;
|
|
- if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
|
|
+ if (__in6_dev_get(skb->dev) &&
|
|
+ !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
|
|
goto out;
|
|
|
|
if (t->parms.iph.daddr == 0)
|
|
@@ -1021,6 +1022,9 @@ tx_error:
|
|
static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
|
|
struct net_device *dev)
|
|
{
|
|
+ if (!pskb_inet_may_pull(skb))
|
|
+ goto tx_err;
|
|
+
|
|
switch (skb->protocol) {
|
|
case htons(ETH_P_IP):
|
|
sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
|
|
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
|
|
index 03e6b7a2bc530..f5c213001b055 100644
|
|
--- a/net/ipv6/tcp_ipv6.c
|
|
+++ b/net/ipv6/tcp_ipv6.c
|
|
@@ -734,6 +734,7 @@ static void tcp_v6_init_req(struct request_sock *req,
|
|
const struct sock *sk_listener,
|
|
struct sk_buff *skb)
|
|
{
|
|
+ bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
|
|
struct inet_request_sock *ireq = inet_rsk(req);
|
|
const struct ipv6_pinfo *np = inet6_sk(sk_listener);
|
|
|
|
@@ -741,7 +742,7 @@ static void tcp_v6_init_req(struct request_sock *req,
|
|
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
|
|
|
|
/* So that link locals have meaning */
|
|
- if (!sk_listener->sk_bound_dev_if &&
|
|
+ if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
|
|
ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
|
|
ireq->ir_iif = tcp_v6_iif(skb);
|
|
|
|
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
|
|
index d2d97d07ef27a..848dd38a907a1 100644
|
|
--- a/net/ipv6/udp.c
|
|
+++ b/net/ipv6/udp.c
|
|
@@ -1056,15 +1056,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
|
|
const int hlen = skb_network_header_len(skb) +
|
|
sizeof(struct udphdr);
|
|
|
|
- if (hlen + cork->gso_size > cork->fragsize)
|
|
+ if (hlen + cork->gso_size > cork->fragsize) {
|
|
+ kfree_skb(skb);
|
|
return -EINVAL;
|
|
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
|
|
+ }
|
|
+ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
|
|
+ kfree_skb(skb);
|
|
return -EINVAL;
|
|
- if (udp_sk(sk)->no_check6_tx)
|
|
+ }
|
|
+ if (udp_sk(sk)->no_check6_tx) {
|
|
+ kfree_skb(skb);
|
|
return -EINVAL;
|
|
+ }
|
|
if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
|
|
- dst_xfrm(skb_dst(skb)))
|
|
+ dst_xfrm(skb_dst(skb))) {
|
|
+ kfree_skb(skb);
|
|
return -EIO;
|
|
+ }
|
|
|
|
skb_shinfo(skb)->gso_size = cork->gso_size;
|
|
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
|
|
@@ -1314,10 +1322,7 @@ do_udp_sendmsg:
|
|
ipc6.opt = opt;
|
|
|
|
fl6.flowi6_proto = sk->sk_protocol;
|
|
- if (!ipv6_addr_any(daddr))
|
|
- fl6.daddr = *daddr;
|
|
- else
|
|
- fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
|
|
+ fl6.daddr = *daddr;
|
|
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
|
|
fl6.saddr = np->saddr;
|
|
fl6.fl6_sport = inet->inet_sport;
|
|
@@ -1345,6 +1350,9 @@ do_udp_sendmsg:
|
|
}
|
|
}
|
|
|
|
+ if (ipv6_addr_any(&fl6.daddr))
|
|
+ fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
|
|
+
|
|
final_p = fl6_update_dst(&fl6, opt, &final);
|
|
if (final_p)
|
|
connected = false;
|
|
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
|
|
index 4a46df8441c9f..f5b4febeaa25b 100644
|
|
--- a/net/ipv6/xfrm6_tunnel.c
|
|
+++ b/net/ipv6/xfrm6_tunnel.c
|
|
@@ -144,6 +144,9 @@ static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
|
|
index = __xfrm6_tunnel_spi_check(net, spi);
|
|
if (index >= 0)
|
|
goto alloc_spi;
|
|
+
|
|
+ if (spi == XFRM6_TUNNEL_SPI_MAX)
|
|
+ break;
|
|
}
|
|
for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
|
|
index = __xfrm6_tunnel_spi_check(net, spi);
|
|
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
|
|
index 26f1d435696a6..fed6becc5daf8 100644
|
|
--- a/net/l2tp/l2tp_core.c
|
|
+++ b/net/l2tp/l2tp_core.c
|
|
@@ -83,8 +83,7 @@
|
|
#define L2TP_SLFLAG_S 0x40000000
|
|
#define L2TP_SL_SEQ_MASK 0x00ffffff
|
|
|
|
-#define L2TP_HDR_SIZE_SEQ 10
|
|
-#define L2TP_HDR_SIZE_NOSEQ 6
|
|
+#define L2TP_HDR_SIZE_MAX 14
|
|
|
|
/* Default trace flags */
|
|
#define L2TP_DEFAULT_DEBUG_FLAGS 0
|
|
@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
|
|
__skb_pull(skb, sizeof(struct udphdr));
|
|
|
|
/* Short packet? */
|
|
- if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
|
|
+ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
|
|
l2tp_info(tunnel, L2TP_MSG_DATA,
|
|
"%s: recv short packet (len=%d)\n",
|
|
tunnel->name, skb->len);
|
|
@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
|
|
goto error;
|
|
}
|
|
|
|
+ if (tunnel->version == L2TP_HDR_VER_3 &&
|
|
+ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
|
|
+ goto error;
|
|
+
|
|
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
|
|
l2tp_session_dec_refcount(session);
|
|
|
|
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
|
|
index 9c9afe94d389b..b2ce90260c35f 100644
|
|
--- a/net/l2tp/l2tp_core.h
|
|
+++ b/net/l2tp/l2tp_core.h
|
|
@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
|
|
}
|
|
#endif
|
|
|
|
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
|
|
+ unsigned char **ptr, unsigned char **optr)
|
|
+{
|
|
+ int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
|
|
+
|
|
+ if (opt_len > 0) {
|
|
+ int off = *ptr - *optr;
|
|
+
|
|
+ if (!pskb_may_pull(skb, off + opt_len))
|
|
+ return -1;
|
|
+
|
|
+ if (skb->data != *optr) {
|
|
+ *optr = skb->data;
|
|
+ *ptr = skb->data + off;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
#define l2tp_printk(ptr, type, func, fmt, ...) \
|
|
do { \
|
|
if (((ptr)->debug) & (type)) \
|
|
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
|
|
index 35f6f86d4dcce..d4c60523c549d 100644
|
|
--- a/net/l2tp/l2tp_ip.c
|
|
+++ b/net/l2tp/l2tp_ip.c
|
|
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
|
|
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
|
|
}
|
|
|
|
+ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
|
|
+ goto discard_sess;
|
|
+
|
|
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
|
|
l2tp_session_dec_refcount(session);
|
|
|
|
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
|
|
index 237f1a4a0b0c8..0ae6899edac07 100644
|
|
--- a/net/l2tp/l2tp_ip6.c
|
|
+++ b/net/l2tp/l2tp_ip6.c
|
|
@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
|
|
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
|
|
}
|
|
|
|
+ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
|
|
+ goto discard_sess;
|
|
+
|
|
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
|
|
l2tp_session_dec_refcount(session);
|
|
|
|
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
|
|
index 818aa00603495..517dad83c2fa7 100644
|
|
--- a/net/mac80211/cfg.c
|
|
+++ b/net/mac80211/cfg.c
|
|
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
|
|
BSS_CHANGED_P2P_PS |
|
|
BSS_CHANGED_TXPOWER;
|
|
int err;
|
|
+ int prev_beacon_int;
|
|
|
|
old = sdata_dereference(sdata->u.ap.beacon, sdata);
|
|
if (old)
|
|
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
|
|
|
|
sdata->needed_rx_chains = sdata->local->rx_chains;
|
|
|
|
+ prev_beacon_int = sdata->vif.bss_conf.beacon_int;
|
|
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
|
|
|
|
if (params->he_cap)
|
|
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
|
|
if (!err)
|
|
ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
|
|
mutex_unlock(&local->mtx);
|
|
- if (err)
|
|
+ if (err) {
|
|
+ sdata->vif.bss_conf.beacon_int = prev_beacon_int;
|
|
return err;
|
|
+ }
|
|
|
|
/*
|
|
* Apply control port protocol, this allows us to
|
|
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
|
|
index 7b8320d4a8e4b..3131356e290a0 100644
|
|
--- a/net/mac80211/main.c
|
|
+++ b/net/mac80211/main.c
|
|
@@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
|
|
* We need a bit of data queued to build aggregates properly, so
|
|
* instruct the TCP stack to allow more than a single ms of data
|
|
* to be queued in the stack. The value is a bit-shift of 1
|
|
- * second, so 8 is ~4ms of queued data. Only affects local TCP
|
|
+ * second, so 7 is ~8ms of queued data. Only affects local TCP
|
|
* sockets.
|
|
* This is the default, anyhow - drivers may need to override it
|
|
* for local reasons (longer buffers, longer completion time, or
|
|
* similar).
|
|
*/
|
|
- local->hw.tx_sk_pacing_shift = 8;
|
|
+ local->hw.tx_sk_pacing_shift = 7;
|
|
|
|
/* set up some defaults */
|
|
local->hw.queues = 1;
|
|
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
|
|
index 21526630bf655..e84103b405341 100644
|
|
--- a/net/mac80211/mesh.h
|
|
+++ b/net/mac80211/mesh.h
|
|
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
|
|
* @dst: mesh path destination mac address
|
|
* @mpp: mesh proxy mac address
|
|
* @rhash: rhashtable list pointer
|
|
+ * @walk_list: linked list containing all mesh_path objects.
|
|
* @gate_list: list pointer for known gates list
|
|
* @sdata: mesh subif
|
|
* @next_hop: mesh neighbor to which frames for this destination will be
|
|
@@ -105,6 +106,7 @@ struct mesh_path {
|
|
u8 dst[ETH_ALEN];
|
|
u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
|
|
struct rhash_head rhash;
|
|
+ struct hlist_node walk_list;
|
|
struct hlist_node gate_list;
|
|
struct ieee80211_sub_if_data *sdata;
|
|
struct sta_info __rcu *next_hop;
|
|
@@ -133,12 +135,16 @@ struct mesh_path {
|
|
* gate's mpath may or may not be resolved and active.
|
|
* @gates_lock: protects updates to known_gates
|
|
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
|
|
+ * @walk_head: linked list containging all mesh_path objects
|
|
+ * @walk_lock: lock protecting walk_head
|
|
* @entries: number of entries in the table
|
|
*/
|
|
struct mesh_table {
|
|
struct hlist_head known_gates;
|
|
spinlock_t gates_lock;
|
|
struct rhashtable rhead;
|
|
+ struct hlist_head walk_head;
|
|
+ spinlock_t walk_lock;
|
|
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
|
|
};
|
|
|
|
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
|
|
index a5125624a76dc..c3a7396fb9556 100644
|
|
--- a/net/mac80211/mesh_pathtbl.c
|
|
+++ b/net/mac80211/mesh_pathtbl.c
|
|
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
|
|
return NULL;
|
|
|
|
INIT_HLIST_HEAD(&newtbl->known_gates);
|
|
+ INIT_HLIST_HEAD(&newtbl->walk_head);
|
|
atomic_set(&newtbl->entries, 0);
|
|
spin_lock_init(&newtbl->gates_lock);
|
|
+ spin_lock_init(&newtbl->walk_lock);
|
|
|
|
return newtbl;
|
|
}
|
|
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
|
|
static struct mesh_path *
|
|
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
|
|
{
|
|
- int i = 0, ret;
|
|
- struct mesh_path *mpath = NULL;
|
|
- struct rhashtable_iter iter;
|
|
-
|
|
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
|
|
- if (ret)
|
|
- return NULL;
|
|
-
|
|
- rhashtable_walk_start(&iter);
|
|
+ int i = 0;
|
|
+ struct mesh_path *mpath;
|
|
|
|
- while ((mpath = rhashtable_walk_next(&iter))) {
|
|
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
|
|
- continue;
|
|
- if (IS_ERR(mpath))
|
|
- break;
|
|
+ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
|
|
if (i++ == idx)
|
|
break;
|
|
}
|
|
- rhashtable_walk_stop(&iter);
|
|
- rhashtable_walk_exit(&iter);
|
|
|
|
- if (IS_ERR(mpath) || !mpath)
|
|
+ if (!mpath)
|
|
return NULL;
|
|
|
|
if (mpath_expired(mpath)) {
|
|
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
tbl = sdata->u.mesh.mesh_paths;
|
|
+ spin_lock_bh(&tbl->walk_lock);
|
|
do {
|
|
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
|
|
&new_mpath->rhash,
|
|
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
|
|
mpath = rhashtable_lookup_fast(&tbl->rhead,
|
|
dst,
|
|
mesh_rht_params);
|
|
-
|
|
+ else if (!ret)
|
|
+ hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
|
|
} while (unlikely(ret == -EEXIST && !mpath));
|
|
+ spin_unlock_bh(&tbl->walk_lock);
|
|
|
|
- if (ret && ret != -EEXIST)
|
|
- return ERR_PTR(ret);
|
|
-
|
|
- /* At this point either new_mpath was added, or we found a
|
|
- * matching entry already in the table; in the latter case
|
|
- * free the unnecessary new entry.
|
|
- */
|
|
- if (ret == -EEXIST) {
|
|
+ if (ret) {
|
|
kfree(new_mpath);
|
|
+
|
|
+ if (ret != -EEXIST)
|
|
+ return ERR_PTR(ret);
|
|
+
|
|
new_mpath = mpath;
|
|
}
|
|
+
|
|
sdata->u.mesh.mesh_paths_generation++;
|
|
return new_mpath;
|
|
}
|
|
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
|
|
|
|
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
|
|
tbl = sdata->u.mesh.mpp_paths;
|
|
+
|
|
+ spin_lock_bh(&tbl->walk_lock);
|
|
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
|
|
&new_mpath->rhash,
|
|
mesh_rht_params);
|
|
+ if (!ret)
|
|
+ hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
|
|
+ spin_unlock_bh(&tbl->walk_lock);
|
|
+
|
|
+ if (ret)
|
|
+ kfree(new_mpath);
|
|
|
|
sdata->u.mesh.mpp_paths_generation++;
|
|
return ret;
|
|
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
|
|
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
|
|
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
|
|
struct mesh_path *mpath;
|
|
- struct rhashtable_iter iter;
|
|
- int ret;
|
|
-
|
|
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
|
|
- if (ret)
|
|
- return;
|
|
|
|
- rhashtable_walk_start(&iter);
|
|
-
|
|
- while ((mpath = rhashtable_walk_next(&iter))) {
|
|
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
|
|
- continue;
|
|
- if (IS_ERR(mpath))
|
|
- break;
|
|
+ rcu_read_lock();
|
|
+ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
|
|
if (rcu_access_pointer(mpath->next_hop) == sta &&
|
|
mpath->flags & MESH_PATH_ACTIVE &&
|
|
!(mpath->flags & MESH_PATH_FIXED)) {
|
|
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
|
|
WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
|
|
}
|
|
}
|
|
- rhashtable_walk_stop(&iter);
|
|
- rhashtable_walk_exit(&iter);
|
|
+ rcu_read_unlock();
|
|
}
|
|
|
|
static void mesh_path_free_rcu(struct mesh_table *tbl,
|
|
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
|
|
|
|
static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
|
|
{
|
|
+ hlist_del_rcu(&mpath->walk_list);
|
|
rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
|
|
mesh_path_free_rcu(tbl, mpath);
|
|
}
|
|
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
|
|
struct ieee80211_sub_if_data *sdata = sta->sdata;
|
|
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
|
|
struct mesh_path *mpath;
|
|
- struct rhashtable_iter iter;
|
|
- int ret;
|
|
-
|
|
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
|
|
- if (ret)
|
|
- return;
|
|
-
|
|
- rhashtable_walk_start(&iter);
|
|
-
|
|
- while ((mpath = rhashtable_walk_next(&iter))) {
|
|
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
|
|
- continue;
|
|
- if (IS_ERR(mpath))
|
|
- break;
|
|
+ struct hlist_node *n;
|
|
|
|
+ spin_lock_bh(&tbl->walk_lock);
|
|
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
|
|
if (rcu_access_pointer(mpath->next_hop) == sta)
|
|
__mesh_path_del(tbl, mpath);
|
|
}
|
|
-
|
|
- rhashtable_walk_stop(&iter);
|
|
- rhashtable_walk_exit(&iter);
|
|
+ spin_unlock_bh(&tbl->walk_lock);
|
|
}
|
|
|
|
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
|
|
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
|
|
{
|
|
struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
|
|
struct mesh_path *mpath;
|
|
- struct rhashtable_iter iter;
|
|
- int ret;
|
|
-
|
|
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
|
|
- if (ret)
|
|
- return;
|
|
-
|
|
- rhashtable_walk_start(&iter);
|
|
-
|
|
- while ((mpath = rhashtable_walk_next(&iter))) {
|
|
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
|
|
- continue;
|
|
- if (IS_ERR(mpath))
|
|
- break;
|
|
+ struct hlist_node *n;
|
|
|
|
+ spin_lock_bh(&tbl->walk_lock);
|
|
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
|
|
if (ether_addr_equal(mpath->mpp, proxy))
|
|
__mesh_path_del(tbl, mpath);
|
|
}
|
|
-
|
|
- rhashtable_walk_stop(&iter);
|
|
- rhashtable_walk_exit(&iter);
|
|
+ spin_unlock_bh(&tbl->walk_lock);
|
|
}
|
|
|
|
static void table_flush_by_iface(struct mesh_table *tbl)
|
|
{
|
|
struct mesh_path *mpath;
|
|
- struct rhashtable_iter iter;
|
|
- int ret;
|
|
-
|
|
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
|
|
- if (ret)
|
|
- return;
|
|
-
|
|
- rhashtable_walk_start(&iter);
|
|
+ struct hlist_node *n;
|
|
|
|
- while ((mpath = rhashtable_walk_next(&iter))) {
|
|
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
|
|
- continue;
|
|
- if (IS_ERR(mpath))
|
|
- break;
|
|
+ spin_lock_bh(&tbl->walk_lock);
|
|
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
|
|
__mesh_path_del(tbl, mpath);
|
|
}
|
|
-
|
|
- rhashtable_walk_stop(&iter);
|
|
- rhashtable_walk_exit(&iter);
|
|
+ spin_unlock_bh(&tbl->walk_lock);
|
|
}
|
|
|
|
/**
|
|
@@ -675,7 +624,7 @@ static int table_path_del(struct mesh_table *tbl,
|
|
{
|
|
struct mesh_path *mpath;
|
|
|
|
- rcu_read_lock();
|
|
+ spin_lock_bh(&tbl->walk_lock);
|
|
mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
|
|
if (!mpath) {
|
|
rcu_read_unlock();
|
|
@@ -683,7 +632,7 @@ static int table_path_del(struct mesh_table *tbl,
|
|
}
|
|
|
|
__mesh_path_del(tbl, mpath);
|
|
- rcu_read_unlock();
|
|
+ spin_unlock_bh(&tbl->walk_lock);
|
|
return 0;
|
|
}
|
|
|
|
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
|
|
struct mesh_table *tbl)
|
|
{
|
|
struct mesh_path *mpath;
|
|
- struct rhashtable_iter iter;
|
|
- int ret;
|
|
+ struct hlist_node *n;
|
|
|
|
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
|
|
- if (ret)
|
|
- return;
|
|
-
|
|
- rhashtable_walk_start(&iter);
|
|
-
|
|
- while ((mpath = rhashtable_walk_next(&iter))) {
|
|
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
|
|
- continue;
|
|
- if (IS_ERR(mpath))
|
|
- break;
|
|
+ spin_lock_bh(&tbl->walk_lock);
|
|
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
|
|
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
|
|
(!(mpath->flags & MESH_PATH_FIXED)) &&
|
|
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
|
|
__mesh_path_del(tbl, mpath);
|
|
}
|
|
-
|
|
- rhashtable_walk_stop(&iter);
|
|
- rhashtable_walk_exit(&iter);
|
|
+ spin_unlock_bh(&tbl->walk_lock);
|
|
}
|
|
|
|
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
|
|
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
|
|
index 428f7ad5f9b59..46ecc417c4210 100644
|
|
--- a/net/mac80211/rx.c
|
|
+++ b/net/mac80211/rx.c
|
|
@@ -143,6 +143,9 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
|
|
/* allocate extra bitmaps */
|
|
if (status->chains)
|
|
len += 4 * hweight8(status->chains);
|
|
+ /* vendor presence bitmap */
|
|
+ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
|
|
+ len += 4;
|
|
|
|
if (ieee80211_have_rx_timestamp(status)) {
|
|
len = ALIGN(len, 8);
|
|
@@ -207,8 +210,6 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
|
|
if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
|
|
struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
|
|
|
|
- /* vendor presence bitmap */
|
|
- len += 4;
|
|
/* alignment for fixed 6-byte vendor data header */
|
|
len = ALIGN(len, 2);
|
|
/* vendor data header */
|
|
@@ -753,6 +754,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
|
|
struct ieee80211_sub_if_data *monitor_sdata =
|
|
rcu_dereference(local->monitor_sdata);
|
|
bool only_monitor = false;
|
|
+ unsigned int min_head_len;
|
|
|
|
if (status->flag & RX_FLAG_RADIOTAP_HE)
|
|
rtap_space += sizeof(struct ieee80211_radiotap_he);
|
|
@@ -766,6 +768,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
|
|
rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
|
|
}
|
|
|
|
+ min_head_len = rtap_space;
|
|
+
|
|
/*
|
|
* First, we may need to make a copy of the skb because
|
|
* (1) we need to modify it for radiotap (if not present), and
|
|
@@ -775,18 +779,23 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
|
|
* the SKB because it has a bad FCS/PLCP checksum.
|
|
*/
|
|
|
|
- if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
|
|
- if (unlikely(origskb->len <= FCS_LEN)) {
|
|
- /* driver bug */
|
|
- WARN_ON(1);
|
|
- dev_kfree_skb(origskb);
|
|
- return NULL;
|
|
+ if (!(status->flag & RX_FLAG_NO_PSDU)) {
|
|
+ if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
|
|
+ if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
|
|
+ /* driver bug */
|
|
+ WARN_ON(1);
|
|
+ dev_kfree_skb(origskb);
|
|
+ return NULL;
|
|
+ }
|
|
+ present_fcs_len = FCS_LEN;
|
|
}
|
|
- present_fcs_len = FCS_LEN;
|
|
+
|
|
+ /* also consider the hdr->frame_control */
|
|
+ min_head_len += 2;
|
|
}
|
|
|
|
- /* ensure hdr->frame_control and vendor radiotap data are in skb head */
|
|
- if (!pskb_may_pull(origskb, 2 + rtap_space)) {
|
|
+ /* ensure that the expected data elements are in skb head */
|
|
+ if (!pskb_may_pull(origskb, min_head_len)) {
|
|
dev_kfree_skb(origskb);
|
|
return NULL;
|
|
}
|
|
@@ -2631,6 +2640,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
|
|
struct ieee80211_sub_if_data *sdata = rx->sdata;
|
|
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
|
|
u16 ac, q, hdrlen;
|
|
+ int tailroom = 0;
|
|
|
|
hdr = (struct ieee80211_hdr *) skb->data;
|
|
hdrlen = ieee80211_hdrlen(hdr->frame_control);
|
|
@@ -2717,8 +2727,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
|
|
if (!ifmsh->mshcfg.dot11MeshForwarding)
|
|
goto out;
|
|
|
|
+ if (sdata->crypto_tx_tailroom_needed_cnt)
|
|
+ tailroom = IEEE80211_ENCRYPT_TAILROOM;
|
|
+
|
|
fwd_skb = skb_copy_expand(skb, local->tx_headroom +
|
|
- sdata->encrypt_headroom, 0, GFP_ATOMIC);
|
|
+ sdata->encrypt_headroom,
|
|
+ tailroom, GFP_ATOMIC);
|
|
if (!fwd_skb)
|
|
goto out;
|
|
|
|
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
|
|
index 1f536ba573b48..65e511756e64c 100644
|
|
--- a/net/mac80211/tx.c
|
|
+++ b/net/mac80211/tx.c
|
|
@@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
|
|
int head_need, bool may_encrypt)
|
|
{
|
|
struct ieee80211_local *local = sdata->local;
|
|
+ struct ieee80211_hdr *hdr;
|
|
+ bool enc_tailroom;
|
|
int tail_need = 0;
|
|
|
|
- if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
|
|
+ hdr = (struct ieee80211_hdr *) skb->data;
|
|
+ enc_tailroom = may_encrypt &&
|
|
+ (sdata->crypto_tx_tailroom_needed_cnt ||
|
|
+ ieee80211_is_mgmt(hdr->frame_control));
|
|
+
|
|
+ if (enc_tailroom) {
|
|
tail_need = IEEE80211_ENCRYPT_TAILROOM;
|
|
tail_need -= skb_tailroom(skb);
|
|
tail_need = max_t(int, tail_need, 0);
|
|
@@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
|
|
|
|
if (skb_cloned(skb) &&
|
|
(!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
|
|
- !skb_clone_writable(skb, ETH_HLEN) ||
|
|
- (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
|
|
+ !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
|
|
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
|
|
else if (head_need || tail_need)
|
|
I802_DEBUG_INC(local->tx_expand_skb_head);
|
|
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
|
|
index bec424316ea40..dddfff7cf44fe 100644
|
|
--- a/net/mac80211/util.c
|
|
+++ b/net/mac80211/util.c
|
|
@@ -299,16 +299,16 @@ out:
|
|
spin_unlock_bh(&fq->lock);
|
|
}
|
|
|
|
-void ieee80211_wake_txqs(unsigned long data)
|
|
+static void
|
|
+__releases(&local->queue_stop_reason_lock)
|
|
+__acquires(&local->queue_stop_reason_lock)
|
|
+_ieee80211_wake_txqs(struct ieee80211_local *local, unsigned long *flags)
|
|
{
|
|
- struct ieee80211_local *local = (struct ieee80211_local *)data;
|
|
struct ieee80211_sub_if_data *sdata;
|
|
int n_acs = IEEE80211_NUM_ACS;
|
|
- unsigned long flags;
|
|
int i;
|
|
|
|
rcu_read_lock();
|
|
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
|
|
|
|
if (local->hw.queues < IEEE80211_NUM_ACS)
|
|
n_acs = 1;
|
|
@@ -317,7 +317,7 @@ void ieee80211_wake_txqs(unsigned long data)
|
|
if (local->queue_stop_reasons[i])
|
|
continue;
|
|
|
|
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
|
|
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, *flags);
|
|
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
|
|
int ac;
|
|
|
|
@@ -329,13 +329,22 @@ void ieee80211_wake_txqs(unsigned long data)
|
|
__ieee80211_wake_txqs(sdata, ac);
|
|
}
|
|
}
|
|
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
|
|
+ spin_lock_irqsave(&local->queue_stop_reason_lock, *flags);
|
|
}
|
|
|
|
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
|
|
rcu_read_unlock();
|
|
}
|
|
|
|
+void ieee80211_wake_txqs(unsigned long data)
|
|
+{
|
|
+ struct ieee80211_local *local = (struct ieee80211_local *)data;
|
|
+ unsigned long flags;
|
|
+
|
|
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
|
|
+ _ieee80211_wake_txqs(local, &flags);
|
|
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
|
|
+}
|
|
+
|
|
void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
|
|
{
|
|
struct ieee80211_sub_if_data *sdata;
|
|
@@ -371,7 +380,8 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
|
|
|
|
static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
|
|
enum queue_stop_reason reason,
|
|
- bool refcounted)
|
|
+ bool refcounted,
|
|
+ unsigned long *flags)
|
|
{
|
|
struct ieee80211_local *local = hw_to_local(hw);
|
|
|
|
@@ -405,8 +415,19 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
|
|
} else
|
|
tasklet_schedule(&local->tx_pending_tasklet);
|
|
|
|
- if (local->ops->wake_tx_queue)
|
|
- tasklet_schedule(&local->wake_txqs_tasklet);
|
|
+ /*
|
|
+ * Calling _ieee80211_wake_txqs here can be a problem because it may
|
|
+ * release queue_stop_reason_lock which has been taken by
|
|
+ * __ieee80211_wake_queue's caller. It is certainly not very nice to
|
|
+ * release someone's lock, but it is fine because all the callers of
|
|
+ * __ieee80211_wake_queue call it right before releasing the lock.
|
|
+ */
|
|
+ if (local->ops->wake_tx_queue) {
|
|
+ if (reason == IEEE80211_QUEUE_STOP_REASON_DRIVER)
|
|
+ tasklet_schedule(&local->wake_txqs_tasklet);
|
|
+ else
|
|
+ _ieee80211_wake_txqs(local, flags);
|
|
+ }
|
|
}
|
|
|
|
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
|
|
@@ -417,7 +438,7 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
|
|
- __ieee80211_wake_queue(hw, queue, reason, refcounted);
|
|
+ __ieee80211_wake_queue(hw, queue, reason, refcounted, &flags);
|
|
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
|
|
}
|
|
|
|
@@ -514,7 +535,7 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
|
|
false);
|
|
__skb_queue_tail(&local->pending[queue], skb);
|
|
__ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
|
|
- false);
|
|
+ false, &flags);
|
|
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
|
|
}
|
|
|
|
@@ -547,7 +568,7 @@ void ieee80211_add_pending_skbs(struct ieee80211_local *local,
|
|
for (i = 0; i < hw->queues; i++)
|
|
__ieee80211_wake_queue(hw, i,
|
|
IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
|
|
- false);
|
|
+ false, &flags);
|
|
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
|
|
}
|
|
|
|
@@ -605,7 +626,7 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
|
|
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
|
|
|
|
for_each_set_bit(i, &queues, hw->queues)
|
|
- __ieee80211_wake_queue(hw, i, reason, refcounted);
|
|
+ __ieee80211_wake_queue(hw, i, reason, refcounted, &flags);
|
|
|
|
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
|
|
}
|
|
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
|
|
index c00b6a2e8e3cb..13ade5782847b 100644
|
|
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
|
|
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
|
|
@@ -219,10 +219,6 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
|
u32 ip;
|
|
|
|
- /* MAC can be src only */
|
|
- if (!(opt->flags & IPSET_DIM_TWO_SRC))
|
|
- return 0;
|
|
-
|
|
ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
|
|
if (ip < map->first_ip || ip > map->last_ip)
|
|
return -IPSET_ERR_BITMAP_RANGE;
|
|
@@ -233,7 +229,11 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|
return -EINVAL;
|
|
|
|
e.id = ip_to_id(map, ip);
|
|
- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
|
|
+
|
|
+ if (opt->flags & IPSET_DIM_ONE_SRC)
|
|
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
|
|
+ else
|
|
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
|
|
|
|
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
|
|
}
|
|
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
|
|
index 1ab5ed2f6839a..fd87de3ed55b3 100644
|
|
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
|
|
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
|
|
@@ -103,7 +103,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
|
|
return -EINVAL;
|
|
|
|
- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
|
|
+ if (opt->flags & IPSET_DIM_ONE_SRC)
|
|
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
|
|
+ else
|
|
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
|
|
+
|
|
if (ether_addr_equal(e.ether, invalid_ether))
|
|
return -EINVAL;
|
|
|
|
@@ -211,15 +215,15 @@ hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|
};
|
|
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
|
|
|
- /* MAC can be src only */
|
|
- if (!(opt->flags & IPSET_DIM_TWO_SRC))
|
|
- return 0;
|
|
-
|
|
if (skb_mac_header(skb) < skb->head ||
|
|
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
|
|
return -EINVAL;
|
|
|
|
- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
|
|
+ if (opt->flags & IPSET_DIM_ONE_SRC)
|
|
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
|
|
+ else
|
|
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
|
|
+
|
|
if (ether_addr_equal(e.ether, invalid_ether))
|
|
return -EINVAL;
|
|
|
|
diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
|
|
index f9d5a2a1e3d0f..4fe5f243d0a32 100644
|
|
--- a/net/netfilter/ipset/ip_set_hash_mac.c
|
|
+++ b/net/netfilter/ipset/ip_set_hash_mac.c
|
|
@@ -81,15 +81,15 @@ hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb,
|
|
struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
|
|
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
|
|
|
|
- /* MAC can be src only */
|
|
- if (!(opt->flags & IPSET_DIM_ONE_SRC))
|
|
- return 0;
|
|
-
|
|
if (skb_mac_header(skb) < skb->head ||
|
|
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
|
|
return -EINVAL;
|
|
|
|
- ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
|
|
+ if (opt->flags & IPSET_DIM_ONE_SRC)
|
|
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
|
|
+ else
|
|
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
|
|
+
|
|
if (is_zero_ether_addr(e.ether))
|
|
return -EINVAL;
|
|
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
|
|
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
|
|
index 9cd180bda0920..7554c56b2e63c 100644
|
|
--- a/net/netfilter/nf_conncount.c
|
|
+++ b/net/netfilter/nf_conncount.c
|
|
@@ -33,12 +33,6 @@
|
|
|
|
#define CONNCOUNT_SLOTS 256U
|
|
|
|
-#ifdef CONFIG_LOCKDEP
|
|
-#define CONNCOUNT_LOCK_SLOTS 8U
|
|
-#else
|
|
-#define CONNCOUNT_LOCK_SLOTS 256U
|
|
-#endif
|
|
-
|
|
#define CONNCOUNT_GC_MAX_NODES 8
|
|
#define MAX_KEYLEN 5
|
|
|
|
@@ -49,8 +43,6 @@ struct nf_conncount_tuple {
|
|
struct nf_conntrack_zone zone;
|
|
int cpu;
|
|
u32 jiffies32;
|
|
- bool dead;
|
|
- struct rcu_head rcu_head;
|
|
};
|
|
|
|
struct nf_conncount_rb {
|
|
@@ -60,7 +52,7 @@ struct nf_conncount_rb {
|
|
struct rcu_head rcu_head;
|
|
};
|
|
|
|
-static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
|
|
+static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;
|
|
|
|
struct nf_conncount_data {
|
|
unsigned int keylen;
|
|
@@ -89,79 +81,25 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
|
|
return memcmp(a, b, klen * sizeof(u32));
|
|
}
|
|
|
|
-enum nf_conncount_list_add
|
|
-nf_conncount_add(struct nf_conncount_list *list,
|
|
- const struct nf_conntrack_tuple *tuple,
|
|
- const struct nf_conntrack_zone *zone)
|
|
-{
|
|
- struct nf_conncount_tuple *conn;
|
|
-
|
|
- if (WARN_ON_ONCE(list->count > INT_MAX))
|
|
- return NF_CONNCOUNT_ERR;
|
|
-
|
|
- conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
|
|
- if (conn == NULL)
|
|
- return NF_CONNCOUNT_ERR;
|
|
-
|
|
- conn->tuple = *tuple;
|
|
- conn->zone = *zone;
|
|
- conn->cpu = raw_smp_processor_id();
|
|
- conn->jiffies32 = (u32)jiffies;
|
|
- conn->dead = false;
|
|
- spin_lock_bh(&list->list_lock);
|
|
- if (list->dead == true) {
|
|
- kmem_cache_free(conncount_conn_cachep, conn);
|
|
- spin_unlock_bh(&list->list_lock);
|
|
- return NF_CONNCOUNT_SKIP;
|
|
- }
|
|
- list_add_tail(&conn->node, &list->head);
|
|
- list->count++;
|
|
- spin_unlock_bh(&list->list_lock);
|
|
- return NF_CONNCOUNT_ADDED;
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(nf_conncount_add);
|
|
-
|
|
-static void __conn_free(struct rcu_head *h)
|
|
-{
|
|
- struct nf_conncount_tuple *conn;
|
|
-
|
|
- conn = container_of(h, struct nf_conncount_tuple, rcu_head);
|
|
- kmem_cache_free(conncount_conn_cachep, conn);
|
|
-}
|
|
-
|
|
-static bool conn_free(struct nf_conncount_list *list,
|
|
+static void conn_free(struct nf_conncount_list *list,
|
|
struct nf_conncount_tuple *conn)
|
|
{
|
|
- bool free_entry = false;
|
|
-
|
|
- spin_lock_bh(&list->list_lock);
|
|
-
|
|
- if (conn->dead) {
|
|
- spin_unlock_bh(&list->list_lock);
|
|
- return free_entry;
|
|
- }
|
|
+ lockdep_assert_held(&list->list_lock);
|
|
|
|
list->count--;
|
|
- conn->dead = true;
|
|
- list_del_rcu(&conn->node);
|
|
- if (list->count == 0) {
|
|
- list->dead = true;
|
|
- free_entry = true;
|
|
- }
|
|
+ list_del(&conn->node);
|
|
|
|
- spin_unlock_bh(&list->list_lock);
|
|
- call_rcu(&conn->rcu_head, __conn_free);
|
|
- return free_entry;
|
|
+ kmem_cache_free(conncount_conn_cachep, conn);
|
|
}
|
|
|
|
static const struct nf_conntrack_tuple_hash *
|
|
find_or_evict(struct net *net, struct nf_conncount_list *list,
|
|
- struct nf_conncount_tuple *conn, bool *free_entry)
|
|
+ struct nf_conncount_tuple *conn)
|
|
{
|
|
const struct nf_conntrack_tuple_hash *found;
|
|
unsigned long a, b;
|
|
int cpu = raw_smp_processor_id();
|
|
- __s32 age;
|
|
+ u32 age;
|
|
|
|
found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
|
|
if (found)
|
|
@@ -176,52 +114,45 @@ find_or_evict(struct net *net, struct nf_conncount_list *list,
|
|
*/
|
|
age = a - b;
|
|
if (conn->cpu == cpu || age >= 2) {
|
|
- *free_entry = conn_free(list, conn);
|
|
+ conn_free(list, conn);
|
|
return ERR_PTR(-ENOENT);
|
|
}
|
|
|
|
return ERR_PTR(-EAGAIN);
|
|
}
|
|
|
|
-void nf_conncount_lookup(struct net *net,
|
|
- struct nf_conncount_list *list,
|
|
- const struct nf_conntrack_tuple *tuple,
|
|
- const struct nf_conntrack_zone *zone,
|
|
- bool *addit)
|
|
+static int __nf_conncount_add(struct net *net,
|
|
+ struct nf_conncount_list *list,
|
|
+ const struct nf_conntrack_tuple *tuple,
|
|
+ const struct nf_conntrack_zone *zone)
|
|
{
|
|
const struct nf_conntrack_tuple_hash *found;
|
|
struct nf_conncount_tuple *conn, *conn_n;
|
|
struct nf_conn *found_ct;
|
|
unsigned int collect = 0;
|
|
- bool free_entry = false;
|
|
-
|
|
- /* best effort only */
|
|
- *addit = tuple ? true : false;
|
|
|
|
/* check the saved connections */
|
|
list_for_each_entry_safe(conn, conn_n, &list->head, node) {
|
|
if (collect > CONNCOUNT_GC_MAX_NODES)
|
|
break;
|
|
|
|
- found = find_or_evict(net, list, conn, &free_entry);
|
|
+ found = find_or_evict(net, list, conn);
|
|
if (IS_ERR(found)) {
|
|
/* Not found, but might be about to be confirmed */
|
|
if (PTR_ERR(found) == -EAGAIN) {
|
|
- if (!tuple)
|
|
- continue;
|
|
-
|
|
if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
|
|
nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
|
|
nf_ct_zone_id(zone, zone->dir))
|
|
- *addit = false;
|
|
- } else if (PTR_ERR(found) == -ENOENT)
|
|
+ return 0; /* already exists */
|
|
+ } else {
|
|
collect++;
|
|
+ }
|
|
continue;
|
|
}
|
|
|
|
found_ct = nf_ct_tuplehash_to_ctrack(found);
|
|
|
|
- if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
|
|
+ if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
|
|
nf_ct_zone_equal(found_ct, zone, zone->dir)) {
|
|
/*
|
|
* We should not see tuples twice unless someone hooks
|
|
@@ -229,7 +160,8 @@ void nf_conncount_lookup(struct net *net,
|
|
*
|
|
* Attempt to avoid a re-add in this case.
|
|
*/
|
|
- *addit = false;
|
|
+ nf_ct_put(found_ct);
|
|
+ return 0;
|
|
} else if (already_closed(found_ct)) {
|
|
/*
|
|
* we do not care about connections which are
|
|
@@ -243,19 +175,48 @@ void nf_conncount_lookup(struct net *net,
|
|
|
|
nf_ct_put(found_ct);
|
|
}
|
|
+
|
|
+ if (WARN_ON_ONCE(list->count > INT_MAX))
|
|
+ return -EOVERFLOW;
|
|
+
|
|
+ conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
|
|
+ if (conn == NULL)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ conn->tuple = *tuple;
|
|
+ conn->zone = *zone;
|
|
+ conn->cpu = raw_smp_processor_id();
|
|
+ conn->jiffies32 = (u32)jiffies;
|
|
+ list_add_tail(&conn->node, &list->head);
|
|
+ list->count++;
|
|
+ return 0;
|
|
}
|
|
-EXPORT_SYMBOL_GPL(nf_conncount_lookup);
|
|
+
|
|
+int nf_conncount_add(struct net *net,
|
|
+ struct nf_conncount_list *list,
|
|
+ const struct nf_conntrack_tuple *tuple,
|
|
+ const struct nf_conntrack_zone *zone)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ /* check the saved connections */
|
|
+ spin_lock_bh(&list->list_lock);
|
|
+ ret = __nf_conncount_add(net, list, tuple, zone);
|
|
+ spin_unlock_bh(&list->list_lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(nf_conncount_add);
|
|
|
|
void nf_conncount_list_init(struct nf_conncount_list *list)
|
|
{
|
|
spin_lock_init(&list->list_lock);
|
|
INIT_LIST_HEAD(&list->head);
|
|
list->count = 0;
|
|
- list->dead = false;
|
|
}
|
|
EXPORT_SYMBOL_GPL(nf_conncount_list_init);
|
|
|
|
-/* Return true if the list is empty */
|
|
+/* Return true if the list is empty. Must be called with BH disabled. */
|
|
bool nf_conncount_gc_list(struct net *net,
|
|
struct nf_conncount_list *list)
|
|
{
|
|
@@ -263,17 +224,17 @@ bool nf_conncount_gc_list(struct net *net,
|
|
struct nf_conncount_tuple *conn, *conn_n;
|
|
struct nf_conn *found_ct;
|
|
unsigned int collected = 0;
|
|
- bool free_entry = false;
|
|
bool ret = false;
|
|
|
|
+ /* don't bother if other cpu is already doing GC */
|
|
+ if (!spin_trylock(&list->list_lock))
|
|
+ return false;
|
|
+
|
|
list_for_each_entry_safe(conn, conn_n, &list->head, node) {
|
|
- found = find_or_evict(net, list, conn, &free_entry);
|
|
+ found = find_or_evict(net, list, conn);
|
|
if (IS_ERR(found)) {
|
|
- if (PTR_ERR(found) == -ENOENT) {
|
|
- if (free_entry)
|
|
- return true;
|
|
+ if (PTR_ERR(found) == -ENOENT)
|
|
collected++;
|
|
- }
|
|
continue;
|
|
}
|
|
|
|
@@ -284,23 +245,19 @@ bool nf_conncount_gc_list(struct net *net,
|
|
* closed already -> ditch it
|
|
*/
|
|
nf_ct_put(found_ct);
|
|
- if (conn_free(list, conn))
|
|
- return true;
|
|
+ conn_free(list, conn);
|
|
collected++;
|
|
continue;
|
|
}
|
|
|
|
nf_ct_put(found_ct);
|
|
if (collected > CONNCOUNT_GC_MAX_NODES)
|
|
- return false;
|
|
+ break;
|
|
}
|
|
|
|
- spin_lock_bh(&list->list_lock);
|
|
- if (!list->count) {
|
|
- list->dead = true;
|
|
+ if (!list->count)
|
|
ret = true;
|
|
- }
|
|
- spin_unlock_bh(&list->list_lock);
|
|
+ spin_unlock(&list->list_lock);
|
|
|
|
return ret;
|
|
}
|
|
@@ -314,6 +271,7 @@ static void __tree_nodes_free(struct rcu_head *h)
|
|
kmem_cache_free(conncount_rb_cachep, rbconn);
|
|
}
|
|
|
|
+/* caller must hold tree nf_conncount_locks[] lock */
|
|
static void tree_nodes_free(struct rb_root *root,
|
|
struct nf_conncount_rb *gc_nodes[],
|
|
unsigned int gc_count)
|
|
@@ -323,8 +281,10 @@ static void tree_nodes_free(struct rb_root *root,
|
|
while (gc_count) {
|
|
rbconn = gc_nodes[--gc_count];
|
|
spin_lock(&rbconn->list.list_lock);
|
|
- rb_erase(&rbconn->node, root);
|
|
- call_rcu(&rbconn->rcu_head, __tree_nodes_free);
|
|
+ if (!rbconn->list.count) {
|
|
+ rb_erase(&rbconn->node, root);
|
|
+ call_rcu(&rbconn->rcu_head, __tree_nodes_free);
|
|
+ }
|
|
spin_unlock(&rbconn->list.list_lock);
|
|
}
|
|
}
|
|
@@ -341,20 +301,19 @@ insert_tree(struct net *net,
|
|
struct rb_root *root,
|
|
unsigned int hash,
|
|
const u32 *key,
|
|
- u8 keylen,
|
|
const struct nf_conntrack_tuple *tuple,
|
|
const struct nf_conntrack_zone *zone)
|
|
{
|
|
- enum nf_conncount_list_add ret;
|
|
struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
|
|
struct rb_node **rbnode, *parent;
|
|
struct nf_conncount_rb *rbconn;
|
|
struct nf_conncount_tuple *conn;
|
|
unsigned int count = 0, gc_count = 0;
|
|
- bool node_found = false;
|
|
-
|
|
- spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
|
|
+ u8 keylen = data->keylen;
|
|
+ bool do_gc = true;
|
|
|
|
+ spin_lock_bh(&nf_conncount_locks[hash]);
|
|
+restart:
|
|
parent = NULL;
|
|
rbnode = &(root->rb_node);
|
|
while (*rbnode) {
|
|
@@ -368,45 +327,32 @@ insert_tree(struct net *net,
|
|
} else if (diff > 0) {
|
|
rbnode = &((*rbnode)->rb_right);
|
|
} else {
|
|
- /* unlikely: other cpu added node already */
|
|
- node_found = true;
|
|
- ret = nf_conncount_add(&rbconn->list, tuple, zone);
|
|
- if (ret == NF_CONNCOUNT_ERR) {
|
|
+ int ret;
|
|
+
|
|
+ ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
|
|
+ if (ret)
|
|
count = 0; /* hotdrop */
|
|
- } else if (ret == NF_CONNCOUNT_ADDED) {
|
|
+ else
|
|
count = rbconn->list.count;
|
|
- } else {
|
|
- /* NF_CONNCOUNT_SKIP, rbconn is already
|
|
- * reclaimed by gc, insert a new tree node
|
|
- */
|
|
- node_found = false;
|
|
- }
|
|
- break;
|
|
+ tree_nodes_free(root, gc_nodes, gc_count);
|
|
+ goto out_unlock;
|
|
}
|
|
|
|
if (gc_count >= ARRAY_SIZE(gc_nodes))
|
|
continue;
|
|
|
|
- if (nf_conncount_gc_list(net, &rbconn->list))
|
|
+ if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
|
|
gc_nodes[gc_count++] = rbconn;
|
|
}
|
|
|
|
if (gc_count) {
|
|
tree_nodes_free(root, gc_nodes, gc_count);
|
|
- /* tree_node_free before new allocation permits
|
|
- * allocator to re-use newly free'd object.
|
|
- *
|
|
- * This is a rare event; in most cases we will find
|
|
- * existing node to re-use. (or gc_count is 0).
|
|
- */
|
|
-
|
|
- if (gc_count >= ARRAY_SIZE(gc_nodes))
|
|
- schedule_gc_worker(data, hash);
|
|
+ schedule_gc_worker(data, hash);
|
|
+ gc_count = 0;
|
|
+ do_gc = false;
|
|
+ goto restart;
|
|
}
|
|
|
|
- if (node_found)
|
|
- goto out_unlock;
|
|
-
|
|
/* expected case: match, insert new node */
|
|
rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
|
|
if (rbconn == NULL)
|
|
@@ -430,7 +376,7 @@ insert_tree(struct net *net,
|
|
rb_link_node_rcu(&rbconn->node, parent, rbnode);
|
|
rb_insert_color(&rbconn->node, root);
|
|
out_unlock:
|
|
- spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
|
|
+ spin_unlock_bh(&nf_conncount_locks[hash]);
|
|
return count;
|
|
}
|
|
|
|
@@ -441,7 +387,6 @@ count_tree(struct net *net,
|
|
const struct nf_conntrack_tuple *tuple,
|
|
const struct nf_conntrack_zone *zone)
|
|
{
|
|
- enum nf_conncount_list_add ret;
|
|
struct rb_root *root;
|
|
struct rb_node *parent;
|
|
struct nf_conncount_rb *rbconn;
|
|
@@ -454,7 +399,6 @@ count_tree(struct net *net,
|
|
parent = rcu_dereference_raw(root->rb_node);
|
|
while (parent) {
|
|
int diff;
|
|
- bool addit;
|
|
|
|
rbconn = rb_entry(parent, struct nf_conncount_rb, node);
|
|
|
|
@@ -464,31 +408,36 @@ count_tree(struct net *net,
|
|
} else if (diff > 0) {
|
|
parent = rcu_dereference_raw(parent->rb_right);
|
|
} else {
|
|
- /* same source network -> be counted! */
|
|
- nf_conncount_lookup(net, &rbconn->list, tuple, zone,
|
|
- &addit);
|
|
+ int ret;
|
|
|
|
- if (!addit)
|
|
+ if (!tuple) {
|
|
+ nf_conncount_gc_list(net, &rbconn->list);
|
|
return rbconn->list.count;
|
|
+ }
|
|
|
|
- ret = nf_conncount_add(&rbconn->list, tuple, zone);
|
|
- if (ret == NF_CONNCOUNT_ERR) {
|
|
- return 0; /* hotdrop */
|
|
- } else if (ret == NF_CONNCOUNT_ADDED) {
|
|
- return rbconn->list.count;
|
|
- } else {
|
|
- /* NF_CONNCOUNT_SKIP, rbconn is already
|
|
- * reclaimed by gc, insert a new tree node
|
|
- */
|
|
+ spin_lock_bh(&rbconn->list.list_lock);
|
|
+ /* Node might be about to be free'd.
|
|
+ * We need to defer to insert_tree() in this case.
|
|
+ */
|
|
+ if (rbconn->list.count == 0) {
|
|
+ spin_unlock_bh(&rbconn->list.list_lock);
|
|
break;
|
|
}
|
|
+
|
|
+ /* same source network -> be counted! */
|
|
+ ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
|
|
+ spin_unlock_bh(&rbconn->list.list_lock);
|
|
+ if (ret)
|
|
+ return 0; /* hotdrop */
|
|
+ else
|
|
+ return rbconn->list.count;
|
|
}
|
|
}
|
|
|
|
if (!tuple)
|
|
return 0;
|
|
|
|
- return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
|
|
+ return insert_tree(net, data, root, hash, key, tuple, zone);
|
|
}
|
|
|
|
static void tree_gc_worker(struct work_struct *work)
|
|
@@ -499,27 +448,47 @@ static void tree_gc_worker(struct work_struct *work)
|
|
struct rb_node *node;
|
|
unsigned int tree, next_tree, gc_count = 0;
|
|
|
|
- tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
|
|
+ tree = data->gc_tree % CONNCOUNT_SLOTS;
|
|
root = &data->root[tree];
|
|
|
|
+ local_bh_disable();
|
|
rcu_read_lock();
|
|
for (node = rb_first(root); node != NULL; node = rb_next(node)) {
|
|
rbconn = rb_entry(node, struct nf_conncount_rb, node);
|
|
if (nf_conncount_gc_list(data->net, &rbconn->list))
|
|
- gc_nodes[gc_count++] = rbconn;
|
|
+ gc_count++;
|
|
}
|
|
rcu_read_unlock();
|
|
+ local_bh_enable();
|
|
+
|
|
+ cond_resched();
|
|
|
|
spin_lock_bh(&nf_conncount_locks[tree]);
|
|
+ if (gc_count < ARRAY_SIZE(gc_nodes))
|
|
+ goto next; /* do not bother */
|
|
|
|
- if (gc_count) {
|
|
- tree_nodes_free(root, gc_nodes, gc_count);
|
|
+ gc_count = 0;
|
|
+ node = rb_first(root);
|
|
+ while (node != NULL) {
|
|
+ rbconn = rb_entry(node, struct nf_conncount_rb, node);
|
|
+ node = rb_next(node);
|
|
+
|
|
+ if (rbconn->list.count > 0)
|
|
+ continue;
|
|
+
|
|
+ gc_nodes[gc_count++] = rbconn;
|
|
+ if (gc_count >= ARRAY_SIZE(gc_nodes)) {
|
|
+ tree_nodes_free(root, gc_nodes, gc_count);
|
|
+ gc_count = 0;
|
|
+ }
|
|
}
|
|
|
|
+ tree_nodes_free(root, gc_nodes, gc_count);
|
|
+next:
|
|
clear_bit(tree, data->pending_trees);
|
|
|
|
next_tree = (tree + 1) % CONNCOUNT_SLOTS;
|
|
- next_tree = find_next_bit(data->pending_trees, next_tree, CONNCOUNT_SLOTS);
|
|
+ next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);
|
|
|
|
if (next_tree < CONNCOUNT_SLOTS) {
|
|
data->gc_tree = next_tree;
|
|
@@ -621,10 +590,7 @@ static int __init nf_conncount_modinit(void)
|
|
{
|
|
int i;
|
|
|
|
- BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
|
|
- BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
|
|
-
|
|
- for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
|
|
+ for (i = 0; i < CONNCOUNT_SLOTS; ++i)
|
|
spin_lock_init(&nf_conncount_locks[i]);
|
|
|
|
conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
|
|
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
|
|
index b7a4816add765..cc91b4d6aa22f 100644
|
|
--- a/net/netfilter/nf_flow_table_core.c
|
|
+++ b/net/netfilter/nf_flow_table_core.c
|
|
@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
|
|
{
|
|
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
|
|
struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
|
|
+ struct dst_entry *other_dst = route->tuple[!dir].dst;
|
|
struct dst_entry *dst = route->tuple[dir].dst;
|
|
|
|
ft->dir = dir;
|
|
@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
|
|
ft->src_port = ctt->src.u.tcp.port;
|
|
ft->dst_port = ctt->dst.u.tcp.port;
|
|
|
|
- ft->iifidx = route->tuple[dir].ifindex;
|
|
- ft->oifidx = route->tuple[!dir].ifindex;
|
|
+ ft->iifidx = other_dst->dev->ifindex;
|
|
+ ft->oifidx = dst->dev->ifindex;
|
|
ft->dst_cache = dst;
|
|
}
|
|
|
|
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
|
|
index 6e548d7c9f67b..5114a0d2a41eb 100644
|
|
--- a/net/netfilter/nf_tables_api.c
|
|
+++ b/net/netfilter/nf_tables_api.c
|
|
@@ -307,6 +307,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
|
|
int err;
|
|
|
|
list_for_each_entry(rule, &ctx->chain->rules, list) {
|
|
+ if (!nft_is_active_next(ctx->net, rule))
|
|
+ continue;
|
|
+
|
|
err = nft_delrule(ctx, rule);
|
|
if (err < 0)
|
|
return err;
|
|
@@ -4474,6 +4477,8 @@ err6:
|
|
err5:
|
|
kfree(trans);
|
|
err4:
|
|
+ if (obj)
|
|
+ obj->use--;
|
|
kfree(elem.priv);
|
|
err3:
|
|
if (nla[NFTA_SET_ELEM_DATA] != NULL)
|
|
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
|
|
index 6f41dd74729d9..1f1d90c1716b5 100644
|
|
--- a/net/netfilter/nfnetlink_osf.c
|
|
+++ b/net/netfilter/nfnetlink_osf.c
|
|
@@ -66,6 +66,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
|
|
int ttl_check,
|
|
struct nf_osf_hdr_ctx *ctx)
|
|
{
|
|
+ const __u8 *optpinit = ctx->optp;
|
|
unsigned int check_WSS = 0;
|
|
int fmatch = FMATCH_WRONG;
|
|
int foptsize, optnum;
|
|
@@ -155,6 +156,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
|
|
}
|
|
}
|
|
|
|
+ if (fmatch != FMATCH_OK)
|
|
+ ctx->optp = optpinit;
|
|
+
|
|
return fmatch == FMATCH_OK;
|
|
}
|
|
|
|
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
|
|
index 7334e0b80a5ef..c90a4640723f5 100644
|
|
--- a/net/netfilter/nft_compat.c
|
|
+++ b/net/netfilter/nft_compat.c
|
|
@@ -282,6 +282,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
|
|
{
|
|
struct xt_target *target = expr->ops->data;
|
|
void *info = nft_expr_priv(expr);
|
|
+ struct module *me = target->me;
|
|
struct xt_tgdtor_param par;
|
|
|
|
par.net = ctx->net;
|
|
@@ -292,7 +293,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
|
|
par.target->destroy(&par);
|
|
|
|
if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
|
|
- module_put(target->me);
|
|
+ module_put(me);
|
|
}
|
|
|
|
static int nft_extension_dump_info(struct sk_buff *skb, int attr,
|
|
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
|
|
index b90d96ba4a129..af1497ab94642 100644
|
|
--- a/net/netfilter/nft_connlimit.c
|
|
+++ b/net/netfilter/nft_connlimit.c
|
|
@@ -30,7 +30,6 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
|
|
enum ip_conntrack_info ctinfo;
|
|
const struct nf_conn *ct;
|
|
unsigned int count;
|
|
- bool addit;
|
|
|
|
tuple_ptr = &tuple;
|
|
|
|
@@ -44,19 +43,12 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
|
|
return;
|
|
}
|
|
|
|
- nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone,
|
|
- &addit);
|
|
- count = priv->list.count;
|
|
-
|
|
- if (!addit)
|
|
- goto out;
|
|
-
|
|
- if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) {
|
|
+ if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
|
|
regs->verdict.code = NF_DROP;
|
|
return;
|
|
}
|
|
- count++;
|
|
-out:
|
|
+
|
|
+ count = priv->list.count;
|
|
|
|
if ((count > priv->limit) ^ priv->invert) {
|
|
regs->verdict.code = NFT_BREAK;
|
|
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
|
|
index 974525eb92df7..6e6b9adf7d387 100644
|
|
--- a/net/netfilter/nft_flow_offload.c
|
|
+++ b/net/netfilter/nft_flow_offload.c
|
|
@@ -12,6 +12,7 @@
|
|
#include <net/netfilter/nf_conntrack_core.h>
|
|
#include <linux/netfilter/nf_conntrack_common.h>
|
|
#include <net/netfilter/nf_flow_table.h>
|
|
+#include <net/netfilter/nf_conntrack_helper.h>
|
|
|
|
struct nft_flow_offload {
|
|
struct nft_flowtable *flowtable;
|
|
@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
|
|
memset(&fl, 0, sizeof(fl));
|
|
switch (nft_pf(pkt)) {
|
|
case NFPROTO_IPV4:
|
|
- fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
|
|
+ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
|
|
+ fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
|
|
break;
|
|
case NFPROTO_IPV6:
|
|
- fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
|
|
+ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
|
|
+ fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
|
|
break;
|
|
}
|
|
|
|
@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
|
|
return -ENOENT;
|
|
|
|
route->tuple[dir].dst = this_dst;
|
|
- route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
|
|
route->tuple[!dir].dst = other_dst;
|
|
- route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
|
|
|
|
return 0;
|
|
}
|
|
@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
|
|
{
|
|
struct nft_flow_offload *priv = nft_expr_priv(expr);
|
|
struct nf_flowtable *flowtable = &priv->flowtable->data;
|
|
+ const struct nf_conn_help *help;
|
|
enum ip_conntrack_info ctinfo;
|
|
struct nf_flow_route route;
|
|
struct flow_offload *flow;
|
|
@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
|
|
goto out;
|
|
}
|
|
|
|
- if (test_bit(IPS_HELPER_BIT, &ct->status))
|
|
+ help = nfct_help(ct);
|
|
+ if (help)
|
|
goto out;
|
|
|
|
if (ctinfo == IP_CT_NEW ||
|
|
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
|
|
index 03f37c4e64fe4..1d3144d199035 100644
|
|
--- a/net/netrom/af_netrom.c
|
|
+++ b/net/netrom/af_netrom.c
|
|
@@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax25_address *addr)
|
|
sk_for_each(s, &nr_list)
|
|
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
|
|
s->sk_state == TCP_LISTEN) {
|
|
- bh_lock_sock(s);
|
|
+ sock_hold(s);
|
|
goto found;
|
|
}
|
|
s = NULL;
|
|
@@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
|
|
struct nr_sock *nr = nr_sk(s);
|
|
|
|
if (nr->my_index == index && nr->my_id == id) {
|
|
- bh_lock_sock(s);
|
|
+ sock_hold(s);
|
|
goto found;
|
|
}
|
|
}
|
|
@@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
|
|
|
|
if (nr->your_index == index && nr->your_id == id &&
|
|
!ax25cmp(&nr->dest_addr, dest)) {
|
|
- bh_lock_sock(s);
|
|
+ sock_hold(s);
|
|
goto found;
|
|
}
|
|
}
|
|
@@ -224,7 +224,7 @@ static unsigned short nr_find_next_circuit(void)
|
|
if (i != 0 && j != 0) {
|
|
if ((sk=nr_find_socket(i, j)) == NULL)
|
|
break;
|
|
- bh_unlock_sock(sk);
|
|
+ sock_put(sk);
|
|
}
|
|
|
|
id++;
|
|
@@ -920,6 +920,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
|
|
}
|
|
|
|
if (sk != NULL) {
|
|
+ bh_lock_sock(sk);
|
|
skb_reset_transport_header(skb);
|
|
|
|
if (frametype == NR_CONNACK && skb->len == 22)
|
|
@@ -929,6 +930,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
|
|
|
|
ret = nr_process_rx_frame(sk, skb);
|
|
bh_unlock_sock(sk);
|
|
+ sock_put(sk);
|
|
return ret;
|
|
}
|
|
|
|
@@ -960,10 +962,12 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
|
|
(make = nr_make_new(sk)) == NULL) {
|
|
nr_transmit_refusal(skb, 0);
|
|
if (sk)
|
|
- bh_unlock_sock(sk);
|
|
+ sock_put(sk);
|
|
return 0;
|
|
}
|
|
|
|
+ bh_lock_sock(sk);
|
|
+
|
|
window = skb->data[20];
|
|
|
|
skb->sk = make;
|
|
@@ -1016,6 +1020,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
|
|
sk->sk_data_ready(sk);
|
|
|
|
bh_unlock_sock(sk);
|
|
+ sock_put(sk);
|
|
|
|
nr_insert_socket(make);
|
|
|
|
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
|
|
index cbd51ed5a2d7b..908e53ab47a47 100644
|
|
--- a/net/netrom/nr_timer.c
|
|
+++ b/net/netrom/nr_timer.c
|
|
@@ -52,21 +52,21 @@ void nr_start_t1timer(struct sock *sk)
|
|
{
|
|
struct nr_sock *nr = nr_sk(sk);
|
|
|
|
- mod_timer(&nr->t1timer, jiffies + nr->t1);
|
|
+ sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
|
|
}
|
|
|
|
void nr_start_t2timer(struct sock *sk)
|
|
{
|
|
struct nr_sock *nr = nr_sk(sk);
|
|
|
|
- mod_timer(&nr->t2timer, jiffies + nr->t2);
|
|
+ sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
|
|
}
|
|
|
|
void nr_start_t4timer(struct sock *sk)
|
|
{
|
|
struct nr_sock *nr = nr_sk(sk);
|
|
|
|
- mod_timer(&nr->t4timer, jiffies + nr->t4);
|
|
+ sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
|
|
}
|
|
|
|
void nr_start_idletimer(struct sock *sk)
|
|
@@ -74,37 +74,37 @@ void nr_start_idletimer(struct sock *sk)
|
|
struct nr_sock *nr = nr_sk(sk);
|
|
|
|
if (nr->idle > 0)
|
|
- mod_timer(&nr->idletimer, jiffies + nr->idle);
|
|
+ sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
|
|
}
|
|
|
|
void nr_start_heartbeat(struct sock *sk)
|
|
{
|
|
- mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
|
|
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
|
|
}
|
|
|
|
void nr_stop_t1timer(struct sock *sk)
|
|
{
|
|
- del_timer(&nr_sk(sk)->t1timer);
|
|
+ sk_stop_timer(sk, &nr_sk(sk)->t1timer);
|
|
}
|
|
|
|
void nr_stop_t2timer(struct sock *sk)
|
|
{
|
|
- del_timer(&nr_sk(sk)->t2timer);
|
|
+ sk_stop_timer(sk, &nr_sk(sk)->t2timer);
|
|
}
|
|
|
|
void nr_stop_t4timer(struct sock *sk)
|
|
{
|
|
- del_timer(&nr_sk(sk)->t4timer);
|
|
+ sk_stop_timer(sk, &nr_sk(sk)->t4timer);
|
|
}
|
|
|
|
void nr_stop_idletimer(struct sock *sk)
|
|
{
|
|
- del_timer(&nr_sk(sk)->idletimer);
|
|
+ sk_stop_timer(sk, &nr_sk(sk)->idletimer);
|
|
}
|
|
|
|
void nr_stop_heartbeat(struct sock *sk)
|
|
{
|
|
- del_timer(&sk->sk_timer);
|
|
+ sk_stop_timer(sk, &sk->sk_timer);
|
|
}
|
|
|
|
int nr_t1timer_running(struct sock *sk)
|
|
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
|
|
index 35966da847696..f920a347ee1c2 100644
|
|
--- a/net/openvswitch/flow.c
|
|
+++ b/net/openvswitch/flow.c
|
|
@@ -276,10 +276,12 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
|
|
|
|
nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
|
|
if (flags & IP6_FH_F_FRAG) {
|
|
- if (frag_off)
|
|
+ if (frag_off) {
|
|
key->ip.frag = OVS_FRAG_TYPE_LATER;
|
|
- else
|
|
- key->ip.frag = OVS_FRAG_TYPE_FIRST;
|
|
+ key->ip.proto = nexthdr;
|
|
+ return 0;
|
|
+ }
|
|
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
|
|
} else {
|
|
key->ip.frag = OVS_FRAG_TYPE_NONE;
|
|
}
|
|
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
|
|
index 865ecef681969..c7b6010b2c095 100644
|
|
--- a/net/openvswitch/flow_netlink.c
|
|
+++ b/net/openvswitch/flow_netlink.c
|
|
@@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
|
|
+ if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
|
|
attrs |= 1 << type;
|
|
a[type] = nla;
|
|
}
|
|
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
|
|
index 5dda263b4a0a1..1cd1d83a4be08 100644
|
|
--- a/net/packet/af_packet.c
|
|
+++ b/net/packet/af_packet.c
|
|
@@ -2625,10 +2625,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
|
|
sll_addr)))
|
|
goto out;
|
|
proto = saddr->sll_protocol;
|
|
- addr = saddr->sll_addr;
|
|
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
|
|
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
|
|
if (addr && dev && saddr->sll_halen < dev->addr_len)
|
|
- goto out;
|
|
+ goto out_put;
|
|
}
|
|
|
|
err = -ENXIO;
|
|
@@ -2825,10 +2825,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
|
|
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
|
|
goto out;
|
|
proto = saddr->sll_protocol;
|
|
- addr = saddr->sll_addr;
|
|
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
|
|
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
|
|
if (addr && dev && saddr->sll_halen < dev->addr_len)
|
|
- goto out;
|
|
+ goto out_unlock;
|
|
}
|
|
|
|
err = -ENXIO;
|
|
@@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
|
|
goto out_free;
|
|
} else if (reserve) {
|
|
skb_reserve(skb, -reserve);
|
|
- if (len < reserve)
|
|
+ if (len < reserve + sizeof(struct ipv6hdr) &&
|
|
+ dev->min_header_len != dev->hard_header_len)
|
|
skb_reset_network_header(skb);
|
|
}
|
|
|
|
@@ -4291,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
|
|
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
|
|
if (unlikely(rb->frames_per_block == 0))
|
|
goto out;
|
|
- if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
|
|
+ if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
|
|
goto out;
|
|
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
|
|
req->tp_frame_nr))
|
|
diff --git a/net/rds/bind.c b/net/rds/bind.c
|
|
index 762d2c6788a38..17c9d9f0c8483 100644
|
|
--- a/net/rds/bind.c
|
|
+++ b/net/rds/bind.c
|
|
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
|
|
__rds_create_bind_key(key, addr, port, scope_id);
|
|
rcu_read_lock();
|
|
rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
|
|
- if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
|
|
- rds_sock_addref(rs);
|
|
- else
|
|
+ if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
|
|
+ !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
|
|
rs = NULL;
|
|
+
|
|
rcu_read_unlock();
|
|
|
|
rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
|
|
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
|
|
index 77e9f85a2c92c..f2ff21d7df081 100644
|
|
--- a/net/rose/rose_route.c
|
|
+++ b/net/rose/rose_route.c
|
|
@@ -850,6 +850,7 @@ void rose_link_device_down(struct net_device *dev)
|
|
|
|
/*
|
|
* Route a frame to an appropriate AX.25 connection.
|
|
+ * A NULL ax25_cb indicates an internally generated frame.
|
|
*/
|
|
int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
|
|
{
|
|
@@ -867,6 +868,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
|
|
|
|
if (skb->len < ROSE_MIN_LEN)
|
|
return res;
|
|
+
|
|
+ if (!ax25)
|
|
+ return rose_loopback_queue(skb, NULL);
|
|
+
|
|
frametype = skb->data[2];
|
|
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
|
|
if (frametype == ROSE_CALL_REQUEST &&
|
|
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
|
|
index eaf19ebaa964e..3f7bb11f3290e 100644
|
|
--- a/net/rxrpc/recvmsg.c
|
|
+++ b/net/rxrpc/recvmsg.c
|
|
@@ -596,6 +596,7 @@ error_requeue_call:
|
|
}
|
|
error_no_call:
|
|
release_sock(&rx->sk);
|
|
+error_trace:
|
|
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
|
|
return ret;
|
|
|
|
@@ -604,7 +605,7 @@ wait_interrupted:
|
|
wait_error:
|
|
finish_wait(sk_sleep(&rx->sk), &wait);
|
|
call = NULL;
|
|
- goto error_no_call;
|
|
+ goto error_trace;
|
|
}
|
|
|
|
/**
|
|
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
|
|
index 4cca8f2746621..904730b8ce8f2 100644
|
|
--- a/net/sched/act_tunnel_key.c
|
|
+++ b/net/sched/act_tunnel_key.c
|
|
@@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
|
|
[TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
|
|
};
|
|
|
|
+static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
|
|
+{
|
|
+ if (!p)
|
|
+ return;
|
|
+ if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
|
|
+ dst_release(&p->tcft_enc_metadata->dst);
|
|
+ kfree_rcu(p, rcu);
|
|
+}
|
|
+
|
|
static int tunnel_key_init(struct net *net, struct nlattr *nla,
|
|
struct nlattr *est, struct tc_action **a,
|
|
int ovr, int bind, bool rtnl_held,
|
|
@@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
|
|
rcu_swap_protected(t->params, params_new,
|
|
lockdep_is_held(&t->tcf_lock));
|
|
spin_unlock_bh(&t->tcf_lock);
|
|
- if (params_new)
|
|
- kfree_rcu(params_new, rcu);
|
|
+ tunnel_key_release_params(params_new);
|
|
|
|
if (ret == ACT_P_CREATED)
|
|
tcf_idr_insert(tn, *a);
|
|
@@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a)
|
|
struct tcf_tunnel_key_params *params;
|
|
|
|
params = rcu_dereference_protected(t->params, 1);
|
|
- if (params) {
|
|
- if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
|
|
- dst_release(¶ms->tcft_enc_metadata->dst);
|
|
-
|
|
- kfree_rcu(params, rcu);
|
|
- }
|
|
+ tunnel_key_release_params(params);
|
|
}
|
|
|
|
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
|
|
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
|
|
index f427a1e00e7ee..1c4436523aa58 100644
|
|
--- a/net/sched/cls_api.c
|
|
+++ b/net/sched/cls_api.c
|
|
@@ -1053,7 +1053,6 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
|
|
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
|
struct tcf_result *res, bool compat_mode)
|
|
{
|
|
- __be16 protocol = tc_skb_protocol(skb);
|
|
#ifdef CONFIG_NET_CLS_ACT
|
|
const int max_reclassify_loop = 4;
|
|
const struct tcf_proto *orig_tp = tp;
|
|
@@ -1063,6 +1062,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
|
reclassify:
|
|
#endif
|
|
for (; tp; tp = rcu_dereference_bh(tp->next)) {
|
|
+ __be16 protocol = tc_skb_protocol(skb);
|
|
int err;
|
|
|
|
if (tp->protocol != protocol &&
|
|
@@ -1095,7 +1095,6 @@ reset:
|
|
}
|
|
|
|
tp = first_tp;
|
|
- protocol = tc_skb_protocol(skb);
|
|
goto reclassify;
|
|
#endif
|
|
}
|
|
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
|
|
index 208d940464d7b..2f64e3538127d 100644
|
|
--- a/net/sched/cls_flower.c
|
|
+++ b/net/sched/cls_flower.c
|
|
@@ -1176,17 +1176,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
|
|
struct cls_fl_head *head = rtnl_dereference(tp->root);
|
|
struct cls_fl_filter *fold = *arg;
|
|
struct cls_fl_filter *fnew;
|
|
+ struct fl_flow_mask *mask;
|
|
struct nlattr **tb;
|
|
- struct fl_flow_mask mask = {};
|
|
int err;
|
|
|
|
if (!tca[TCA_OPTIONS])
|
|
return -EINVAL;
|
|
|
|
- tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
|
|
- if (!tb)
|
|
+ mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
|
|
+ if (!mask)
|
|
return -ENOBUFS;
|
|
|
|
+ tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
|
|
+ if (!tb) {
|
|
+ err = -ENOBUFS;
|
|
+ goto errout_mask_alloc;
|
|
+ }
|
|
+
|
|
err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
|
|
fl_policy, NULL);
|
|
if (err < 0)
|
|
@@ -1229,12 +1235,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
|
|
}
|
|
}
|
|
|
|
- err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
|
|
+ err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
|
|
tp->chain->tmplt_priv, extack);
|
|
if (err)
|
|
goto errout_idr;
|
|
|
|
- err = fl_check_assign_mask(head, fnew, fold, &mask);
|
|
+ err = fl_check_assign_mask(head, fnew, fold, mask);
|
|
if (err)
|
|
goto errout_idr;
|
|
|
|
@@ -1251,7 +1257,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
|
|
if (!tc_skip_hw(fnew->flags)) {
|
|
err = fl_hw_replace_filter(tp, fnew, extack);
|
|
if (err)
|
|
- goto errout_mask;
|
|
+ goto errout_mask_ht;
|
|
}
|
|
|
|
if (!tc_in_hw(fnew->flags))
|
|
@@ -1278,8 +1284,13 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
|
|
}
|
|
|
|
kfree(tb);
|
|
+ kfree(mask);
|
|
return 0;
|
|
|
|
+errout_mask_ht:
|
|
+ rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
|
|
+ fnew->mask->filter_ht_params);
|
|
+
|
|
errout_mask:
|
|
fl_mask_put(head, fnew->mask, false);
|
|
|
|
@@ -1291,6 +1302,8 @@ errout:
|
|
kfree(fnew);
|
|
errout_tb:
|
|
kfree(tb);
|
|
+errout_mask_alloc:
|
|
+ kfree(mask);
|
|
return err;
|
|
}
|
|
|
|
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
|
|
index 9ccc93f257db0..38bb882bb9587 100644
|
|
--- a/net/sched/cls_tcindex.c
|
|
+++ b/net/sched/cls_tcindex.c
|
|
@@ -48,7 +48,7 @@ struct tcindex_data {
|
|
u32 hash; /* hash table size; 0 if undefined */
|
|
u32 alloc_hash; /* allocated size */
|
|
u32 fall_through; /* 0: only classify if explicit match */
|
|
- struct rcu_head rcu;
|
|
+ struct rcu_work rwork;
|
|
};
|
|
|
|
static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
|
|
@@ -221,17 +221,11 @@ found:
|
|
return 0;
|
|
}
|
|
|
|
-static int tcindex_destroy_element(struct tcf_proto *tp,
|
|
- void *arg, struct tcf_walker *walker)
|
|
-{
|
|
- bool last;
|
|
-
|
|
- return tcindex_delete(tp, arg, &last, NULL);
|
|
-}
|
|
-
|
|
-static void __tcindex_destroy(struct rcu_head *head)
|
|
+static void tcindex_destroy_work(struct work_struct *work)
|
|
{
|
|
- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
|
|
+ struct tcindex_data *p = container_of(to_rcu_work(work),
|
|
+ struct tcindex_data,
|
|
+ rwork);
|
|
|
|
kfree(p->perfect);
|
|
kfree(p->h);
|
|
@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
|
|
return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
|
|
}
|
|
|
|
-static void __tcindex_partial_destroy(struct rcu_head *head)
|
|
+static void tcindex_partial_destroy_work(struct work_struct *work)
|
|
{
|
|
- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
|
|
+ struct tcindex_data *p = container_of(to_rcu_work(work),
|
|
+ struct tcindex_data,
|
|
+ rwork);
|
|
|
|
kfree(p->perfect);
|
|
kfree(p);
|
|
@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
|
|
kfree(cp->perfect);
|
|
}
|
|
|
|
-static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
|
|
+static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
|
|
{
|
|
int i, err = 0;
|
|
|
|
@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
|
|
TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
|
|
if (err < 0)
|
|
goto errout;
|
|
+#ifdef CONFIG_NET_CLS_ACT
|
|
+ cp->perfect[i].exts.net = net;
|
|
+#endif
|
|
}
|
|
|
|
return 0;
|
|
@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
|
struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
|
|
{
|
|
struct tcindex_filter_result new_filter_result, *old_r = r;
|
|
- struct tcindex_filter_result cr;
|
|
struct tcindex_data *cp = NULL, *oldp;
|
|
struct tcindex_filter *f = NULL; /* make gcc behave */
|
|
+ struct tcf_result cr = {};
|
|
int err, balloc = 0;
|
|
struct tcf_exts e;
|
|
|
|
@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
|
if (p->perfect) {
|
|
int i;
|
|
|
|
- if (tcindex_alloc_perfect_hash(cp) < 0)
|
|
+ if (tcindex_alloc_perfect_hash(net, cp) < 0)
|
|
goto errout;
|
|
for (i = 0; i < cp->hash; i++)
|
|
cp->perfect[i].res = p->perfect[i].res;
|
|
@@ -346,13 +345,10 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
|
cp->h = p->h;
|
|
|
|
err = tcindex_filter_result_init(&new_filter_result);
|
|
- if (err < 0)
|
|
- goto errout1;
|
|
- err = tcindex_filter_result_init(&cr);
|
|
if (err < 0)
|
|
goto errout1;
|
|
if (old_r)
|
|
- cr.res = r->res;
|
|
+ cr = r->res;
|
|
|
|
if (tb[TCA_TCINDEX_HASH])
|
|
cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
|
|
@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
|
err = -ENOMEM;
|
|
if (!cp->perfect && !cp->h) {
|
|
if (valid_perfect_hash(cp)) {
|
|
- if (tcindex_alloc_perfect_hash(cp) < 0)
|
|
+ if (tcindex_alloc_perfect_hash(net, cp) < 0)
|
|
goto errout_alloc;
|
|
balloc = 1;
|
|
} else {
|
|
@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
|
}
|
|
|
|
if (tb[TCA_TCINDEX_CLASSID]) {
|
|
- cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
|
|
- tcf_bind_filter(tp, &cr.res, base);
|
|
+ cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
|
|
+ tcf_bind_filter(tp, &cr, base);
|
|
}
|
|
|
|
if (old_r && old_r != r) {
|
|
@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
|
}
|
|
|
|
oldp = p;
|
|
- r->res = cr.res;
|
|
+ r->res = cr;
|
|
tcf_exts_change(&r->exts, &e);
|
|
|
|
rcu_assign_pointer(tp->root, cp);
|
|
@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
|
|
; /* nothing */
|
|
|
|
rcu_assign_pointer(*fp, f);
|
|
+ } else {
|
|
+ tcf_exts_destroy(&new_filter_result.exts);
|
|
}
|
|
|
|
if (oldp)
|
|
- call_rcu(&oldp->rcu, __tcindex_partial_destroy);
|
|
+ tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
|
|
return 0;
|
|
|
|
errout_alloc:
|
|
@@ -487,7 +485,6 @@ errout_alloc:
|
|
else if (balloc == 2)
|
|
kfree(cp->h);
|
|
errout1:
|
|
- tcf_exts_destroy(&cr.exts);
|
|
tcf_exts_destroy(&new_filter_result.exts);
|
|
errout:
|
|
kfree(cp);
|
|
@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp,
|
|
struct netlink_ext_ack *extack)
|
|
{
|
|
struct tcindex_data *p = rtnl_dereference(tp->root);
|
|
- struct tcf_walker walker;
|
|
+ int i;
|
|
|
|
pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
|
|
- walker.count = 0;
|
|
- walker.skip = 0;
|
|
- walker.fn = tcindex_destroy_element;
|
|
- tcindex_walk(tp, &walker);
|
|
|
|
- call_rcu(&p->rcu, __tcindex_destroy);
|
|
+ if (p->perfect) {
|
|
+ for (i = 0; i < p->hash; i++) {
|
|
+ struct tcindex_filter_result *r = p->perfect + i;
|
|
+
|
|
+ tcf_unbind_filter(tp, &r->res);
|
|
+ if (tcf_exts_get_net(&r->exts))
|
|
+ tcf_queue_work(&r->rwork,
|
|
+ tcindex_destroy_rexts_work);
|
|
+ else
|
|
+ __tcindex_destroy_rexts(r);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (i = 0; p->h && i < p->hash; i++) {
|
|
+ struct tcindex_filter *f, *next;
|
|
+ bool last;
|
|
+
|
|
+ for (f = rtnl_dereference(p->h[i]); f; f = next) {
|
|
+ next = rtnl_dereference(f->next);
|
|
+ tcindex_delete(tp, &f->result, &last, NULL);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ tcf_queue_work(&p->rwork, tcindex_destroy_work);
|
|
}
|
|
|
|
|
|
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
|
|
index 078f01a8d582a..435847d98b51c 100644
|
|
--- a/net/sctp/diag.c
|
|
+++ b/net/sctp/diag.c
|
|
@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
|
|
+ nla_total_size(1) /* INET_DIAG_TOS */
|
|
+ nla_total_size(1) /* INET_DIAG_TCLASS */
|
|
+ nla_total_size(4) /* INET_DIAG_MARK */
|
|
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
|
|
+ nla_total_size(addrlen * asoc->peer.transport_count)
|
|
+ nla_total_size(addrlen * addrcnt)
|
|
+ nla_total_size(sizeof(struct inet_diag_meminfo))
|
|
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
|
|
index 7f0539db56047..4fede55b9010c 100644
|
|
--- a/net/sctp/ipv6.c
|
|
+++ b/net/sctp/ipv6.c
|
|
@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
|
|
|
|
switch (ev) {
|
|
case NETDEV_UP:
|
|
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
|
|
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
|
|
if (addr) {
|
|
addr->a.v6.sin6_family = AF_INET6;
|
|
- addr->a.v6.sin6_port = 0;
|
|
- addr->a.v6.sin6_flowinfo = 0;
|
|
addr->a.v6.sin6_addr = ifa->addr;
|
|
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
|
|
addr->valid = 1;
|
|
@@ -279,7 +277,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
|
|
|
|
if (saddr) {
|
|
fl6->saddr = saddr->v6.sin6_addr;
|
|
- fl6->fl6_sport = saddr->v6.sin6_port;
|
|
+ if (!fl6->fl6_sport)
|
|
+ fl6->fl6_sport = saddr->v6.sin6_port;
|
|
|
|
pr_debug("src=%pI6 - ", &fl6->saddr);
|
|
}
|
|
@@ -431,7 +430,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
|
|
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
|
|
if (addr) {
|
|
addr->a.v6.sin6_family = AF_INET6;
|
|
- addr->a.v6.sin6_port = 0;
|
|
addr->a.v6.sin6_addr = ifp->addr;
|
|
addr->a.v6.sin6_scope_id = dev->ifindex;
|
|
addr->valid = 1;
|
|
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
|
|
index 123e9f2dc2265..edfcf16e704c4 100644
|
|
--- a/net/sctp/offload.c
|
|
+++ b/net/sctp/offload.c
|
|
@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
|
|
{
|
|
skb->ip_summed = CHECKSUM_NONE;
|
|
skb->csum_not_inet = 0;
|
|
+ gso_reset_checksum(skb, ~0);
|
|
return sctp_compute_cksum(skb, skb_transport_offset(skb));
|
|
}
|
|
|
|
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
|
|
index 9b277bd36d1ad..8410ccc57c40b 100644
|
|
--- a/net/sctp/protocol.c
|
|
+++ b/net/sctp/protocol.c
|
|
@@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
|
|
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
|
|
if (addr) {
|
|
addr->a.v4.sin_family = AF_INET;
|
|
- addr->a.v4.sin_port = 0;
|
|
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
|
|
addr->valid = 1;
|
|
INIT_LIST_HEAD(&addr->list);
|
|
@@ -441,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
|
|
}
|
|
if (saddr) {
|
|
fl4->saddr = saddr->v4.sin_addr.s_addr;
|
|
- fl4->fl4_sport = saddr->v4.sin_port;
|
|
+ if (!fl4->fl4_sport)
|
|
+ fl4->fl4_sport = saddr->v4.sin_port;
|
|
}
|
|
|
|
pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
|
|
@@ -776,10 +776,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
|
|
|
|
switch (ev) {
|
|
case NETDEV_UP:
|
|
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
|
|
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
|
|
if (addr) {
|
|
addr->a.v4.sin_family = AF_INET;
|
|
- addr->a.v4.sin_port = 0;
|
|
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
|
|
addr->valid = 1;
|
|
spin_lock_bh(&net->sctp.local_addr_lock);
|
|
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
|
|
index f4ac6c592e139..d05c57664e36e 100644
|
|
--- a/net/sctp/sm_make_chunk.c
|
|
+++ b/net/sctp/sm_make_chunk.c
|
|
@@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
|
|
*
|
|
* [INIT ACK back to where the INIT came from.]
|
|
*/
|
|
- retval->transport = chunk->transport;
|
|
+ if (chunk->transport)
|
|
+ retval->transport =
|
|
+ sctp_assoc_lookup_paddr(asoc,
|
|
+ &chunk->transport->ipaddr);
|
|
|
|
retval->subh.init_hdr =
|
|
sctp_addto_chunk(retval, sizeof(initack), &initack);
|
|
@@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
|
|
*
|
|
* [COOKIE ACK back to where the COOKIE ECHO came from.]
|
|
*/
|
|
- if (retval && chunk)
|
|
- retval->transport = chunk->transport;
|
|
+ if (retval && chunk && chunk->transport)
|
|
+ retval->transport =
|
|
+ sctp_assoc_lookup_paddr(asoc,
|
|
+ &chunk->transport->ipaddr);
|
|
|
|
return retval;
|
|
}
|
|
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
|
|
index b8cebd5a87e5c..b7a534f6d7c20 100644
|
|
--- a/net/sctp/socket.c
|
|
+++ b/net/sctp/socket.c
|
|
@@ -2027,7 +2027,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
|
|
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
|
|
struct sctp_transport *transport = NULL;
|
|
struct sctp_sndrcvinfo _sinfo, *sinfo;
|
|
- struct sctp_association *asoc;
|
|
+ struct sctp_association *asoc, *tmp;
|
|
struct sctp_cmsgs cmsgs;
|
|
union sctp_addr *daddr;
|
|
bool new = false;
|
|
@@ -2053,7 +2053,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
|
|
|
|
/* SCTP_SENDALL process */
|
|
if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
|
|
- list_for_each_entry(asoc, &ep->asocs, asocs) {
|
|
+ list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
|
|
err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
|
|
msg_len);
|
|
if (err == 0)
|
|
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
|
|
index 3892e7630f3ad..2936ed17bf9ef 100644
|
|
--- a/net/sctp/stream.c
|
|
+++ b/net/sctp/stream.c
|
|
@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count)
|
|
}
|
|
}
|
|
|
|
+static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
|
|
+{
|
|
+ size_t index = 0;
|
|
+
|
|
+ while (count--) {
|
|
+ if (elem == flex_array_get(fa, index))
|
|
+ break;
|
|
+ index++;
|
|
+ }
|
|
+
|
|
+ return index;
|
|
+}
|
|
+
|
|
/* Migrates chunks from stream queues to new stream queues if needed,
|
|
* but not across associations. Also, removes those chunks to streams
|
|
* higher than the new max.
|
|
@@ -131,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
|
|
}
|
|
}
|
|
|
|
- for (i = outcnt; i < stream->outcnt; i++)
|
|
+ for (i = outcnt; i < stream->outcnt; i++) {
|
|
kfree(SCTP_SO(stream, i)->ext);
|
|
+ SCTP_SO(stream, i)->ext = NULL;
|
|
+ }
|
|
}
|
|
|
|
static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
|
|
@@ -147,6 +162,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
|
|
|
|
if (stream->out) {
|
|
fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
|
|
+ if (stream->out_curr) {
|
|
+ size_t index = fa_index(stream->out, stream->out_curr,
|
|
+ stream->outcnt);
|
|
+
|
|
+ BUG_ON(index == stream->outcnt);
|
|
+ stream->out_curr = flex_array_get(out, index);
|
|
+ }
|
|
fa_free(stream->out);
|
|
}
|
|
|
|
@@ -585,9 +607,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
|
|
struct sctp_strreset_outreq *outreq = param.v;
|
|
struct sctp_stream *stream = &asoc->stream;
|
|
__u32 result = SCTP_STRRESET_DENIED;
|
|
- __u16 i, nums, flags = 0;
|
|
__be16 *str_p = NULL;
|
|
__u32 request_seq;
|
|
+ __u16 i, nums;
|
|
|
|
request_seq = ntohl(outreq->request_seq);
|
|
|
|
@@ -615,6 +637,15 @@ struct sctp_chunk *sctp_process_strreset_outreq(
|
|
if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
|
|
goto out;
|
|
|
|
+ nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
|
|
+ str_p = outreq->list_of_streams;
|
|
+ for (i = 0; i < nums; i++) {
|
|
+ if (ntohs(str_p[i]) >= stream->incnt) {
|
|
+ result = SCTP_STRRESET_ERR_WRONG_SSN;
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
+
|
|
if (asoc->strreset_chunk) {
|
|
if (!sctp_chunk_lookup_strreset_param(
|
|
asoc, outreq->response_seq,
|
|
@@ -637,32 +668,19 @@ struct sctp_chunk *sctp_process_strreset_outreq(
|
|
sctp_chunk_put(asoc->strreset_chunk);
|
|
asoc->strreset_chunk = NULL;
|
|
}
|
|
-
|
|
- flags = SCTP_STREAM_RESET_INCOMING_SSN;
|
|
}
|
|
|
|
- nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
|
|
- if (nums) {
|
|
- str_p = outreq->list_of_streams;
|
|
- for (i = 0; i < nums; i++) {
|
|
- if (ntohs(str_p[i]) >= stream->incnt) {
|
|
- result = SCTP_STRRESET_ERR_WRONG_SSN;
|
|
- goto out;
|
|
- }
|
|
- }
|
|
-
|
|
+ if (nums)
|
|
for (i = 0; i < nums; i++)
|
|
SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
|
|
- } else {
|
|
+ else
|
|
for (i = 0; i < stream->incnt; i++)
|
|
SCTP_SI(stream, i)->mid = 0;
|
|
- }
|
|
|
|
result = SCTP_STRRESET_PERFORMED;
|
|
|
|
*evp = sctp_ulpevent_make_stream_reset_event(asoc,
|
|
- flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p,
|
|
- GFP_ATOMIC);
|
|
+ SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
|
|
|
|
out:
|
|
sctp_update_strreset_result(asoc, result);
|
|
@@ -738,9 +756,6 @@ struct sctp_chunk *sctp_process_strreset_inreq(
|
|
|
|
result = SCTP_STRRESET_PERFORMED;
|
|
|
|
- *evp = sctp_ulpevent_make_stream_reset_event(asoc,
|
|
- SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
|
|
-
|
|
out:
|
|
sctp_update_strreset_result(asoc, result);
|
|
err:
|
|
@@ -873,6 +888,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
|
|
if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
|
|
goto out;
|
|
|
|
+ in = ntohs(addstrm->number_of_streams);
|
|
+ incnt = stream->incnt + in;
|
|
+ if (!in || incnt > SCTP_MAX_STREAM)
|
|
+ goto out;
|
|
+
|
|
+ if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
|
|
+ goto out;
|
|
+
|
|
if (asoc->strreset_chunk) {
|
|
if (!sctp_chunk_lookup_strreset_param(
|
|
asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
|
|
@@ -896,14 +919,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
|
|
}
|
|
}
|
|
|
|
- in = ntohs(addstrm->number_of_streams);
|
|
- incnt = stream->incnt + in;
|
|
- if (!in || incnt > SCTP_MAX_STREAM)
|
|
- goto out;
|
|
-
|
|
- if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
|
|
- goto out;
|
|
-
|
|
stream->incnt = incnt;
|
|
|
|
result = SCTP_STRRESET_PERFORMED;
|
|
@@ -973,9 +988,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
|
|
|
|
result = SCTP_STRRESET_PERFORMED;
|
|
|
|
- *evp = sctp_ulpevent_make_stream_change_event(asoc,
|
|
- 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
|
|
-
|
|
out:
|
|
sctp_update_strreset_result(asoc, result);
|
|
err:
|
|
@@ -1036,10 +1048,10 @@ struct sctp_chunk *sctp_process_strreset_resp(
|
|
sout->mid_uo = 0;
|
|
}
|
|
}
|
|
-
|
|
- flags = SCTP_STREAM_RESET_OUTGOING_SSN;
|
|
}
|
|
|
|
+ flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
|
|
+
|
|
for (i = 0; i < stream->outcnt; i++)
|
|
SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
|
|
|
|
@@ -1058,6 +1070,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
|
|
nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
|
|
sizeof(__u16);
|
|
|
|
+ flags |= SCTP_STREAM_RESET_INCOMING_SSN;
|
|
+
|
|
*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
|
|
nums, str_p, GFP_ATOMIC);
|
|
} else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
|
|
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
|
|
index 82cb0e5634bc7..5d2214183601d 100644
|
|
--- a/net/smc/af_smc.c
|
|
+++ b/net/smc/af_smc.c
|
|
@@ -146,6 +146,9 @@ static int smc_release(struct socket *sock)
|
|
sock_set_flag(sk, SOCK_DEAD);
|
|
sk->sk_shutdown |= SHUTDOWN_MASK;
|
|
}
|
|
+
|
|
+ sk->sk_prot->unhash(sk);
|
|
+
|
|
if (smc->clcsock) {
|
|
if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
|
|
/* wake up clcsock accept */
|
|
@@ -170,7 +173,6 @@ static int smc_release(struct socket *sock)
|
|
smc_conn_free(&smc->conn);
|
|
release_sock(sk);
|
|
|
|
- sk->sk_prot->unhash(sk);
|
|
sock_put(sk); /* final sock_put */
|
|
out:
|
|
return rc;
|
|
diff --git a/net/socket.c b/net/socket.c
|
|
index 334fcc617ef27..93a45f15ee40d 100644
|
|
--- a/net/socket.c
|
|
+++ b/net/socket.c
|
|
@@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
|
|
EXPORT_SYMBOL(dlci_ioctl_set);
|
|
|
|
static long sock_do_ioctl(struct net *net, struct socket *sock,
|
|
- unsigned int cmd, unsigned long arg,
|
|
- unsigned int ifreq_size)
|
|
+ unsigned int cmd, unsigned long arg)
|
|
{
|
|
int err;
|
|
void __user *argp = (void __user *)arg;
|
|
@@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
|
|
} else {
|
|
struct ifreq ifr;
|
|
bool need_copyout;
|
|
- if (copy_from_user(&ifr, argp, ifreq_size))
|
|
+ if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
|
|
return -EFAULT;
|
|
err = dev_ioctl(net, cmd, &ifr, &need_copyout);
|
|
if (!err && need_copyout)
|
|
- if (copy_to_user(argp, &ifr, ifreq_size))
|
|
+ if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
|
|
return -EFAULT;
|
|
}
|
|
return err;
|
|
@@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
|
|
err = open_related_ns(&net->ns, get_net_ns);
|
|
break;
|
|
default:
|
|
- err = sock_do_ioctl(net, sock, cmd, arg,
|
|
- sizeof(struct ifreq));
|
|
+ err = sock_do_ioctl(net, sock, cmd, arg);
|
|
break;
|
|
}
|
|
return err;
|
|
@@ -2750,8 +2748,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
|
|
int err;
|
|
|
|
set_fs(KERNEL_DS);
|
|
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
|
|
- sizeof(struct compat_ifreq));
|
|
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
|
|
set_fs(old_fs);
|
|
if (!err)
|
|
err = compat_put_timeval(&ktv, up);
|
|
@@ -2767,8 +2764,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
|
|
int err;
|
|
|
|
set_fs(KERNEL_DS);
|
|
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
|
|
- sizeof(struct compat_ifreq));
|
|
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
|
|
set_fs(old_fs);
|
|
if (!err)
|
|
err = compat_put_timespec(&kts, up);
|
|
@@ -2964,6 +2960,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
|
|
return dev_ioctl(net, cmd, &ifreq, NULL);
|
|
}
|
|
|
|
+static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
|
|
+ unsigned int cmd,
|
|
+ struct compat_ifreq __user *uifr32)
|
|
+{
|
|
+ struct ifreq __user *uifr;
|
|
+ int err;
|
|
+
|
|
+ /* Handle the fact that while struct ifreq has the same *layout* on
|
|
+ * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
|
|
+ * which are handled elsewhere, it still has different *size* due to
|
|
+ * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
|
|
+ * resulting in struct ifreq being 32 and 40 bytes respectively).
|
|
+ * As a result, if the struct happens to be at the end of a page and
|
|
+ * the next page isn't readable/writable, we get a fault. To prevent
|
|
+ * that, copy back and forth to the full size.
|
|
+ */
|
|
+
|
|
+ uifr = compat_alloc_user_space(sizeof(*uifr));
|
|
+ if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
|
|
+
|
|
+ if (!err) {
|
|
+ switch (cmd) {
|
|
+ case SIOCGIFFLAGS:
|
|
+ case SIOCGIFMETRIC:
|
|
+ case SIOCGIFMTU:
|
|
+ case SIOCGIFMEM:
|
|
+ case SIOCGIFHWADDR:
|
|
+ case SIOCGIFINDEX:
|
|
+ case SIOCGIFADDR:
|
|
+ case SIOCGIFBRDADDR:
|
|
+ case SIOCGIFDSTADDR:
|
|
+ case SIOCGIFNETMASK:
|
|
+ case SIOCGIFPFLAGS:
|
|
+ case SIOCGIFTXQLEN:
|
|
+ case SIOCGMIIPHY:
|
|
+ case SIOCGMIIREG:
|
|
+ case SIOCGIFNAME:
|
|
+ if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
|
|
+ err = -EFAULT;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ return err;
|
|
+}
|
|
+
|
|
static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
|
|
struct compat_ifreq __user *uifr32)
|
|
{
|
|
@@ -3079,8 +3123,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
|
|
}
|
|
|
|
set_fs(KERNEL_DS);
|
|
- ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
|
|
- sizeof(struct compat_ifreq));
|
|
+ ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
|
|
set_fs(old_fs);
|
|
|
|
out:
|
|
@@ -3180,21 +3223,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
|
|
case SIOCSIFTXQLEN:
|
|
case SIOCBRADDIF:
|
|
case SIOCBRDELIF:
|
|
+ case SIOCGIFNAME:
|
|
case SIOCSIFNAME:
|
|
case SIOCGMIIPHY:
|
|
case SIOCGMIIREG:
|
|
case SIOCSMIIREG:
|
|
- case SIOCSARP:
|
|
- case SIOCGARP:
|
|
- case SIOCDARP:
|
|
- case SIOCATMARK:
|
|
case SIOCBONDENSLAVE:
|
|
case SIOCBONDRELEASE:
|
|
case SIOCBONDSETHWADDR:
|
|
case SIOCBONDCHANGEACTIVE:
|
|
- case SIOCGIFNAME:
|
|
- return sock_do_ioctl(net, sock, cmd, arg,
|
|
- sizeof(struct compat_ifreq));
|
|
+ return compat_ifreq_ioctl(net, sock, cmd, argp);
|
|
+
|
|
+ case SIOCSARP:
|
|
+ case SIOCGARP:
|
|
+ case SIOCDARP:
|
|
+ case SIOCATMARK:
|
|
+ return sock_do_ioctl(net, sock, cmd, arg);
|
|
}
|
|
|
|
return -ENOIOCTLCMD;
|
|
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
|
|
index ba765473d1f06..efeee5586b2ac 100644
|
|
--- a/net/sunrpc/auth_gss/auth_gss.c
|
|
+++ b/net/sunrpc/auth_gss/auth_gss.c
|
|
@@ -1563,8 +1563,10 @@ gss_marshal(struct rpc_task *task, __be32 *p)
|
|
cred_len = p++;
|
|
|
|
spin_lock(&ctx->gc_seq_lock);
|
|
- req->rq_seqno = ctx->gc_seq++;
|
|
+ req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
|
|
spin_unlock(&ctx->gc_seq_lock);
|
|
+ if (req->rq_seqno == MAXSEQ)
|
|
+ goto out_expired;
|
|
|
|
*p++ = htonl((u32) RPC_GSS_VERSION);
|
|
*p++ = htonl((u32) ctx->gc_proc);
|
|
@@ -1586,14 +1588,18 @@ gss_marshal(struct rpc_task *task, __be32 *p)
|
|
mic.data = (u8 *)(p + 1);
|
|
maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
|
|
if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
|
|
- clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
|
+ goto out_expired;
|
|
} else if (maj_stat != 0) {
|
|
- printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
|
|
+ pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
|
|
+ task->tk_status = -EIO;
|
|
goto out_put_ctx;
|
|
}
|
|
p = xdr_encode_opaque(p, NULL, mic.len);
|
|
gss_put_ctx(ctx);
|
|
return p;
|
|
+out_expired:
|
|
+ clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
|
+ task->tk_status = -EKEYEXPIRED;
|
|
out_put_ctx:
|
|
gss_put_ctx(ctx);
|
|
return NULL;
|
|
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
|
|
index fb6656295204c..507105127095a 100644
|
|
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
|
|
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
|
|
@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
|
|
unsigned char *cksum, unsigned char *buf)
|
|
{
|
|
struct crypto_sync_skcipher *cipher;
|
|
- unsigned char plain[8];
|
|
+ unsigned char *plain;
|
|
s32 code;
|
|
|
|
dprintk("RPC: %s:\n", __func__);
|
|
@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
|
|
if (IS_ERR(cipher))
|
|
return PTR_ERR(cipher);
|
|
|
|
+ plain = kmalloc(8, GFP_NOFS);
|
|
+ if (!plain)
|
|
+ return -ENOMEM;
|
|
+
|
|
plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
|
|
plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
|
|
plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
|
|
@@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
|
|
|
|
code = krb5_encrypt(cipher, cksum, plain, buf, 8);
|
|
out:
|
|
+ kfree(plain);
|
|
crypto_free_sync_skcipher(cipher);
|
|
return code;
|
|
}
|
|
@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
|
|
u32 seqnum,
|
|
unsigned char *cksum, unsigned char *buf)
|
|
{
|
|
- unsigned char plain[8];
|
|
+ unsigned char *plain;
|
|
+ s32 code;
|
|
|
|
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
|
|
return krb5_make_rc4_seq_num(kctx, direction, seqnum,
|
|
cksum, buf);
|
|
|
|
+ plain = kmalloc(8, GFP_NOFS);
|
|
+ if (!plain)
|
|
+ return -ENOMEM;
|
|
+
|
|
plain[0] = (unsigned char) (seqnum & 0xff);
|
|
plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
|
|
plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
|
|
@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
|
|
plain[6] = direction;
|
|
plain[7] = direction;
|
|
|
|
- return krb5_encrypt(key, cksum, plain, buf, 8);
|
|
+ code = krb5_encrypt(key, cksum, plain, buf, 8);
|
|
+ kfree(plain);
|
|
+ return code;
|
|
}
|
|
|
|
static s32
|
|
@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
|
|
unsigned char *buf, int *direction, s32 *seqnum)
|
|
{
|
|
struct crypto_sync_skcipher *cipher;
|
|
- unsigned char plain[8];
|
|
+ unsigned char *plain;
|
|
s32 code;
|
|
|
|
dprintk("RPC: %s:\n", __func__);
|
|
@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
|
|
if (code)
|
|
goto out;
|
|
|
|
+ plain = kmalloc(8, GFP_NOFS);
|
|
+ if (!plain) {
|
|
+ code = -ENOMEM;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
code = krb5_decrypt(cipher, cksum, buf, plain, 8);
|
|
if (code)
|
|
- goto out;
|
|
+ goto out_plain;
|
|
|
|
if ((plain[4] != plain[5]) || (plain[4] != plain[6])
|
|
|| (plain[4] != plain[7])) {
|
|
code = (s32)KG_BAD_SEQ;
|
|
- goto out;
|
|
+ goto out_plain;
|
|
}
|
|
|
|
*direction = plain[4];
|
|
|
|
*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
|
|
(plain[2] << 8) | (plain[3]));
|
|
+out_plain:
|
|
+ kfree(plain);
|
|
out:
|
|
crypto_free_sync_skcipher(cipher);
|
|
return code;
|
|
@@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
|
|
int *direction, u32 *seqnum)
|
|
{
|
|
s32 code;
|
|
- unsigned char plain[8];
|
|
+ unsigned char *plain;
|
|
struct crypto_sync_skcipher *key = kctx->seq;
|
|
|
|
dprintk("RPC: krb5_get_seq_num:\n");
|
|
@@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
|
|
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
|
|
return krb5_get_rc4_seq_num(kctx, cksum, buf,
|
|
direction, seqnum);
|
|
+ plain = kmalloc(8, GFP_NOFS);
|
|
+ if (!plain)
|
|
+ return -ENOMEM;
|
|
|
|
if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
|
|
- return code;
|
|
+ goto out;
|
|
|
|
if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
|
|
- (plain[4] != plain[7]))
|
|
- return (s32)KG_BAD_SEQ;
|
|
+ (plain[4] != plain[7])) {
|
|
+ code = (s32)KG_BAD_SEQ;
|
|
+ goto out;
|
|
+ }
|
|
|
|
*direction = plain[4];
|
|
|
|
*seqnum = ((plain[0]) |
|
|
(plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
|
|
|
|
- return 0;
|
|
+out:
|
|
+ kfree(plain);
|
|
+ return code;
|
|
}
|
|
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
|
|
index 1ece4bc3eb8d8..152790ed309c6 100644
|
|
--- a/net/sunrpc/auth_gss/svcauth_gss.c
|
|
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
|
|
@@ -1142,7 +1142,7 @@ static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
|
|
struct kvec *resv = &rqstp->rq_res.head[0];
|
|
struct rsi *rsip, rsikey;
|
|
int ret;
|
|
- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
|
|
+ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
|
|
|
|
memset(&rsikey, 0, sizeof(rsikey));
|
|
ret = gss_read_verf(gc, argv, authp,
|
|
@@ -1253,7 +1253,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
|
|
uint64_t handle;
|
|
int status;
|
|
int ret;
|
|
- struct net *net = rqstp->rq_xprt->xpt_net;
|
|
+ struct net *net = SVC_NET(rqstp);
|
|
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
|
|
|
|
memset(&ud, 0, sizeof(ud));
|
|
@@ -1444,7 +1444,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
|
|
__be32 *rpcstart;
|
|
__be32 *reject_stat = resv->iov_base + resv->iov_len;
|
|
int ret;
|
|
- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
|
|
+ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
|
|
|
|
dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",
|
|
argv->iov_len);
|
|
@@ -1734,7 +1734,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
|
|
struct rpc_gss_wire_cred *gc = &gsd->clcred;
|
|
struct xdr_buf *resbuf = &rqstp->rq_res;
|
|
int stat = -EINVAL;
|
|
- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
|
|
+ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
|
|
|
|
if (gc->gc_proc != RPC_GSS_PROC_DATA)
|
|
goto out;
|
|
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
|
|
index f96345b1180ee..12bb23b8e0c50 100644
|
|
--- a/net/sunrpc/cache.c
|
|
+++ b/net/sunrpc/cache.c
|
|
@@ -54,6 +54,11 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
|
|
h->last_refresh = now;
|
|
}
|
|
|
|
+static void cache_fresh_locked(struct cache_head *head, time_t expiry,
|
|
+ struct cache_detail *detail);
|
|
+static void cache_fresh_unlocked(struct cache_head *head,
|
|
+ struct cache_detail *detail);
|
|
+
|
|
static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
|
|
struct cache_head *key,
|
|
int hash)
|
|
@@ -100,6 +105,7 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
|
|
if (cache_is_expired(detail, tmp)) {
|
|
hlist_del_init_rcu(&tmp->cache_list);
|
|
detail->entries --;
|
|
+ cache_fresh_locked(tmp, 0, detail);
|
|
freeme = tmp;
|
|
break;
|
|
}
|
|
@@ -115,8 +121,10 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
|
|
cache_get(new);
|
|
spin_unlock(&detail->hash_lock);
|
|
|
|
- if (freeme)
|
|
+ if (freeme) {
|
|
+ cache_fresh_unlocked(freeme, detail);
|
|
cache_put(freeme, detail);
|
|
+ }
|
|
return new;
|
|
}
|
|
|
|
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
|
|
index 24cbddc44c884..1ee04e0ec4bca 100644
|
|
--- a/net/sunrpc/clnt.c
|
|
+++ b/net/sunrpc/clnt.c
|
|
@@ -1738,14 +1738,10 @@ rpc_xdr_encode(struct rpc_task *task)
|
|
xdr_buf_init(&req->rq_rcv_buf,
|
|
req->rq_rbuffer,
|
|
req->rq_rcvsize);
|
|
- req->rq_bytes_sent = 0;
|
|
|
|
p = rpc_encode_header(task);
|
|
- if (p == NULL) {
|
|
- printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
|
|
- rpc_exit(task, -EIO);
|
|
+ if (p == NULL)
|
|
return;
|
|
- }
|
|
|
|
encode = task->tk_msg.rpc_proc->p_encode;
|
|
if (encode == NULL)
|
|
@@ -1770,10 +1766,17 @@ call_encode(struct rpc_task *task)
|
|
/* Did the encode result in an error condition? */
|
|
if (task->tk_status != 0) {
|
|
/* Was the error nonfatal? */
|
|
- if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM)
|
|
+ switch (task->tk_status) {
|
|
+ case -EAGAIN:
|
|
+ case -ENOMEM:
|
|
rpc_delay(task, HZ >> 4);
|
|
- else
|
|
+ break;
|
|
+ case -EKEYEXPIRED:
|
|
+ task->tk_action = call_refresh;
|
|
+ break;
|
|
+ default:
|
|
rpc_exit(task, task->tk_status);
|
|
+ }
|
|
return;
|
|
}
|
|
|
|
@@ -2335,7 +2338,8 @@ rpc_encode_header(struct rpc_task *task)
|
|
*p++ = htonl(clnt->cl_vers); /* program version */
|
|
*p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
|
|
p = rpcauth_marshcred(task, p);
|
|
- req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
|
|
+ if (p)
|
|
+ req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
|
|
return p;
|
|
}
|
|
|
|
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
|
|
index c7872bc138605..08b5fa4a2852a 100644
|
|
--- a/net/sunrpc/rpcb_clnt.c
|
|
+++ b/net/sunrpc/rpcb_clnt.c
|
|
@@ -771,6 +771,12 @@ void rpcb_getport_async(struct rpc_task *task)
|
|
case RPCBVERS_3:
|
|
map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
|
|
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
|
|
+ if (!map->r_addr) {
|
|
+ status = -ENOMEM;
|
|
+ dprintk("RPC: %5u %s: no memory available\n",
|
|
+ task->tk_pid, __func__);
|
|
+ goto bailout_free_args;
|
|
+ }
|
|
map->r_owner = "";
|
|
break;
|
|
case RPCBVERS_2:
|
|
@@ -793,6 +799,8 @@ void rpcb_getport_async(struct rpc_task *task)
|
|
rpc_put_task(child);
|
|
return;
|
|
|
|
+bailout_free_args:
|
|
+ kfree(map);
|
|
bailout_release_client:
|
|
rpc_release_client(rpcb_clnt);
|
|
bailout_nofree:
|
|
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
|
|
index d13e05f1a9905..d65f8d35de87e 100644
|
|
--- a/net/sunrpc/svc.c
|
|
+++ b/net/sunrpc/svc.c
|
|
@@ -1144,6 +1144,8 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
|
|
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
|
|
#endif
|
|
|
|
+extern void svc_tcp_prep_reply_hdr(struct svc_rqst *);
|
|
+
|
|
/*
|
|
* Common routine for processing the RPC request.
|
|
*/
|
|
@@ -1172,7 +1174,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
|
|
clear_bit(RQ_DROPME, &rqstp->rq_flags);
|
|
|
|
/* Setup reply header */
|
|
- rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
|
|
+ if (rqstp->rq_prot == IPPROTO_TCP)
|
|
+ svc_tcp_prep_reply_hdr(rqstp);
|
|
|
|
svc_putu32(resv, rqstp->rq_xid);
|
|
|
|
@@ -1244,7 +1247,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
|
|
* for lower versions. RPC_PROG_MISMATCH seems to be the closest
|
|
* fit.
|
|
*/
|
|
- if (versp->vs_need_cong_ctrl &&
|
|
+ if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
|
|
!test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
|
|
goto err_bad_vers;
|
|
|
|
@@ -1336,7 +1339,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
|
|
return 0;
|
|
|
|
close:
|
|
- if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
|
|
+ if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
|
|
svc_close_xprt(rqstp->rq_xprt);
|
|
dprintk("svc: svc_process close\n");
|
|
return 0;
|
|
@@ -1459,10 +1462,10 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
|
|
dprintk("svc: %s(%p)\n", __func__, req);
|
|
|
|
/* Build the svc_rqst used by the common processing routine */
|
|
- rqstp->rq_xprt = serv->sv_bc_xprt;
|
|
rqstp->rq_xid = req->rq_xid;
|
|
rqstp->rq_prot = req->rq_xprt->prot;
|
|
rqstp->rq_server = serv;
|
|
+ rqstp->rq_bc_net = req->rq_xprt->xprt_net;
|
|
|
|
rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
|
|
memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
|
|
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
|
|
index 51d36230b6e3e..bd42da287c268 100644
|
|
--- a/net/sunrpc/svc_xprt.c
|
|
+++ b/net/sunrpc/svc_xprt.c
|
|
@@ -468,10 +468,11 @@ out:
|
|
*/
|
|
void svc_reserve(struct svc_rqst *rqstp, int space)
|
|
{
|
|
+ struct svc_xprt *xprt = rqstp->rq_xprt;
|
|
+
|
|
space += rqstp->rq_res.head[0].iov_len;
|
|
|
|
- if (space < rqstp->rq_reserved) {
|
|
- struct svc_xprt *xprt = rqstp->rq_xprt;
|
|
+ if (xprt && space < rqstp->rq_reserved) {
|
|
atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
|
|
rqstp->rq_reserved = space;
|
|
|
|
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
|
|
index 986f3ed7d1a24..b90492c437112 100644
|
|
--- a/net/sunrpc/svcsock.c
|
|
+++ b/net/sunrpc/svcsock.c
|
|
@@ -549,7 +549,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
|
|
/* Don't enable netstamp, sunrpc doesn't
|
|
need that much accuracy */
|
|
}
|
|
- svsk->sk_sk->sk_stamp = skb->tstamp;
|
|
+ sock_write_timestamp(svsk->sk_sk, skb->tstamp);
|
|
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
|
|
|
|
len = skb->len;
|
|
@@ -1173,7 +1173,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
|
|
/*
|
|
* Setup response header. TCP has a 4B record length field.
|
|
*/
|
|
-static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
|
|
+void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
|
|
{
|
|
struct kvec *resv = &rqstp->rq_res.head[0];
|
|
|
|
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
|
|
index 73547d17d3c61..f1ec2110efebe 100644
|
|
--- a/net/sunrpc/xprt.c
|
|
+++ b/net/sunrpc/xprt.c
|
|
@@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
|
|
struct rpc_xprt *xprt = req->rq_xprt;
|
|
|
|
if (xprt_request_need_enqueue_transmit(task, req)) {
|
|
+ req->rq_bytes_sent = 0;
|
|
spin_lock(&xprt->queue_lock);
|
|
/*
|
|
* Requests that carry congestion control credits are added
|
|
@@ -1177,7 +1178,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
|
|
INIT_LIST_HEAD(&req->rq_xmit2);
|
|
goto out;
|
|
}
|
|
- } else {
|
|
+ } else if (!req->rq_seqno) {
|
|
list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
|
|
if (pos->rq_task->tk_owner != task->tk_owner)
|
|
continue;
|
|
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
|
|
index e5b367a3e517a..88fde80b93475 100644
|
|
--- a/net/sunrpc/xprtrdma/backchannel.c
|
|
+++ b/net/sunrpc/xprtrdma/backchannel.c
|
|
@@ -193,14 +193,15 @@ static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
|
|
*/
|
|
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
|
|
{
|
|
- struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
|
|
+ struct rpc_xprt *xprt = rqst->rq_xprt;
|
|
+ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
|
|
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
|
|
int rc;
|
|
|
|
- if (!xprt_connected(rqst->rq_xprt))
|
|
- goto drop_connection;
|
|
+ if (!xprt_connected(xprt))
|
|
+ return -ENOTCONN;
|
|
|
|
- if (!xprt_request_get_cong(rqst->rq_xprt, rqst))
|
|
+ if (!xprt_request_get_cong(xprt, rqst))
|
|
return -EBADSLT;
|
|
|
|
rc = rpcrdma_bc_marshal_reply(rqst);
|
|
@@ -216,7 +217,7 @@ failed_marshal:
|
|
if (rc != -ENOTCONN)
|
|
return rc;
|
|
drop_connection:
|
|
- xprt_disconnect_done(rqst->rq_xprt);
|
|
+ xprt_rdma_close(xprt);
|
|
return -ENOTCONN;
|
|
}
|
|
|
|
@@ -339,7 +340,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
|
|
|
|
out_overflow:
|
|
pr_warn("RPC/RDMA backchannel overflow\n");
|
|
- xprt_disconnect_done(xprt);
|
|
+ xprt_force_disconnect(xprt);
|
|
/* This receive buffer gets reposted automatically
|
|
* when the connection is re-established.
|
|
*/
|
|
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
|
|
index fc6378cc0c1c7..20ced24cc61bc 100644
|
|
--- a/net/sunrpc/xprtrdma/frwr_ops.c
|
|
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
|
|
@@ -117,15 +117,15 @@ static void
|
|
frwr_mr_recycle_worker(struct work_struct *work)
|
|
{
|
|
struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
|
|
- enum rpcrdma_frwr_state state = mr->frwr.fr_state;
|
|
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
|
|
|
|
trace_xprtrdma_mr_recycle(mr);
|
|
|
|
- if (state != FRWR_FLUSHED_LI) {
|
|
+ if (mr->mr_dir != DMA_NONE) {
|
|
trace_xprtrdma_mr_unmap(mr);
|
|
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
|
|
mr->mr_sg, mr->mr_nents, mr->mr_dir);
|
|
+ mr->mr_dir = DMA_NONE;
|
|
}
|
|
|
|
spin_lock(&r_xprt->rx_buf.rb_mrlock);
|
|
@@ -150,6 +150,8 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
|
|
if (!mr->mr_sg)
|
|
goto out_list_err;
|
|
|
|
+ frwr->fr_state = FRWR_IS_INVALID;
|
|
+ mr->mr_dir = DMA_NONE;
|
|
INIT_LIST_HEAD(&mr->mr_list);
|
|
INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
|
|
sg_init_table(mr->mr_sg, depth);
|
|
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
|
|
index f3c147d70286e..b908f2ca08fd4 100644
|
|
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
|
|
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
|
|
@@ -200,11 +200,10 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
|
|
svc_rdma_send_ctxt_put(rdma, ctxt);
|
|
goto drop_connection;
|
|
}
|
|
- return rc;
|
|
+ return 0;
|
|
|
|
drop_connection:
|
|
dprintk("svcrdma: failed to send bc call\n");
|
|
- xprt_disconnect_done(xprt);
|
|
return -ENOTCONN;
|
|
}
|
|
|
|
@@ -225,8 +224,11 @@ xprt_rdma_bc_send_request(struct rpc_rqst *rqst)
|
|
|
|
ret = -ENOTCONN;
|
|
rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
|
|
- if (!test_bit(XPT_DEAD, &sxprt->xpt_flags))
|
|
+ if (!test_bit(XPT_DEAD, &sxprt->xpt_flags)) {
|
|
ret = rpcrdma_bc_send_request(rdma, rqst);
|
|
+ if (ret == -ENOTCONN)
|
|
+ svc_close_xprt(sxprt);
|
|
+ }
|
|
|
|
mutex_unlock(&sxprt->xpt_mutex);
|
|
|
|
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
|
|
index 8602a5f1b5156..e8ad7ddf347ad 100644
|
|
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
|
|
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
|
|
@@ -563,6 +563,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
|
|
DMA_TO_DEVICE);
|
|
}
|
|
|
|
+/* If the xdr_buf has more elements than the device can
|
|
+ * transmit in a single RDMA Send, then the reply will
|
|
+ * have to be copied into a bounce buffer.
|
|
+ */
|
|
+static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
|
|
+ struct xdr_buf *xdr,
|
|
+ __be32 *wr_lst)
|
|
+{
|
|
+ int elements;
|
|
+
|
|
+ /* xdr->head */
|
|
+ elements = 1;
|
|
+
|
|
+ /* xdr->pages */
|
|
+ if (!wr_lst) {
|
|
+ unsigned int remaining;
|
|
+ unsigned long pageoff;
|
|
+
|
|
+ pageoff = xdr->page_base & ~PAGE_MASK;
|
|
+ remaining = xdr->page_len;
|
|
+ while (remaining) {
|
|
+ ++elements;
|
|
+ remaining -= min_t(u32, PAGE_SIZE - pageoff,
|
|
+ remaining);
|
|
+ pageoff = 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* xdr->tail */
|
|
+ if (xdr->tail[0].iov_len)
|
|
+ ++elements;
|
|
+
|
|
+ /* assume 1 SGE is needed for the transport header */
|
|
+ return elements >= rdma->sc_max_send_sges;
|
|
+}
|
|
+
|
|
+/* The device is not capable of sending the reply directly.
|
|
+ * Assemble the elements of @xdr into the transport header
|
|
+ * buffer.
|
|
+ */
|
|
+static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
|
|
+ struct svc_rdma_send_ctxt *ctxt,
|
|
+ struct xdr_buf *xdr, __be32 *wr_lst)
|
|
+{
|
|
+ unsigned char *dst, *tailbase;
|
|
+ unsigned int taillen;
|
|
+
|
|
+ dst = ctxt->sc_xprt_buf;
|
|
+ dst += ctxt->sc_sges[0].length;
|
|
+
|
|
+ memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
|
|
+ dst += xdr->head[0].iov_len;
|
|
+
|
|
+ tailbase = xdr->tail[0].iov_base;
|
|
+ taillen = xdr->tail[0].iov_len;
|
|
+ if (wr_lst) {
|
|
+ u32 xdrpad;
|
|
+
|
|
+ xdrpad = xdr_padsize(xdr->page_len);
|
|
+ if (taillen && xdrpad) {
|
|
+ tailbase += xdrpad;
|
|
+ taillen -= xdrpad;
|
|
+ }
|
|
+ } else {
|
|
+ unsigned int len, remaining;
|
|
+ unsigned long pageoff;
|
|
+ struct page **ppages;
|
|
+
|
|
+ ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
|
|
+ pageoff = xdr->page_base & ~PAGE_MASK;
|
|
+ remaining = xdr->page_len;
|
|
+ while (remaining) {
|
|
+ len = min_t(u32, PAGE_SIZE - pageoff, remaining);
|
|
+
|
|
+ memcpy(dst, page_address(*ppages), len);
|
|
+ remaining -= len;
|
|
+ dst += len;
|
|
+ pageoff = 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (taillen)
|
|
+ memcpy(dst, tailbase, taillen);
|
|
+
|
|
+ ctxt->sc_sges[0].length += xdr->len;
|
|
+ ib_dma_sync_single_for_device(rdma->sc_pd->device,
|
|
+ ctxt->sc_sges[0].addr,
|
|
+ ctxt->sc_sges[0].length,
|
|
+ DMA_TO_DEVICE);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/* svc_rdma_map_reply_msg - Map the buffer holding RPC message
|
|
* @rdma: controlling transport
|
|
* @ctxt: send_ctxt for the Send WR
|
|
@@ -585,8 +678,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
|
|
u32 xdr_pad;
|
|
int ret;
|
|
|
|
- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
|
|
- return -EIO;
|
|
+ if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
|
|
+ return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
|
|
+
|
|
+ ++ctxt->sc_cur_sge_no;
|
|
ret = svc_rdma_dma_map_buf(rdma, ctxt,
|
|
xdr->head[0].iov_base,
|
|
xdr->head[0].iov_len);
|
|
@@ -617,8 +712,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
|
|
while (remaining) {
|
|
len = min_t(u32, PAGE_SIZE - page_off, remaining);
|
|
|
|
- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
|
|
- return -EIO;
|
|
+ ++ctxt->sc_cur_sge_no;
|
|
ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
|
|
page_off, len);
|
|
if (ret < 0)
|
|
@@ -632,8 +726,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
|
|
len = xdr->tail[0].iov_len;
|
|
tail:
|
|
if (len) {
|
|
- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
|
|
- return -EIO;
|
|
+ ++ctxt->sc_cur_sge_no;
|
|
ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
|
|
if (ret < 0)
|
|
return ret;
|
|
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
|
|
index 2f7ec8912f494..ce5c610b49c77 100644
|
|
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
|
|
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
|
|
@@ -478,12 +478,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
|
|
/* Transport header, head iovec, tail iovec */
|
|
newxprt->sc_max_send_sges = 3;
|
|
/* Add one SGE per page list entry */
|
|
- newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
|
|
- if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
|
|
- pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
|
|
- newxprt->sc_max_send_sges);
|
|
- goto errout;
|
|
- }
|
|
+ newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
|
|
+ if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
|
|
+ newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
|
|
newxprt->sc_max_req_size = svcrdma_max_req_size;
|
|
newxprt->sc_max_requests = svcrdma_max_requests;
|
|
newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
|
|
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
|
|
index ae2a838289537..e7683d3b1e6c9 100644
|
|
--- a/net/sunrpc/xprtrdma/transport.c
|
|
+++ b/net/sunrpc/xprtrdma/transport.c
|
|
@@ -437,8 +437,7 @@ out1:
|
|
* Caller holds @xprt's send lock to prevent activity on this
|
|
* transport while the connection is torn down.
|
|
*/
|
|
-static void
|
|
-xprt_rdma_close(struct rpc_xprt *xprt)
|
|
+void xprt_rdma_close(struct rpc_xprt *xprt)
|
|
{
|
|
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
|
|
struct rpcrdma_ep *ep = &r_xprt->rx_ep;
|
|
@@ -449,13 +448,13 @@ xprt_rdma_close(struct rpc_xprt *xprt)
|
|
if (test_and_clear_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags)) {
|
|
xprt_clear_connected(xprt);
|
|
rpcrdma_ia_remove(ia);
|
|
- return;
|
|
+ goto out;
|
|
}
|
|
+
|
|
if (ep->rep_connected == -ENODEV)
|
|
return;
|
|
if (ep->rep_connected > 0)
|
|
xprt->reestablish_timeout = 0;
|
|
- xprt_disconnect_done(xprt);
|
|
rpcrdma_ep_disconnect(ep, ia);
|
|
|
|
/* Prepare @xprt for the next connection by reinitializing
|
|
@@ -463,6 +462,10 @@ xprt_rdma_close(struct rpc_xprt *xprt)
|
|
*/
|
|
r_xprt->rx_buf.rb_credits = 1;
|
|
xprt->cwnd = RPC_CWNDSHIFT;
|
|
+
|
|
+out:
|
|
+ ++xprt->connect_cookie;
|
|
+ xprt_disconnect_done(xprt);
|
|
}
|
|
|
|
/**
|
|
@@ -713,7 +716,7 @@ xprt_rdma_send_request(struct rpc_rqst *rqst)
|
|
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
|
|
|
|
if (!xprt_connected(xprt))
|
|
- goto drop_connection;
|
|
+ return -ENOTCONN;
|
|
|
|
if (!xprt_request_get_cong(xprt, rqst))
|
|
return -EBADSLT;
|
|
@@ -745,8 +748,8 @@ failed_marshal:
|
|
if (rc != -ENOTCONN)
|
|
return rc;
|
|
drop_connection:
|
|
- xprt_disconnect_done(xprt);
|
|
- return -ENOTCONN; /* implies disconnect */
|
|
+ xprt_rdma_close(xprt);
|
|
+ return -ENOTCONN;
|
|
}
|
|
|
|
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
|
|
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
|
|
index 3ddba94c939f6..dffedf1df02ce 100644
|
|
--- a/net/sunrpc/xprtrdma/verbs.c
|
|
+++ b/net/sunrpc/xprtrdma/verbs.c
|
|
@@ -316,7 +316,6 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
|
|
ep->rep_connected = -EAGAIN;
|
|
goto disconnected;
|
|
case RDMA_CM_EVENT_DISCONNECTED:
|
|
- ++xprt->connect_cookie;
|
|
ep->rep_connected = -ECONNABORTED;
|
|
disconnected:
|
|
xprt_force_disconnect(xprt);
|
|
@@ -913,17 +912,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
|
|
for (i = 0; i <= buf->rb_sc_last; i++) {
|
|
sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
|
|
if (!sc)
|
|
- goto out_destroy;
|
|
+ return -ENOMEM;
|
|
|
|
sc->sc_xprt = r_xprt;
|
|
buf->rb_sc_ctxs[i] = sc;
|
|
}
|
|
|
|
return 0;
|
|
-
|
|
-out_destroy:
|
|
- rpcrdma_sendctxs_destroy(buf);
|
|
- return -ENOMEM;
|
|
}
|
|
|
|
/* The sendctx queue is not guaranteed to have a size that is a
|
|
@@ -1329,9 +1324,12 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
|
|
{
|
|
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
|
|
|
|
- trace_xprtrdma_mr_unmap(mr);
|
|
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
|
|
- mr->mr_sg, mr->mr_nents, mr->mr_dir);
|
|
+ if (mr->mr_dir != DMA_NONE) {
|
|
+ trace_xprtrdma_mr_unmap(mr);
|
|
+ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
|
|
+ mr->mr_sg, mr->mr_nents, mr->mr_dir);
|
|
+ mr->mr_dir = DMA_NONE;
|
|
+ }
|
|
__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
|
|
}
|
|
|
|
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
|
|
index a13ccb643ce07..0af75b1405f85 100644
|
|
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
|
|
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
|
|
@@ -653,6 +653,7 @@ static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
|
|
extern unsigned int xprt_rdma_max_inline_read;
|
|
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
|
|
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
|
|
+void xprt_rdma_close(struct rpc_xprt *xprt);
|
|
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
|
|
int xprt_rdma_init(void);
|
|
void xprt_rdma_cleanup(void);
|
|
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
|
|
index f0b3700cec95e..9cdbb6d6e7f5f 100644
|
|
--- a/net/sunrpc/xprtsock.c
|
|
+++ b/net/sunrpc/xprtsock.c
|
|
@@ -48,6 +48,7 @@
|
|
#include <net/udp.h>
|
|
#include <net/tcp.h>
|
|
#include <linux/bvec.h>
|
|
+#include <linux/highmem.h>
|
|
#include <linux/uio.h>
|
|
|
|
#include <trace/events/sunrpc.h>
|
|
@@ -380,6 +381,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
|
|
return sock_recvmsg(sock, msg, flags);
|
|
}
|
|
|
|
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
|
|
+static void
|
|
+xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
|
|
+{
|
|
+ struct bvec_iter bi = {
|
|
+ .bi_size = count,
|
|
+ };
|
|
+ struct bio_vec bv;
|
|
+
|
|
+ bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
|
|
+ for_each_bvec(bv, bvec, bi, bi)
|
|
+ flush_dcache_page(bv.bv_page);
|
|
+}
|
|
+#else
|
|
+static inline void
|
|
+xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
|
|
+{
|
|
+}
|
|
+#endif
|
|
+
|
|
static ssize_t
|
|
xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
|
|
struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
|
|
@@ -413,6 +434,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
|
|
seek + buf->page_base);
|
|
if (ret <= 0)
|
|
goto sock_err;
|
|
+ xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
|
|
offset += ret - buf->page_base;
|
|
if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
|
|
goto out;
|
|
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
|
|
index e65c3a8551e4d..040153ffc357b 100644
|
|
--- a/net/tipc/bearer.c
|
|
+++ b/net/tipc/bearer.c
|
|
@@ -317,7 +317,6 @@ static int tipc_enable_bearer(struct net *net, const char *name,
|
|
res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
|
|
if (res) {
|
|
bearer_disable(net, b);
|
|
- kfree(b);
|
|
errstr = "failed to create discoverer";
|
|
goto rejected;
|
|
}
|
|
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
|
|
index 6376467e78f86..0b21187d74dff 100644
|
|
--- a/net/tipc/netlink_compat.c
|
|
+++ b/net/tipc/netlink_compat.c
|
|
@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
|
|
return limit;
|
|
}
|
|
|
|
+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
|
|
+{
|
|
+ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
|
|
+}
|
|
+
|
|
static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
|
|
{
|
|
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
|
|
@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
|
|
return buf;
|
|
}
|
|
|
|
+static inline bool string_is_valid(char *s, int len)
|
|
+{
|
|
+ return memchr(s, '\0', len) ? true : false;
|
|
+}
|
|
+
|
|
static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
|
|
struct tipc_nl_compat_msg *msg,
|
|
struct sk_buff *arg)
|
|
@@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
|
|
struct nlattr *prop;
|
|
struct nlattr *bearer;
|
|
struct tipc_bearer_config *b;
|
|
+ int len;
|
|
|
|
b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
|
|
|
|
@@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
|
|
if (!bearer)
|
|
return -EMSGSIZE;
|
|
|
|
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
|
|
+ if (!string_is_valid(b->name, len))
|
|
+ return -EINVAL;
|
|
+
|
|
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
|
|
return -EMSGSIZE;
|
|
|
|
@@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
|
|
{
|
|
char *name;
|
|
struct nlattr *bearer;
|
|
+ int len;
|
|
|
|
name = (char *)TLV_DATA(msg->req);
|
|
|
|
@@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
|
|
if (!bearer)
|
|
return -EMSGSIZE;
|
|
|
|
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
|
|
+ if (!string_is_valid(name, len))
|
|
+ return -EINVAL;
|
|
+
|
|
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
|
|
return -EMSGSIZE;
|
|
|
|
@@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
|
|
struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
|
|
struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
|
|
int err;
|
|
+ int len;
|
|
|
|
if (!attrs[TIPC_NLA_LINK])
|
|
return -EINVAL;
|
|
@@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
|
|
return err;
|
|
|
|
name = (char *)TLV_DATA(msg->req);
|
|
+
|
|
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
|
|
+ if (!string_is_valid(name, len))
|
|
+ return -EINVAL;
|
|
+
|
|
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
|
|
return 0;
|
|
|
|
@@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
|
|
struct nlattr *prop;
|
|
struct nlattr *media;
|
|
struct tipc_link_config *lc;
|
|
+ int len;
|
|
|
|
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
|
|
|
|
@@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
|
|
if (!media)
|
|
return -EMSGSIZE;
|
|
|
|
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
|
|
+ if (!string_is_valid(lc->name, len))
|
|
+ return -EINVAL;
|
|
+
|
|
if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
|
|
return -EMSGSIZE;
|
|
|
|
@@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
|
|
struct nlattr *prop;
|
|
struct nlattr *bearer;
|
|
struct tipc_link_config *lc;
|
|
+ int len;
|
|
|
|
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
|
|
|
|
@@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
|
|
if (!bearer)
|
|
return -EMSGSIZE;
|
|
|
|
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
|
|
+ if (!string_is_valid(lc->name, len))
|
|
+ return -EINVAL;
|
|
+
|
|
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
|
|
return -EMSGSIZE;
|
|
|
|
@@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
|
|
struct tipc_link_config *lc;
|
|
struct tipc_bearer *bearer;
|
|
struct tipc_media *media;
|
|
+ int len;
|
|
|
|
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
|
|
|
|
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
|
|
+ if (!string_is_valid(lc->name, len))
|
|
+ return -EINVAL;
|
|
+
|
|
media = tipc_media_find(lc->name);
|
|
if (media) {
|
|
cmd->doit = &__tipc_nl_media_set;
|
|
@@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
|
|
{
|
|
char *name;
|
|
struct nlattr *link;
|
|
+ int len;
|
|
|
|
name = (char *)TLV_DATA(msg->req);
|
|
|
|
@@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
|
|
if (!link)
|
|
return -EMSGSIZE;
|
|
|
|
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
|
|
+ if (!string_is_valid(name, len))
|
|
+ return -EINVAL;
|
|
+
|
|
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
|
|
return -EMSGSIZE;
|
|
|
|
@@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
|
|
};
|
|
|
|
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
|
|
+ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
|
|
+ return -EINVAL;
|
|
|
|
depth = ntohl(ntq->depth);
|
|
|
|
@@ -1201,7 +1249,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
|
|
}
|
|
|
|
len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
|
|
- if (len && !TLV_OK(msg.req, len)) {
|
|
+ if (!len || !TLV_OK(msg.req, len)) {
|
|
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
|
|
err = -EOPNOTSUPP;
|
|
goto send;
|
|
diff --git a/net/tipc/node.c b/net/tipc/node.c
|
|
index 4880197664335..32556f480a606 100644
|
|
--- a/net/tipc/node.c
|
|
+++ b/net/tipc/node.c
|
|
@@ -624,6 +624,12 @@ static void tipc_node_timeout(struct timer_list *t)
|
|
|
|
__skb_queue_head_init(&xmitq);
|
|
|
|
+ /* Initial node interval to value larger (10 seconds), then it will be
|
|
+ * recalculated with link lowest tolerance
|
|
+ */
|
|
+ tipc_node_read_lock(n);
|
|
+ n->keepalive_intv = 10000;
|
|
+ tipc_node_read_unlock(n);
|
|
for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
|
|
tipc_node_read_lock(n);
|
|
le = &n->links[bearer_id];
|
|
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
|
|
index efb16f69bd2c4..a457c0fbbef1a 100644
|
|
--- a/net/tipc/topsrv.c
|
|
+++ b/net/tipc/topsrv.c
|
|
@@ -398,7 +398,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
|
|
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
|
|
if (ret == -EWOULDBLOCK)
|
|
return -EWOULDBLOCK;
|
|
- if (ret > 0) {
|
|
+ if (ret == sizeof(s)) {
|
|
read_lock_bh(&sk->sk_callback_lock);
|
|
ret = tipc_conn_rcv_sub(srv, con, &s);
|
|
read_unlock_bh(&sk->sk_callback_lock);
|
|
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
|
|
index 29b27858fff10..419314ba94ecf 100644
|
|
--- a/net/tls/tls_sw.c
|
|
+++ b/net/tls/tls_sw.c
|
|
@@ -439,6 +439,8 @@ static int tls_do_encryption(struct sock *sk,
|
|
struct scatterlist *sge = sk_msg_elem(msg_en, start);
|
|
int rc;
|
|
|
|
+ memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
|
|
+
|
|
sge->offset += tls_ctx->tx.prepend_size;
|
|
sge->length -= tls_ctx->tx.prepend_size;
|
|
|
|
@@ -448,7 +450,7 @@ static int tls_do_encryption(struct sock *sk,
|
|
aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
|
|
aead_request_set_crypt(aead_req, rec->sg_aead_in,
|
|
rec->sg_aead_out,
|
|
- data_len, tls_ctx->tx.iv);
|
|
+ data_len, rec->iv_data);
|
|
|
|
aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
|
tls_encrypt_done, sk);
|
|
@@ -1768,7 +1770,9 @@ void tls_sw_free_resources_tx(struct sock *sk)
|
|
if (atomic_read(&ctx->encrypt_pending))
|
|
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
|
|
|
|
+ release_sock(sk);
|
|
cancel_delayed_work_sync(&ctx->tx_work.work);
|
|
+ lock_sock(sk);
|
|
|
|
/* Tx whatever records we can transmit and abandon the rest */
|
|
tls_tx_records(sk, -1);
|
|
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
|
|
index c361ce7824123..c3d5ab01fba7b 100644
|
|
--- a/net/vmw_vsock/vmci_transport.c
|
|
+++ b/net/vmw_vsock/vmci_transport.c
|
|
@@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
|
|
|
|
static void vmci_transport_destruct(struct vsock_sock *vsk)
|
|
{
|
|
+ /* transport can be NULL if we hit a failure at init() time */
|
|
+ if (!vmci_trans(vsk))
|
|
+ return;
|
|
+
|
|
/* Ensure that the detach callback doesn't use the sk/vsk
|
|
* we are about to destruct.
|
|
*/
|
|
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
|
|
index 882d97bdc6bfd..550ac9d827fe7 100644
|
|
--- a/net/wireless/ap.c
|
|
+++ b/net/wireless/ap.c
|
|
@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
|
|
cfg80211_sched_dfs_chan_update(rdev);
|
|
}
|
|
|
|
+ schedule_work(&cfg80211_disconnect_work);
|
|
+
|
|
return err;
|
|
}
|
|
|
|
diff --git a/net/wireless/core.h b/net/wireless/core.h
|
|
index c61dbba8bf479..7f4d5f2f91120 100644
|
|
--- a/net/wireless/core.h
|
|
+++ b/net/wireless/core.h
|
|
@@ -444,6 +444,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
|
|
bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
|
|
u32 center_freq_khz, u32 bw_khz);
|
|
|
|
+extern struct work_struct cfg80211_disconnect_work;
|
|
+
|
|
/**
|
|
* cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
|
|
* @wiphy: the wiphy to validate against
|
|
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
|
|
index f741d8376a463..7d34cb884840e 100644
|
|
--- a/net/wireless/sme.c
|
|
+++ b/net/wireless/sme.c
|
|
@@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work)
|
|
rtnl_unlock();
|
|
}
|
|
|
|
-static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
|
|
+DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
|
|
|
|
|
|
/*
|
|
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
|
|
index 5121729b8b631..ec3a828672ef5 100644
|
|
--- a/net/x25/af_x25.c
|
|
+++ b/net/x25/af_x25.c
|
|
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
|
|
unsigned int lci = 1;
|
|
struct sock *sk;
|
|
|
|
- read_lock_bh(&x25_list_lock);
|
|
-
|
|
- while ((sk = __x25_find_socket(lci, nb)) != NULL) {
|
|
+ while ((sk = x25_find_socket(lci, nb)) != NULL) {
|
|
sock_put(sk);
|
|
if (++lci == 4096) {
|
|
lci = 0;
|
|
break;
|
|
}
|
|
+ cond_resched();
|
|
}
|
|
|
|
- read_unlock_bh(&x25_list_lock);
|
|
return lci;
|
|
}
|
|
|
|
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
|
|
index a264cf2accd0f..d4de871e7d4d7 100644
|
|
--- a/net/xdp/xdp_umem.c
|
|
+++ b/net/xdp/xdp_umem.c
|
|
@@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
|
|
* not know if the device has more tx queues than rx, or the opposite.
|
|
* This might also change during run time.
|
|
*/
|
|
-static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
|
|
- u16 queue_id)
|
|
+static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
|
|
+ u16 queue_id)
|
|
{
|
|
+ if (queue_id >= max_t(unsigned int,
|
|
+ dev->real_num_rx_queues,
|
|
+ dev->real_num_tx_queues))
|
|
+ return -EINVAL;
|
|
+
|
|
if (queue_id < dev->real_num_rx_queues)
|
|
dev->_rx[queue_id].umem = umem;
|
|
if (queue_id < dev->real_num_tx_queues)
|
|
dev->_tx[queue_id].umem = umem;
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
|
|
@@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
|
|
goto out_rtnl_unlock;
|
|
}
|
|
|
|
- xdp_reg_umem_at_qid(dev, umem, queue_id);
|
|
+ err = xdp_reg_umem_at_qid(dev, umem, queue_id);
|
|
+ if (err)
|
|
+ goto out_rtnl_unlock;
|
|
+
|
|
umem->dev = dev;
|
|
umem->queue_id = queue_id;
|
|
if (force_copy)
|
|
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
|
|
index 119a427d9b2b2..6ea8036fcdbeb 100644
|
|
--- a/net/xfrm/xfrm_policy.c
|
|
+++ b/net/xfrm/xfrm_policy.c
|
|
@@ -1628,7 +1628,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
|
|
dst_copy_metrics(dst1, dst);
|
|
|
|
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
|
|
- __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
|
|
+ __u32 mark = 0;
|
|
+
|
|
+ if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
|
|
+ mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
|
|
|
|
family = xfrm[i]->props.family;
|
|
dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
|
|
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
|
|
index 277c1c46fe94e..c6d26afcf89df 100644
|
|
--- a/net/xfrm/xfrm_user.c
|
|
+++ b/net/xfrm/xfrm_user.c
|
|
@@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
|
|
if (!ut[i].family)
|
|
ut[i].family = family;
|
|
|
|
- if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
|
|
- (ut[i].family != prev_family))
|
|
- return -EINVAL;
|
|
-
|
|
+ switch (ut[i].mode) {
|
|
+ case XFRM_MODE_TUNNEL:
|
|
+ case XFRM_MODE_BEET:
|
|
+ break;
|
|
+ default:
|
|
+ if (ut[i].family != prev_family)
|
|
+ return -EINVAL;
|
|
+ break;
|
|
+ }
|
|
if (ut[i].mode >= XFRM_MODE_MAX)
|
|
return -EINVAL;
|
|
|
|
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
|
|
index be0a961450bc2..f5ce993c78e42 100644
|
|
--- a/samples/bpf/Makefile
|
|
+++ b/samples/bpf/Makefile
|
|
@@ -273,6 +273,7 @@ $(obj)/%.o: $(src)/%.c
|
|
-Wno-gnu-variable-sized-type-not-at-end \
|
|
-Wno-address-of-packed-member -Wno-tautological-compare \
|
|
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
|
|
+ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
|
|
-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
|
|
ifeq ($(DWARF2BTF),y)
|
|
$(BTF_PAHOLE) -J $@
|
|
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
|
|
new file mode 100644
|
|
index 0000000000000..5cd7c1d1a5d56
|
|
--- /dev/null
|
|
+++ b/samples/bpf/asm_goto_workaround.h
|
|
@@ -0,0 +1,16 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/* Copyright (c) 2019 Facebook */
|
|
+#ifndef __ASM_GOTO_WORKAROUND_H
|
|
+#define __ASM_GOTO_WORKAROUND_H
|
|
+
|
|
+/* this will bring in asm_volatile_goto macro definition
|
|
+ * if enabled by compiler and config options.
|
|
+ */
|
|
+#include <linux/types.h>
|
|
+
|
|
+#ifdef asm_volatile_goto
|
|
+#undef asm_volatile_goto
|
|
+#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
|
|
+#endif
|
|
+
|
|
+#endif
|
|
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
|
|
index e6d7e0fe155b4..96783207de4aa 100644
|
|
--- a/samples/bpf/bpf_load.c
|
|
+++ b/samples/bpf/bpf_load.c
|
|
@@ -54,6 +54,23 @@ static int populate_prog_array(const char *event, int prog_fd)
|
|
return 0;
|
|
}
|
|
|
|
+static int write_kprobe_events(const char *val)
|
|
+{
|
|
+ int fd, ret, flags;
|
|
+
|
|
+ if ((val != NULL) && (val[0] == '\0'))
|
|
+ flags = O_WRONLY | O_TRUNC;
|
|
+ else
|
|
+ flags = O_WRONLY | O_APPEND;
|
|
+
|
|
+ fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);
|
|
+
|
|
+ ret = write(fd, val, strlen(val));
|
|
+ close(fd);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
|
|
{
|
|
bool is_socket = strncmp(event, "socket", 6) == 0;
|
|
@@ -165,10 +182,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
|
|
|
|
#ifdef __x86_64__
|
|
if (strncmp(event, "sys_", 4) == 0) {
|
|
- snprintf(buf, sizeof(buf),
|
|
- "echo '%c:__x64_%s __x64_%s' >> /sys/kernel/debug/tracing/kprobe_events",
|
|
- is_kprobe ? 'p' : 'r', event, event);
|
|
- err = system(buf);
|
|
+ snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s",
|
|
+ is_kprobe ? 'p' : 'r', event, event);
|
|
+ err = write_kprobe_events(buf);
|
|
if (err >= 0) {
|
|
need_normal_check = false;
|
|
event_prefix = "__x64_";
|
|
@@ -176,10 +192,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
|
|
}
|
|
#endif
|
|
if (need_normal_check) {
|
|
- snprintf(buf, sizeof(buf),
|
|
- "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
|
|
- is_kprobe ? 'p' : 'r', event, event);
|
|
- err = system(buf);
|
|
+ snprintf(buf, sizeof(buf), "%c:%s %s",
|
|
+ is_kprobe ? 'p' : 'r', event, event);
|
|
+ err = write_kprobe_events(buf);
|
|
if (err < 0) {
|
|
printf("failed to create kprobe '%s' error '%s'\n",
|
|
event, strerror(errno));
|
|
@@ -519,7 +534,7 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
|
|
return 1;
|
|
|
|
/* clear all kprobes */
|
|
- i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");
|
|
+ i = write_kprobe_events("");
|
|
|
|
/* scan over all elf sections to get license and map info */
|
|
for (i = 1; i < ehdr.e_shnum; i++) {
|
|
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
|
|
index 49b13553eaaec..e8f1bd6b29b12 100644
|
|
--- a/samples/livepatch/livepatch-shadow-fix1.c
|
|
+++ b/samples/livepatch/livepatch-shadow-fix1.c
|
|
@@ -89,6 +89,11 @@ struct dummy *livepatch_fix1_dummy_alloc(void)
|
|
* pointer to handle resource release.
|
|
*/
|
|
leak = kzalloc(sizeof(int), GFP_KERNEL);
|
|
+ if (!leak) {
|
|
+ kfree(d);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
|
|
shadow_leak_ctor, leak);
|
|
|
|
diff --git a/samples/livepatch/livepatch-shadow-mod.c b/samples/livepatch/livepatch-shadow-mod.c
|
|
index 4c54b250332da..4aa8a88d3cd63 100644
|
|
--- a/samples/livepatch/livepatch-shadow-mod.c
|
|
+++ b/samples/livepatch/livepatch-shadow-mod.c
|
|
@@ -118,6 +118,10 @@ noinline struct dummy *dummy_alloc(void)
|
|
|
|
/* Oops, forgot to save leak! */
|
|
leak = kzalloc(sizeof(int), GFP_KERNEL);
|
|
+ if (!leak) {
|
|
+ kfree(d);
|
|
+ return NULL;
|
|
+ }
|
|
|
|
pr_info("%s: dummy @ %p, expires @ %lx\n",
|
|
__func__, d, d->jiffies_expire);
|
|
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
|
|
index 33e67bd1dc343..32234481ad7db 100644
|
|
--- a/samples/mei/mei-amt-version.c
|
|
+++ b/samples/mei/mei-amt-version.c
|
|
@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
|
|
|
|
me->verbose = verbose;
|
|
|
|
- me->fd = open("/dev/mei", O_RDWR);
|
|
+ me->fd = open("/dev/mei0", O_RDWR);
|
|
if (me->fd == -1) {
|
|
mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
|
|
goto err;
|
|
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
|
|
index 3d09844405c92..b8c866193ae69 100644
|
|
--- a/scripts/Kbuild.include
|
|
+++ b/scripts/Kbuild.include
|
|
@@ -262,9 +262,8 @@ ifndef CONFIG_TRIM_UNUSED_KSYMS
|
|
|
|
cmd_and_fixdep = \
|
|
$(echo-cmd) $(cmd_$(1)); \
|
|
- scripts/basic/fixdep $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp;\
|
|
- rm -f $(depfile); \
|
|
- mv -f $(dot-target).tmp $(dot-target).cmd;
|
|
+ scripts/basic/fixdep $(depfile) $@ '$(make-cmd)' > $(dot-target).cmd;\
|
|
+ rm -f $(depfile);
|
|
|
|
else
|
|
|
|
@@ -287,9 +286,8 @@ cmd_and_fixdep = \
|
|
$(echo-cmd) $(cmd_$(1)); \
|
|
$(ksym_dep_filter) | \
|
|
scripts/basic/fixdep -e $(depfile) $@ '$(make-cmd)' \
|
|
- > $(dot-target).tmp; \
|
|
- rm -f $(depfile); \
|
|
- mv -f $(dot-target).tmp $(dot-target).cmd;
|
|
+ > $(dot-target).cmd; \
|
|
+ rm -f $(depfile);
|
|
|
|
endif
|
|
|
|
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
|
|
index 6a6be9f440cf9..1d56f181b9175 100644
|
|
--- a/scripts/Makefile.build
|
|
+++ b/scripts/Makefile.build
|
|
@@ -527,18 +527,16 @@ FORCE:
|
|
# optimization, we don't need to read them if the target does not
|
|
# exist, we will rebuild anyway in that case.
|
|
|
|
-cmd_files := $(wildcard $(foreach f,$(sort $(targets)),$(dir $(f)).$(notdir $(f)).cmd))
|
|
+existing-targets := $(wildcard $(sort $(targets)))
|
|
|
|
-ifneq ($(cmd_files),)
|
|
- include $(cmd_files)
|
|
-endif
|
|
+-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
|
|
|
|
ifneq ($(KBUILD_SRC),)
|
|
# Create directories for object files if they do not exist
|
|
obj-dirs := $(sort $(obj) $(patsubst %/,%, $(dir $(targets))))
|
|
-# If cmd_files exist, their directories apparently exist. Skip mkdir.
|
|
-exist-dirs := $(sort $(patsubst %/,%, $(dir $(cmd_files))))
|
|
-obj-dirs := $(strip $(filter-out $(exist-dirs), $(obj-dirs)))
|
|
+# If targets exist, their directories apparently exist. Skip mkdir.
|
|
+existing-dirs := $(sort $(patsubst %/,%, $(dir $(existing-targets))))
|
|
+obj-dirs := $(strip $(filter-out $(existing-dirs), $(obj-dirs)))
|
|
ifneq ($(obj-dirs),)
|
|
$(shell mkdir -p $(obj-dirs))
|
|
endif
|
|
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
|
|
index 64220e36ce3b9..98a7d63a723e3 100755
|
|
--- a/scripts/decode_stacktrace.sh
|
|
+++ b/scripts/decode_stacktrace.sh
|
|
@@ -78,7 +78,7 @@ parse_symbol() {
|
|
fi
|
|
|
|
# Strip out the base of the path
|
|
- code=${code//$basepath/""}
|
|
+ code=${code//^$basepath/""}
|
|
|
|
# In the case of inlines, move everything to same line
|
|
code=${code//$'\n'/' '}
|
|
diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
|
|
index 086d27223c0cf..0aebd7565b032 100644
|
|
--- a/scripts/gdb/linux/proc.py
|
|
+++ b/scripts/gdb/linux/proc.py
|
|
@@ -41,7 +41,7 @@ class LxVersion(gdb.Command):
|
|
|
|
def invoke(self, arg, from_tty):
|
|
# linux_banner should contain a newline
|
|
- gdb.write(gdb.parse_and_eval("linux_banner").string())
|
|
+ gdb.write(gdb.parse_and_eval("(char *)linux_banner").string())
|
|
|
|
LxVersion()
|
|
|
|
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
|
|
index 25bd2b89fe3f3..c2f577d719647 100644
|
|
--- a/scripts/kconfig/zconf.l
|
|
+++ b/scripts/kconfig/zconf.l
|
|
@@ -73,7 +73,7 @@ static void warn_ignored_character(char chr)
|
|
{
|
|
fprintf(stderr,
|
|
"%s:%d:warning: ignoring unsupported character '%c'\n",
|
|
- zconf_curname(), zconf_lineno(), chr);
|
|
+ current_file->name, yylineno, chr);
|
|
}
|
|
%}
|
|
|
|
@@ -221,6 +221,8 @@ n [A-Za-z0-9_-]
|
|
}
|
|
<<EOF>> {
|
|
BEGIN(INITIAL);
|
|
+ yylval.string = text;
|
|
+ return T_WORD_QUOTE;
|
|
}
|
|
}
|
|
|
|
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
|
|
index 0d998c54564df..5a77efd39b3fa 100644
|
|
--- a/scripts/mod/modpost.c
|
|
+++ b/scripts/mod/modpost.c
|
|
@@ -1204,6 +1204,30 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
|
|
return 1;
|
|
}
|
|
|
|
+static inline int is_arm_mapping_symbol(const char *str)
|
|
+{
|
|
+ return str[0] == '$' && strchr("axtd", str[1])
|
|
+ && (str[2] == '\0' || str[2] == '.');
|
|
+}
|
|
+
|
|
+/*
|
|
+ * If there's no name there, ignore it; likewise, ignore it if it's
|
|
+ * one of the magic symbols emitted used by current ARM tools.
|
|
+ *
|
|
+ * Otherwise if find_symbols_between() returns those symbols, they'll
|
|
+ * fail the whitelist tests and cause lots of false alarms ... fixable
|
|
+ * only by merging __exit and __init sections into __text, bloating
|
|
+ * the kernel (which is especially evil on embedded platforms).
|
|
+ */
|
|
+static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
|
|
+{
|
|
+ const char *name = elf->strtab + sym->st_name;
|
|
+
|
|
+ if (!name || !strlen(name))
|
|
+ return 0;
|
|
+ return !is_arm_mapping_symbol(name);
|
|
+}
|
|
+
|
|
/**
|
|
* Find symbol based on relocation record info.
|
|
* In some cases the symbol supplied is a valid symbol so
|
|
@@ -1229,6 +1253,8 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
|
|
continue;
|
|
if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
|
|
continue;
|
|
+ if (!is_valid_name(elf, sym))
|
|
+ continue;
|
|
if (sym->st_value == addr)
|
|
return sym;
|
|
/* Find a symbol nearby - addr are maybe negative */
|
|
@@ -1247,30 +1273,6 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
|
|
return NULL;
|
|
}
|
|
|
|
-static inline int is_arm_mapping_symbol(const char *str)
|
|
-{
|
|
- return str[0] == '$' && strchr("axtd", str[1])
|
|
- && (str[2] == '\0' || str[2] == '.');
|
|
-}
|
|
-
|
|
-/*
|
|
- * If there's no name there, ignore it; likewise, ignore it if it's
|
|
- * one of the magic symbols emitted used by current ARM tools.
|
|
- *
|
|
- * Otherwise if find_symbols_between() returns those symbols, they'll
|
|
- * fail the whitelist tests and cause lots of false alarms ... fixable
|
|
- * only by merging __exit and __init sections into __text, bloating
|
|
- * the kernel (which is especially evil on embedded platforms).
|
|
- */
|
|
-static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
|
|
-{
|
|
- const char *name = elf->strtab + sym->st_name;
|
|
-
|
|
- if (!name || !strlen(name))
|
|
- return 0;
|
|
- return !is_arm_mapping_symbol(name);
|
|
-}
|
|
-
|
|
/*
|
|
* Find symbols before or equal addr and after addr - in the section sec.
|
|
* If we find two symbols with equal offset prefer one with a valid name.
|
|
@@ -2157,7 +2159,7 @@ static void add_intree_flag(struct buffer *b, int is_intree)
|
|
/* Cannot check for assembler */
|
|
static void add_retpoline(struct buffer *b)
|
|
{
|
|
- buf_printf(b, "\n#ifdef RETPOLINE\n");
|
|
+ buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n");
|
|
buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
|
|
buf_printf(b, "#endif\n");
|
|
}
|
|
diff --git a/security/keys/key.c b/security/keys/key.c
|
|
index d97c9394b5dd4..249a6da4d2770 100644
|
|
--- a/security/keys/key.c
|
|
+++ b/security/keys/key.c
|
|
@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
|
|
|
|
spin_lock(&user->lock);
|
|
if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
|
|
- if (user->qnkeys + 1 >= maxkeys ||
|
|
- user->qnbytes + quotalen >= maxbytes ||
|
|
+ if (user->qnkeys + 1 > maxkeys ||
|
|
+ user->qnbytes + quotalen > maxbytes ||
|
|
user->qnbytes + quotalen < user->qnbytes)
|
|
goto no_quota;
|
|
}
|
|
diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
|
|
index 70e65a2ff2073..8bdea5abad118 100644
|
|
--- a/security/keys/keyctl_pkey.c
|
|
+++ b/security/keys/keyctl_pkey.c
|
|
@@ -50,6 +50,8 @@ static int keyctl_pkey_params_parse(struct kernel_pkey_params *params)
|
|
if (*p == '\0' || *p == ' ' || *p == '\t')
|
|
continue;
|
|
token = match_token(p, param_keys, args);
|
|
+ if (token == Opt_err)
|
|
+ return -EINVAL;
|
|
if (__test_and_set_bit(token, &token_mask))
|
|
return -EINVAL;
|
|
q = args[0].from;
|
|
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
|
|
index 41bcf57e96f21..99a55145ddcd2 100644
|
|
--- a/security/keys/keyring.c
|
|
+++ b/security/keys/keyring.c
|
|
@@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
|
|
BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
|
|
(ctx->flags & STATE_CHECKS) == STATE_CHECKS);
|
|
|
|
- if (ctx->index_key.description)
|
|
- ctx->index_key.desc_len = strlen(ctx->index_key.description);
|
|
-
|
|
/* Check to see if this top-level keyring is what we are looking for
|
|
* and whether it is valid or not.
|
|
*/
|
|
@@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
|
|
struct keyring_search_context ctx = {
|
|
.index_key.type = type,
|
|
.index_key.description = description,
|
|
+ .index_key.desc_len = strlen(description),
|
|
.cred = current_cred(),
|
|
.match_data.cmp = key_default_cmp,
|
|
.match_data.raw_data = description,
|
|
diff --git a/security/keys/proc.c b/security/keys/proc.c
|
|
index 5af2934965d80..d38be9db2cc07 100644
|
|
--- a/security/keys/proc.c
|
|
+++ b/security/keys/proc.c
|
|
@@ -166,8 +166,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
|
|
int rc;
|
|
|
|
struct keyring_search_context ctx = {
|
|
- .index_key.type = key->type,
|
|
- .index_key.description = key->description,
|
|
+ .index_key = key->index_key,
|
|
.cred = m->file->f_cred,
|
|
.match_data.cmp = lookup_user_key_possessed,
|
|
.match_data.raw_data = key,
|
|
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
|
|
index 114f7408feee6..7385536986497 100644
|
|
--- a/security/keys/request_key.c
|
|
+++ b/security/keys/request_key.c
|
|
@@ -545,6 +545,7 @@ struct key *request_key_and_link(struct key_type *type,
|
|
struct keyring_search_context ctx = {
|
|
.index_key.type = type,
|
|
.index_key.description = description,
|
|
+ .index_key.desc_len = strlen(description),
|
|
.cred = current_cred(),
|
|
.match_data.cmp = key_default_cmp,
|
|
.match_data.raw_data = description,
|
|
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
|
|
index 424e1d90412ea..6797843154f03 100644
|
|
--- a/security/keys/request_key_auth.c
|
|
+++ b/security/keys/request_key_auth.c
|
|
@@ -246,7 +246,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
|
|
struct key *authkey;
|
|
key_ref_t authkey_ref;
|
|
|
|
- sprintf(description, "%x", target_id);
|
|
+ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
|
|
|
|
authkey_ref = search_process_keyrings(&ctx);
|
|
|
|
diff --git a/security/security.c b/security/security.c
|
|
index 04d173eb93f6e..414a45d70c7be 100644
|
|
--- a/security/security.c
|
|
+++ b/security/security.c
|
|
@@ -1014,6 +1014,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
|
|
|
|
void security_cred_free(struct cred *cred)
|
|
{
|
|
+ /*
|
|
+ * There is a failure case in prepare_creds() that
|
|
+ * may result in a call here with ->security being NULL.
|
|
+ */
|
|
+ if (unlikely(cred->security == NULL))
|
|
+ return;
|
|
+
|
|
call_void_hook(cred_free, cred);
|
|
}
|
|
|
|
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
|
|
index a67459eb62d5c..0f27db6d94a92 100644
|
|
--- a/security/selinux/hooks.c
|
|
+++ b/security/selinux/hooks.c
|
|
@@ -2934,7 +2934,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
|
|
return rc;
|
|
|
|
/* Allow all mounts performed by the kernel */
|
|
- if (flags & MS_KERNMOUNT)
|
|
+ if (flags & (MS_KERNMOUNT | MS_SUBMOUNT))
|
|
return 0;
|
|
|
|
ad.type = LSM_AUDIT_DATA_DENTRY;
|
|
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
|
|
index f4eadd3f73500..d31a52e56b9ec 100644
|
|
--- a/security/selinux/ss/policydb.c
|
|
+++ b/security/selinux/ss/policydb.c
|
|
@@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p)
|
|
kfree(key);
|
|
if (datum) {
|
|
levdatum = datum;
|
|
- ebitmap_destroy(&levdatum->level->cat);
|
|
+ if (levdatum->level)
|
|
+ ebitmap_destroy(&levdatum->level->cat);
|
|
kfree(levdatum->level);
|
|
}
|
|
kfree(datum);
|
|
@@ -2108,6 +2109,7 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
|
|
{
|
|
int i, j, rc;
|
|
u32 nel, len;
|
|
+ __be64 prefixbuf[1];
|
|
__le32 buf[3];
|
|
struct ocontext *l, *c;
|
|
u32 nodebuf[8];
|
|
@@ -2217,21 +2219,30 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
|
|
goto out;
|
|
break;
|
|
}
|
|
- case OCON_IBPKEY:
|
|
- rc = next_entry(nodebuf, fp, sizeof(u32) * 4);
|
|
+ case OCON_IBPKEY: {
|
|
+ u32 pkey_lo, pkey_hi;
|
|
+
|
|
+ rc = next_entry(prefixbuf, fp, sizeof(u64));
|
|
+ if (rc)
|
|
+ goto out;
|
|
+
|
|
+ /* we need to have subnet_prefix in CPU order */
|
|
+ c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]);
|
|
+
|
|
+ rc = next_entry(buf, fp, sizeof(u32) * 2);
|
|
if (rc)
|
|
goto out;
|
|
|
|
- c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf));
|
|
+ pkey_lo = le32_to_cpu(buf[0]);
|
|
+ pkey_hi = le32_to_cpu(buf[1]);
|
|
|
|
- if (nodebuf[2] > 0xffff ||
|
|
- nodebuf[3] > 0xffff) {
|
|
+ if (pkey_lo > U16_MAX || pkey_hi > U16_MAX) {
|
|
rc = -EINVAL;
|
|
goto out;
|
|
}
|
|
|
|
- c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]);
|
|
- c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]);
|
|
+ c->u.ibpkey.low_pkey = pkey_lo;
|
|
+ c->u.ibpkey.high_pkey = pkey_hi;
|
|
|
|
rc = context_read_and_validate(&c->context[0],
|
|
p,
|
|
@@ -2239,7 +2250,10 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
|
|
if (rc)
|
|
goto out;
|
|
break;
|
|
- case OCON_IBENDPORT:
|
|
+ }
|
|
+ case OCON_IBENDPORT: {
|
|
+ u32 port;
|
|
+
|
|
rc = next_entry(buf, fp, sizeof(u32) * 2);
|
|
if (rc)
|
|
goto out;
|
|
@@ -2249,12 +2263,13 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
|
|
if (rc)
|
|
goto out;
|
|
|
|
- if (buf[1] > 0xff || buf[1] == 0) {
|
|
+ port = le32_to_cpu(buf[1]);
|
|
+ if (port > U8_MAX || port == 0) {
|
|
rc = -EINVAL;
|
|
goto out;
|
|
}
|
|
|
|
- c->u.ibendport.port = le32_to_cpu(buf[1]);
|
|
+ c->u.ibendport.port = port;
|
|
|
|
rc = context_read_and_validate(&c->context[0],
|
|
p,
|
|
@@ -2262,7 +2277,8 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
|
|
if (rc)
|
|
goto out;
|
|
break;
|
|
- }
|
|
+ } /* end case */
|
|
+ } /* end switch */
|
|
}
|
|
}
|
|
rc = 0;
|
|
@@ -3105,6 +3121,7 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
|
|
{
|
|
unsigned int i, j, rc;
|
|
size_t nel, len;
|
|
+ __be64 prefixbuf[1];
|
|
__le32 buf[3];
|
|
u32 nodebuf[8];
|
|
struct ocontext *c;
|
|
@@ -3192,12 +3209,17 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
|
|
return rc;
|
|
break;
|
|
case OCON_IBPKEY:
|
|
- *((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix);
|
|
+ /* subnet_prefix is in CPU order */
|
|
+ prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix);
|
|
|
|
- nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey);
|
|
- nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey);
|
|
+ rc = put_entry(prefixbuf, sizeof(u64), 1, fp);
|
|
+ if (rc)
|
|
+ return rc;
|
|
+
|
|
+ buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey);
|
|
+ buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey);
|
|
|
|
- rc = put_entry(nodebuf, sizeof(u32), 4, fp);
|
|
+ rc = put_entry(buf, sizeof(u32), 2, fp);
|
|
if (rc)
|
|
return rc;
|
|
rc = context_write(p, &c->context[0], fp);
|
|
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
|
|
index 81fb4c1631e96..cd720c06b78ca 100644
|
|
--- a/security/smack/smack_lsm.c
|
|
+++ b/security/smack/smack_lsm.c
|
|
@@ -4333,6 +4333,12 @@ static int smack_key_permission(key_ref_t key_ref,
|
|
int request = 0;
|
|
int rc;
|
|
|
|
+ /*
|
|
+ * Validate requested permissions
|
|
+ */
|
|
+ if (perm & ~KEY_NEED_ALL)
|
|
+ return -EINVAL;
|
|
+
|
|
keyp = key_ref_to_ptr(key_ref);
|
|
if (keyp == NULL)
|
|
return -EINVAL;
|
|
@@ -4356,10 +4362,10 @@ static int smack_key_permission(key_ref_t key_ref,
|
|
ad.a.u.key_struct.key = keyp->serial;
|
|
ad.a.u.key_struct.key_desc = keyp->description;
|
|
#endif
|
|
- if (perm & KEY_NEED_READ)
|
|
- request = MAY_READ;
|
|
+ if (perm & (KEY_NEED_READ | KEY_NEED_SEARCH | KEY_NEED_VIEW))
|
|
+ request |= MAY_READ;
|
|
if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR))
|
|
- request = MAY_WRITE;
|
|
+ request |= MAY_WRITE;
|
|
rc = smk_access(tkp, keyp->security, request, &ad);
|
|
rc = smk_bu_note("key access", tkp, keyp->security, request, rc);
|
|
return rc;
|
|
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
|
|
index ffda91a4a1aaf..02514fe558b41 100644
|
|
--- a/security/yama/yama_lsm.c
|
|
+++ b/security/yama/yama_lsm.c
|
|
@@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
|
|
break;
|
|
case YAMA_SCOPE_RELATIONAL:
|
|
rcu_read_lock();
|
|
- if (!task_is_descendant(current, child) &&
|
|
+ if (!pid_alive(child))
|
|
+ rc = -EPERM;
|
|
+ if (!rc && !task_is_descendant(current, child) &&
|
|
!ptracer_exception_found(current, child) &&
|
|
!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
|
|
rc = -EPERM;
|
|
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
|
|
index fdb9b92fc8d6b..01b9d62eef14d 100644
|
|
--- a/sound/core/pcm.c
|
|
+++ b/sound/core/pcm.c
|
|
@@ -25,6 +25,7 @@
|
|
#include <linux/time.h>
|
|
#include <linux/mutex.h>
|
|
#include <linux/device.h>
|
|
+#include <linux/nospec.h>
|
|
#include <sound/core.h>
|
|
#include <sound/minors.h>
|
|
#include <sound/pcm.h>
|
|
@@ -129,6 +130,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
|
|
return -EFAULT;
|
|
if (stream < 0 || stream > 1)
|
|
return -EINVAL;
|
|
+ stream = array_index_nospec(stream, 2);
|
|
if (get_user(subdevice, &info->subdevice))
|
|
return -EFAULT;
|
|
mutex_lock(®ister_mutex);
|
|
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
|
|
index 40013b26f6719..6c0b30391ba99 100644
|
|
--- a/sound/core/pcm_lib.c
|
|
+++ b/sound/core/pcm_lib.c
|
|
@@ -2177,16 +2177,11 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
|
|
snd_pcm_update_hw_ptr(substream);
|
|
|
|
if (!is_playback &&
|
|
- runtime->status->state == SNDRV_PCM_STATE_PREPARED) {
|
|
- if (size >= runtime->start_threshold) {
|
|
- err = snd_pcm_start(substream);
|
|
- if (err < 0)
|
|
- goto _end_unlock;
|
|
- } else {
|
|
- /* nothing to do */
|
|
- err = 0;
|
|
+ runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
|
|
+ size >= runtime->start_threshold) {
|
|
+ err = snd_pcm_start(substream);
|
|
+ if (err < 0)
|
|
goto _end_unlock;
|
|
- }
|
|
}
|
|
|
|
avail = snd_pcm_avail(substream);
|
|
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
|
|
index 8a146b0392761..44cedb65bb88b 100644
|
|
--- a/sound/firewire/Kconfig
|
|
+++ b/sound/firewire/Kconfig
|
|
@@ -41,6 +41,7 @@ config SND_OXFW
|
|
* Mackie(Loud) U.420/U.420d
|
|
* TASCAM FireOne
|
|
* Stanton Controllers & Systems 1 Deck/Mixer
|
|
+ * APOGEE duet FireWire
|
|
|
|
To compile this driver as a module, choose M here: the module
|
|
will be called snd-oxfw.
|
|
diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
|
|
index 54cdd4ffa9ceb..ac20acf48fc69 100644
|
|
--- a/sound/firewire/amdtp-stream-trace.h
|
|
+++ b/sound/firewire/amdtp-stream-trace.h
|
|
@@ -131,7 +131,7 @@ TRACE_EVENT(in_packet_without_header,
|
|
__entry->index = index;
|
|
),
|
|
TP_printk(
|
|
- "%02u %04u %04x %04x %02d %03u %3u %3u %02u %01u %02u",
|
|
+ "%02u %04u %04x %04x %02d %03u %02u %03u %02u %01u %02u",
|
|
__entry->second,
|
|
__entry->cycle,
|
|
__entry->src,
|
|
@@ -169,7 +169,7 @@ TRACE_EVENT(out_packet_without_header,
|
|
__entry->dest = fw_parent_device(s->unit)->node_id;
|
|
__entry->payload_quadlets = payload_length / 4;
|
|
__entry->data_blocks = data_blocks,
|
|
- __entry->data_blocks = s->data_block_counter,
|
|
+ __entry->data_block_counter = s->data_block_counter,
|
|
__entry->packet_index = s->packet_index;
|
|
__entry->irq = !!in_interrupt();
|
|
__entry->index = index;
|
|
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
|
|
index 9be76c808fccf..3ada55ed5381d 100644
|
|
--- a/sound/firewire/amdtp-stream.c
|
|
+++ b/sound/firewire/amdtp-stream.c
|
|
@@ -654,15 +654,17 @@ end:
|
|
}
|
|
|
|
static int handle_in_packet_without_header(struct amdtp_stream *s,
|
|
- unsigned int payload_quadlets, unsigned int cycle,
|
|
+ unsigned int payload_length, unsigned int cycle,
|
|
unsigned int index)
|
|
{
|
|
__be32 *buffer;
|
|
+ unsigned int payload_quadlets;
|
|
unsigned int data_blocks;
|
|
struct snd_pcm_substream *pcm;
|
|
unsigned int pcm_frames;
|
|
|
|
buffer = s->buffer.packets[s->packet_index].buffer;
|
|
+ payload_quadlets = payload_length / 4;
|
|
data_blocks = payload_quadlets / s->data_block_quadlets;
|
|
|
|
trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
|
|
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
|
|
index 672d134884547..d91874275d2c3 100644
|
|
--- a/sound/firewire/bebob/bebob.c
|
|
+++ b/sound/firewire/bebob/bebob.c
|
|
@@ -408,7 +408,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
|
|
/* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */
|
|
SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal),
|
|
/* Apogee Electronics, Ensemble */
|
|
- SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal),
|
|
+ SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
|
|
/* ESI, Quatafire610 */
|
|
SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
|
|
/* AcousticReality, eARMasterOne */
|
|
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
|
|
index 654a50319198e..4d191172fe3fb 100644
|
|
--- a/sound/firewire/fireface/ff-protocol-ff400.c
|
|
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
|
|
@@ -152,7 +152,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
|
|
if (reg == NULL)
|
|
return -ENOMEM;
|
|
|
|
- if (enable) {
|
|
+ if (!enable) {
|
|
/*
|
|
* Each quadlet is corresponding to data channels in a data
|
|
* blocks in reverse order. Precisely, quadlets for available
|
|
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
|
|
index afb78d90384b8..3d27f3378d5dd 100644
|
|
--- a/sound/firewire/oxfw/oxfw.c
|
|
+++ b/sound/firewire/oxfw/oxfw.c
|
|
@@ -20,6 +20,7 @@
|
|
#define VENDOR_LACIE 0x00d04b
|
|
#define VENDOR_TASCAM 0x00022e
|
|
#define OUI_STANTON 0x001260
|
|
+#define OUI_APOGEE 0x0003db
|
|
|
|
#define MODEL_SATELLITE 0x00200f
|
|
|
|
@@ -397,6 +398,13 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
|
|
.vendor_id = OUI_STANTON,
|
|
.model_id = 0x002000,
|
|
},
|
|
+ // APOGEE, duet FireWire
|
|
+ {
|
|
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
|
|
+ IEEE1394_MATCH_MODEL_ID,
|
|
+ .vendor_id = OUI_APOGEE,
|
|
+ .model_id = 0x01dddd,
|
|
+ },
|
|
{ }
|
|
};
|
|
MODULE_DEVICE_TABLE(ieee1394, oxfw_id_table);
|
|
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
|
|
index 598d140bb7cb7..5fc497c6d7384 100644
|
|
--- a/sound/pci/cs46xx/dsp_spos.c
|
|
+++ b/sound/pci/cs46xx/dsp_spos.c
|
|
@@ -903,6 +903,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip)
|
|
struct dsp_spos_instance * ins = chip->dsp_spos_instance;
|
|
int i;
|
|
|
|
+ if (!ins)
|
|
+ return 0;
|
|
+
|
|
snd_info_free_entry(ins->proc_sym_info_entry);
|
|
ins->proc_sym_info_entry = NULL;
|
|
|
|
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
|
|
index 6ebe817801ea9..1f25e6d029d82 100644
|
|
--- a/sound/pci/emu10k1/emufx.c
|
|
+++ b/sound/pci/emu10k1/emufx.c
|
|
@@ -36,6 +36,7 @@
|
|
#include <linux/init.h>
|
|
#include <linux/mutex.h>
|
|
#include <linux/moduleparam.h>
|
|
+#include <linux/nospec.h>
|
|
|
|
#include <sound/core.h>
|
|
#include <sound/tlv.h>
|
|
@@ -1026,6 +1027,8 @@ static int snd_emu10k1_ipcm_poke(struct snd_emu10k1 *emu,
|
|
|
|
if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
|
|
return -EINVAL;
|
|
+ ipcm->substream = array_index_nospec(ipcm->substream,
|
|
+ EMU10K1_FX8010_PCM_COUNT);
|
|
if (ipcm->channels > 32)
|
|
return -EINVAL;
|
|
pcm = &emu->fx8010.pcm[ipcm->substream];
|
|
@@ -1072,6 +1075,8 @@ static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu,
|
|
|
|
if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
|
|
return -EINVAL;
|
|
+ ipcm->substream = array_index_nospec(ipcm->substream,
|
|
+ EMU10K1_FX8010_PCM_COUNT);
|
|
pcm = &emu->fx8010.pcm[ipcm->substream];
|
|
mutex_lock(&emu->fx8010.lock);
|
|
spin_lock_irq(&emu->reg_lock);
|
|
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
|
|
index 9174f1b3a987f..1ec706ced75ca 100644
|
|
--- a/sound/pci/hda/hda_bind.c
|
|
+++ b/sound/pci/hda/hda_bind.c
|
|
@@ -115,7 +115,8 @@ static int hda_codec_driver_probe(struct device *dev)
|
|
err = snd_hda_codec_build_controls(codec);
|
|
if (err < 0)
|
|
goto error_module;
|
|
- if (codec->card->registered) {
|
|
+ /* only register after the bus probe finished; otherwise it's racy */
|
|
+ if (!codec->bus->bus_probing && codec->card->registered) {
|
|
err = snd_card_register(codec->card);
|
|
if (err < 0)
|
|
goto error_module;
|
|
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
|
|
index 76f03abd15ab7..356fda583847c 100644
|
|
--- a/sound/pci/hda/hda_intel.c
|
|
+++ b/sound/pci/hda/hda_intel.c
|
|
@@ -2228,6 +2228,7 @@ static int azx_probe_continue(struct azx *chip)
|
|
int dev = chip->dev_index;
|
|
int err;
|
|
|
|
+ to_hda_bus(bus)->bus_probing = 1;
|
|
hda->probe_continued = 1;
|
|
|
|
/* bind with i915 if needed */
|
|
@@ -2323,6 +2324,7 @@ i915_power_fail:
|
|
if (err < 0)
|
|
hda->init_failed = 1;
|
|
complete_all(&hda->probe_wait);
|
|
+ to_hda_bus(bus)->bus_probing = 0;
|
|
return err;
|
|
}
|
|
|
|
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
|
|
index dd7d4242d6d2a..86841d46a8fc0 100644
|
|
--- a/sound/pci/hda/hda_tegra.c
|
|
+++ b/sound/pci/hda/hda_tegra.c
|
|
@@ -233,10 +233,12 @@ static int hda_tegra_suspend(struct device *dev)
|
|
struct snd_card *card = dev_get_drvdata(dev);
|
|
struct azx *chip = card->private_data;
|
|
struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
|
|
+ struct hdac_bus *bus = azx_bus(chip);
|
|
|
|
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
|
|
|
|
azx_stop_chip(chip);
|
|
+ synchronize_irq(bus->irq);
|
|
azx_enter_link_reset(chip);
|
|
hda_tegra_disable_clocks(hda);
|
|
|
|
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
|
|
index 950e02e717669..a4ee7656d9ee9 100644
|
|
--- a/sound/pci/hda/patch_conexant.c
|
|
+++ b/sound/pci/hda/patch_conexant.c
|
|
@@ -923,6 +923,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
|
|
SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
|
|
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
|
|
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
|
|
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
|
|
+ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
|
|
SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
|
|
SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
|
|
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
|
|
@@ -930,6 +932,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
|
|
SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
|
|
SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
|
|
SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
|
|
+ SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
|
|
SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
|
|
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
|
|
SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
|
|
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
|
|
index 15021c8393728..1bddfa7dc2169 100644
|
|
--- a/sound/pci/hda/patch_realtek.c
|
|
+++ b/sound/pci/hda/patch_realtek.c
|
|
@@ -117,6 +117,7 @@ struct alc_spec {
|
|
int codec_variant; /* flag for other variants */
|
|
unsigned int has_alc5505_dsp:1;
|
|
unsigned int no_depop_delay:1;
|
|
+ unsigned int done_hp_init:1;
|
|
|
|
/* for PLL fix */
|
|
hda_nid_t pll_nid;
|
|
@@ -514,6 +515,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
|
|
}
|
|
}
|
|
|
|
+/* get a primary headphone pin if available */
|
|
+static hda_nid_t alc_get_hp_pin(struct alc_spec *spec)
|
|
+{
|
|
+ if (spec->gen.autocfg.hp_pins[0])
|
|
+ return spec->gen.autocfg.hp_pins[0];
|
|
+ if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
|
|
+ return spec->gen.autocfg.line_out_pins[0];
|
|
+ return 0;
|
|
+}
|
|
|
|
/*
|
|
* Realtek SSID verification
|
|
@@ -724,9 +734,7 @@ do_sku:
|
|
* 15 : 1 --> enable the function "Mute internal speaker
|
|
* when the external headphone out jack is plugged"
|
|
*/
|
|
- if (!spec->gen.autocfg.hp_pins[0] &&
|
|
- !(spec->gen.autocfg.line_out_pins[0] &&
|
|
- spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
|
|
+ if (!alc_get_hp_pin(spec)) {
|
|
hda_nid_t nid;
|
|
tmp = (ass >> 11) & 0x3; /* HP to chassis */
|
|
nid = ports[tmp];
|
|
@@ -1847,6 +1855,8 @@ enum {
|
|
ALC887_FIXUP_BASS_CHMAP,
|
|
ALC1220_FIXUP_GB_DUAL_CODECS,
|
|
ALC1220_FIXUP_CLEVO_P950,
|
|
+ ALC1220_FIXUP_SYSTEM76_ORYP5,
|
|
+ ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
|
|
};
|
|
|
|
static void alc889_fixup_coef(struct hda_codec *codec,
|
|
@@ -2048,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
|
|
snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
|
|
}
|
|
|
|
+static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
|
|
+ const struct hda_fixup *fix, int action);
|
|
+
|
|
+static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
|
|
+ const struct hda_fixup *fix,
|
|
+ int action)
|
|
+{
|
|
+ alc1220_fixup_clevo_p950(codec, fix, action);
|
|
+ alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
|
|
+}
|
|
+
|
|
static const struct hda_fixup alc882_fixups[] = {
|
|
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
|
|
.type = HDA_FIXUP_PINS,
|
|
@@ -2292,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
|
|
.type = HDA_FIXUP_FUNC,
|
|
.v.func = alc1220_fixup_clevo_p950,
|
|
},
|
|
+ [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
|
|
+ .type = HDA_FIXUP_FUNC,
|
|
+ .v.func = alc1220_fixup_system76_oryp5,
|
|
+ },
|
|
+ [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
|
|
+ .type = HDA_FIXUP_PINS,
|
|
+ .v.pins = (const struct hda_pintbl[]) {
|
|
+ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
|
|
+ {}
|
|
+ },
|
|
+ .chained = true,
|
|
+ .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
|
|
+ },
|
|
};
|
|
|
|
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
|
|
@@ -2368,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
|
|
SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
|
|
SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
|
|
+ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
|
|
+ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
|
|
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
|
|
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
|
|
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
|
|
@@ -2958,7 +2994,7 @@ static void alc282_restore_default_value(struct hda_codec *codec)
|
|
static void alc282_init(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
bool hp_pin_sense;
|
|
int coef78;
|
|
|
|
@@ -2995,7 +3031,7 @@ static void alc282_init(struct hda_codec *codec)
|
|
static void alc282_shutup(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
bool hp_pin_sense;
|
|
int coef78;
|
|
|
|
@@ -3073,14 +3109,9 @@ static void alc283_restore_default_value(struct hda_codec *codec)
|
|
static void alc283_init(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
bool hp_pin_sense;
|
|
|
|
- if (!spec->gen.autocfg.hp_outs) {
|
|
- if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
|
|
- hp_pin = spec->gen.autocfg.line_out_pins[0];
|
|
- }
|
|
-
|
|
alc283_restore_default_value(codec);
|
|
|
|
if (!hp_pin)
|
|
@@ -3114,14 +3145,9 @@ static void alc283_init(struct hda_codec *codec)
|
|
static void alc283_shutup(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
bool hp_pin_sense;
|
|
|
|
- if (!spec->gen.autocfg.hp_outs) {
|
|
- if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
|
|
- hp_pin = spec->gen.autocfg.line_out_pins[0];
|
|
- }
|
|
-
|
|
if (!hp_pin) {
|
|
alc269_shutup(codec);
|
|
return;
|
|
@@ -3155,7 +3181,7 @@ static void alc283_shutup(struct hda_codec *codec)
|
|
static void alc256_init(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
bool hp_pin_sense;
|
|
|
|
if (!hp_pin)
|
|
@@ -3191,7 +3217,7 @@ static void alc256_init(struct hda_codec *codec)
|
|
static void alc256_shutup(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
bool hp_pin_sense;
|
|
|
|
if (!hp_pin) {
|
|
@@ -3227,7 +3253,7 @@ static void alc256_shutup(struct hda_codec *codec)
|
|
static void alc225_init(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
bool hp1_pin_sense, hp2_pin_sense;
|
|
|
|
if (!hp_pin)
|
|
@@ -3270,7 +3296,7 @@ static void alc225_init(struct hda_codec *codec)
|
|
static void alc225_shutup(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
bool hp1_pin_sense, hp2_pin_sense;
|
|
|
|
if (!hp_pin) {
|
|
@@ -3314,7 +3340,7 @@ static void alc225_shutup(struct hda_codec *codec)
|
|
static void alc_default_init(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
bool hp_pin_sense;
|
|
|
|
if (!hp_pin)
|
|
@@ -3343,7 +3369,7 @@ static void alc_default_init(struct hda_codec *codec)
|
|
static void alc_default_shutup(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
bool hp_pin_sense;
|
|
|
|
if (!hp_pin) {
|
|
@@ -3372,6 +3398,48 @@ static void alc_default_shutup(struct hda_codec *codec)
|
|
snd_hda_shutup_pins(codec);
|
|
}
|
|
|
|
+static void alc294_hp_init(struct hda_codec *codec)
|
|
+{
|
|
+ struct alc_spec *spec = codec->spec;
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
+ int i, val;
|
|
+
|
|
+ if (!hp_pin)
|
|
+ return;
|
|
+
|
|
+ snd_hda_codec_write(codec, hp_pin, 0,
|
|
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
|
|
+
|
|
+ msleep(100);
|
|
+
|
|
+ snd_hda_codec_write(codec, hp_pin, 0,
|
|
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
|
|
+
|
|
+ alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
|
|
+ alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
|
|
+
|
|
+ /* Wait for depop procedure finish */
|
|
+ val = alc_read_coefex_idx(codec, 0x58, 0x01);
|
|
+ for (i = 0; i < 20 && val & 0x0080; i++) {
|
|
+ msleep(50);
|
|
+ val = alc_read_coefex_idx(codec, 0x58, 0x01);
|
|
+ }
|
|
+ /* Set HP depop to auto mode */
|
|
+ alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
|
|
+ msleep(50);
|
|
+}
|
|
+
|
|
+static void alc294_init(struct hda_codec *codec)
|
|
+{
|
|
+ struct alc_spec *spec = codec->spec;
|
|
+
|
|
+ if (!spec->done_hp_init) {
|
|
+ alc294_hp_init(codec);
|
|
+ spec->done_hp_init = true;
|
|
+ }
|
|
+ alc_default_init(codec);
|
|
+}
|
|
+
|
|
static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
|
|
unsigned int val)
|
|
{
|
|
@@ -4102,6 +4170,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
|
|
case 0x10ec0295:
|
|
case 0x10ec0289:
|
|
case 0x10ec0299:
|
|
+ alc_process_coef_fw(codec, alc225_pre_hsmode);
|
|
alc_process_coef_fw(codec, coef0225);
|
|
break;
|
|
case 0x10ec0867:
|
|
@@ -4736,7 +4805,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
|
|
struct alc_spec *spec = codec->spec;
|
|
|
|
hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
|
|
|
|
int new_headset_mode;
|
|
|
|
@@ -5015,7 +5084,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
|
|
static void alc_shutup_dell_xps13(struct hda_codec *codec)
|
|
{
|
|
struct alc_spec *spec = codec->spec;
|
|
- int hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ int hp_pin = alc_get_hp_pin(spec);
|
|
|
|
/* Prevent pop noises when headphones are plugged in */
|
|
snd_hda_codec_write(codec, hp_pin, 0,
|
|
@@ -5108,7 +5177,7 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
|
|
|
|
if (action == HDA_FIXUP_ACT_PROBE) {
|
|
int mic_pin = find_ext_mic_pin(codec);
|
|
- int hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
+ int hp_pin = alc_get_hp_pin(spec);
|
|
|
|
if (snd_BUG_ON(!mic_pin || !hp_pin))
|
|
return;
|
|
@@ -5380,6 +5449,13 @@ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
|
|
snd_hda_override_wcaps(codec, 0x03, 0);
|
|
}
|
|
|
|
+static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
|
|
+ const struct hda_fixup *fix, int action)
|
|
+{
|
|
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
|
|
+ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
|
|
+}
|
|
+
|
|
/* for hda_fixup_thinkpad_acpi() */
|
|
#include "thinkpad_helper.c"
|
|
|
|
@@ -5492,6 +5568,7 @@ enum {
|
|
ALC293_FIXUP_LENOVO_SPK_NOISE,
|
|
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
|
|
ALC255_FIXUP_DELL_SPK_NOISE,
|
|
+ ALC225_FIXUP_DISABLE_MIC_VREF,
|
|
ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
|
|
ALC295_FIXUP_DISABLE_DAC3,
|
|
ALC280_FIXUP_HP_HEADSET_MIC,
|
|
@@ -5523,6 +5600,8 @@ enum {
|
|
ALC294_FIXUP_ASUS_MIC,
|
|
ALC294_FIXUP_ASUS_HEADSET_MIC,
|
|
ALC294_FIXUP_ASUS_SPK,
|
|
+ ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
|
|
+ ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
|
|
};
|
|
|
|
static const struct hda_fixup alc269_fixups[] = {
|
|
@@ -6191,6 +6270,12 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
.chained = true,
|
|
.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
|
|
},
|
|
+ [ALC225_FIXUP_DISABLE_MIC_VREF] = {
|
|
+ .type = HDA_FIXUP_FUNC,
|
|
+ .v.func = alc_fixup_disable_mic_vref,
|
|
+ .chained = true,
|
|
+ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
|
|
+ },
|
|
[ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
|
|
.type = HDA_FIXUP_VERBS,
|
|
.v.verbs = (const struct hda_verb[]) {
|
|
@@ -6200,7 +6285,7 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
{}
|
|
},
|
|
.chained = true,
|
|
- .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
|
|
+ .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF
|
|
},
|
|
[ALC280_FIXUP_HP_HEADSET_MIC] = {
|
|
.type = HDA_FIXUP_FUNC,
|
|
@@ -6424,7 +6509,7 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
[ALC294_FIXUP_ASUS_HEADSET_MIC] = {
|
|
.type = HDA_FIXUP_PINS,
|
|
.v.pins = (const struct hda_pintbl[]) {
|
|
- { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
|
|
+ { 0x19, 0x01a1103c }, /* use as headset mic */
|
|
{ }
|
|
},
|
|
.chained = true,
|
|
@@ -6441,6 +6526,26 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
.chained = true,
|
|
.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
|
|
},
|
|
+ [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
|
|
+ .type = HDA_FIXUP_PINS,
|
|
+ .v.pins = (const struct hda_pintbl[]) {
|
|
+ { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
|
|
+ { }
|
|
+ },
|
|
+ .chained = true,
|
|
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
|
|
+ },
|
|
+ [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
|
|
+ .type = HDA_FIXUP_VERBS,
|
|
+ .v.verbs = (const struct hda_verb[]) {
|
|
+ /* Disable PCBEEP-IN passthrough */
|
|
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
|
|
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
|
|
+ { }
|
|
+ },
|
|
+ .chained = true,
|
|
+ .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
|
|
+ },
|
|
};
|
|
|
|
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
@@ -6503,6 +6608,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
|
|
SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
|
|
SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
|
|
+ SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
|
|
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
|
|
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
|
|
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
|
|
@@ -6573,6 +6679,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
|
|
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
|
|
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
|
|
+ SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
|
|
SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
|
|
SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
|
|
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
|
|
@@ -6617,6 +6724,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
|
|
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
|
|
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
|
|
+ SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
|
|
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
|
|
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
|
|
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
|
|
@@ -6825,7 +6933,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
|
|
{.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
|
|
{.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
|
|
{.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
|
|
- {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"},
|
|
+ {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
|
|
{.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
|
|
{.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
|
|
{.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
|
|
@@ -7119,7 +7227,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
|
|
{0x12, 0x90a60130},
|
|
{0x19, 0x03a11020},
|
|
{0x21, 0x0321101f}),
|
|
- SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
|
|
+ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
|
|
{0x12, 0x90a60130},
|
|
{0x14, 0x90170110},
|
|
{0x19, 0x04a11040},
|
|
@@ -7271,37 +7379,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
|
|
alc_update_coef_idx(codec, 0x4, 0, 1<<11);
|
|
}
|
|
|
|
-static void alc294_hp_init(struct hda_codec *codec)
|
|
-{
|
|
- struct alc_spec *spec = codec->spec;
|
|
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
|
|
- int i, val;
|
|
-
|
|
- if (!hp_pin)
|
|
- return;
|
|
-
|
|
- snd_hda_codec_write(codec, hp_pin, 0,
|
|
- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
|
|
-
|
|
- msleep(100);
|
|
-
|
|
- snd_hda_codec_write(codec, hp_pin, 0,
|
|
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
|
|
-
|
|
- alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
|
|
- alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
|
|
-
|
|
- /* Wait for depop procedure finish */
|
|
- val = alc_read_coefex_idx(codec, 0x58, 0x01);
|
|
- for (i = 0; i < 20 && val & 0x0080; i++) {
|
|
- msleep(50);
|
|
- val = alc_read_coefex_idx(codec, 0x58, 0x01);
|
|
- }
|
|
- /* Set HP depop to auto mode */
|
|
- alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
|
|
- msleep(50);
|
|
-}
|
|
-
|
|
/*
|
|
*/
|
|
static int patch_alc269(struct hda_codec *codec)
|
|
@@ -7427,7 +7504,7 @@ static int patch_alc269(struct hda_codec *codec)
|
|
spec->codec_variant = ALC269_TYPE_ALC294;
|
|
spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
|
|
alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
|
|
- alc294_hp_init(codec);
|
|
+ spec->init_hook = alc294_init;
|
|
break;
|
|
case 0x10ec0300:
|
|
spec->codec_variant = ALC269_TYPE_ALC300;
|
|
@@ -7439,7 +7516,7 @@ static int patch_alc269(struct hda_codec *codec)
|
|
spec->codec_variant = ALC269_TYPE_ALC700;
|
|
spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
|
|
alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
|
|
- alc294_hp_init(codec);
|
|
+ spec->init_hook = alc294_init;
|
|
break;
|
|
|
|
}
|
|
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
|
|
index 1bff4b1b39cd0..ba99ff0e93e03 100644
|
|
--- a/sound/pci/rme9652/hdsp.c
|
|
+++ b/sound/pci/rme9652/hdsp.c
|
|
@@ -30,6 +30,7 @@
|
|
#include <linux/math64.h>
|
|
#include <linux/vmalloc.h>
|
|
#include <linux/io.h>
|
|
+#include <linux/nospec.h>
|
|
|
|
#include <sound/core.h>
|
|
#include <sound/control.h>
|
|
@@ -4092,15 +4093,16 @@ static int snd_hdsp_channel_info(struct snd_pcm_substream *substream,
|
|
struct snd_pcm_channel_info *info)
|
|
{
|
|
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
|
|
- int mapped_channel;
|
|
+ unsigned int channel = info->channel;
|
|
|
|
- if (snd_BUG_ON(info->channel >= hdsp->max_channels))
|
|
+ if (snd_BUG_ON(channel >= hdsp->max_channels))
|
|
return -EINVAL;
|
|
+ channel = array_index_nospec(channel, hdsp->max_channels);
|
|
|
|
- if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
|
|
+ if (hdsp->channel_map[channel] < 0)
|
|
return -EINVAL;
|
|
|
|
- info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
|
|
+ info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
|
|
info->first = 0;
|
|
info->step = 32;
|
|
return 0;
|
|
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
|
|
index cdebab2f8ce53..7ada2c1f4964d 100644
|
|
--- a/sound/soc/amd/acp-pcm-dma.c
|
|
+++ b/sound/soc/amd/acp-pcm-dma.c
|
|
@@ -1151,18 +1151,21 @@ static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
|
|
struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
|
|
DRV_NAME);
|
|
struct audio_drv_data *adata = dev_get_drvdata(component->dev);
|
|
+ struct device *parent = component->dev->parent;
|
|
|
|
switch (adata->asic_type) {
|
|
case CHIP_STONEY:
|
|
ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
|
|
SNDRV_DMA_TYPE_DEV,
|
|
- NULL, ST_MIN_BUFFER,
|
|
+ parent,
|
|
+ ST_MIN_BUFFER,
|
|
ST_MAX_BUFFER);
|
|
break;
|
|
default:
|
|
ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
|
|
SNDRV_DMA_TYPE_DEV,
|
|
- NULL, MIN_BUFFER,
|
|
+ parent,
|
|
+ MIN_BUFFER,
|
|
MAX_BUFFER);
|
|
break;
|
|
}
|
|
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
|
|
index d00734d31e042..e5b6769b97977 100644
|
|
--- a/sound/soc/codecs/hdmi-codec.c
|
|
+++ b/sound/soc/codecs/hdmi-codec.c
|
|
@@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
|
|
if (hcd->spdif)
|
|
hcp->daidrv[i] = hdmi_spdif_dai;
|
|
|
|
+ dev_set_drvdata(dev, hcp);
|
|
+
|
|
ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv,
|
|
dai_count);
|
|
if (ret) {
|
|
@@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
|
|
__func__, ret);
|
|
return ret;
|
|
}
|
|
-
|
|
- dev_set_drvdata(dev, hcp);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
|
|
index 52cc950c9fd17..445d025e1409d 100644
|
|
--- a/sound/soc/codecs/pcm3168a.c
|
|
+++ b/sound/soc/codecs/pcm3168a.c
|
|
@@ -770,15 +770,22 @@ err_clk:
|
|
}
|
|
EXPORT_SYMBOL_GPL(pcm3168a_probe);
|
|
|
|
-void pcm3168a_remove(struct device *dev)
|
|
+static void pcm3168a_disable(struct device *dev)
|
|
{
|
|
struct pcm3168a_priv *pcm3168a = dev_get_drvdata(dev);
|
|
|
|
- pm_runtime_disable(dev);
|
|
regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
|
|
- pcm3168a->supplies);
|
|
+ pcm3168a->supplies);
|
|
clk_disable_unprepare(pcm3168a->scki);
|
|
}
|
|
+
|
|
+void pcm3168a_remove(struct device *dev)
|
|
+{
|
|
+ pm_runtime_disable(dev);
|
|
+#ifndef CONFIG_PM
|
|
+ pcm3168a_disable(dev);
|
|
+#endif
|
|
+}
|
|
EXPORT_SYMBOL_GPL(pcm3168a_remove);
|
|
|
|
#ifdef CONFIG_PM
|
|
@@ -833,10 +840,7 @@ static int pcm3168a_rt_suspend(struct device *dev)
|
|
|
|
regcache_cache_only(pcm3168a->regmap, true);
|
|
|
|
- regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
|
|
- pcm3168a->supplies);
|
|
-
|
|
- clk_disable_unprepare(pcm3168a->scki);
|
|
+ pcm3168a_disable(dev);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
|
|
index 4d46f4567c3a8..bec2eefa8b0f0 100644
|
|
--- a/sound/soc/codecs/rt5514-spi.c
|
|
+++ b/sound/soc/codecs/rt5514-spi.c
|
|
@@ -280,6 +280,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
|
|
|
|
rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp),
|
|
GFP_KERNEL);
|
|
+ if (!rt5514_dsp)
|
|
+ return -ENOMEM;
|
|
|
|
rt5514_dsp->dev = &rt5514_spi->dev;
|
|
mutex_init(&rt5514_dsp->dma_lock);
|
|
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
|
|
index e2b5a11b16d19..f03195d2ab2ea 100644
|
|
--- a/sound/soc/codecs/tlv320aic32x4.c
|
|
+++ b/sound/soc/codecs/tlv320aic32x4.c
|
|
@@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component,
|
|
case SND_SOC_BIAS_PREPARE:
|
|
break;
|
|
case SND_SOC_BIAS_STANDBY:
|
|
+ /* Initial cold start */
|
|
+ if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
|
|
+ break;
|
|
+
|
|
/* Switch off BCLK_N Divider */
|
|
snd_soc_component_update_bits(component, AIC32X4_BCLKN,
|
|
AIC32X4_BCLKEN, 0);
|
|
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
|
|
index ccdf088461b7f..54c306707c02c 100644
|
|
--- a/sound/soc/codecs/wm9705.c
|
|
+++ b/sound/soc/codecs/wm9705.c
|
|
@@ -325,8 +325,7 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
|
|
if (wm9705->mfd_pdata) {
|
|
wm9705->ac97 = wm9705->mfd_pdata->ac97;
|
|
regmap = wm9705->mfd_pdata->regmap;
|
|
- } else {
|
|
-#ifdef CONFIG_SND_SOC_AC97_BUS
|
|
+ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
|
|
wm9705->ac97 = snd_soc_new_ac97_component(component, WM9705_VENDOR_ID,
|
|
WM9705_VENDOR_ID_MASK);
|
|
if (IS_ERR(wm9705->ac97)) {
|
|
@@ -339,7 +338,8 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
|
|
snd_soc_free_ac97_component(wm9705->ac97);
|
|
return PTR_ERR(regmap);
|
|
}
|
|
-#endif
|
|
+ } else {
|
|
+ return -ENXIO;
|
|
}
|
|
|
|
snd_soc_component_set_drvdata(component, wm9705->ac97);
|
|
@@ -350,14 +350,12 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
|
|
|
|
static void wm9705_soc_remove(struct snd_soc_component *component)
|
|
{
|
|
-#ifdef CONFIG_SND_SOC_AC97_BUS
|
|
struct wm9705_priv *wm9705 = snd_soc_component_get_drvdata(component);
|
|
|
|
- if (!wm9705->mfd_pdata) {
|
|
+ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9705->mfd_pdata) {
|
|
snd_soc_component_exit_regmap(component);
|
|
snd_soc_free_ac97_component(wm9705->ac97);
|
|
}
|
|
-#endif
|
|
}
|
|
|
|
static const struct snd_soc_component_driver soc_component_dev_wm9705 = {
|
|
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
|
|
index e873baa9e7780..01949eaba4fd4 100644
|
|
--- a/sound/soc/codecs/wm9712.c
|
|
+++ b/sound/soc/codecs/wm9712.c
|
|
@@ -642,8 +642,7 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
|
|
if (wm9712->mfd_pdata) {
|
|
wm9712->ac97 = wm9712->mfd_pdata->ac97;
|
|
regmap = wm9712->mfd_pdata->regmap;
|
|
- } else {
|
|
-#ifdef CONFIG_SND_SOC_AC97_BUS
|
|
+ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
|
|
int ret;
|
|
|
|
wm9712->ac97 = snd_soc_new_ac97_component(component, WM9712_VENDOR_ID,
|
|
@@ -660,7 +659,8 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
|
|
snd_soc_free_ac97_component(wm9712->ac97);
|
|
return PTR_ERR(regmap);
|
|
}
|
|
-#endif
|
|
+ } else {
|
|
+ return -ENXIO;
|
|
}
|
|
|
|
snd_soc_component_init_regmap(component, regmap);
|
|
@@ -673,14 +673,12 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
|
|
|
|
static void wm9712_soc_remove(struct snd_soc_component *component)
|
|
{
|
|
-#ifdef CONFIG_SND_SOC_AC97_BUS
|
|
struct wm9712_priv *wm9712 = snd_soc_component_get_drvdata(component);
|
|
|
|
- if (!wm9712->mfd_pdata) {
|
|
+ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9712->mfd_pdata) {
|
|
snd_soc_component_exit_regmap(component);
|
|
snd_soc_free_ac97_component(wm9712->ac97);
|
|
}
|
|
-#endif
|
|
}
|
|
|
|
static const struct snd_soc_component_driver soc_component_dev_wm9712 = {
|
|
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
|
|
index 643863bb32e0d..5a2fdf4f69bf3 100644
|
|
--- a/sound/soc/codecs/wm9713.c
|
|
+++ b/sound/soc/codecs/wm9713.c
|
|
@@ -1214,8 +1214,7 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
|
|
if (wm9713->mfd_pdata) {
|
|
wm9713->ac97 = wm9713->mfd_pdata->ac97;
|
|
regmap = wm9713->mfd_pdata->regmap;
|
|
- } else {
|
|
-#ifdef CONFIG_SND_SOC_AC97_BUS
|
|
+ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
|
|
wm9713->ac97 = snd_soc_new_ac97_component(component, WM9713_VENDOR_ID,
|
|
WM9713_VENDOR_ID_MASK);
|
|
if (IS_ERR(wm9713->ac97))
|
|
@@ -1225,7 +1224,8 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
|
|
snd_soc_free_ac97_component(wm9713->ac97);
|
|
return PTR_ERR(regmap);
|
|
}
|
|
-#endif
|
|
+ } else {
|
|
+ return -ENXIO;
|
|
}
|
|
|
|
snd_soc_component_init_regmap(component, regmap);
|
|
@@ -1238,14 +1238,12 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
|
|
|
|
static void wm9713_soc_remove(struct snd_soc_component *component)
|
|
{
|
|
-#ifdef CONFIG_SND_SOC_AC97_BUS
|
|
struct wm9713_priv *wm9713 = snd_soc_component_get_drvdata(component);
|
|
|
|
- if (!wm9713->mfd_pdata) {
|
|
+ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9713->mfd_pdata) {
|
|
snd_soc_component_exit_regmap(component);
|
|
snd_soc_free_ac97_component(wm9713->ac97);
|
|
}
|
|
-#endif
|
|
}
|
|
|
|
static const struct snd_soc_component_driver soc_component_dev_wm9713 = {
|
|
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
|
|
index 6ec19fb4a934d..2e75b5bc5f1da 100644
|
|
--- a/sound/soc/fsl/Kconfig
|
|
+++ b/sound/soc/fsl/Kconfig
|
|
@@ -221,7 +221,7 @@ config SND_SOC_PHYCORE_AC97
|
|
|
|
config SND_SOC_EUKREA_TLV320
|
|
tristate "Eukrea TLV320"
|
|
- depends on ARCH_MXC && I2C
|
|
+ depends on ARCH_MXC && !ARM64 && I2C
|
|
select SND_SOC_TLV320AIC23_I2C
|
|
select SND_SOC_IMX_AUDMUX
|
|
select SND_SOC_IMX_SSI
|
|
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
|
|
index afc5598660955..91a2436ce9525 100644
|
|
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
|
|
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
|
|
@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
|
|
struct snd_pcm_hw_params *params,
|
|
struct snd_soc_dai *dai)
|
|
{
|
|
- snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
|
|
+ int ret;
|
|
+
|
|
+ ret =
|
|
+ snd_pcm_lib_malloc_pages(substream,
|
|
+ params_buffer_bytes(params));
|
|
+ if (ret)
|
|
+ return ret;
|
|
memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
|
|
return 0;
|
|
}
|
|
diff --git a/sound/soc/intel/atom/sst/sst_loader.c b/sound/soc/intel/atom/sst/sst_loader.c
|
|
index 27413ebae9566..b8c456753f015 100644
|
|
--- a/sound/soc/intel/atom/sst/sst_loader.c
|
|
+++ b/sound/soc/intel/atom/sst/sst_loader.c
|
|
@@ -354,14 +354,14 @@ static int sst_request_fw(struct intel_sst_drv *sst)
|
|
const struct firmware *fw;
|
|
|
|
retval = request_firmware(&fw, sst->firmware_name, sst->dev);
|
|
- if (fw == NULL) {
|
|
- dev_err(sst->dev, "fw is returning as null\n");
|
|
- return -EINVAL;
|
|
- }
|
|
if (retval) {
|
|
dev_err(sst->dev, "request fw failed %d\n", retval);
|
|
return retval;
|
|
}
|
|
+ if (fw == NULL) {
|
|
+ dev_err(sst->dev, "fw is returning as null\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
mutex_lock(&sst->sst_lock);
|
|
retval = sst_cache_and_parse_fw(sst, fw);
|
|
mutex_unlock(&sst->sst_lock);
|
|
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
|
|
index 9d9f6e41d81c0..08a5152e635ac 100644
|
|
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
|
|
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
|
|
@@ -389,6 +389,20 @@ static struct snd_soc_card snd_soc_card_cht = {
|
|
};
|
|
|
|
static const struct dmi_system_id cht_max98090_quirk_table[] = {
|
|
+ {
|
|
+ /* Clapper model Chromebook */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "Clapper"),
|
|
+ },
|
|
+ .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
|
|
+ },
|
|
+ {
|
|
+ /* Gnawty model Chromebook (Acer Chromebook CB3-111) */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "Gnawty"),
|
|
+ },
|
|
+ .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
|
|
+ },
|
|
{
|
|
/* Swanky model Chromebook (Toshiba Chromebook 2) */
|
|
.matches = {
|
|
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
|
|
index b29d0f65611eb..2d49492d60692 100644
|
|
--- a/sound/soc/soc-core.c
|
|
+++ b/sound/soc/soc-core.c
|
|
@@ -1034,17 +1034,18 @@ static int snd_soc_init_platform(struct snd_soc_card *card,
|
|
* this function should be removed in the future
|
|
*/
|
|
/* convert Legacy platform link */
|
|
- if (!platform) {
|
|
+ if (!platform || dai_link->legacy_platform) {
|
|
platform = devm_kzalloc(card->dev,
|
|
sizeof(struct snd_soc_dai_link_component),
|
|
GFP_KERNEL);
|
|
if (!platform)
|
|
return -ENOMEM;
|
|
|
|
- dai_link->platform = platform;
|
|
- platform->name = dai_link->platform_name;
|
|
- platform->of_node = dai_link->platform_of_node;
|
|
- platform->dai_name = NULL;
|
|
+ dai_link->platform = platform;
|
|
+ dai_link->legacy_platform = 1;
|
|
+ platform->name = dai_link->platform_name;
|
|
+ platform->of_node = dai_link->platform_of_node;
|
|
+ platform->dai_name = NULL;
|
|
}
|
|
|
|
/* if there's no platform we match on the empty platform */
|
|
diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
|
|
index e557946718a9e..d9fcae071b477 100644
|
|
--- a/sound/synth/emux/emux_hwdep.c
|
|
+++ b/sound/synth/emux/emux_hwdep.c
|
|
@@ -22,9 +22,9 @@
|
|
#include <sound/core.h>
|
|
#include <sound/hwdep.h>
|
|
#include <linux/uaccess.h>
|
|
+#include <linux/nospec.h>
|
|
#include "emux_voice.h"
|
|
|
|
-
|
|
#define TMP_CLIENT_ID 0x1001
|
|
|
|
/*
|
|
@@ -66,13 +66,16 @@ snd_emux_hwdep_misc_mode(struct snd_emux *emu, void __user *arg)
|
|
return -EFAULT;
|
|
if (info.mode < 0 || info.mode >= EMUX_MD_END)
|
|
return -EINVAL;
|
|
+ info.mode = array_index_nospec(info.mode, EMUX_MD_END);
|
|
|
|
if (info.port < 0) {
|
|
for (i = 0; i < emu->num_ports; i++)
|
|
emu->portptrs[i]->ctrls[info.mode] = info.value;
|
|
} else {
|
|
- if (info.port < emu->num_ports)
|
|
+ if (info.port < emu->num_ports) {
|
|
+ info.port = array_index_nospec(info.port, emu->num_ports);
|
|
emu->portptrs[info.port]->ctrls[info.mode] = info.value;
|
|
+ }
|
|
}
|
|
return 0;
|
|
}
|
|
diff --git a/sound/usb/card.c b/sound/usb/card.c
|
|
index a105947eaf55c..746a72e23cf9f 100644
|
|
--- a/sound/usb/card.c
|
|
+++ b/sound/usb/card.c
|
|
@@ -246,7 +246,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
|
|
h1 = snd_usb_find_csint_desc(host_iface->extra,
|
|
host_iface->extralen,
|
|
NULL, UAC_HEADER);
|
|
- if (!h1) {
|
|
+ if (!h1 || h1->bLength < sizeof(*h1)) {
|
|
dev_err(&dev->dev, "cannot find UAC_HEADER\n");
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
|
|
index c63c84b54969b..e7d441d0e839f 100644
|
|
--- a/sound/usb/mixer.c
|
|
+++ b/sound/usb/mixer.c
|
|
@@ -753,8 +753,9 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
|
|
struct uac_mixer_unit_descriptor *desc)
|
|
{
|
|
int mu_channels;
|
|
+ void *c;
|
|
|
|
- if (desc->bLength < 11)
|
|
+ if (desc->bLength < sizeof(*desc))
|
|
return -EINVAL;
|
|
if (!desc->bNrInPins)
|
|
return -EINVAL;
|
|
@@ -763,6 +764,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
|
|
case UAC_VERSION_1:
|
|
case UAC_VERSION_2:
|
|
default:
|
|
+ if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
|
|
+ return 0; /* no bmControls -> skip */
|
|
mu_channels = uac_mixer_unit_bNrChannels(desc);
|
|
break;
|
|
case UAC_VERSION_3:
|
|
@@ -772,7 +775,11 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
|
|
}
|
|
|
|
if (!mu_channels)
|
|
- return -EINVAL;
|
|
+ return 0;
|
|
+
|
|
+ c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
|
|
+ if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
|
|
+ return 0; /* no bmControls -> skip */
|
|
|
|
return mu_channels;
|
|
}
|
|
@@ -944,7 +951,7 @@ static int check_input_term(struct mixer_build *state, int id,
|
|
struct uac_mixer_unit_descriptor *d = p1;
|
|
|
|
err = uac_mixer_unit_get_channels(state, d);
|
|
- if (err < 0)
|
|
+ if (err <= 0)
|
|
return err;
|
|
|
|
term->channels = err;
|
|
@@ -2068,11 +2075,15 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
|
|
|
|
if (state->mixer->protocol == UAC_VERSION_2) {
|
|
struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
|
|
+ if (d_v2->bLength < sizeof(*d_v2))
|
|
+ return -EINVAL;
|
|
control = UAC2_TE_CONNECTOR;
|
|
term_id = d_v2->bTerminalID;
|
|
bmctls = le16_to_cpu(d_v2->bmControls);
|
|
} else if (state->mixer->protocol == UAC_VERSION_3) {
|
|
struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
|
|
+ if (d_v3->bLength < sizeof(*d_v3))
|
|
+ return -EINVAL;
|
|
control = UAC3_TE_INSERTION;
|
|
term_id = d_v3->bTerminalID;
|
|
bmctls = le32_to_cpu(d_v3->bmControls);
|
|
@@ -2118,7 +2129,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
|
|
if (err < 0)
|
|
continue;
|
|
/* no bmControls field (e.g. Maya44) -> ignore */
|
|
- if (desc->bLength <= 10 + input_pins)
|
|
+ if (!num_outs)
|
|
continue;
|
|
err = check_input_term(state, desc->baSourceID[pin], &iterm);
|
|
if (err < 0)
|
|
@@ -2314,7 +2325,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
|
|
char *name)
|
|
{
|
|
struct uac_processing_unit_descriptor *desc = raw_desc;
|
|
- int num_ins = desc->bNrInPins;
|
|
+ int num_ins;
|
|
struct usb_mixer_elem_info *cval;
|
|
struct snd_kcontrol *kctl;
|
|
int i, err, nameid, type, len;
|
|
@@ -2329,7 +2340,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
|
|
0, NULL, default_value_info
|
|
};
|
|
|
|
- if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
|
|
+ if (desc->bLength < 13) {
|
|
+ usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ num_ins = desc->bNrInPins;
|
|
+ if (desc->bLength < 13 + num_ins ||
|
|
desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
|
|
usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
|
|
return -EINVAL;
|
|
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
|
|
index 382847154227e..db114f3977e0f 100644
|
|
--- a/sound/usb/pcm.c
|
|
+++ b/sound/usb/pcm.c
|
|
@@ -314,6 +314,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
|
|
return 0;
|
|
}
|
|
|
|
+/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
|
|
+ * applies. Returns 1 if a quirk was found.
|
|
+ */
|
|
static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
|
|
struct usb_device *dev,
|
|
struct usb_interface_descriptor *altsd,
|
|
@@ -384,7 +387,7 @@ add_sync_ep:
|
|
|
|
subs->data_endpoint->sync_master = subs->sync_endpoint;
|
|
|
|
- return 0;
|
|
+ return 1;
|
|
}
|
|
|
|
static int set_sync_endpoint(struct snd_usb_substream *subs,
|
|
@@ -423,6 +426,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
|
|
if (err < 0)
|
|
return err;
|
|
|
|
+ /* endpoint set by quirk */
|
|
+ if (err > 0)
|
|
+ return 0;
|
|
+
|
|
if (altsd->bNumEndpoints < 2)
|
|
return 0;
|
|
|
|
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
|
|
index 37fc0447c0710..b345beb447bd1 100644
|
|
--- a/sound/usb/quirks-table.h
|
|
+++ b/sound/usb/quirks-table.h
|
|
@@ -3326,6 +3326,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
|
|
}
|
|
}
|
|
},
|
|
+ {
|
|
+ .ifnum = -1
|
|
+ },
|
|
}
|
|
}
|
|
},
|
|
@@ -3369,6 +3372,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
|
|
}
|
|
}
|
|
},
|
|
+ {
|
|
+ .ifnum = -1
|
|
+ },
|
|
}
|
|
}
|
|
},
|
|
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
|
|
index 6623cafc94f2c..d71e01954975e 100644
|
|
--- a/sound/usb/quirks.c
|
|
+++ b/sound/usb/quirks.c
|
|
@@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
|
|
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
|
|
break;
|
|
|
|
+ case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
|
|
case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
|
|
case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
|
|
case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
|
|
@@ -1447,6 +1448,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
|
|
case 0x20b1: /* XMOS based devices */
|
|
case 0x152a: /* Thesycon devices */
|
|
case 0x25ce: /* Mytek devices */
|
|
+ case 0x2ab6: /* T+A devices */
|
|
if (fp->dsd_raw)
|
|
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
|
|
break;
|
|
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
|
|
index 67cf849aa16b9..d9e3de495c163 100644
|
|
--- a/sound/usb/stream.c
|
|
+++ b/sound/usb/stream.c
|
|
@@ -596,12 +596,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
|
|
csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
|
|
|
|
if (!csep || csep->bLength < 7 ||
|
|
- csep->bDescriptorSubtype != UAC_EP_GENERAL) {
|
|
- usb_audio_warn(chip,
|
|
- "%u:%d : no or invalid class specific endpoint descriptor\n",
|
|
- iface_no, altsd->bAlternateSetting);
|
|
- return 0;
|
|
- }
|
|
+ csep->bDescriptorSubtype != UAC_EP_GENERAL)
|
|
+ goto error;
|
|
|
|
if (protocol == UAC_VERSION_1) {
|
|
attributes = csep->bmAttributes;
|
|
@@ -609,6 +605,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
|
|
struct uac2_iso_endpoint_descriptor *csep2 =
|
|
(struct uac2_iso_endpoint_descriptor *) csep;
|
|
|
|
+ if (csep2->bLength < sizeof(*csep2))
|
|
+ goto error;
|
|
attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
|
|
|
|
/* emulate the endpoint attributes of a v1 device */
|
|
@@ -618,12 +616,20 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
|
|
struct uac3_iso_endpoint_descriptor *csep3 =
|
|
(struct uac3_iso_endpoint_descriptor *) csep;
|
|
|
|
+ if (csep3->bLength < sizeof(*csep3))
|
|
+ goto error;
|
|
/* emulate the endpoint attributes of a v1 device */
|
|
if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
|
|
attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
|
|
}
|
|
|
|
return attributes;
|
|
+
|
|
+ error:
|
|
+ usb_audio_warn(chip,
|
|
+ "%u:%d : no or invalid class specific endpoint descriptor\n",
|
|
+ iface_no, altsd->bAlternateSetting);
|
|
+ return 0;
|
|
}
|
|
|
|
/* find an input terminal descriptor (either UAC1 or UAC2) with the given
|
|
@@ -631,13 +637,17 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
|
|
*/
|
|
static void *
|
|
snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
|
|
- int terminal_id)
|
|
+ int terminal_id, bool uac23)
|
|
{
|
|
struct uac2_input_terminal_descriptor *term = NULL;
|
|
+ size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) :
|
|
+ sizeof(struct uac_input_terminal_descriptor);
|
|
|
|
while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
|
|
ctrl_iface->extralen,
|
|
term, UAC_INPUT_TERMINAL))) {
|
|
+ if (term->bLength < minlen)
|
|
+ continue;
|
|
if (term->bTerminalID == terminal_id)
|
|
return term;
|
|
}
|
|
@@ -655,7 +665,8 @@ snd_usb_find_output_terminal_descriptor(struct usb_host_interface *ctrl_iface,
|
|
while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
|
|
ctrl_iface->extralen,
|
|
term, UAC_OUTPUT_TERMINAL))) {
|
|
- if (term->bTerminalID == terminal_id)
|
|
+ if (term->bLength >= sizeof(*term) &&
|
|
+ term->bTerminalID == terminal_id)
|
|
return term;
|
|
}
|
|
|
|
@@ -729,7 +740,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
|
|
format = le16_to_cpu(as->wFormatTag); /* remember the format value */
|
|
|
|
iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
|
|
- as->bTerminalLink);
|
|
+ as->bTerminalLink,
|
|
+ false);
|
|
if (iterm) {
|
|
num_channels = iterm->bNrChannels;
|
|
chconfig = le16_to_cpu(iterm->wChannelConfig);
|
|
@@ -764,7 +776,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
|
|
* to extract the clock
|
|
*/
|
|
input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
|
|
- as->bTerminalLink);
|
|
+ as->bTerminalLink,
|
|
+ true);
|
|
if (input_term) {
|
|
clock = input_term->bCSourceID;
|
|
if (!chconfig && (num_channels == input_term->bNrChannels))
|
|
@@ -998,7 +1011,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
|
|
* to extract the clock
|
|
*/
|
|
input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
|
|
- as->bTerminalLink);
|
|
+ as->bTerminalLink,
|
|
+ true);
|
|
if (input_term) {
|
|
clock = input_term->bCSourceID;
|
|
goto found_clock;
|
|
diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
|
|
new file mode 100644
|
|
index 0000000000000..0b3cb52fd29dc
|
|
--- /dev/null
|
|
+++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
|
|
@@ -0,0 +1,25 @@
|
|
+/*
|
|
+ * Copyright (C) 2012 ARM Ltd.
|
|
+ * Copyright (C) 2015 Regents of the University of California
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License version 2 as
|
|
+ * published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
|
|
+#define _UAPI_ASM_RISCV_BITSPERLONG_H
|
|
+
|
|
+#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
|
|
+
|
|
+#include <asm-generic/bitsperlong.h>
|
|
+
|
|
+#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
|
|
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
|
|
index 70fd48d79f611..05d715e6b1285 100644
|
|
--- a/tools/bpf/bpftool/common.c
|
|
+++ b/tools/bpf/bpftool/common.c
|
|
@@ -58,7 +58,7 @@
|
|
#define BPF_FS_MAGIC 0xcafe4a11
|
|
#endif
|
|
|
|
-void p_err(const char *fmt, ...)
|
|
+void __printf(1, 2) p_err(const char *fmt, ...)
|
|
{
|
|
va_list ap;
|
|
|
|
@@ -76,7 +76,7 @@ void p_err(const char *fmt, ...)
|
|
va_end(ap);
|
|
}
|
|
|
|
-void p_info(const char *fmt, ...)
|
|
+void __printf(1, 2) p_info(const char *fmt, ...)
|
|
{
|
|
va_list ap;
|
|
|
|
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
|
|
index c6eef76322ae9..4e4149421d07b 100644
|
|
--- a/tools/bpf/bpftool/json_writer.c
|
|
+++ b/tools/bpf/bpftool/json_writer.c
|
|
@@ -19,6 +19,7 @@
|
|
#include <malloc.h>
|
|
#include <inttypes.h>
|
|
#include <stdint.h>
|
|
+#include <linux/compiler.h>
|
|
|
|
#include "json_writer.h"
|
|
|
|
@@ -156,7 +157,8 @@ void jsonw_name(json_writer_t *self, const char *name)
|
|
putc(' ', self->out);
|
|
}
|
|
|
|
-void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
|
|
+void __printf(2, 0)
|
|
+jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
|
|
{
|
|
jsonw_eor(self);
|
|
putc('"', self->out);
|
|
@@ -164,7 +166,7 @@ void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
|
|
putc('"', self->out);
|
|
}
|
|
|
|
-void jsonw_printf(json_writer_t *self, const char *fmt, ...)
|
|
+void __printf(2, 3) jsonw_printf(json_writer_t *self, const char *fmt, ...)
|
|
{
|
|
va_list ap;
|
|
|
|
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
|
|
index 7bf38f0e152e0..9988d5c126b62 100644
|
|
--- a/tools/bpf/bpftool/map.c
|
|
+++ b/tools/bpf/bpftool/map.c
|
|
@@ -383,7 +383,10 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
|
|
printf(single_line ? " " : "\n");
|
|
|
|
printf("value:%c", break_names ? '\n' : ' ');
|
|
- fprint_hex(stdout, value, info->value_size, " ");
|
|
+ if (value)
|
|
+ fprint_hex(stdout, value, info->value_size, " ");
|
|
+ else
|
|
+ printf("<no entry>");
|
|
|
|
printf("\n");
|
|
} else {
|
|
@@ -398,8 +401,11 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
|
|
for (i = 0; i < n; i++) {
|
|
printf("value (CPU %02d):%c",
|
|
i, info->value_size > 16 ? '\n' : ' ');
|
|
- fprint_hex(stdout, value + i * step,
|
|
- info->value_size, " ");
|
|
+ if (value)
|
|
+ fprint_hex(stdout, value + i * step,
|
|
+ info->value_size, " ");
|
|
+ else
|
|
+ printf("<no entry>");
|
|
printf("\n");
|
|
}
|
|
}
|
|
@@ -731,7 +737,11 @@ static int dump_map_elem(int fd, void *key, void *value,
|
|
jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
|
|
jsonw_end_object(json_wtr);
|
|
} else {
|
|
- print_entry_error(map_info, key, strerror(lookup_errno));
|
|
+ if (errno == ENOENT)
|
|
+ print_entry_plain(map_info, key, NULL);
|
|
+ else
|
|
+ print_entry_error(map_info, key,
|
|
+ strerror(lookup_errno));
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
|
|
index ccee180dfb761..69b01a6158bdd 100644
|
|
--- a/tools/bpf/bpftool/prog.c
|
|
+++ b/tools/bpf/bpftool/prog.c
|
|
@@ -84,7 +84,7 @@ static const char * const attach_type_strings[] = {
|
|
[__MAX_BPF_ATTACH_TYPE] = NULL,
|
|
};
|
|
|
|
-enum bpf_attach_type parse_attach_type(const char *str)
|
|
+static enum bpf_attach_type parse_attach_type(const char *str)
|
|
{
|
|
enum bpf_attach_type type;
|
|
|
|
@@ -713,7 +713,7 @@ struct map_replace {
|
|
char *name;
|
|
};
|
|
|
|
-int map_replace_compar(const void *p1, const void *p2)
|
|
+static int map_replace_compar(const void *p1, const void *p2)
|
|
{
|
|
const struct map_replace *a = p1, *b = p2;
|
|
|
|
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
|
|
index 3284759df98ad..98083e4dc0f9d 100644
|
|
--- a/tools/bpf/bpftool/xlated_dumper.c
|
|
+++ b/tools/bpf/bpftool/xlated_dumper.c
|
|
@@ -114,7 +114,7 @@ struct kernel_sym *kernel_syms_search(struct dump_data *dd,
|
|
sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
|
|
}
|
|
|
|
-static void print_insn(void *private_data, const char *fmt, ...)
|
|
+static void __printf(2, 3) print_insn(void *private_data, const char *fmt, ...)
|
|
{
|
|
va_list args;
|
|
|
|
@@ -123,7 +123,7 @@ static void print_insn(void *private_data, const char *fmt, ...)
|
|
va_end(args);
|
|
}
|
|
|
|
-static void
|
|
+static void __printf(2, 3)
|
|
print_insn_for_graph(void *private_data, const char *fmt, ...)
|
|
{
|
|
char buf[64], *p;
|
|
@@ -154,7 +154,8 @@ print_insn_for_graph(void *private_data, const char *fmt, ...)
|
|
printf("%s", buf);
|
|
}
|
|
|
|
-static void print_insn_json(void *private_data, const char *fmt, ...)
|
|
+static void __printf(2, 3)
|
|
+print_insn_json(void *private_data, const char *fmt, ...)
|
|
{
|
|
unsigned int l = strlen(fmt);
|
|
char chomped_fmt[l];
|
|
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
|
|
index d74bb9414d7c6..81dda411e9d39 100644
|
|
--- a/tools/build/Makefile.feature
|
|
+++ b/tools/build/Makefile.feature
|
|
@@ -80,8 +80,8 @@ FEATURE_TESTS_EXTRA := \
|
|
cplus-demangle \
|
|
hello \
|
|
libbabeltrace \
|
|
- liberty \
|
|
- liberty-z \
|
|
+ libbfd-liberty \
|
|
+ libbfd-liberty-z \
|
|
libunwind-debug-frame \
|
|
libunwind-debug-frame-arm \
|
|
libunwind-debug-frame-aarch64 \
|
|
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
|
|
index 304b984f11b99..912b82d4b70ab 100644
|
|
--- a/tools/build/feature/Makefile
|
|
+++ b/tools/build/feature/Makefile
|
|
@@ -16,8 +16,8 @@ FILES= \
|
|
test-libbfd.bin \
|
|
test-disassembler-four-args.bin \
|
|
test-reallocarray.bin \
|
|
- test-liberty.bin \
|
|
- test-liberty-z.bin \
|
|
+ test-libbfd-liberty.bin \
|
|
+ test-libbfd-liberty-z.bin \
|
|
test-cplus-demangle.bin \
|
|
test-libelf.bin \
|
|
test-libelf-getphdrnum.bin \
|
|
@@ -204,7 +204,7 @@ $(OUTPUT)test-libpython-version.bin:
|
|
$(BUILD)
|
|
|
|
$(OUTPUT)test-libbfd.bin:
|
|
- $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
|
|
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
|
|
|
|
$(OUTPUT)test-disassembler-four-args.bin:
|
|
$(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
|
|
@@ -212,10 +212,10 @@ $(OUTPUT)test-disassembler-four-args.bin:
|
|
$(OUTPUT)test-reallocarray.bin:
|
|
$(BUILD)
|
|
|
|
-$(OUTPUT)test-liberty.bin:
|
|
+$(OUTPUT)test-libbfd-liberty.bin:
|
|
$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
|
|
|
|
-$(OUTPUT)test-liberty-z.bin:
|
|
+$(OUTPUT)test-libbfd-liberty-z.bin:
|
|
$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -lz
|
|
|
|
$(OUTPUT)test-cplus-demangle.bin:
|
|
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
|
|
index bbb2a8ef367ca..d7e06fe0270ee 100644
|
|
--- a/tools/hv/hv_kvp_daemon.c
|
|
+++ b/tools/hv/hv_kvp_daemon.c
|
|
@@ -1178,6 +1178,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
|
|
FILE *file;
|
|
char cmd[PATH_MAX];
|
|
char *mac_addr;
|
|
+ int str_len;
|
|
|
|
/*
|
|
* Set the configuration for the specified interface with
|
|
@@ -1301,8 +1302,18 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
|
|
* invoke the external script to do its magic.
|
|
*/
|
|
|
|
- snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
|
|
- "hv_set_ifconfig", if_file);
|
|
+ str_len = snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
|
|
+ "hv_set_ifconfig", if_file);
|
|
+ /*
|
|
+ * This is a little overcautious, but it's necessary to suppress some
|
|
+ * false warnings from gcc 8.0.1.
|
|
+ */
|
|
+ if (str_len <= 0 || (unsigned int)str_len >= sizeof(cmd)) {
|
|
+ syslog(LOG_ERR, "Cmd '%s' (len=%d) may be too long",
|
|
+ cmd, str_len);
|
|
+ return HV_E_FAIL;
|
|
+ }
|
|
+
|
|
if (system(cmd)) {
|
|
syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s",
|
|
cmd, errno, strerror(errno));
|
|
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
|
|
index 3040830d77976..84545666a09c4 100644
|
|
--- a/tools/iio/iio_generic_buffer.c
|
|
+++ b/tools/iio/iio_generic_buffer.c
|
|
@@ -330,7 +330,7 @@ static const struct option longopts[] = {
|
|
|
|
int main(int argc, char **argv)
|
|
{
|
|
- unsigned long long num_loops = 2;
|
|
+ long long num_loops = 2;
|
|
unsigned long timedelay = 1000000;
|
|
unsigned long buf_len = 128;
|
|
|
|
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
|
|
index 8dd6aefdafa4f..57aaeaf8e1920 100644
|
|
--- a/tools/include/uapi/asm/bitsperlong.h
|
|
+++ b/tools/include/uapi/asm/bitsperlong.h
|
|
@@ -13,6 +13,10 @@
|
|
#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
|
|
#elif defined(__ia64__)
|
|
#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
|
|
+#elif defined(__riscv)
|
|
+#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
|
|
+#elif defined(__alpha__)
|
|
+#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
|
|
#else
|
|
#include <asm-generic/bitsperlong.h>
|
|
#endif
|
|
diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h
|
|
new file mode 100644
|
|
index 0000000000000..0d18b1d1fbbc8
|
|
--- /dev/null
|
|
+++ b/tools/include/uapi/linux/pkt_sched.h
|
|
@@ -0,0 +1,1163 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
|
+#ifndef __LINUX_PKT_SCHED_H
|
|
+#define __LINUX_PKT_SCHED_H
|
|
+
|
|
+#include <linux/types.h>
|
|
+
|
|
+/* Logical priority bands not depending on specific packet scheduler.
|
|
+ Every scheduler will map them to real traffic classes, if it has
|
|
+ no more precise mechanism to classify packets.
|
|
+
|
|
+ These numbers have no special meaning, though their coincidence
|
|
+ with obsolete IPv6 values is not occasional :-). New IPv6 drafts
|
|
+ preferred full anarchy inspired by diffserv group.
|
|
+
|
|
+ Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
|
|
+ class, actually, as rule it will be handled with more care than
|
|
+ filler or even bulk.
|
|
+ */
|
|
+
|
|
+#define TC_PRIO_BESTEFFORT 0
|
|
+#define TC_PRIO_FILLER 1
|
|
+#define TC_PRIO_BULK 2
|
|
+#define TC_PRIO_INTERACTIVE_BULK 4
|
|
+#define TC_PRIO_INTERACTIVE 6
|
|
+#define TC_PRIO_CONTROL 7
|
|
+
|
|
+#define TC_PRIO_MAX 15
|
|
+
|
|
+/* Generic queue statistics, available for all the elements.
|
|
+ Particular schedulers may have also their private records.
|
|
+ */
|
|
+
|
|
+struct tc_stats {
|
|
+ __u64 bytes; /* Number of enqueued bytes */
|
|
+ __u32 packets; /* Number of enqueued packets */
|
|
+ __u32 drops; /* Packets dropped because of lack of resources */
|
|
+ __u32 overlimits; /* Number of throttle events when this
|
|
+ * flow goes out of allocated bandwidth */
|
|
+ __u32 bps; /* Current flow byte rate */
|
|
+ __u32 pps; /* Current flow packet rate */
|
|
+ __u32 qlen;
|
|
+ __u32 backlog;
|
|
+};
|
|
+
|
|
+struct tc_estimator {
|
|
+ signed char interval;
|
|
+ unsigned char ewma_log;
|
|
+};
|
|
+
|
|
+/* "Handles"
|
|
+ ---------
|
|
+
|
|
+ All the traffic control objects have 32bit identifiers, or "handles".
|
|
+
|
|
+ They can be considered as opaque numbers from user API viewpoint,
|
|
+ but actually they always consist of two fields: major and
|
|
+ minor numbers, which are interpreted by kernel specially,
|
|
+ that may be used by applications, though not recommended.
|
|
+
|
|
+ F.e. qdisc handles always have minor number equal to zero,
|
|
+ classes (or flows) have major equal to parent qdisc major, and
|
|
+ minor uniquely identifying class inside qdisc.
|
|
+
|
|
+ Macros to manipulate handles:
|
|
+ */
|
|
+
|
|
+#define TC_H_MAJ_MASK (0xFFFF0000U)
|
|
+#define TC_H_MIN_MASK (0x0000FFFFU)
|
|
+#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
|
|
+#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
|
|
+#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
|
|
+
|
|
+#define TC_H_UNSPEC (0U)
|
|
+#define TC_H_ROOT (0xFFFFFFFFU)
|
|
+#define TC_H_INGRESS (0xFFFFFFF1U)
|
|
+#define TC_H_CLSACT TC_H_INGRESS
|
|
+
|
|
+#define TC_H_MIN_PRIORITY 0xFFE0U
|
|
+#define TC_H_MIN_INGRESS 0xFFF2U
|
|
+#define TC_H_MIN_EGRESS 0xFFF3U
|
|
+
|
|
+/* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */
|
|
+enum tc_link_layer {
|
|
+ TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
|
|
+ TC_LINKLAYER_ETHERNET,
|
|
+ TC_LINKLAYER_ATM,
|
|
+};
|
|
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
|
|
+
|
|
+struct tc_ratespec {
|
|
+ unsigned char cell_log;
|
|
+ __u8 linklayer; /* lower 4 bits */
|
|
+ unsigned short overhead;
|
|
+ short cell_align;
|
|
+ unsigned short mpu;
|
|
+ __u32 rate;
|
|
+};
|
|
+
|
|
+#define TC_RTAB_SIZE 1024
|
|
+
|
|
+struct tc_sizespec {
|
|
+ unsigned char cell_log;
|
|
+ unsigned char size_log;
|
|
+ short cell_align;
|
|
+ int overhead;
|
|
+ unsigned int linklayer;
|
|
+ unsigned int mpu;
|
|
+ unsigned int mtu;
|
|
+ unsigned int tsize;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_STAB_UNSPEC,
|
|
+ TCA_STAB_BASE,
|
|
+ TCA_STAB_DATA,
|
|
+ __TCA_STAB_MAX
|
|
+};
|
|
+
|
|
+#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
|
|
+
|
|
+/* FIFO section */
|
|
+
|
|
+struct tc_fifo_qopt {
|
|
+ __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
|
|
+};
|
|
+
|
|
+/* SKBPRIO section */
|
|
+
|
|
+/*
|
|
+ * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
|
|
+ * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able
|
|
+ * to map one to one the DS field of IPV4 and IPV6 headers.
|
|
+ * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
|
|
+ */
|
|
+
|
|
+#define SKBPRIO_MAX_PRIORITY 64
|
|
+
|
|
+struct tc_skbprio_qopt {
|
|
+ __u32 limit; /* Queue length in packets. */
|
|
+};
|
|
+
|
|
+/* PRIO section */
|
|
+
|
|
+#define TCQ_PRIO_BANDS 16
|
|
+#define TCQ_MIN_PRIO_BANDS 2
|
|
+
|
|
+struct tc_prio_qopt {
|
|
+ int bands; /* Number of bands */
|
|
+ __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
|
|
+};
|
|
+
|
|
+/* MULTIQ section */
|
|
+
|
|
+struct tc_multiq_qopt {
|
|
+ __u16 bands; /* Number of bands */
|
|
+ __u16 max_bands; /* Maximum number of queues */
|
|
+};
|
|
+
|
|
+/* PLUG section */
|
|
+
|
|
+#define TCQ_PLUG_BUFFER 0
|
|
+#define TCQ_PLUG_RELEASE_ONE 1
|
|
+#define TCQ_PLUG_RELEASE_INDEFINITE 2
|
|
+#define TCQ_PLUG_LIMIT 3
|
|
+
|
|
+struct tc_plug_qopt {
|
|
+ /* TCQ_PLUG_BUFFER: Inset a plug into the queue and
|
|
+ * buffer any incoming packets
|
|
+ * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
|
|
+ * to beginning of the next plug.
|
|
+ * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
|
|
+ * Stop buffering packets until the next TCQ_PLUG_BUFFER
|
|
+ * command is received (just act as a pass-thru queue).
|
|
+ * TCQ_PLUG_LIMIT: Increase/decrease queue size
|
|
+ */
|
|
+ int action;
|
|
+ __u32 limit;
|
|
+};
|
|
+
|
|
+/* TBF section */
|
|
+
|
|
+struct tc_tbf_qopt {
|
|
+ struct tc_ratespec rate;
|
|
+ struct tc_ratespec peakrate;
|
|
+ __u32 limit;
|
|
+ __u32 buffer;
|
|
+ __u32 mtu;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_TBF_UNSPEC,
|
|
+ TCA_TBF_PARMS,
|
|
+ TCA_TBF_RTAB,
|
|
+ TCA_TBF_PTAB,
|
|
+ TCA_TBF_RATE64,
|
|
+ TCA_TBF_PRATE64,
|
|
+ TCA_TBF_BURST,
|
|
+ TCA_TBF_PBURST,
|
|
+ TCA_TBF_PAD,
|
|
+ __TCA_TBF_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
|
|
+
|
|
+
|
|
+/* TEQL section */
|
|
+
|
|
+/* TEQL does not require any parameters */
|
|
+
|
|
+/* SFQ section */
|
|
+
|
|
+struct tc_sfq_qopt {
|
|
+ unsigned quantum; /* Bytes per round allocated to flow */
|
|
+ int perturb_period; /* Period of hash perturbation */
|
|
+ __u32 limit; /* Maximal packets in queue */
|
|
+ unsigned divisor; /* Hash divisor */
|
|
+ unsigned flows; /* Maximal number of flows */
|
|
+};
|
|
+
|
|
+struct tc_sfqred_stats {
|
|
+ __u32 prob_drop; /* Early drops, below max threshold */
|
|
+ __u32 forced_drop; /* Early drops, after max threshold */
|
|
+ __u32 prob_mark; /* Marked packets, below max threshold */
|
|
+ __u32 forced_mark; /* Marked packets, after max threshold */
|
|
+ __u32 prob_mark_head; /* Marked packets, below max threshold */
|
|
+ __u32 forced_mark_head;/* Marked packets, after max threshold */
|
|
+};
|
|
+
|
|
+struct tc_sfq_qopt_v1 {
|
|
+ struct tc_sfq_qopt v0;
|
|
+ unsigned int depth; /* max number of packets per flow */
|
|
+ unsigned int headdrop;
|
|
+/* SFQRED parameters */
|
|
+ __u32 limit; /* HARD maximal flow queue length (bytes) */
|
|
+ __u32 qth_min; /* Min average length threshold (bytes) */
|
|
+ __u32 qth_max; /* Max average length threshold (bytes) */
|
|
+ unsigned char Wlog; /* log(W) */
|
|
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
|
|
+ unsigned char Scell_log; /* cell size for idle damping */
|
|
+ unsigned char flags;
|
|
+ __u32 max_P; /* probability, high resolution */
|
|
+/* SFQRED stats */
|
|
+ struct tc_sfqred_stats stats;
|
|
+};
|
|
+
|
|
+
|
|
+struct tc_sfq_xstats {
|
|
+ __s32 allot;
|
|
+};
|
|
+
|
|
+/* RED section */
|
|
+
|
|
+enum {
|
|
+ TCA_RED_UNSPEC,
|
|
+ TCA_RED_PARMS,
|
|
+ TCA_RED_STAB,
|
|
+ TCA_RED_MAX_P,
|
|
+ __TCA_RED_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_RED_MAX (__TCA_RED_MAX - 1)
|
|
+
|
|
+struct tc_red_qopt {
|
|
+ __u32 limit; /* HARD maximal queue length (bytes) */
|
|
+ __u32 qth_min; /* Min average length threshold (bytes) */
|
|
+ __u32 qth_max; /* Max average length threshold (bytes) */
|
|
+ unsigned char Wlog; /* log(W) */
|
|
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
|
|
+ unsigned char Scell_log; /* cell size for idle damping */
|
|
+ unsigned char flags;
|
|
+#define TC_RED_ECN 1
|
|
+#define TC_RED_HARDDROP 2
|
|
+#define TC_RED_ADAPTATIVE 4
|
|
+};
|
|
+
|
|
+struct tc_red_xstats {
|
|
+ __u32 early; /* Early drops */
|
|
+ __u32 pdrop; /* Drops due to queue limits */
|
|
+ __u32 other; /* Drops due to drop() calls */
|
|
+ __u32 marked; /* Marked packets */
|
|
+};
|
|
+
|
|
+/* GRED section */
|
|
+
|
|
+#define MAX_DPs 16
|
|
+
|
|
+enum {
|
|
+ TCA_GRED_UNSPEC,
|
|
+ TCA_GRED_PARMS,
|
|
+ TCA_GRED_STAB,
|
|
+ TCA_GRED_DPS,
|
|
+ TCA_GRED_MAX_P,
|
|
+ TCA_GRED_LIMIT,
|
|
+ TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
|
|
+ __TCA_GRED_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
|
|
+
|
|
+enum {
|
|
+ TCA_GRED_VQ_ENTRY_UNSPEC,
|
|
+ TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
|
|
+ __TCA_GRED_VQ_ENTRY_MAX,
|
|
+};
|
|
+#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
|
|
+
|
|
+enum {
|
|
+ TCA_GRED_VQ_UNSPEC,
|
|
+ TCA_GRED_VQ_PAD,
|
|
+ TCA_GRED_VQ_DP, /* u32 */
|
|
+ TCA_GRED_VQ_STAT_BYTES, /* u64 */
|
|
+ TCA_GRED_VQ_STAT_PACKETS, /* u32 */
|
|
+ TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
|
|
+ TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
|
|
+ TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
|
|
+ TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
|
|
+ TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
|
|
+ TCA_GRED_VQ_STAT_PDROP, /* u32 */
|
|
+ TCA_GRED_VQ_STAT_OTHER, /* u32 */
|
|
+ TCA_GRED_VQ_FLAGS, /* u32 */
|
|
+ __TCA_GRED_VQ_MAX
|
|
+};
|
|
+
|
|
+#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
|
|
+
|
|
+struct tc_gred_qopt {
|
|
+ __u32 limit; /* HARD maximal queue length (bytes) */
|
|
+ __u32 qth_min; /* Min average length threshold (bytes) */
|
|
+ __u32 qth_max; /* Max average length threshold (bytes) */
|
|
+ __u32 DP; /* up to 2^32 DPs */
|
|
+ __u32 backlog;
|
|
+ __u32 qave;
|
|
+ __u32 forced;
|
|
+ __u32 early;
|
|
+ __u32 other;
|
|
+ __u32 pdrop;
|
|
+ __u8 Wlog; /* log(W) */
|
|
+ __u8 Plog; /* log(P_max/(qth_max-qth_min)) */
|
|
+ __u8 Scell_log; /* cell size for idle damping */
|
|
+ __u8 prio; /* prio of this VQ */
|
|
+ __u32 packets;
|
|
+ __u32 bytesin;
|
|
+};
|
|
+
|
|
+/* gred setup */
|
|
+struct tc_gred_sopt {
|
|
+ __u32 DPs;
|
|
+ __u32 def_DP;
|
|
+ __u8 grio;
|
|
+ __u8 flags;
|
|
+ __u16 pad1;
|
|
+};
|
|
+
|
|
+/* CHOKe section */
|
|
+
|
|
+enum {
|
|
+ TCA_CHOKE_UNSPEC,
|
|
+ TCA_CHOKE_PARMS,
|
|
+ TCA_CHOKE_STAB,
|
|
+ TCA_CHOKE_MAX_P,
|
|
+ __TCA_CHOKE_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
|
|
+
|
|
+struct tc_choke_qopt {
|
|
+ __u32 limit; /* Hard queue length (packets) */
|
|
+ __u32 qth_min; /* Min average threshold (packets) */
|
|
+ __u32 qth_max; /* Max average threshold (packets) */
|
|
+ unsigned char Wlog; /* log(W) */
|
|
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
|
|
+ unsigned char Scell_log; /* cell size for idle damping */
|
|
+ unsigned char flags; /* see RED flags */
|
|
+};
|
|
+
|
|
+struct tc_choke_xstats {
|
|
+ __u32 early; /* Early drops */
|
|
+ __u32 pdrop; /* Drops due to queue limits */
|
|
+ __u32 other; /* Drops due to drop() calls */
|
|
+ __u32 marked; /* Marked packets */
|
|
+ __u32 matched; /* Drops due to flow match */
|
|
+};
|
|
+
|
|
+/* HTB section */
|
|
+#define TC_HTB_NUMPRIO 8
|
|
+#define TC_HTB_MAXDEPTH 8
|
|
+#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */
|
|
+
|
|
+struct tc_htb_opt {
|
|
+ struct tc_ratespec rate;
|
|
+ struct tc_ratespec ceil;
|
|
+ __u32 buffer;
|
|
+ __u32 cbuffer;
|
|
+ __u32 quantum;
|
|
+ __u32 level; /* out only */
|
|
+ __u32 prio;
|
|
+};
|
|
+struct tc_htb_glob {
|
|
+ __u32 version; /* to match HTB/TC */
|
|
+ __u32 rate2quantum; /* bps->quantum divisor */
|
|
+ __u32 defcls; /* default class number */
|
|
+ __u32 debug; /* debug flags */
|
|
+
|
|
+ /* stats */
|
|
+ __u32 direct_pkts; /* count of non shaped packets */
|
|
+};
|
|
+enum {
|
|
+ TCA_HTB_UNSPEC,
|
|
+ TCA_HTB_PARMS,
|
|
+ TCA_HTB_INIT,
|
|
+ TCA_HTB_CTAB,
|
|
+ TCA_HTB_RTAB,
|
|
+ TCA_HTB_DIRECT_QLEN,
|
|
+ TCA_HTB_RATE64,
|
|
+ TCA_HTB_CEIL64,
|
|
+ TCA_HTB_PAD,
|
|
+ __TCA_HTB_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
|
|
+
|
|
+struct tc_htb_xstats {
|
|
+ __u32 lends;
|
|
+ __u32 borrows;
|
|
+ __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */
|
|
+ __s32 tokens;
|
|
+ __s32 ctokens;
|
|
+};
|
|
+
|
|
+/* HFSC section */
|
|
+
|
|
+struct tc_hfsc_qopt {
|
|
+ __u16 defcls; /* default class */
|
|
+};
|
|
+
|
|
+struct tc_service_curve {
|
|
+ __u32 m1; /* slope of the first segment in bps */
|
|
+ __u32 d; /* x-projection of the first segment in us */
|
|
+ __u32 m2; /* slope of the second segment in bps */
|
|
+};
|
|
+
|
|
+struct tc_hfsc_stats {
|
|
+ __u64 work; /* total work done */
|
|
+ __u64 rtwork; /* work done by real-time criteria */
|
|
+ __u32 period; /* current period */
|
|
+ __u32 level; /* class level in hierarchy */
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_HFSC_UNSPEC,
|
|
+ TCA_HFSC_RSC,
|
|
+ TCA_HFSC_FSC,
|
|
+ TCA_HFSC_USC,
|
|
+ __TCA_HFSC_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
|
|
+
|
|
+
|
|
+/* CBQ section */
|
|
+
|
|
+#define TC_CBQ_MAXPRIO 8
|
|
+#define TC_CBQ_MAXLEVEL 8
|
|
+#define TC_CBQ_DEF_EWMA 5
|
|
+
|
|
+struct tc_cbq_lssopt {
|
|
+ unsigned char change;
|
|
+ unsigned char flags;
|
|
+#define TCF_CBQ_LSS_BOUNDED 1
|
|
+#define TCF_CBQ_LSS_ISOLATED 2
|
|
+ unsigned char ewma_log;
|
|
+ unsigned char level;
|
|
+#define TCF_CBQ_LSS_FLAGS 1
|
|
+#define TCF_CBQ_LSS_EWMA 2
|
|
+#define TCF_CBQ_LSS_MAXIDLE 4
|
|
+#define TCF_CBQ_LSS_MINIDLE 8
|
|
+#define TCF_CBQ_LSS_OFFTIME 0x10
|
|
+#define TCF_CBQ_LSS_AVPKT 0x20
|
|
+ __u32 maxidle;
|
|
+ __u32 minidle;
|
|
+ __u32 offtime;
|
|
+ __u32 avpkt;
|
|
+};
|
|
+
|
|
+struct tc_cbq_wrropt {
|
|
+ unsigned char flags;
|
|
+ unsigned char priority;
|
|
+ unsigned char cpriority;
|
|
+ unsigned char __reserved;
|
|
+ __u32 allot;
|
|
+ __u32 weight;
|
|
+};
|
|
+
|
|
+struct tc_cbq_ovl {
|
|
+ unsigned char strategy;
|
|
+#define TC_CBQ_OVL_CLASSIC 0
|
|
+#define TC_CBQ_OVL_DELAY 1
|
|
+#define TC_CBQ_OVL_LOWPRIO 2
|
|
+#define TC_CBQ_OVL_DROP 3
|
|
+#define TC_CBQ_OVL_RCLASSIC 4
|
|
+ unsigned char priority2;
|
|
+ __u16 pad;
|
|
+ __u32 penalty;
|
|
+};
|
|
+
|
|
+struct tc_cbq_police {
|
|
+ unsigned char police;
|
|
+ unsigned char __res1;
|
|
+ unsigned short __res2;
|
|
+};
|
|
+
|
|
+struct tc_cbq_fopt {
|
|
+ __u32 split;
|
|
+ __u32 defmap;
|
|
+ __u32 defchange;
|
|
+};
|
|
+
|
|
+struct tc_cbq_xstats {
|
|
+ __u32 borrows;
|
|
+ __u32 overactions;
|
|
+ __s32 avgidle;
|
|
+ __s32 undertime;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_CBQ_UNSPEC,
|
|
+ TCA_CBQ_LSSOPT,
|
|
+ TCA_CBQ_WRROPT,
|
|
+ TCA_CBQ_FOPT,
|
|
+ TCA_CBQ_OVL_STRATEGY,
|
|
+ TCA_CBQ_RATE,
|
|
+ TCA_CBQ_RTAB,
|
|
+ TCA_CBQ_POLICE,
|
|
+ __TCA_CBQ_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
|
|
+
|
|
+/* dsmark section */
|
|
+
|
|
+enum {
|
|
+ TCA_DSMARK_UNSPEC,
|
|
+ TCA_DSMARK_INDICES,
|
|
+ TCA_DSMARK_DEFAULT_INDEX,
|
|
+ TCA_DSMARK_SET_TC_INDEX,
|
|
+ TCA_DSMARK_MASK,
|
|
+ TCA_DSMARK_VALUE,
|
|
+ __TCA_DSMARK_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
|
|
+
|
|
+/* ATM section */
|
|
+
|
|
+enum {
|
|
+ TCA_ATM_UNSPEC,
|
|
+ TCA_ATM_FD, /* file/socket descriptor */
|
|
+ TCA_ATM_PTR, /* pointer to descriptor - later */
|
|
+ TCA_ATM_HDR, /* LL header */
|
|
+ TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
|
|
+ TCA_ATM_ADDR, /* PVC address (for output only) */
|
|
+ TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
|
|
+ __TCA_ATM_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
|
|
+
|
|
+/* Network emulator */
|
|
+
|
|
+enum {
|
|
+ TCA_NETEM_UNSPEC,
|
|
+ TCA_NETEM_CORR,
|
|
+ TCA_NETEM_DELAY_DIST,
|
|
+ TCA_NETEM_REORDER,
|
|
+ TCA_NETEM_CORRUPT,
|
|
+ TCA_NETEM_LOSS,
|
|
+ TCA_NETEM_RATE,
|
|
+ TCA_NETEM_ECN,
|
|
+ TCA_NETEM_RATE64,
|
|
+ TCA_NETEM_PAD,
|
|
+ TCA_NETEM_LATENCY64,
|
|
+ TCA_NETEM_JITTER64,
|
|
+ TCA_NETEM_SLOT,
|
|
+ TCA_NETEM_SLOT_DIST,
|
|
+ __TCA_NETEM_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
|
|
+
|
|
+struct tc_netem_qopt {
|
|
+ __u32 latency; /* added delay (us) */
|
|
+ __u32 limit; /* fifo limit (packets) */
|
|
+ __u32 loss; /* random packet loss (0=none ~0=100%) */
|
|
+ __u32 gap; /* re-ordering gap (0 for none) */
|
|
+ __u32 duplicate; /* random packet dup (0=none ~0=100%) */
|
|
+ __u32 jitter; /* random jitter in latency (us) */
|
|
+};
|
|
+
|
|
+struct tc_netem_corr {
|
|
+ __u32 delay_corr; /* delay correlation */
|
|
+ __u32 loss_corr; /* packet loss correlation */
|
|
+ __u32 dup_corr; /* duplicate correlation */
|
|
+};
|
|
+
|
|
+struct tc_netem_reorder {
|
|
+ __u32 probability;
|
|
+ __u32 correlation;
|
|
+};
|
|
+
|
|
+struct tc_netem_corrupt {
|
|
+ __u32 probability;
|
|
+ __u32 correlation;
|
|
+};
|
|
+
|
|
+struct tc_netem_rate {
|
|
+ __u32 rate; /* byte/s */
|
|
+ __s32 packet_overhead;
|
|
+ __u32 cell_size;
|
|
+ __s32 cell_overhead;
|
|
+};
|
|
+
|
|
+struct tc_netem_slot {
|
|
+ __s64 min_delay; /* nsec */
|
|
+ __s64 max_delay;
|
|
+ __s32 max_packets;
|
|
+ __s32 max_bytes;
|
|
+ __s64 dist_delay; /* nsec */
|
|
+ __s64 dist_jitter; /* nsec */
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NETEM_LOSS_UNSPEC,
|
|
+ NETEM_LOSS_GI, /* General Intuitive - 4 state model */
|
|
+ NETEM_LOSS_GE, /* Gilbert Elliot models */
|
|
+ __NETEM_LOSS_MAX
|
|
+};
|
|
+#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
|
|
+
|
|
+/* State transition probabilities for 4 state model */
|
|
+struct tc_netem_gimodel {
|
|
+ __u32 p13;
|
|
+ __u32 p31;
|
|
+ __u32 p32;
|
|
+ __u32 p14;
|
|
+ __u32 p23;
|
|
+};
|
|
+
|
|
+/* Gilbert-Elliot models */
|
|
+struct tc_netem_gemodel {
|
|
+ __u32 p;
|
|
+ __u32 r;
|
|
+ __u32 h;
|
|
+ __u32 k1;
|
|
+};
|
|
+
|
|
+#define NETEM_DIST_SCALE 8192
|
|
+#define NETEM_DIST_MAX 16384
|
|
+
|
|
+/* DRR */
|
|
+
|
|
+enum {
|
|
+ TCA_DRR_UNSPEC,
|
|
+ TCA_DRR_QUANTUM,
|
|
+ __TCA_DRR_MAX
|
|
+};
|
|
+
|
|
+#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
|
|
+
|
|
+struct tc_drr_stats {
|
|
+ __u32 deficit;
|
|
+};
|
|
+
|
|
+/* MQPRIO */
|
|
+#define TC_QOPT_BITMASK 15
|
|
+#define TC_QOPT_MAX_QUEUE 16
|
|
+
|
|
+enum {
|
|
+ TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */
|
|
+ TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */
|
|
+ __TC_MQPRIO_HW_OFFLOAD_MAX
|
|
+};
|
|
+
|
|
+#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
|
|
+
|
|
+enum {
|
|
+ TC_MQPRIO_MODE_DCB,
|
|
+ TC_MQPRIO_MODE_CHANNEL,
|
|
+ __TC_MQPRIO_MODE_MAX
|
|
+};
|
|
+
|
|
+#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
|
|
+
|
|
+enum {
|
|
+ TC_MQPRIO_SHAPER_DCB,
|
|
+ TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */
|
|
+ __TC_MQPRIO_SHAPER_MAX
|
|
+};
|
|
+
|
|
+#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
|
|
+
|
|
+struct tc_mqprio_qopt {
|
|
+ __u8 num_tc;
|
|
+ __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
|
|
+ __u8 hw;
|
|
+ __u16 count[TC_QOPT_MAX_QUEUE];
|
|
+ __u16 offset[TC_QOPT_MAX_QUEUE];
|
|
+};
|
|
+
|
|
+#define TC_MQPRIO_F_MODE 0x1
|
|
+#define TC_MQPRIO_F_SHAPER 0x2
|
|
+#define TC_MQPRIO_F_MIN_RATE 0x4
|
|
+#define TC_MQPRIO_F_MAX_RATE 0x8
|
|
+
|
|
+enum {
|
|
+ TCA_MQPRIO_UNSPEC,
|
|
+ TCA_MQPRIO_MODE,
|
|
+ TCA_MQPRIO_SHAPER,
|
|
+ TCA_MQPRIO_MIN_RATE64,
|
|
+ TCA_MQPRIO_MAX_RATE64,
|
|
+ __TCA_MQPRIO_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
|
|
+
|
|
+/* SFB */
|
|
+
|
|
+enum {
|
|
+ TCA_SFB_UNSPEC,
|
|
+ TCA_SFB_PARMS,
|
|
+ __TCA_SFB_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
|
|
+
|
|
+/*
|
|
+ * Note: increment, decrement are Q0.16 fixed-point values.
|
|
+ */
|
|
+struct tc_sfb_qopt {
|
|
+ __u32 rehash_interval; /* delay between hash move, in ms */
|
|
+ __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */
|
|
+ __u32 max; /* max len of qlen_min */
|
|
+ __u32 bin_size; /* maximum queue length per bin */
|
|
+ __u32 increment; /* probability increment, (d1 in Blue) */
|
|
+ __u32 decrement; /* probability decrement, (d2 in Blue) */
|
|
+ __u32 limit; /* max SFB queue length */
|
|
+ __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */
|
|
+ __u32 penalty_burst;
|
|
+};
|
|
+
|
|
+struct tc_sfb_xstats {
|
|
+ __u32 earlydrop;
|
|
+ __u32 penaltydrop;
|
|
+ __u32 bucketdrop;
|
|
+ __u32 queuedrop;
|
|
+ __u32 childdrop; /* drops in child qdisc */
|
|
+ __u32 marked;
|
|
+ __u32 maxqlen;
|
|
+ __u32 maxprob;
|
|
+ __u32 avgprob;
|
|
+};
|
|
+
|
|
+#define SFB_MAX_PROB 0xFFFF
|
|
+
|
|
+/* QFQ */
|
|
+enum {
|
|
+ TCA_QFQ_UNSPEC,
|
|
+ TCA_QFQ_WEIGHT,
|
|
+ TCA_QFQ_LMAX,
|
|
+ __TCA_QFQ_MAX
|
|
+};
|
|
+
|
|
+#define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1)
|
|
+
|
|
+struct tc_qfq_stats {
|
|
+ __u32 weight;
|
|
+ __u32 lmax;
|
|
+};
|
|
+
|
|
+/* CODEL */
|
|
+
|
|
+enum {
|
|
+ TCA_CODEL_UNSPEC,
|
|
+ TCA_CODEL_TARGET,
|
|
+ TCA_CODEL_LIMIT,
|
|
+ TCA_CODEL_INTERVAL,
|
|
+ TCA_CODEL_ECN,
|
|
+ TCA_CODEL_CE_THRESHOLD,
|
|
+ __TCA_CODEL_MAX
|
|
+};
|
|
+
|
|
+#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
|
|
+
|
|
+struct tc_codel_xstats {
|
|
+ __u32 maxpacket; /* largest packet we've seen so far */
|
|
+ __u32 count; /* how many drops we've done since the last time we
|
|
+ * entered dropping state
|
|
+ */
|
|
+ __u32 lastcount; /* count at entry to dropping state */
|
|
+ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
|
|
+ __s32 drop_next; /* time to drop next packet */
|
|
+ __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */
|
|
+ __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
|
|
+ __u32 dropping; /* are we in dropping state ? */
|
|
+ __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
|
|
+};
|
|
+
|
|
+/* FQ_CODEL */
|
|
+
|
|
+enum {
|
|
+ TCA_FQ_CODEL_UNSPEC,
|
|
+ TCA_FQ_CODEL_TARGET,
|
|
+ TCA_FQ_CODEL_LIMIT,
|
|
+ TCA_FQ_CODEL_INTERVAL,
|
|
+ TCA_FQ_CODEL_ECN,
|
|
+ TCA_FQ_CODEL_FLOWS,
|
|
+ TCA_FQ_CODEL_QUANTUM,
|
|
+ TCA_FQ_CODEL_CE_THRESHOLD,
|
|
+ TCA_FQ_CODEL_DROP_BATCH_SIZE,
|
|
+ TCA_FQ_CODEL_MEMORY_LIMIT,
|
|
+ __TCA_FQ_CODEL_MAX
|
|
+};
|
|
+
|
|
+#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1)
|
|
+
|
|
+enum {
|
|
+ TCA_FQ_CODEL_XSTATS_QDISC,
|
|
+ TCA_FQ_CODEL_XSTATS_CLASS,
|
|
+};
|
|
+
|
|
+struct tc_fq_codel_qd_stats {
|
|
+ __u32 maxpacket; /* largest packet we've seen so far */
|
|
+ __u32 drop_overlimit; /* number of time max qdisc
|
|
+ * packet limit was hit
|
|
+ */
|
|
+ __u32 ecn_mark; /* number of packets we ECN marked
|
|
+ * instead of being dropped
|
|
+ */
|
|
+ __u32 new_flow_count; /* number of time packets
|
|
+ * created a 'new flow'
|
|
+ */
|
|
+ __u32 new_flows_len; /* count of flows in new list */
|
|
+ __u32 old_flows_len; /* count of flows in old list */
|
|
+ __u32 ce_mark; /* packets above ce_threshold */
|
|
+ __u32 memory_usage; /* in bytes */
|
|
+ __u32 drop_overmemory;
|
|
+};
|
|
+
|
|
+struct tc_fq_codel_cl_stats {
|
|
+ __s32 deficit;
|
|
+ __u32 ldelay; /* in-queue delay seen by most recently
|
|
+ * dequeued packet
|
|
+ */
|
|
+ __u32 count;
|
|
+ __u32 lastcount;
|
|
+ __u32 dropping;
|
|
+ __s32 drop_next;
|
|
+};
|
|
+
|
|
+struct tc_fq_codel_xstats {
|
|
+ __u32 type;
|
|
+ union {
|
|
+ struct tc_fq_codel_qd_stats qdisc_stats;
|
|
+ struct tc_fq_codel_cl_stats class_stats;
|
|
+ };
|
|
+};
|
|
+
|
|
+/* FQ */
|
|
+
|
|
+enum {
|
|
+ TCA_FQ_UNSPEC,
|
|
+
|
|
+ TCA_FQ_PLIMIT, /* limit of total number of packets in queue */
|
|
+
|
|
+ TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
|
|
+
|
|
+ TCA_FQ_QUANTUM, /* RR quantum */
|
|
+
|
|
+ TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */
|
|
+
|
|
+ TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
|
|
+
|
|
+ TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
|
|
+
|
|
+ TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
|
|
+
|
|
+ TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
|
|
+
|
|
+ TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
|
|
+
|
|
+ TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
|
|
+
|
|
+ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
|
|
+
|
|
+ TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
|
|
+
|
|
+ __TCA_FQ_MAX
|
|
+};
|
|
+
|
|
+#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
|
|
+
|
|
+struct tc_fq_qd_stats {
|
|
+ __u64 gc_flows;
|
|
+ __u64 highprio_packets;
|
|
+ __u64 tcp_retrans;
|
|
+ __u64 throttled;
|
|
+ __u64 flows_plimit;
|
|
+ __u64 pkts_too_long;
|
|
+ __u64 allocation_errors;
|
|
+ __s64 time_next_delayed_flow;
|
|
+ __u32 flows;
|
|
+ __u32 inactive_flows;
|
|
+ __u32 throttled_flows;
|
|
+ __u32 unthrottle_latency_ns;
|
|
+ __u64 ce_mark; /* packets above ce_threshold */
|
|
+};
|
|
+
|
|
+/* Heavy-Hitter Filter */
|
|
+
|
|
+enum {
|
|
+ TCA_HHF_UNSPEC,
|
|
+ TCA_HHF_BACKLOG_LIMIT,
|
|
+ TCA_HHF_QUANTUM,
|
|
+ TCA_HHF_HH_FLOWS_LIMIT,
|
|
+ TCA_HHF_RESET_TIMEOUT,
|
|
+ TCA_HHF_ADMIT_BYTES,
|
|
+ TCA_HHF_EVICT_TIMEOUT,
|
|
+ TCA_HHF_NON_HH_WEIGHT,
|
|
+ __TCA_HHF_MAX
|
|
+};
|
|
+
|
|
+#define TCA_HHF_MAX (__TCA_HHF_MAX - 1)
|
|
+
|
|
+struct tc_hhf_xstats {
|
|
+ __u32 drop_overlimit; /* number of times max qdisc packet limit
|
|
+ * was hit
|
|
+ */
|
|
+ __u32 hh_overlimit; /* number of times max heavy-hitters was hit */
|
|
+ __u32 hh_tot_count; /* number of captured heavy-hitters so far */
|
|
+ __u32 hh_cur_count; /* number of current heavy-hitters */
|
|
+};
|
|
+
|
|
+/* PIE */
|
|
+enum {
|
|
+ TCA_PIE_UNSPEC,
|
|
+ TCA_PIE_TARGET,
|
|
+ TCA_PIE_LIMIT,
|
|
+ TCA_PIE_TUPDATE,
|
|
+ TCA_PIE_ALPHA,
|
|
+ TCA_PIE_BETA,
|
|
+ TCA_PIE_ECN,
|
|
+ TCA_PIE_BYTEMODE,
|
|
+ __TCA_PIE_MAX
|
|
+};
|
|
+#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
|
|
+
|
|
+struct tc_pie_xstats {
|
|
+ __u32 prob; /* current probability */
|
|
+ __u32 delay; /* current delay in ms */
|
|
+ __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
|
|
+ __u32 packets_in; /* total number of packets enqueued */
|
|
+ __u32 dropped; /* packets dropped due to pie_action */
|
|
+ __u32 overlimit; /* dropped due to lack of space in queue */
|
|
+ __u32 maxq; /* maximum queue size */
|
|
+ __u32 ecn_mark; /* packets marked with ecn*/
|
|
+};
|
|
+
|
|
+/* CBS */
|
|
+struct tc_cbs_qopt {
|
|
+ __u8 offload;
|
|
+ __u8 _pad[3];
|
|
+ __s32 hicredit;
|
|
+ __s32 locredit;
|
|
+ __s32 idleslope;
|
|
+ __s32 sendslope;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_CBS_UNSPEC,
|
|
+ TCA_CBS_PARMS,
|
|
+ __TCA_CBS_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
|
|
+
|
|
+
|
|
+/* ETF */
|
|
+struct tc_etf_qopt {
|
|
+ __s32 delta;
|
|
+ __s32 clockid;
|
|
+ __u32 flags;
|
|
+#define TC_ETF_DEADLINE_MODE_ON BIT(0)
|
|
+#define TC_ETF_OFFLOAD_ON BIT(1)
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_ETF_UNSPEC,
|
|
+ TCA_ETF_PARMS,
|
|
+ __TCA_ETF_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
|
|
+
|
|
+
|
|
+/* CAKE */
|
|
+enum {
|
|
+ TCA_CAKE_UNSPEC,
|
|
+ TCA_CAKE_PAD,
|
|
+ TCA_CAKE_BASE_RATE64,
|
|
+ TCA_CAKE_DIFFSERV_MODE,
|
|
+ TCA_CAKE_ATM,
|
|
+ TCA_CAKE_FLOW_MODE,
|
|
+ TCA_CAKE_OVERHEAD,
|
|
+ TCA_CAKE_RTT,
|
|
+ TCA_CAKE_TARGET,
|
|
+ TCA_CAKE_AUTORATE,
|
|
+ TCA_CAKE_MEMORY,
|
|
+ TCA_CAKE_NAT,
|
|
+ TCA_CAKE_RAW,
|
|
+ TCA_CAKE_WASH,
|
|
+ TCA_CAKE_MPU,
|
|
+ TCA_CAKE_INGRESS,
|
|
+ TCA_CAKE_ACK_FILTER,
|
|
+ TCA_CAKE_SPLIT_GSO,
|
|
+ __TCA_CAKE_MAX
|
|
+};
|
|
+#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
|
|
+
|
|
+enum {
|
|
+ __TCA_CAKE_STATS_INVALID,
|
|
+ TCA_CAKE_STATS_PAD,
|
|
+ TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
|
|
+ TCA_CAKE_STATS_MEMORY_LIMIT,
|
|
+ TCA_CAKE_STATS_MEMORY_USED,
|
|
+ TCA_CAKE_STATS_AVG_NETOFF,
|
|
+ TCA_CAKE_STATS_MIN_NETLEN,
|
|
+ TCA_CAKE_STATS_MAX_NETLEN,
|
|
+ TCA_CAKE_STATS_MIN_ADJLEN,
|
|
+ TCA_CAKE_STATS_MAX_ADJLEN,
|
|
+ TCA_CAKE_STATS_TIN_STATS,
|
|
+ TCA_CAKE_STATS_DEFICIT,
|
|
+ TCA_CAKE_STATS_COBALT_COUNT,
|
|
+ TCA_CAKE_STATS_DROPPING,
|
|
+ TCA_CAKE_STATS_DROP_NEXT_US,
|
|
+ TCA_CAKE_STATS_P_DROP,
|
|
+ TCA_CAKE_STATS_BLUE_TIMER_US,
|
|
+ __TCA_CAKE_STATS_MAX
|
|
+};
|
|
+#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
|
|
+
|
|
+enum {
|
|
+ __TCA_CAKE_TIN_STATS_INVALID,
|
|
+ TCA_CAKE_TIN_STATS_PAD,
|
|
+ TCA_CAKE_TIN_STATS_SENT_PACKETS,
|
|
+ TCA_CAKE_TIN_STATS_SENT_BYTES64,
|
|
+ TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
|
|
+ TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
|
|
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
|
|
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
|
|
+ TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
|
|
+ TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
|
|
+ TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
|
|
+ TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
|
|
+ TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
|
|
+ TCA_CAKE_TIN_STATS_TARGET_US,
|
|
+ TCA_CAKE_TIN_STATS_INTERVAL_US,
|
|
+ TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
|
|
+ TCA_CAKE_TIN_STATS_WAY_MISSES,
|
|
+ TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
|
|
+ TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
|
|
+ TCA_CAKE_TIN_STATS_AVG_DELAY_US,
|
|
+ TCA_CAKE_TIN_STATS_BASE_DELAY_US,
|
|
+ TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
|
|
+ TCA_CAKE_TIN_STATS_BULK_FLOWS,
|
|
+ TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
|
|
+ TCA_CAKE_TIN_STATS_MAX_SKBLEN,
|
|
+ TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
|
|
+ __TCA_CAKE_TIN_STATS_MAX
|
|
+};
|
|
+#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
|
|
+#define TC_CAKE_MAX_TINS (8)
|
|
+
|
|
+enum {
|
|
+ CAKE_FLOW_NONE = 0,
|
|
+ CAKE_FLOW_SRC_IP,
|
|
+ CAKE_FLOW_DST_IP,
|
|
+ CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
|
|
+ CAKE_FLOW_FLOWS,
|
|
+ CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
|
|
+ CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
|
|
+ CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
|
|
+ CAKE_FLOW_MAX,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CAKE_DIFFSERV_DIFFSERV3 = 0,
|
|
+ CAKE_DIFFSERV_DIFFSERV4,
|
|
+ CAKE_DIFFSERV_DIFFSERV8,
|
|
+ CAKE_DIFFSERV_BESTEFFORT,
|
|
+ CAKE_DIFFSERV_PRECEDENCE,
|
|
+ CAKE_DIFFSERV_MAX
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CAKE_ACK_NONE = 0,
|
|
+ CAKE_ACK_FILTER,
|
|
+ CAKE_ACK_AGGRESSIVE,
|
|
+ CAKE_ACK_MAX
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CAKE_ATM_NONE = 0,
|
|
+ CAKE_ATM_ATM,
|
|
+ CAKE_ATM_PTM,
|
|
+ CAKE_ATM_MAX
|
|
+};
|
|
+
|
|
+
|
|
+/* TAPRIO */
|
|
+enum {
|
|
+ TC_TAPRIO_CMD_SET_GATES = 0x00,
|
|
+ TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
|
|
+ TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
|
|
+ TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
|
|
+ TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
|
|
+ TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
|
|
+ TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
|
|
+ __TCA_TAPRIO_SCHED_ENTRY_MAX,
|
|
+};
|
|
+#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
|
|
+
|
|
+/* The format for schedule entry list is:
|
|
+ * [TCA_TAPRIO_SCHED_ENTRY_LIST]
|
|
+ * [TCA_TAPRIO_SCHED_ENTRY]
|
|
+ * [TCA_TAPRIO_SCHED_ENTRY_CMD]
|
|
+ * [TCA_TAPRIO_SCHED_ENTRY_GATES]
|
|
+ * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
|
|
+ */
|
|
+enum {
|
|
+ TCA_TAPRIO_SCHED_UNSPEC,
|
|
+ TCA_TAPRIO_SCHED_ENTRY,
|
|
+ __TCA_TAPRIO_SCHED_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
|
|
+
|
|
+enum {
|
|
+ TCA_TAPRIO_ATTR_UNSPEC,
|
|
+ TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
|
|
+ TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
|
|
+ TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
|
|
+ TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
|
|
+ TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
|
|
+ TCA_TAPRIO_PAD,
|
|
+ __TCA_TAPRIO_ATTR_MAX,
|
|
+};
|
|
+
|
|
+#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
|
|
+
|
|
+#endif
|
|
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
|
|
index 03f9bcc4ef501..961e1b9fc5927 100644
|
|
--- a/tools/lib/bpf/bpf.c
|
|
+++ b/tools/lib/bpf/bpf.c
|
|
@@ -69,6 +69,7 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
|
|
{
|
|
__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
|
|
union bpf_attr attr;
|
|
+ int ret;
|
|
|
|
memset(&attr, '\0', sizeof(attr));
|
|
|
|
@@ -86,7 +87,15 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
|
|
attr.map_ifindex = create_attr->map_ifindex;
|
|
attr.inner_map_fd = create_attr->inner_map_fd;
|
|
|
|
- return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
|
|
+ ret = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
|
|
+ if (ret < 0 && errno == EINVAL && create_attr->name) {
|
|
+ /* Retry the same syscall, but without the name.
|
|
+ * Pre v4.14 kernels don't support map names.
|
|
+ */
|
|
+ memset(attr.map_name, 0, sizeof(attr.map_name));
|
|
+ return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
|
|
+ }
|
|
+ return ret;
|
|
}
|
|
|
|
int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
|
|
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
|
|
index 95563b8e1ad74..ed61fb3a46c08 100644
|
|
--- a/tools/lib/subcmd/Makefile
|
|
+++ b/tools/lib/subcmd/Makefile
|
|
@@ -36,8 +36,6 @@ endif
|
|
CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
|
|
|
|
CFLAGS += -I$(srctree)/tools/include/
|
|
-CFLAGS += -I$(srctree)/include/uapi
|
|
-CFLAGS += -I$(srctree)/include
|
|
|
|
SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
|
|
|
|
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
|
|
index 3692f29fee464..8ea1a02812b03 100644
|
|
--- a/tools/lib/traceevent/event-parse.c
|
|
+++ b/tools/lib/traceevent/event-parse.c
|
|
@@ -3498,7 +3498,7 @@ struct tep_event_format *
|
|
tep_find_event_by_name(struct tep_handle *pevent,
|
|
const char *sys, const char *name)
|
|
{
|
|
- struct tep_event_format *event;
|
|
+ struct tep_event_format *event = NULL;
|
|
int i;
|
|
|
|
if (pevent->last_event &&
|
|
@@ -4221,7 +4221,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
|
|
unsigned long long ip, val;
|
|
char *ptr;
|
|
void *bptr;
|
|
- int vsize;
|
|
+ int vsize = 0;
|
|
|
|
field = pevent->bprint_buf_field;
|
|
ip_field = pevent->bprint_ip_field;
|
|
@@ -4881,7 +4881,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
|
|
char format[32];
|
|
int show_func;
|
|
int len_as_arg;
|
|
- int len_arg;
|
|
+ int len_arg = 0;
|
|
int len;
|
|
int ls;
|
|
|
|
@@ -4970,6 +4970,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
|
|
|
|
if (arg->type == TEP_PRINT_BSTRING) {
|
|
trace_seq_puts(s, arg->string.string);
|
|
+ arg = arg->next;
|
|
break;
|
|
}
|
|
|
|
@@ -5146,8 +5147,8 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
|
|
static int migrate_disable_exists;
|
|
unsigned int lat_flags;
|
|
unsigned int pc;
|
|
- int lock_depth;
|
|
- int migrate_disable;
|
|
+ int lock_depth = 0;
|
|
+ int migrate_disable = 0;
|
|
int hardirq;
|
|
int softirq;
|
|
void *data = record->data;
|
|
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
|
|
index a0e8c23f91255..a0f366c7a3185 100644
|
|
--- a/tools/perf/Makefile.config
|
|
+++ b/tools/perf/Makefile.config
|
|
@@ -294,6 +294,8 @@ ifndef NO_BIONIC
|
|
$(call feature_check,bionic)
|
|
ifeq ($(feature-bionic), 1)
|
|
BIONIC := 1
|
|
+ CFLAGS += -DLACKS_SIGQUEUE_PROTOTYPE
|
|
+ CFLAGS += -DLACKS_OPEN_MEMSTREAM_PROTOTYPE
|
|
EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
|
|
EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
|
|
endif
|
|
@@ -691,18 +693,20 @@ endif
|
|
|
|
ifeq ($(feature-libbfd), 1)
|
|
EXTLIBS += -lbfd
|
|
+else
|
|
+ # we are on a system that requires -liberty and (maybe) -lz
|
|
+ # to link against -lbfd; test each case individually here
|
|
|
|
# call all detections now so we get correct
|
|
# status in VF output
|
|
- $(call feature_check,liberty)
|
|
- $(call feature_check,liberty-z)
|
|
- $(call feature_check,cplus-demangle)
|
|
+ $(call feature_check,libbfd-liberty)
|
|
+ $(call feature_check,libbfd-liberty-z)
|
|
|
|
- ifeq ($(feature-liberty), 1)
|
|
- EXTLIBS += -liberty
|
|
+ ifeq ($(feature-libbfd-liberty), 1)
|
|
+ EXTLIBS += -lbfd -liberty
|
|
else
|
|
- ifeq ($(feature-liberty-z), 1)
|
|
- EXTLIBS += -liberty -lz
|
|
+ ifeq ($(feature-libbfd-liberty-z), 1)
|
|
+ EXTLIBS += -lbfd -liberty -lz
|
|
endif
|
|
endif
|
|
endif
|
|
@@ -712,24 +716,24 @@ ifdef NO_DEMANGLE
|
|
else
|
|
ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
|
|
EXTLIBS += -liberty
|
|
- CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
|
|
else
|
|
- ifneq ($(feature-libbfd), 1)
|
|
- ifneq ($(feature-liberty), 1)
|
|
- ifneq ($(feature-liberty-z), 1)
|
|
- # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT
|
|
- # or any of 'bfd iberty z' trinity
|
|
- ifeq ($(feature-cplus-demangle), 1)
|
|
- EXTLIBS += -liberty
|
|
- CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
|
|
- else
|
|
- msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
|
|
- CFLAGS += -DNO_DEMANGLE
|
|
- endif
|
|
- endif
|
|
+ ifeq ($(filter -liberty,$(EXTLIBS)),)
|
|
+ $(call feature_check,cplus-demangle)
|
|
+
|
|
+ # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT
|
|
+ # or any of 'bfd iberty z' trinity
|
|
+ ifeq ($(feature-cplus-demangle), 1)
|
|
+ EXTLIBS += -liberty
|
|
+ else
|
|
+ msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
|
|
+ CFLAGS += -DNO_DEMANGLE
|
|
endif
|
|
endif
|
|
endif
|
|
+
|
|
+ ifneq ($(filter -liberty,$(EXTLIBS)),)
|
|
+ CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
|
|
+ endif
|
|
endif
|
|
|
|
ifneq ($(filter -lbfd,$(EXTLIBS)),)
|
|
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
|
|
index 82657c01a3b8e..5f69fd0b745a4 100644
|
|
--- a/tools/perf/arch/common.c
|
|
+++ b/tools/perf/arch/common.c
|
|
@@ -200,3 +200,13 @@ int perf_env__lookup_objdump(struct perf_env *env, const char **path)
|
|
|
|
return perf_env__lookup_binutils_path(env, "objdump", path);
|
|
}
|
|
+
|
|
+/*
|
|
+ * Some architectures have a single address space for kernel and user addresses,
|
|
+ * which makes it possible to determine if an address is in kernel space or user
|
|
+ * space.
|
|
+ */
|
|
+bool perf_env__single_address_space(struct perf_env *env)
|
|
+{
|
|
+ return strcmp(perf_env__arch(env), "sparc");
|
|
+}
|
|
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
|
|
index 2167001b18c5e..c298a446d1f6f 100644
|
|
--- a/tools/perf/arch/common.h
|
|
+++ b/tools/perf/arch/common.h
|
|
@@ -5,5 +5,6 @@
|
|
#include "../util/env.h"
|
|
|
|
int perf_env__lookup_objdump(struct perf_env *env, const char **path);
|
|
+bool perf_env__single_address_space(struct perf_env *env);
|
|
|
|
#endif /* ARCH_PERF_COMMON_H */
|
|
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
|
|
index db0ba8caf5a24..ba8ecaf522004 100644
|
|
--- a/tools/perf/arch/x86/util/intel-pt.c
|
|
+++ b/tools/perf/arch/x86/util/intel-pt.c
|
|
@@ -524,10 +524,21 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
|
|
struct perf_evsel *evsel)
|
|
{
|
|
int err;
|
|
+ char c;
|
|
|
|
if (!evsel)
|
|
return 0;
|
|
|
|
+ /*
|
|
+ * If supported, force pass-through config term (pt=1) even if user
|
|
+ * sets pt=0, which avoids senseless kernel errors.
|
|
+ */
|
|
+ if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
|
|
+ !(evsel->attr.config & 1)) {
|
|
+ pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
|
|
+ evsel->attr.config |= 1;
|
|
+ }
|
|
+
|
|
err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
|
|
"cyc_thresh", "caps/psb_cyc",
|
|
evsel->attr.config);
|
|
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
|
|
index b32409a0e546d..081353d7b095c 100644
|
|
--- a/tools/perf/arch/x86/util/kvm-stat.c
|
|
+++ b/tools/perf/arch/x86/util/kvm-stat.c
|
|
@@ -156,7 +156,7 @@ int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
|
|
if (strstr(cpuid, "Intel")) {
|
|
kvm->exit_reasons = vmx_exit_reasons;
|
|
kvm->exit_reasons_isa = "VMX";
|
|
- } else if (strstr(cpuid, "AMD")) {
|
|
+ } else if (strstr(cpuid, "AMD") || strstr(cpuid, "Hygon")) {
|
|
kvm->exit_reasons = svm_exit_reasons;
|
|
kvm->exit_reasons_isa = "SVM";
|
|
} else
|
|
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
|
|
index b5bc85bd0bbea..a7b4d3f611c59 100644
|
|
--- a/tools/perf/builtin-script.c
|
|
+++ b/tools/perf/builtin-script.c
|
|
@@ -728,8 +728,8 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
|
|
if (PRINT_FIELD(DSO)) {
|
|
memset(&alf, 0, sizeof(alf));
|
|
memset(&alt, 0, sizeof(alt));
|
|
- thread__find_map(thread, sample->cpumode, from, &alf);
|
|
- thread__find_map(thread, sample->cpumode, to, &alt);
|
|
+ thread__find_map_fb(thread, sample->cpumode, from, &alf);
|
|
+ thread__find_map_fb(thread, sample->cpumode, to, &alt);
|
|
}
|
|
|
|
printed += fprintf(fp, " 0x%"PRIx64, from);
|
|
@@ -775,8 +775,8 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
|
|
from = br->entries[i].from;
|
|
to = br->entries[i].to;
|
|
|
|
- thread__find_symbol(thread, sample->cpumode, from, &alf);
|
|
- thread__find_symbol(thread, sample->cpumode, to, &alt);
|
|
+ thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
|
|
+ thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
|
|
|
|
printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
|
|
if (PRINT_FIELD(DSO)) {
|
|
@@ -820,11 +820,11 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
|
|
from = br->entries[i].from;
|
|
to = br->entries[i].to;
|
|
|
|
- if (thread__find_map(thread, sample->cpumode, from, &alf) &&
|
|
+ if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
|
|
!alf.map->dso->adjust_symbols)
|
|
from = map__map_ip(alf.map, from);
|
|
|
|
- if (thread__find_map(thread, sample->cpumode, to, &alt) &&
|
|
+ if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
|
|
!alt.map->dso->adjust_symbols)
|
|
to = map__map_ip(alt.map, to);
|
|
|
|
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
|
|
index a635abfa77b6a..63a3afc7f32b6 100644
|
|
--- a/tools/perf/builtin-stat.c
|
|
+++ b/tools/perf/builtin-stat.c
|
|
@@ -561,7 +561,8 @@ try_again:
|
|
break;
|
|
}
|
|
}
|
|
- wait4(child_pid, &status, 0, &stat_config.ru_data);
|
|
+ if (child_pid != -1)
|
|
+ wait4(child_pid, &status, 0, &stat_config.ru_data);
|
|
|
|
if (workload_exec_errno) {
|
|
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
|
|
@@ -709,7 +710,7 @@ static int parse_metric_groups(const struct option *opt,
|
|
return metricgroup__parse_groups(opt, str, &stat_config.metric_events);
|
|
}
|
|
|
|
-static const struct option stat_options[] = {
|
|
+static struct option stat_options[] = {
|
|
OPT_BOOLEAN('T', "transaction", &transaction_run,
|
|
"hardware transaction statistics"),
|
|
OPT_CALLBACK('e', "event", &evsel_list, "event",
|
|
@@ -1599,6 +1600,12 @@ int cmd_stat(int argc, const char **argv)
|
|
return -ENOMEM;
|
|
|
|
parse_events__shrink_config_terms();
|
|
+
|
|
+ /* String-parsing callback-based options would segfault when negated */
|
|
+ set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
|
|
+ set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
|
|
+ set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
|
|
+
|
|
argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
|
|
(const char **) stat_usage,
|
|
PARSE_OPT_STOP_AT_NON_OPTION);
|
|
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
|
|
index a827919c62630..775b99833e513 100644
|
|
--- a/tools/perf/builtin-timechart.c
|
|
+++ b/tools/perf/builtin-timechart.c
|
|
@@ -43,6 +43,10 @@
|
|
#include "util/data.h"
|
|
#include "util/debug.h"
|
|
|
|
+#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
|
|
+FILE *open_memstream(char **ptr, size_t *sizeloc);
|
|
+#endif
|
|
+
|
|
#define SUPPORT_OLD_POWER_EVENTS 1
|
|
#define PWR_EVENT_EXIT -1
|
|
|
|
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
|
|
index 36c903faed0bf..71e9737f4614d 100644
|
|
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
|
|
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
|
|
@@ -73,7 +73,7 @@
|
|
},
|
|
{
|
|
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
|
|
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
|
|
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
|
|
"MetricGroup": "Memory_Bound;Memory_Lat",
|
|
"MetricName": "Load_Miss_Real_Latency"
|
|
},
|
|
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
|
|
index 36c903faed0bf..71e9737f4614d 100644
|
|
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
|
|
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
|
|
@@ -73,7 +73,7 @@
|
|
},
|
|
{
|
|
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
|
|
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
|
|
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
|
|
"MetricGroup": "Memory_Bound;Memory_Lat",
|
|
"MetricName": "Load_Miss_Real_Latency"
|
|
},
|
|
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
|
|
index ff9b60b99f52a..44090a9a19f3b 100644
|
|
--- a/tools/perf/tests/attr.py
|
|
+++ b/tools/perf/tests/attr.py
|
|
@@ -116,7 +116,7 @@ class Event(dict):
|
|
if not self.has_key(t) or not other.has_key(t):
|
|
continue
|
|
if not data_equal(self[t], other[t]):
|
|
- log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
|
|
+ log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
|
|
|
|
# Test file description needs to have following sections:
|
|
# [config]
|
|
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
|
|
index a467615c5a0ee..910e25e641883 100644
|
|
--- a/tools/perf/tests/bp_signal.c
|
|
+++ b/tools/perf/tests/bp_signal.c
|
|
@@ -291,12 +291,20 @@ int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused
|
|
|
|
bool test__bp_signal_is_supported(void)
|
|
{
|
|
-/*
|
|
- * The powerpc so far does not have support to even create
|
|
- * instruction breakpoint using the perf event interface.
|
|
- * Once it's there we can release this.
|
|
- */
|
|
-#if defined(__powerpc__) || defined(__s390x__)
|
|
+ /*
|
|
+ * PowerPC and S390 do not support creation of instruction
|
|
+ * breakpoints using the perf_event interface.
|
|
+ *
|
|
+ * ARM requires explicit rounding down of the instruction
|
|
+ * pointer in Thumb mode, and then requires the single-step
|
|
+ * to be handled explicitly in the overflow handler to avoid
|
|
+ * stepping into the SIGIO handler and getting stuck on the
|
|
+ * breakpointed instruction.
|
|
+ *
|
|
+ * Just disable the test for these architectures until these
|
|
+ * issues are resolved.
|
|
+ */
|
|
+#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__)
|
|
return false;
|
|
#else
|
|
return true;
|
|
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
|
|
index 5f8501c68da49..5cbba70bcdd0b 100644
|
|
--- a/tools/perf/tests/evsel-tp-sched.c
|
|
+++ b/tools/perf/tests/evsel-tp-sched.c
|
|
@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
|
|
return -1;
|
|
}
|
|
|
|
- is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED);
|
|
+ is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED);
|
|
if (should_be_signed && !is_signed) {
|
|
pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
|
|
evsel->name, name, is_signed, should_be_signed);
|
|
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
|
|
index 1c16e56cd93ed..7cb99b433888b 100644
|
|
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
|
|
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
|
|
@@ -13,7 +13,8 @@ add_probe_vfs_getname() {
|
|
local verbose=$1
|
|
if [ $had_vfs_getname -eq 1 ] ; then
|
|
line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
|
|
- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
|
|
+ perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
|
|
+ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
|
|
fi
|
|
}
|
|
|
|
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
|
|
index 32ef7bdca1cf8..dc2212e121849 100644
|
|
--- a/tools/perf/util/callchain.c
|
|
+++ b/tools/perf/util/callchain.c
|
|
@@ -766,6 +766,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
|
|
cnode->cycles_count += node->branch_flags.cycles;
|
|
cnode->iter_count += node->nr_loop_iter;
|
|
cnode->iter_cycles += node->iter_cycles;
|
|
+ cnode->from_count++;
|
|
}
|
|
}
|
|
|
|
@@ -1345,10 +1346,10 @@ static int branch_to_str(char *bf, int bfsize,
|
|
static int branch_from_str(char *bf, int bfsize,
|
|
u64 branch_count,
|
|
u64 cycles_count, u64 iter_count,
|
|
- u64 iter_cycles)
|
|
+ u64 iter_cycles, u64 from_count)
|
|
{
|
|
int printed = 0, i = 0;
|
|
- u64 cycles;
|
|
+ u64 cycles, v = 0;
|
|
|
|
cycles = cycles_count / branch_count;
|
|
if (cycles) {
|
|
@@ -1357,14 +1358,16 @@ static int branch_from_str(char *bf, int bfsize,
|
|
bf + printed, bfsize - printed);
|
|
}
|
|
|
|
- if (iter_count) {
|
|
- printed += count_pri64_printf(i++, "iter",
|
|
- iter_count,
|
|
- bf + printed, bfsize - printed);
|
|
+ if (iter_count && from_count) {
|
|
+ v = iter_count / from_count;
|
|
+ if (v) {
|
|
+ printed += count_pri64_printf(i++, "iter",
|
|
+ v, bf + printed, bfsize - printed);
|
|
|
|
- printed += count_pri64_printf(i++, "avg_cycles",
|
|
- iter_cycles / iter_count,
|
|
- bf + printed, bfsize - printed);
|
|
+ printed += count_pri64_printf(i++, "avg_cycles",
|
|
+ iter_cycles / iter_count,
|
|
+ bf + printed, bfsize - printed);
|
|
+ }
|
|
}
|
|
|
|
if (i)
|
|
@@ -1377,6 +1380,7 @@ static int counts_str_build(char *bf, int bfsize,
|
|
u64 branch_count, u64 predicted_count,
|
|
u64 abort_count, u64 cycles_count,
|
|
u64 iter_count, u64 iter_cycles,
|
|
+ u64 from_count,
|
|
struct branch_type_stat *brtype_stat)
|
|
{
|
|
int printed;
|
|
@@ -1389,7 +1393,8 @@ static int counts_str_build(char *bf, int bfsize,
|
|
predicted_count, abort_count, brtype_stat);
|
|
} else {
|
|
printed = branch_from_str(bf, bfsize, branch_count,
|
|
- cycles_count, iter_count, iter_cycles);
|
|
+ cycles_count, iter_count, iter_cycles,
|
|
+ from_count);
|
|
}
|
|
|
|
if (!printed)
|
|
@@ -1402,13 +1407,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
|
|
u64 branch_count, u64 predicted_count,
|
|
u64 abort_count, u64 cycles_count,
|
|
u64 iter_count, u64 iter_cycles,
|
|
+ u64 from_count,
|
|
struct branch_type_stat *brtype_stat)
|
|
{
|
|
char str[256];
|
|
|
|
counts_str_build(str, sizeof(str), branch_count,
|
|
predicted_count, abort_count, cycles_count,
|
|
- iter_count, iter_cycles, brtype_stat);
|
|
+ iter_count, iter_cycles, from_count, brtype_stat);
|
|
|
|
if (fp)
|
|
return fprintf(fp, "%s", str);
|
|
@@ -1422,6 +1428,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
|
|
u64 branch_count, predicted_count;
|
|
u64 abort_count, cycles_count;
|
|
u64 iter_count, iter_cycles;
|
|
+ u64 from_count;
|
|
|
|
branch_count = clist->branch_count;
|
|
predicted_count = clist->predicted_count;
|
|
@@ -1429,11 +1436,12 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
|
|
cycles_count = clist->cycles_count;
|
|
iter_count = clist->iter_count;
|
|
iter_cycles = clist->iter_cycles;
|
|
+ from_count = clist->from_count;
|
|
|
|
return callchain_counts_printf(fp, bf, bfsize, branch_count,
|
|
predicted_count, abort_count,
|
|
cycles_count, iter_count, iter_cycles,
|
|
- &clist->brtype_stat);
|
|
+ from_count, &clist->brtype_stat);
|
|
}
|
|
|
|
static void free_callchain_node(struct callchain_node *node)
|
|
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
|
|
index 154560b1eb653..99d38ac019b89 100644
|
|
--- a/tools/perf/util/callchain.h
|
|
+++ b/tools/perf/util/callchain.h
|
|
@@ -118,6 +118,7 @@ struct callchain_list {
|
|
bool has_children;
|
|
};
|
|
u64 branch_count;
|
|
+ u64 from_count;
|
|
u64 predicted_count;
|
|
u64 abort_count;
|
|
u64 cycles_count;
|
|
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
|
|
index 73430b73570d5..c2f0c92623f09 100644
|
|
--- a/tools/perf/util/cs-etm.c
|
|
+++ b/tools/perf/util/cs-etm.c
|
|
@@ -1005,7 +1005,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
|
|
}
|
|
|
|
swap_packet:
|
|
- if (etmq->etm->synth_opts.last_branch) {
|
|
+ if (etm->sample_branches || etm->synth_opts.last_branch) {
|
|
/*
|
|
* Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
|
|
* the next incoming packet.
|
|
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
|
|
index bbed90e5d9bb8..cee717a3794fe 100644
|
|
--- a/tools/perf/util/dso.c
|
|
+++ b/tools/perf/util/dso.c
|
|
@@ -295,7 +295,7 @@ static int decompress_kmodule(struct dso *dso, const char *name,
|
|
unlink(tmpbuf);
|
|
|
|
if (pathname && (fd >= 0))
|
|
- strncpy(pathname, tmpbuf, len);
|
|
+ strlcpy(pathname, tmpbuf, len);
|
|
|
|
return fd;
|
|
}
|
|
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
|
|
index 59f38c7693f86..4c23779e271a3 100644
|
|
--- a/tools/perf/util/env.c
|
|
+++ b/tools/perf/util/env.c
|
|
@@ -166,7 +166,7 @@ const char *perf_env__arch(struct perf_env *env)
|
|
struct utsname uts;
|
|
char *arch_name;
|
|
|
|
- if (!env) { /* Assume local operation */
|
|
+ if (!env || !env->arch) { /* Assume local operation */
|
|
if (uname(&uts) < 0)
|
|
return NULL;
|
|
arch_name = uts.machine;
|
|
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
|
|
index e9c108a6b1c34..24493200cf804 100644
|
|
--- a/tools/perf/util/event.c
|
|
+++ b/tools/perf/util/event.c
|
|
@@ -1577,6 +1577,24 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
|
|
return al->map;
|
|
}
|
|
|
|
+/*
|
|
+ * For branch stacks or branch samples, the sample cpumode might not be correct
|
|
+ * because it applies only to the sample 'ip' and not necessary to 'addr' or
|
|
+ * branch stack addresses. If possible, use a fallback to deal with those cases.
|
|
+ */
|
|
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
|
|
+ struct addr_location *al)
|
|
+{
|
|
+ struct map *map = thread__find_map(thread, cpumode, addr, al);
|
|
+ struct machine *machine = thread->mg->machine;
|
|
+ u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
|
|
+
|
|
+ if (map || addr_cpumode == cpumode)
|
|
+ return map;
|
|
+
|
|
+ return thread__find_map(thread, addr_cpumode, addr, al);
|
|
+}
|
|
+
|
|
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
|
|
u64 addr, struct addr_location *al)
|
|
{
|
|
@@ -1586,6 +1604,15 @@ struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
|
|
return al->sym;
|
|
}
|
|
|
|
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
|
|
+ u64 addr, struct addr_location *al)
|
|
+{
|
|
+ al->sym = NULL;
|
|
+ if (thread__find_map_fb(thread, cpumode, addr, al))
|
|
+ al->sym = map__find_symbol(al->map, al->addr);
|
|
+ return al->sym;
|
|
+}
|
|
+
|
|
/*
|
|
* Callers need to drop the reference to al->thread, obtained in
|
|
* machine__findnew_thread()
|
|
@@ -1679,7 +1706,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
|
|
void thread__resolve(struct thread *thread, struct addr_location *al,
|
|
struct perf_sample *sample)
|
|
{
|
|
- thread__find_map(thread, sample->cpumode, sample->addr, al);
|
|
+ thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
|
|
|
|
al->cpu = sample->cpu;
|
|
al->sym = NULL;
|
|
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
|
|
index 668d2a9ef0f4b..8a806b0758b08 100644
|
|
--- a/tools/perf/util/evlist.c
|
|
+++ b/tools/perf/util/evlist.c
|
|
@@ -34,6 +34,10 @@
|
|
#include <linux/log2.h>
|
|
#include <linux/err.h>
|
|
|
|
+#ifdef LACKS_SIGQUEUE_PROTOTYPE
|
|
+int sigqueue(pid_t pid, int sig, const union sigval value);
|
|
+#endif
|
|
+
|
|
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
|
|
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
|
|
|
|
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
|
|
index 4fd45be95a433..05f40bb51a883 100644
|
|
--- a/tools/perf/util/header.c
|
|
+++ b/tools/perf/util/header.c
|
|
@@ -2659,6 +2659,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
|
|
struct perf_header *header = &session->header;
|
|
int fd = perf_data__fd(session->data);
|
|
struct stat st;
|
|
+ time_t stctime;
|
|
int ret, bit;
|
|
|
|
hd.fp = fp;
|
|
@@ -2668,7 +2669,8 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
|
|
if (ret == -1)
|
|
return -1;
|
|
|
|
- fprintf(fp, "# captured on : %s", ctime(&st.st_ctime));
|
|
+ stctime = st.st_ctime;
|
|
+ fprintf(fp, "# captured on : %s", ctime(&stctime));
|
|
|
|
fprintf(fp, "# header version : %u\n", header->version);
|
|
fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
|
|
@@ -3544,7 +3546,7 @@ perf_event__synthesize_event_update_unit(struct perf_tool *tool,
|
|
if (ev == NULL)
|
|
return -ENOMEM;
|
|
|
|
- strncpy(ev->data, evsel->unit, size);
|
|
+ strlcpy(ev->data, evsel->unit, size + 1);
|
|
err = process(tool, (union perf_event *)ev, NULL, NULL);
|
|
free(ev);
|
|
return err;
|
|
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
|
|
index 8f36ce813bc5b..ea228dd0a1876 100644
|
|
--- a/tools/perf/util/machine.c
|
|
+++ b/tools/perf/util/machine.c
|
|
@@ -2005,7 +2005,7 @@ static void save_iterations(struct iterations *iter,
|
|
{
|
|
int i;
|
|
|
|
- iter->nr_loop_iter = nr;
|
|
+ iter->nr_loop_iter++;
|
|
iter->cycles = 0;
|
|
|
|
for (i = 0; i < nr; i++)
|
|
@@ -2592,6 +2592,33 @@ int machine__get_kernel_start(struct machine *machine)
|
|
return err;
|
|
}
|
|
|
|
+u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
|
|
+{
|
|
+ u8 addr_cpumode = cpumode;
|
|
+ bool kernel_ip;
|
|
+
|
|
+ if (!machine->single_address_space)
|
|
+ goto out;
|
|
+
|
|
+ kernel_ip = machine__kernel_ip(machine, addr);
|
|
+ switch (cpumode) {
|
|
+ case PERF_RECORD_MISC_KERNEL:
|
|
+ case PERF_RECORD_MISC_USER:
|
|
+ addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
|
|
+ PERF_RECORD_MISC_USER;
|
|
+ break;
|
|
+ case PERF_RECORD_MISC_GUEST_KERNEL:
|
|
+ case PERF_RECORD_MISC_GUEST_USER:
|
|
+ addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
|
|
+ PERF_RECORD_MISC_GUEST_USER;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+out:
|
|
+ return addr_cpumode;
|
|
+}
|
|
+
|
|
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
|
|
{
|
|
return dsos__findnew(&machine->dsos, filename);
|
|
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
|
|
index d856b85862e23..ebde3ea70225b 100644
|
|
--- a/tools/perf/util/machine.h
|
|
+++ b/tools/perf/util/machine.h
|
|
@@ -42,6 +42,7 @@ struct machine {
|
|
u16 id_hdr_size;
|
|
bool comm_exec;
|
|
bool kptr_restrict_warned;
|
|
+ bool single_address_space;
|
|
char *root_dir;
|
|
char *mmap_name;
|
|
struct threads threads[THREADS__TABLE_SIZE];
|
|
@@ -99,6 +100,8 @@ static inline bool machine__kernel_ip(struct machine *machine, u64 ip)
|
|
return ip >= kernel_start;
|
|
}
|
|
|
|
+u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
|
|
+
|
|
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
|
|
pid_t tid);
|
|
struct comm *machine__thread_exec_comm(struct machine *machine,
|
|
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
|
|
index 59be3466d64d3..920e1e6551dd8 100644
|
|
--- a/tools/perf/util/parse-events.c
|
|
+++ b/tools/perf/util/parse-events.c
|
|
@@ -2462,7 +2462,7 @@ restart:
|
|
if (!name_only && strlen(syms->alias))
|
|
snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
|
|
else
|
|
- strncpy(name, syms->symbol, MAX_NAME_LEN);
|
|
+ strlcpy(name, syms->symbol, MAX_NAME_LEN);
|
|
|
|
evt_list[evt_i] = strdup(name);
|
|
if (evt_list[evt_i] == NULL)
|
|
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
|
|
index 7e49baad304d7..7348eea0248fd 100644
|
|
--- a/tools/perf/util/pmu.c
|
|
+++ b/tools/perf/util/pmu.c
|
|
@@ -145,7 +145,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
|
|
int fd, ret = -1;
|
|
char path[PATH_MAX];
|
|
|
|
- snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
|
|
+ scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
|
|
|
|
fd = open(path, O_RDONLY);
|
|
if (fd == -1)
|
|
@@ -175,7 +175,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
|
|
ssize_t sret;
|
|
int fd;
|
|
|
|
- snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
|
|
+ scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
|
|
|
|
fd = open(path, O_RDONLY);
|
|
if (fd == -1)
|
|
@@ -205,7 +205,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name)
|
|
char path[PATH_MAX];
|
|
int fd;
|
|
|
|
- snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
|
|
+ scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
|
|
|
|
fd = open(path, O_RDONLY);
|
|
if (fd == -1)
|
|
@@ -223,7 +223,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
|
|
char path[PATH_MAX];
|
|
int fd;
|
|
|
|
- snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
|
|
+ scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
|
|
|
|
fd = open(path, O_RDONLY);
|
|
if (fd == -1)
|
|
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
|
|
index aac7817d9e14d..0b1195cad0e5b 100644
|
|
--- a/tools/perf/util/probe-file.c
|
|
+++ b/tools/perf/util/probe-file.c
|
|
@@ -424,7 +424,7 @@ static int probe_cache__open(struct probe_cache *pcache, const char *target,
|
|
|
|
if (target && build_id_cache__cached(target)) {
|
|
/* This is a cached buildid */
|
|
- strncpy(sbuildid, target, SBUILD_ID_SIZE);
|
|
+ strlcpy(sbuildid, target, SBUILD_ID_SIZE);
|
|
dir_name = build_id_cache__linkname(sbuildid, NULL, 0);
|
|
goto found;
|
|
}
|
|
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
|
|
index 50150dfc0cdfb..4dd8fd05a2601 100644
|
|
--- a/tools/perf/util/python.c
|
|
+++ b/tools/perf/util/python.c
|
|
@@ -939,7 +939,8 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
|
|
|
|
file = PyFile_FromFile(fp, "perf", "r", NULL);
|
|
#else
|
|
- file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1, NULL, NULL, NULL, 1);
|
|
+ file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1,
|
|
+ NULL, NULL, NULL, 0);
|
|
#endif
|
|
if (file == NULL)
|
|
goto free_list;
|
|
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
|
|
index a2eeebbfb25f5..68b2570304ece 100644
|
|
--- a/tools/perf/util/s390-cpumsf.c
|
|
+++ b/tools/perf/util/s390-cpumsf.c
|
|
@@ -506,7 +506,7 @@ static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts)
|
|
aux_ts = get_trailer_time(buf);
|
|
if (!aux_ts) {
|
|
pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n",
|
|
- sfq->buffer->data_offset);
|
|
+ (s64)sfq->buffer->data_offset);
|
|
aux_ts = ~0ULL;
|
|
goto out;
|
|
}
|
|
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
|
|
index 69aa93d4ee991..0c4b050f6fc22 100644
|
|
--- a/tools/perf/util/scripting-engines/trace-event-python.c
|
|
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
|
|
@@ -494,14 +494,14 @@ static PyObject *python_process_brstack(struct perf_sample *sample,
|
|
pydict_set_item_string_decref(pyelem, "cycles",
|
|
PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
|
|
|
|
- thread__find_map(thread, sample->cpumode,
|
|
- br->entries[i].from, &al);
|
|
+ thread__find_map_fb(thread, sample->cpumode,
|
|
+ br->entries[i].from, &al);
|
|
dsoname = get_dsoname(al.map);
|
|
pydict_set_item_string_decref(pyelem, "from_dsoname",
|
|
_PyUnicode_FromString(dsoname));
|
|
|
|
- thread__find_map(thread, sample->cpumode,
|
|
- br->entries[i].to, &al);
|
|
+ thread__find_map_fb(thread, sample->cpumode,
|
|
+ br->entries[i].to, &al);
|
|
dsoname = get_dsoname(al.map);
|
|
pydict_set_item_string_decref(pyelem, "to_dsoname",
|
|
_PyUnicode_FromString(dsoname));
|
|
@@ -576,14 +576,14 @@ static PyObject *python_process_brstacksym(struct perf_sample *sample,
|
|
if (!pyelem)
|
|
Py_FatalError("couldn't create Python dictionary");
|
|
|
|
- thread__find_symbol(thread, sample->cpumode,
|
|
- br->entries[i].from, &al);
|
|
+ thread__find_symbol_fb(thread, sample->cpumode,
|
|
+ br->entries[i].from, &al);
|
|
get_symoff(al.sym, &al, true, bf, sizeof(bf));
|
|
pydict_set_item_string_decref(pyelem, "from",
|
|
_PyUnicode_FromString(bf));
|
|
|
|
- thread__find_symbol(thread, sample->cpumode,
|
|
- br->entries[i].to, &al);
|
|
+ thread__find_symbol_fb(thread, sample->cpumode,
|
|
+ br->entries[i].to, &al);
|
|
get_symoff(al.sym, &al, true, bf, sizeof(bf));
|
|
pydict_set_item_string_decref(pyelem, "to",
|
|
_PyUnicode_FromString(bf));
|
|
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
|
|
index 7d2c8ce6cfadb..f8eab197f35cc 100644
|
|
--- a/tools/perf/util/session.c
|
|
+++ b/tools/perf/util/session.c
|
|
@@ -24,6 +24,7 @@
|
|
#include "thread.h"
|
|
#include "thread-stack.h"
|
|
#include "stat.h"
|
|
+#include "arch/common.h"
|
|
|
|
static int perf_session__deliver_event(struct perf_session *session,
|
|
union perf_event *event,
|
|
@@ -150,6 +151,9 @@ struct perf_session *perf_session__new(struct perf_data *data,
|
|
session->machines.host.env = &perf_env;
|
|
}
|
|
|
|
+ session->machines.host.single_address_space =
|
|
+ perf_env__single_address_space(session->machines.host.env);
|
|
+
|
|
if (!data || perf_data__is_write(data)) {
|
|
/*
|
|
* In O_RDONLY mode this will be performed when reading the
|
|
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
|
|
index e7b4c44ebb625..665ee374fc012 100644
|
|
--- a/tools/perf/util/stat-display.c
|
|
+++ b/tools/perf/util/stat-display.c
|
|
@@ -59,6 +59,15 @@ static void print_noise(struct perf_stat_config *config,
|
|
print_noise_pct(config, stddev_stats(&ps->res_stats[0]), avg);
|
|
}
|
|
|
|
+static void print_cgroup(struct perf_stat_config *config, struct perf_evsel *evsel)
|
|
+{
|
|
+ if (nr_cgroups) {
|
|
+ const char *cgrp_name = evsel->cgrp ? evsel->cgrp->name : "";
|
|
+ fprintf(config->output, "%s%s", config->csv_sep, cgrp_name);
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
static void aggr_printout(struct perf_stat_config *config,
|
|
struct perf_evsel *evsel, int id, int nr)
|
|
{
|
|
@@ -336,8 +345,7 @@ static void abs_printout(struct perf_stat_config *config,
|
|
|
|
fprintf(output, "%-*s", config->csv_output ? 0 : 25, perf_evsel__name(evsel));
|
|
|
|
- if (evsel->cgrp)
|
|
- fprintf(output, "%s%s", config->csv_sep, evsel->cgrp->name);
|
|
+ print_cgroup(config, evsel);
|
|
}
|
|
|
|
static bool is_mixed_hw_group(struct perf_evsel *counter)
|
|
@@ -431,9 +439,7 @@ static void printout(struct perf_stat_config *config, int id, int nr,
|
|
config->csv_output ? 0 : -25,
|
|
perf_evsel__name(counter));
|
|
|
|
- if (counter->cgrp)
|
|
- fprintf(config->output, "%s%s",
|
|
- config->csv_sep, counter->cgrp->name);
|
|
+ print_cgroup(config, counter);
|
|
|
|
if (!config->csv_output)
|
|
pm(config, &os, NULL, NULL, "", 0);
|
|
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
|
|
index 1cbada2dc6be6..f735ee038713a 100644
|
|
--- a/tools/perf/util/svghelper.c
|
|
+++ b/tools/perf/util/svghelper.c
|
|
@@ -334,7 +334,7 @@ static char *cpu_model(void)
|
|
if (file) {
|
|
while (fgets(buf, 255, file)) {
|
|
if (strstr(buf, "model name")) {
|
|
- strncpy(cpu_m, &buf[13], 255);
|
|
+ strlcpy(cpu_m, &buf[13], 255);
|
|
break;
|
|
}
|
|
}
|
|
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
|
|
index 30e2b4c165fe7..5920c3bb8ffe4 100644
|
|
--- a/tools/perf/util/thread.h
|
|
+++ b/tools/perf/util/thread.h
|
|
@@ -96,9 +96,13 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
|
|
|
|
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
|
|
struct addr_location *al);
|
|
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
|
|
+ struct addr_location *al);
|
|
|
|
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
|
|
u64 addr, struct addr_location *al);
|
|
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
|
|
+ u64 addr, struct addr_location *al);
|
|
|
|
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
|
|
struct addr_location *al);
|
|
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
|
|
index 84e2b648e622f..2fa3c5757bcb5 100755
|
|
--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
|
|
+++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
|
|
@@ -585,9 +585,9 @@ current_max_cpu = 0
|
|
|
|
read_trace_data(filename)
|
|
|
|
-clear_trace_file()
|
|
-# Free the memory
|
|
if interval:
|
|
+ clear_trace_file()
|
|
+ # Free the memory
|
|
free_trace_buffer()
|
|
|
|
if graph_data_present == False:
|
|
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
|
|
index ff9d3a5825e1f..c6635fee27d8c 100644
|
|
--- a/tools/testing/nvdimm/test/iomap.c
|
|
+++ b/tools/testing/nvdimm/test/iomap.c
|
|
@@ -104,16 +104,29 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
|
|
}
|
|
EXPORT_SYMBOL(__wrap_devm_memremap);
|
|
|
|
+static void nfit_test_kill(void *_pgmap)
|
|
+{
|
|
+ struct dev_pagemap *pgmap = _pgmap;
|
|
+
|
|
+ pgmap->kill(pgmap->ref);
|
|
+}
|
|
+
|
|
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
|
|
{
|
|
resource_size_t offset = pgmap->res.start;
|
|
struct nfit_test_resource *nfit_res = get_nfit_res(offset);
|
|
|
|
- if (nfit_res)
|
|
+ if (nfit_res) {
|
|
+ int rc;
|
|
+
|
|
+ rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
|
|
+ if (rc)
|
|
+ return ERR_PTR(rc);
|
|
return nfit_res->buf + offset - nfit_res->res.start;
|
|
+ }
|
|
return devm_memremap_pages(dev, pgmap);
|
|
}
|
|
-EXPORT_SYMBOL(__wrap_devm_memremap_pages);
|
|
+EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
|
|
|
|
pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
|
|
{
|
|
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
|
|
index f0017c831e57b..a43a07a09a980 100644
|
|
--- a/tools/testing/selftests/Makefile
|
|
+++ b/tools/testing/selftests/Makefile
|
|
@@ -6,6 +6,7 @@ TARGETS += capabilities
|
|
TARGETS += cgroup
|
|
TARGETS += cpufreq
|
|
TARGETS += cpu-hotplug
|
|
+TARGETS += drivers/dma-buf
|
|
TARGETS += efivarfs
|
|
TARGETS += exec
|
|
TARGETS += filesystems
|
|
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
|
|
index d9a7254783754..72c25a3cb6589 100644
|
|
--- a/tools/testing/selftests/android/Makefile
|
|
+++ b/tools/testing/selftests/android/Makefile
|
|
@@ -6,7 +6,7 @@ TEST_PROGS := run.sh
|
|
|
|
include ../lib.mk
|
|
|
|
-all: khdr
|
|
+all:
|
|
@for DIR in $(SUBDIRS); do \
|
|
BUILD_TARGET=$(OUTPUT)/$$DIR; \
|
|
mkdir $$BUILD_TARGET -p; \
|
|
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
|
|
index e39dfb4e7970f..d5e992f7c7dd5 100644
|
|
--- a/tools/testing/selftests/bpf/Makefile
|
|
+++ b/tools/testing/selftests/bpf/Makefile
|
|
@@ -53,7 +53,10 @@ TEST_PROGS := test_kmod.sh \
|
|
test_flow_dissector.sh \
|
|
test_xdp_vlan.sh
|
|
|
|
-TEST_PROGS_EXTENDED := with_addr.sh
|
|
+TEST_PROGS_EXTENDED := with_addr.sh \
|
|
+ with_tunnels.sh \
|
|
+ tcp_client.py \
|
|
+ tcp_server.py
|
|
|
|
# Compile but not part of 'make run_tests'
|
|
TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
|
|
@@ -135,6 +138,16 @@ endif
|
|
endif
|
|
endif
|
|
|
|
+# Have one program compiled without "-target bpf" to test whether libbpf loads
|
|
+# it successfully
|
|
+$(OUTPUT)/test_xdp.o: test_xdp.c
|
|
+ $(CLANG) $(CLANG_FLAGS) \
|
|
+ -O2 -emit-llvm -c $< -o - | \
|
|
+ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
|
|
+ifeq ($(DWARF2BTF),y)
|
|
+ $(BTF_PAHOLE) -J $@
|
|
+endif
|
|
+
|
|
$(OUTPUT)/%.o: %.c
|
|
$(CLANG) $(CLANG_FLAGS) \
|
|
-O2 -target bpf -emit-llvm -c $< -o - | \
|
|
diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
|
|
index 156d89f1edcc4..2989b2e2d856d 100755
|
|
--- a/tools/testing/selftests/bpf/test_libbpf.sh
|
|
+++ b/tools/testing/selftests/bpf/test_libbpf.sh
|
|
@@ -33,17 +33,11 @@ trap exit_handler 0 2 3 6 9
|
|
|
|
libbpf_open_file test_l4lb.o
|
|
|
|
-# TODO: fix libbpf to load noinline functions
|
|
-# [warning] libbpf: incorrect bpf_call opcode
|
|
-#libbpf_open_file test_l4lb_noinline.o
|
|
+# Load a program with BPF-to-BPF calls
|
|
+libbpf_open_file test_l4lb_noinline.o
|
|
|
|
-# TODO: fix test_xdp_meta.c to load with libbpf
|
|
-# [warning] libbpf: test_xdp_meta.o doesn't provide kernel version
|
|
-#libbpf_open_file test_xdp_meta.o
|
|
-
|
|
-# TODO: fix libbpf to handle .eh_frame
|
|
-# [warning] libbpf: relocation failed: no section(10)
|
|
-#libbpf_open_file ../../../../samples/bpf/tracex3_kern.o
|
|
+# Load a program compiled without the "-target bpf" flag
|
|
+libbpf_open_file test_xdp.o
|
|
|
|
# Success
|
|
exit 0
|
|
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
|
|
index 2d3c04f455302..3ec4ce156074c 100644
|
|
--- a/tools/testing/selftests/bpf/test_progs.c
|
|
+++ b/tools/testing/selftests/bpf/test_progs.c
|
|
@@ -51,10 +51,10 @@ static struct {
|
|
struct iphdr iph;
|
|
struct tcphdr tcp;
|
|
} __packed pkt_v4 = {
|
|
- .eth.h_proto = bpf_htons(ETH_P_IP),
|
|
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
|
|
.iph.ihl = 5,
|
|
.iph.protocol = 6,
|
|
- .iph.tot_len = bpf_htons(MAGIC_BYTES),
|
|
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
|
|
.tcp.urg_ptr = 123,
|
|
};
|
|
|
|
@@ -64,9 +64,9 @@ static struct {
|
|
struct ipv6hdr iph;
|
|
struct tcphdr tcp;
|
|
} __packed pkt_v6 = {
|
|
- .eth.h_proto = bpf_htons(ETH_P_IPV6),
|
|
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
|
|
.iph.nexthdr = 6,
|
|
- .iph.payload_len = bpf_htons(MAGIC_BYTES),
|
|
+ .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
|
|
.tcp.urg_ptr = 123,
|
|
};
|
|
|
|
@@ -1136,7 +1136,9 @@ static void test_stacktrace_build_id(void)
|
|
int i, j;
|
|
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
|
|
int build_id_matches = 0;
|
|
+ int retry = 1;
|
|
|
|
+retry:
|
|
err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
|
|
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
|
|
goto out;
|
|
@@ -1249,6 +1251,19 @@ static void test_stacktrace_build_id(void)
|
|
previous_key = key;
|
|
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
|
|
|
|
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
|
|
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
|
|
+ * try it one more time.
|
|
+ */
|
|
+ if (build_id_matches < 1 && retry--) {
|
|
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
|
|
+ close(pmu_fd);
|
|
+ bpf_object__close(obj);
|
|
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
|
|
+ __func__);
|
|
+ goto retry;
|
|
+ }
|
|
+
|
|
if (CHECK(build_id_matches < 1, "build id match",
|
|
"Didn't find expected build ID from the map\n"))
|
|
goto disable_pmu;
|
|
@@ -1289,7 +1304,9 @@ static void test_stacktrace_build_id_nmi(void)
|
|
int i, j;
|
|
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
|
|
int build_id_matches = 0;
|
|
+ int retry = 1;
|
|
|
|
+retry:
|
|
err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
|
|
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
|
|
return;
|
|
@@ -1384,6 +1401,19 @@ static void test_stacktrace_build_id_nmi(void)
|
|
previous_key = key;
|
|
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
|
|
|
|
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
|
|
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
|
|
+ * try it one more time.
|
|
+ */
|
|
+ if (build_id_matches < 1 && retry--) {
|
|
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
|
|
+ close(pmu_fd);
|
|
+ bpf_object__close(obj);
|
|
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
|
|
+ __func__);
|
|
+ goto retry;
|
|
+ }
|
|
+
|
|
if (CHECK(build_id_matches < 1, "build id match",
|
|
"Didn't find expected build ID from the map\n"))
|
|
goto disable_pmu;
|
|
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
|
|
index aeeb76a54d633..e38f1cb7089d3 100644
|
|
--- a/tools/testing/selftests/bpf/test_sock_addr.c
|
|
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
|
|
@@ -44,6 +44,7 @@
|
|
#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
|
|
#define SRC6_IP "::1"
|
|
#define SRC6_REWRITE_IP "::6"
|
|
+#define WILDCARD6_IP "::"
|
|
#define SERV6_PORT 6060
|
|
#define SERV6_REWRITE_PORT 6666
|
|
|
|
@@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
|
|
static int bind6_prog_load(const struct sock_addr_test *test);
|
|
static int connect4_prog_load(const struct sock_addr_test *test);
|
|
static int connect6_prog_load(const struct sock_addr_test *test);
|
|
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
|
|
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
|
|
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
|
|
|
|
static struct sock_addr_test tests[] = {
|
|
/* bind */
|
|
@@ -462,6 +465,34 @@ static struct sock_addr_test tests[] = {
|
|
SRC6_REWRITE_IP,
|
|
SYSCALL_ENOTSUPP,
|
|
},
|
|
+ {
|
|
+ "sendmsg6: set dst IP = [::] (BSD'ism)",
|
|
+ sendmsg6_rw_wildcard_prog_load,
|
|
+ BPF_CGROUP_UDP6_SENDMSG,
|
|
+ BPF_CGROUP_UDP6_SENDMSG,
|
|
+ AF_INET6,
|
|
+ SOCK_DGRAM,
|
|
+ SERV6_IP,
|
|
+ SERV6_PORT,
|
|
+ SERV6_REWRITE_IP,
|
|
+ SERV6_REWRITE_PORT,
|
|
+ SRC6_REWRITE_IP,
|
|
+ SUCCESS,
|
|
+ },
|
|
+ {
|
|
+ "sendmsg6: preserve dst IP = [::] (BSD'ism)",
|
|
+ sendmsg_allow_prog_load,
|
|
+ BPF_CGROUP_UDP6_SENDMSG,
|
|
+ BPF_CGROUP_UDP6_SENDMSG,
|
|
+ AF_INET6,
|
|
+ SOCK_DGRAM,
|
|
+ WILDCARD6_IP,
|
|
+ SERV6_PORT,
|
|
+ SERV6_REWRITE_IP,
|
|
+ SERV6_PORT,
|
|
+ SRC6_IP,
|
|
+ SUCCESS,
|
|
+ },
|
|
{
|
|
"sendmsg6: deny call",
|
|
sendmsg_deny_prog_load,
|
|
@@ -714,16 +745,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
|
|
return load_path(test, CONNECT6_PROG_PATH);
|
|
}
|
|
|
|
-static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
|
|
+static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
|
|
+ int32_t rc)
|
|
{
|
|
struct bpf_insn insns[] = {
|
|
- /* return 0 */
|
|
- BPF_MOV64_IMM(BPF_REG_0, 0),
|
|
+ /* return rc */
|
|
+ BPF_MOV64_IMM(BPF_REG_0, rc),
|
|
BPF_EXIT_INSN(),
|
|
};
|
|
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
|
|
}
|
|
|
|
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
|
|
+{
|
|
+ return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
|
|
+}
|
|
+
|
|
+static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
|
|
+{
|
|
+ return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
|
|
+}
|
|
+
|
|
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
|
|
{
|
|
struct sockaddr_in dst4_rw_addr;
|
|
@@ -844,6 +886,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
|
|
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
|
|
}
|
|
|
|
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
|
|
+{
|
|
+ return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
|
|
+}
|
|
+
|
|
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
|
|
{
|
|
return load_path(test, SENDMSG6_PROG_PATH);
|
|
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
|
|
index f8eac4a544f45..444f49176a2d7 100644
|
|
--- a/tools/testing/selftests/bpf/test_verifier.c
|
|
+++ b/tools/testing/selftests/bpf/test_verifier.c
|
|
@@ -2903,6 +2903,19 @@ static struct bpf_test tests[] = {
|
|
.result_unpriv = REJECT,
|
|
.result = ACCEPT,
|
|
},
|
|
+ {
|
|
+ "alu32: mov u32 const",
|
|
+ .insns = {
|
|
+ BPF_MOV32_IMM(BPF_REG_7, 0),
|
|
+ BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
|
|
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
|
|
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
|
|
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
|
|
+ BPF_EXIT_INSN(),
|
|
+ },
|
|
+ .result = ACCEPT,
|
|
+ .retval = 0,
|
|
+ },
|
|
{
|
|
"unpriv: partial copy of pointer",
|
|
.insns = {
|
|
diff --git a/tools/testing/selftests/drivers/dma-buf/Makefile b/tools/testing/selftests/drivers/dma-buf/Makefile
|
|
index 4154c3d7aa585..f22c3f7cf612d 100644
|
|
--- a/tools/testing/selftests/drivers/dma-buf/Makefile
|
|
+++ b/tools/testing/selftests/drivers/dma-buf/Makefile
|
|
@@ -2,4 +2,6 @@ CFLAGS += -I../../../../../usr/include/
|
|
|
|
TEST_GEN_PROGS := udmabuf
|
|
|
|
+top_srcdir ?=../../../../..
|
|
+
|
|
include ../../lib.mk
|
|
diff --git a/tools/testing/selftests/drivers/dma-buf/udmabuf.c b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
|
|
index 376b1d6730bd9..4de902ea14d82 100644
|
|
--- a/tools/testing/selftests/drivers/dma-buf/udmabuf.c
|
|
+++ b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
|
|
@@ -4,7 +4,7 @@
|
|
#include <unistd.h>
|
|
#include <string.h>
|
|
#include <errno.h>
|
|
-#include <fcntl.h>
|
|
+#include <linux/fcntl.h>
|
|
#include <malloc.h>
|
|
|
|
#include <sys/ioctl.h>
|
|
@@ -33,12 +33,19 @@ int main(int argc, char *argv[])
|
|
exit(77);
|
|
}
|
|
|
|
- memfd = memfd_create("udmabuf-test", MFD_CLOEXEC);
|
|
+ memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
|
|
if (memfd < 0) {
|
|
printf("%s: [skip,no-memfd]\n", TEST_PREFIX);
|
|
exit(77);
|
|
}
|
|
|
|
+ ret = fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
|
|
+ if (ret < 0) {
|
|
+ printf("%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
|
|
+ exit(77);
|
|
+ }
|
|
+
|
|
+
|
|
size = getpagesize() * NUM_PAGES;
|
|
ret = ftruncate(memfd, size);
|
|
if (ret == -1) {
|
|
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
|
|
index ad1eeb14fda7e..30996306cabcf 100644
|
|
--- a/tools/testing/selftests/futex/functional/Makefile
|
|
+++ b/tools/testing/selftests/futex/functional/Makefile
|
|
@@ -19,6 +19,7 @@ TEST_GEN_FILES := \
|
|
TEST_PROGS := run.sh
|
|
|
|
top_srcdir = ../../../../..
|
|
+KSFT_KHDR_INSTALL := 1
|
|
include ../../lib.mk
|
|
|
|
$(TEST_GEN_FILES): $(HEADERS)
|
|
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
|
|
index 46648427d537e..07f572a1bd3f6 100644
|
|
--- a/tools/testing/selftests/gpio/Makefile
|
|
+++ b/tools/testing/selftests/gpio/Makefile
|
|
@@ -10,8 +10,6 @@ TEST_PROGS_EXTENDED := gpio-mockup-chardev
|
|
GPIODIR := $(realpath ../../../gpio)
|
|
GPIOOBJ := gpio-utils.o
|
|
|
|
-include ../lib.mk
|
|
-
|
|
all: $(TEST_PROGS_EXTENDED)
|
|
|
|
override define CLEAN
|
|
@@ -19,7 +17,9 @@ override define CLEAN
|
|
$(MAKE) -C $(GPIODIR) OUTPUT=$(GPIODIR)/ clean
|
|
endef
|
|
|
|
-$(TEST_PROGS_EXTENDED):| khdr
|
|
+KSFT_KHDR_INSTALL := 1
|
|
+include ../lib.mk
|
|
+
|
|
$(TEST_PROGS_EXTENDED): $(GPIODIR)/$(GPIOOBJ)
|
|
|
|
$(GPIODIR)/$(GPIOOBJ):
|
|
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
|
|
index 6ae3730c4ee35..76d654ef3234a 100644
|
|
--- a/tools/testing/selftests/kselftest_harness.h
|
|
+++ b/tools/testing/selftests/kselftest_harness.h
|
|
@@ -354,7 +354,7 @@
|
|
* ASSERT_EQ(expected, measured): expected == measured
|
|
*/
|
|
#define ASSERT_EQ(expected, seen) \
|
|
- __EXPECT(expected, seen, ==, 1)
|
|
+ __EXPECT(expected, #expected, seen, #seen, ==, 1)
|
|
|
|
/**
|
|
* ASSERT_NE(expected, seen)
|
|
@@ -365,7 +365,7 @@
|
|
* ASSERT_NE(expected, measured): expected != measured
|
|
*/
|
|
#define ASSERT_NE(expected, seen) \
|
|
- __EXPECT(expected, seen, !=, 1)
|
|
+ __EXPECT(expected, #expected, seen, #seen, !=, 1)
|
|
|
|
/**
|
|
* ASSERT_LT(expected, seen)
|
|
@@ -376,7 +376,7 @@
|
|
* ASSERT_LT(expected, measured): expected < measured
|
|
*/
|
|
#define ASSERT_LT(expected, seen) \
|
|
- __EXPECT(expected, seen, <, 1)
|
|
+ __EXPECT(expected, #expected, seen, #seen, <, 1)
|
|
|
|
/**
|
|
* ASSERT_LE(expected, seen)
|
|
@@ -387,7 +387,7 @@
|
|
* ASSERT_LE(expected, measured): expected <= measured
|
|
*/
|
|
#define ASSERT_LE(expected, seen) \
|
|
- __EXPECT(expected, seen, <=, 1)
|
|
+ __EXPECT(expected, #expected, seen, #seen, <=, 1)
|
|
|
|
/**
|
|
* ASSERT_GT(expected, seen)
|
|
@@ -398,7 +398,7 @@
|
|
* ASSERT_GT(expected, measured): expected > measured
|
|
*/
|
|
#define ASSERT_GT(expected, seen) \
|
|
- __EXPECT(expected, seen, >, 1)
|
|
+ __EXPECT(expected, #expected, seen, #seen, >, 1)
|
|
|
|
/**
|
|
* ASSERT_GE(expected, seen)
|
|
@@ -409,7 +409,7 @@
|
|
* ASSERT_GE(expected, measured): expected >= measured
|
|
*/
|
|
#define ASSERT_GE(expected, seen) \
|
|
- __EXPECT(expected, seen, >=, 1)
|
|
+ __EXPECT(expected, #expected, seen, #seen, >=, 1)
|
|
|
|
/**
|
|
* ASSERT_NULL(seen)
|
|
@@ -419,7 +419,7 @@
|
|
* ASSERT_NULL(measured): NULL == measured
|
|
*/
|
|
#define ASSERT_NULL(seen) \
|
|
- __EXPECT(NULL, seen, ==, 1)
|
|
+ __EXPECT(NULL, "NULL", seen, #seen, ==, 1)
|
|
|
|
/**
|
|
* ASSERT_TRUE(seen)
|
|
@@ -429,7 +429,7 @@
|
|
* ASSERT_TRUE(measured): measured != 0
|
|
*/
|
|
#define ASSERT_TRUE(seen) \
|
|
- ASSERT_NE(0, seen)
|
|
+ __EXPECT(0, "0", seen, #seen, !=, 1)
|
|
|
|
/**
|
|
* ASSERT_FALSE(seen)
|
|
@@ -439,7 +439,7 @@
|
|
* ASSERT_FALSE(measured): measured == 0
|
|
*/
|
|
#define ASSERT_FALSE(seen) \
|
|
- ASSERT_EQ(0, seen)
|
|
+ __EXPECT(0, "0", seen, #seen, ==, 1)
|
|
|
|
/**
|
|
* ASSERT_STREQ(expected, seen)
|
|
@@ -472,7 +472,7 @@
|
|
* EXPECT_EQ(expected, measured): expected == measured
|
|
*/
|
|
#define EXPECT_EQ(expected, seen) \
|
|
- __EXPECT(expected, seen, ==, 0)
|
|
+ __EXPECT(expected, #expected, seen, #seen, ==, 0)
|
|
|
|
/**
|
|
* EXPECT_NE(expected, seen)
|
|
@@ -483,7 +483,7 @@
|
|
* EXPECT_NE(expected, measured): expected != measured
|
|
*/
|
|
#define EXPECT_NE(expected, seen) \
|
|
- __EXPECT(expected, seen, !=, 0)
|
|
+ __EXPECT(expected, #expected, seen, #seen, !=, 0)
|
|
|
|
/**
|
|
* EXPECT_LT(expected, seen)
|
|
@@ -494,7 +494,7 @@
|
|
* EXPECT_LT(expected, measured): expected < measured
|
|
*/
|
|
#define EXPECT_LT(expected, seen) \
|
|
- __EXPECT(expected, seen, <, 0)
|
|
+ __EXPECT(expected, #expected, seen, #seen, <, 0)
|
|
|
|
/**
|
|
* EXPECT_LE(expected, seen)
|
|
@@ -505,7 +505,7 @@
|
|
* EXPECT_LE(expected, measured): expected <= measured
|
|
*/
|
|
#define EXPECT_LE(expected, seen) \
|
|
- __EXPECT(expected, seen, <=, 0)
|
|
+ __EXPECT(expected, #expected, seen, #seen, <=, 0)
|
|
|
|
/**
|
|
* EXPECT_GT(expected, seen)
|
|
@@ -516,7 +516,7 @@
|
|
* EXPECT_GT(expected, measured): expected > measured
|
|
*/
|
|
#define EXPECT_GT(expected, seen) \
|
|
- __EXPECT(expected, seen, >, 0)
|
|
+ __EXPECT(expected, #expected, seen, #seen, >, 0)
|
|
|
|
/**
|
|
* EXPECT_GE(expected, seen)
|
|
@@ -527,7 +527,7 @@
|
|
* EXPECT_GE(expected, measured): expected >= measured
|
|
*/
|
|
#define EXPECT_GE(expected, seen) \
|
|
- __EXPECT(expected, seen, >=, 0)
|
|
+ __EXPECT(expected, #expected, seen, #seen, >=, 0)
|
|
|
|
/**
|
|
* EXPECT_NULL(seen)
|
|
@@ -537,7 +537,7 @@
|
|
* EXPECT_NULL(measured): NULL == measured
|
|
*/
|
|
#define EXPECT_NULL(seen) \
|
|
- __EXPECT(NULL, seen, ==, 0)
|
|
+ __EXPECT(NULL, "NULL", seen, #seen, ==, 0)
|
|
|
|
/**
|
|
* EXPECT_TRUE(seen)
|
|
@@ -547,7 +547,7 @@
|
|
* EXPECT_TRUE(measured): 0 != measured
|
|
*/
|
|
#define EXPECT_TRUE(seen) \
|
|
- EXPECT_NE(0, seen)
|
|
+ __EXPECT(0, "0", seen, #seen, !=, 0)
|
|
|
|
/**
|
|
* EXPECT_FALSE(seen)
|
|
@@ -557,7 +557,7 @@
|
|
* EXPECT_FALSE(measured): 0 == measured
|
|
*/
|
|
#define EXPECT_FALSE(seen) \
|
|
- EXPECT_EQ(0, seen)
|
|
+ __EXPECT(0, "0", seen, #seen, ==, 0)
|
|
|
|
/**
|
|
* EXPECT_STREQ(expected, seen)
|
|
@@ -597,7 +597,7 @@
|
|
if (_metadata->passed && _metadata->step < 255) \
|
|
_metadata->step++;
|
|
|
|
-#define __EXPECT(_expected, _seen, _t, _assert) do { \
|
|
+#define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \
|
|
/* Avoid multiple evaluation of the cases */ \
|
|
__typeof__(_expected) __exp = (_expected); \
|
|
__typeof__(_seen) __seen = (_seen); \
|
|
@@ -606,8 +606,8 @@
|
|
unsigned long long __exp_print = (uintptr_t)__exp; \
|
|
unsigned long long __seen_print = (uintptr_t)__seen; \
|
|
__TH_LOG("Expected %s (%llu) %s %s (%llu)", \
|
|
- #_expected, __exp_print, #_t, \
|
|
- #_seen, __seen_print); \
|
|
+ _expected_str, __exp_print, #_t, \
|
|
+ _seen_str, __seen_print); \
|
|
_metadata->passed = 0; \
|
|
/* Ensure the optional handler is triggered */ \
|
|
_metadata->trigger = 1; \
|
|
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
|
|
index 01a219229238d..52bfe5e769079 100644
|
|
--- a/tools/testing/selftests/kvm/Makefile
|
|
+++ b/tools/testing/selftests/kvm/Makefile
|
|
@@ -1,6 +1,7 @@
|
|
all:
|
|
|
|
top_srcdir = ../../../..
|
|
+KSFT_KHDR_INSTALL := 1
|
|
UNAME_M := $(shell uname -m)
|
|
|
|
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
|
|
@@ -44,7 +45,6 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
|
|
|
|
all: $(STATIC_LIBS)
|
|
$(TEST_GEN_PROGS): $(STATIC_LIBS)
|
|
-$(STATIC_LIBS):| khdr
|
|
|
|
cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
|
|
cscope:
|
|
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
|
|
index 92c2cfd1b1828..ea3c73e8f4f6e 100644
|
|
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
|
|
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
|
|
@@ -113,8 +113,8 @@ int main(int argc, char *argv[])
|
|
for (stage = 1;; stage++) {
|
|
_vcpu_run(vm, VCPU_ID);
|
|
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
|
|
- "Unexpected exit reason: %u (%s),\n",
|
|
- run->exit_reason,
|
|
+ "Stage %d: unexpected exit reason: %u (%s),\n",
|
|
+ stage, run->exit_reason,
|
|
exit_reason_str(run->exit_reason));
|
|
|
|
memset(®s1, 0, sizeof(regs1));
|
|
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
|
|
index 03da41f0f7364..4b3f556265f1b 100644
|
|
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
|
|
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
|
|
@@ -152,8 +152,8 @@ int main(int argc, char *argv[])
|
|
for (stage = 1;; stage++) {
|
|
_vcpu_run(vm, VCPU_ID);
|
|
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
|
|
- "Unexpected exit reason: %u (%s),\n",
|
|
- run->exit_reason,
|
|
+ "Stage %d: unexpected exit reason: %u (%s),\n",
|
|
+ stage, run->exit_reason,
|
|
exit_reason_str(run->exit_reason));
|
|
|
|
memset(®s1, 0, sizeof(regs1));
|
|
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
|
|
index 0a8e75886224b..8b0f16409ed7e 100644
|
|
--- a/tools/testing/selftests/lib.mk
|
|
+++ b/tools/testing/selftests/lib.mk
|
|
@@ -16,18 +16,18 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
|
|
TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
|
|
TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
|
|
|
|
+ifdef KSFT_KHDR_INSTALL
|
|
top_srcdir ?= ../../../..
|
|
include $(top_srcdir)/scripts/subarch.include
|
|
ARCH ?= $(SUBARCH)
|
|
|
|
-all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
|
|
-
|
|
.PHONY: khdr
|
|
khdr:
|
|
make ARCH=$(ARCH) -C $(top_srcdir) headers_install
|
|
|
|
-ifdef KSFT_KHDR_INSTALL
|
|
-$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
|
|
+all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
|
|
+else
|
|
+all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
|
|
endif
|
|
|
|
.ONESHELL:
|
|
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
|
|
index d8313d0438b74..b90dff8d3a94b 100755
|
|
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
|
|
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
|
|
@@ -1,7 +1,7 @@
|
|
#!/bin/bash
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
|
|
-ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
|
|
+ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
|
|
NUM_NETIFS=4
|
|
CHECK_TC="yes"
|
|
source lib.sh
|
|
@@ -96,6 +96,51 @@ flooding()
|
|
flood_test $swp2 $h1 $h2
|
|
}
|
|
|
|
+vlan_deletion()
|
|
+{
|
|
+ # Test that the deletion of a VLAN on a bridge port does not affect
|
|
+ # the PVID VLAN
|
|
+ log_info "Add and delete a VLAN on bridge port $swp1"
|
|
+
|
|
+ bridge vlan add vid 10 dev $swp1
|
|
+ bridge vlan del vid 10 dev $swp1
|
|
+
|
|
+ ping_ipv4
|
|
+ ping_ipv6
|
|
+}
|
|
+
|
|
+extern_learn()
|
|
+{
|
|
+ local mac=de:ad:be:ef:13:37
|
|
+ local ageing_time
|
|
+
|
|
+ # Test that externally learned FDB entries can roam, but not age out
|
|
+ RET=0
|
|
+
|
|
+ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
|
|
+
|
|
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
|
|
+ check_err $? "Did not find FDB entry when should"
|
|
+
|
|
+ # Wait for 10 seconds after the ageing time to make sure the FDB entry
|
|
+ # was not aged out
|
|
+ ageing_time=$(bridge_ageing_time_get br0)
|
|
+ sleep $((ageing_time + 10))
|
|
+
|
|
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
|
|
+ check_err $? "FDB entry was aged out when should not"
|
|
+
|
|
+ $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
|
|
+
|
|
+ bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
|
|
+ check_err $? "FDB entry did not roam when should"
|
|
+
|
|
+ log_test "Externally learned FDB entry - ageing & roaming"
|
|
+
|
|
+ bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
|
|
+ bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
|
|
+}
|
|
+
|
|
trap cleanup EXIT
|
|
|
|
setup_prepare
|
|
diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c
|
|
index 61ae2782388e9..5d56cc0838f62 100644
|
|
--- a/tools/testing/selftests/net/ip_defrag.c
|
|
+++ b/tools/testing/selftests/net/ip_defrag.c
|
|
@@ -203,6 +203,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
|
|
{
|
|
struct ip *iphdr = (struct ip *)ip_frame;
|
|
struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
|
|
+ const bool ipv4 = !ipv6;
|
|
int res;
|
|
int offset;
|
|
int frag_len;
|
|
@@ -239,19 +240,53 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
|
|
iphdr->ip_sum = 0;
|
|
}
|
|
|
|
+ /* Occasionally test in-order fragments. */
|
|
+ if (!cfg_overlap && (rand() % 100 < 15)) {
|
|
+ offset = 0;
|
|
+ while (offset < (UDP_HLEN + payload_len)) {
|
|
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
|
|
+ offset += max_frag_len;
|
|
+ }
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
|
|
+ if (ipv4 && !cfg_overlap && (rand() % 100 < 20) &&
|
|
+ (payload_len > 9 * max_frag_len)) {
|
|
+ offset = 6 * max_frag_len;
|
|
+ while (offset < (UDP_HLEN + payload_len)) {
|
|
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
|
|
+ offset += max_frag_len;
|
|
+ }
|
|
+ offset = 3 * max_frag_len;
|
|
+ while (offset < 6 * max_frag_len) {
|
|
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
|
|
+ offset += max_frag_len;
|
|
+ }
|
|
+ offset = 0;
|
|
+ while (offset < 3 * max_frag_len) {
|
|
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
|
|
+ offset += max_frag_len;
|
|
+ }
|
|
+ return;
|
|
+ }
|
|
+
|
|
/* Odd fragments. */
|
|
offset = max_frag_len;
|
|
while (offset < (UDP_HLEN + payload_len)) {
|
|
send_fragment(fd_raw, addr, alen, offset, ipv6);
|
|
+ /* IPv4 ignores duplicates, so randomly send a duplicate. */
|
|
+ if (ipv4 && (1 == rand() % 100))
|
|
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
|
|
offset += 2 * max_frag_len;
|
|
}
|
|
|
|
if (cfg_overlap) {
|
|
/* Send an extra random fragment. */
|
|
- offset = rand() % (UDP_HLEN + payload_len - 1);
|
|
- /* sendto() returns EINVAL if offset + frag_len is too small. */
|
|
if (ipv6) {
|
|
struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
|
|
+ /* sendto() returns EINVAL if offset + frag_len is too small. */
|
|
+ offset = rand() % (UDP_HLEN + payload_len - 1);
|
|
frag_len = max_frag_len + rand() % 256;
|
|
/* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
|
|
frag_len &= ~0x7;
|
|
@@ -259,13 +294,29 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
|
|
ip6hdr->ip6_plen = htons(frag_len);
|
|
frag_len += IP6_HLEN;
|
|
} else {
|
|
- frag_len = IP4_HLEN + UDP_HLEN + rand() % 256;
|
|
+ /* In IPv4, duplicates and some fragments completely inside
|
|
+ * previously sent fragments are dropped/ignored. So
|
|
+ * random offset and frag_len can result in a dropped
|
|
+ * fragment instead of a dropped queue/packet. So we
|
|
+ * hard-code offset and frag_len.
|
|
+ *
|
|
+ * See ade446403bfb ("net: ipv4: do not handle duplicate
|
|
+ * fragments as overlapping").
|
|
+ */
|
|
+ if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
|
|
+ /* not enough payload to play with random offset and frag_len. */
|
|
+ offset = 8;
|
|
+ frag_len = IP4_HLEN + UDP_HLEN + max_frag_len;
|
|
+ } else {
|
|
+ offset = rand() % (payload_len / 2);
|
|
+ frag_len = 2 * max_frag_len + 1 + rand() % 256;
|
|
+ }
|
|
iphdr->ip_off = htons(offset / 8 | IP4_MF);
|
|
iphdr->ip_len = htons(frag_len);
|
|
}
|
|
res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
|
|
if (res < 0)
|
|
- error(1, errno, "sendto overlap");
|
|
+ error(1, errno, "sendto overlap: %d", frag_len);
|
|
if (res != frag_len)
|
|
error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len);
|
|
frag_counter++;
|
|
@@ -275,6 +326,9 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
|
|
offset = 0;
|
|
while (offset < (UDP_HLEN + payload_len)) {
|
|
send_fragment(fd_raw, addr, alen, offset, ipv6);
|
|
+ /* IPv4 ignores duplicates, so randomly send a duplicate. */
|
|
+ if (ipv4 && (1 == rand() % 100))
|
|
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
|
|
offset += 2 * max_frag_len;
|
|
}
|
|
}
|
|
@@ -282,7 +336,11 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
|
|
static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
|
|
{
|
|
int fd_tx_raw, fd_rx_udp;
|
|
- struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 };
|
|
+ /* Frag queue timeout is set to one second in the calling script;
|
|
+ * socket timeout should be just a bit longer to avoid tests interfering
|
|
+ * with each other.
|
|
+ */
|
|
+ struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
|
|
int idx;
|
|
int min_frag_len = ipv6 ? 1280 : 8;
|
|
|
|
@@ -308,12 +366,32 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
|
|
payload_len += (rand() % 4096)) {
|
|
if (cfg_verbose)
|
|
printf("payload_len: %d\n", payload_len);
|
|
- max_frag_len = min_frag_len;
|
|
- do {
|
|
+
|
|
+ if (cfg_overlap) {
|
|
+ /* With overlaps, one send/receive pair below takes
|
|
+ * at least one second (== timeout) to run, so there
|
|
+ * is not enough test time to run a nested loop:
|
|
+ * the full overlap test takes 20-30 seconds.
|
|
+ */
|
|
+ max_frag_len = min_frag_len +
|
|
+ rand() % (1500 - FRAG_HLEN - min_frag_len);
|
|
send_udp_frags(fd_tx_raw, addr, alen, ipv6);
|
|
recv_validate_udp(fd_rx_udp);
|
|
- max_frag_len += 8 * (rand() % 8);
|
|
- } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len);
|
|
+ } else {
|
|
+ /* Without overlaps, each packet reassembly (== one
|
|
+ * send/receive pair below) takes very little time to
|
|
+ * run, so we can easily afford more thourough testing
|
|
+ * with a nested loop: the full non-overlap test takes
|
|
+ * less than one second).
|
|
+ */
|
|
+ max_frag_len = min_frag_len;
|
|
+ do {
|
|
+ send_udp_frags(fd_tx_raw, addr, alen, ipv6);
|
|
+ recv_validate_udp(fd_rx_udp);
|
|
+ max_frag_len += 8 * (rand() % 8);
|
|
+ } while (max_frag_len < (1500 - FRAG_HLEN) &&
|
|
+ max_frag_len <= payload_len);
|
|
+ }
|
|
}
|
|
|
|
/* Cleanup. */
|
|
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
|
|
index f346727960449..7dd79a9efb177 100755
|
|
--- a/tools/testing/selftests/net/ip_defrag.sh
|
|
+++ b/tools/testing/selftests/net/ip_defrag.sh
|
|
@@ -11,10 +11,17 @@ readonly NETNS="ns-$(mktemp -u XXXXXX)"
|
|
setup() {
|
|
ip netns add "${NETNS}"
|
|
ip -netns "${NETNS}" link set lo up
|
|
+
|
|
ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1
|
|
ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1
|
|
+ ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_time=1 >/dev/null 2>&1
|
|
+
|
|
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1
|
|
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
|
|
+ ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1
|
|
+
|
|
+ # DST cache can get full with a lot of frags, with GC not keeping up with the test.
|
|
+ ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1
|
|
}
|
|
|
|
cleanup() {
|
|
@@ -27,7 +34,6 @@ setup
|
|
echo "ipv4 defrag"
|
|
ip netns exec "${NETNS}" ./ip_defrag -4
|
|
|
|
-
|
|
echo "ipv4 defrag with overlaps"
|
|
ip netns exec "${NETNS}" ./ip_defrag -4o
|
|
|
|
@@ -37,3 +43,4 @@ ip netns exec "${NETNS}" ./ip_defrag -6
|
|
echo "ipv6 defrag with overlaps"
|
|
ip netns exec "${NETNS}" ./ip_defrag -6o
|
|
|
|
+echo "all tests done"
|
|
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
|
|
index 14cfcf006936d..c46c0eefab9ef 100644
|
|
--- a/tools/testing/selftests/networking/timestamping/Makefile
|
|
+++ b/tools/testing/selftests/networking/timestamping/Makefile
|
|
@@ -6,6 +6,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
|
|
all: $(TEST_PROGS)
|
|
|
|
top_srcdir = ../../../../..
|
|
+KSFT_KHDR_INSTALL := 1
|
|
include ../../lib.mk
|
|
|
|
clean:
|
|
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
|
|
index c9a2abf8be1b3..0692d5dab729a 100644
|
|
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
|
|
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
|
|
@@ -1563,7 +1563,16 @@ TEST_F(TRACE_poke, getpid_runs_normally)
|
|
#ifdef SYSCALL_NUM_RET_SHARE_REG
|
|
# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
|
|
#else
|
|
-# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action)
|
|
+# define EXPECT_SYSCALL_RETURN(val, action) \
|
|
+ do { \
|
|
+ errno = 0; \
|
|
+ if (val < 0) { \
|
|
+ EXPECT_EQ(-1, action); \
|
|
+ EXPECT_EQ(-(val), errno); \
|
|
+ } else { \
|
|
+ EXPECT_EQ(val, action); \
|
|
+ } \
|
|
+ } while (0)
|
|
#endif
|
|
|
|
/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
|
|
@@ -1602,7 +1611,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
|
|
|
|
/* Architecture-specific syscall changing routine. */
|
|
void change_syscall(struct __test_metadata *_metadata,
|
|
- pid_t tracee, int syscall)
|
|
+ pid_t tracee, int syscall, int result)
|
|
{
|
|
int ret;
|
|
ARCH_REGS regs;
|
|
@@ -1661,7 +1670,7 @@ void change_syscall(struct __test_metadata *_metadata,
|
|
#ifdef SYSCALL_NUM_RET_SHARE_REG
|
|
TH_LOG("Can't modify syscall return on this architecture");
|
|
#else
|
|
- regs.SYSCALL_RET = EPERM;
|
|
+ regs.SYSCALL_RET = result;
|
|
#endif
|
|
|
|
#ifdef HAVE_GETREGS
|
|
@@ -1689,14 +1698,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
|
|
case 0x1002:
|
|
/* change getpid to getppid. */
|
|
EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
|
|
- change_syscall(_metadata, tracee, __NR_getppid);
|
|
+ change_syscall(_metadata, tracee, __NR_getppid, 0);
|
|
break;
|
|
case 0x1003:
|
|
- /* skip gettid. */
|
|
+ /* skip gettid with valid return code. */
|
|
EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
|
|
- change_syscall(_metadata, tracee, -1);
|
|
+ change_syscall(_metadata, tracee, -1, 45000);
|
|
break;
|
|
case 0x1004:
|
|
+ /* skip openat with error. */
|
|
+ EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
|
|
+ change_syscall(_metadata, tracee, -1, -ESRCH);
|
|
+ break;
|
|
+ case 0x1005:
|
|
/* do nothing (allow getppid) */
|
|
EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
|
|
break;
|
|
@@ -1729,9 +1743,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
|
|
nr = get_syscall(_metadata, tracee);
|
|
|
|
if (nr == __NR_getpid)
|
|
- change_syscall(_metadata, tracee, __NR_getppid);
|
|
+ change_syscall(_metadata, tracee, __NR_getppid, 0);
|
|
+ if (nr == __NR_gettid)
|
|
+ change_syscall(_metadata, tracee, -1, 45000);
|
|
if (nr == __NR_openat)
|
|
- change_syscall(_metadata, tracee, -1);
|
|
+ change_syscall(_metadata, tracee, -1, -ESRCH);
|
|
}
|
|
|
|
FIXTURE_DATA(TRACE_syscall) {
|
|
@@ -1748,8 +1764,10 @@ FIXTURE_SETUP(TRACE_syscall)
|
|
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
|
|
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
|
|
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
|
|
- BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
|
|
+ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
|
|
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
|
|
+ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
|
|
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
|
|
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
|
|
};
|
|
|
|
@@ -1797,15 +1815,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected)
|
|
EXPECT_NE(self->mypid, syscall(__NR_getpid));
|
|
}
|
|
|
|
-TEST_F(TRACE_syscall, ptrace_syscall_dropped)
|
|
+TEST_F(TRACE_syscall, ptrace_syscall_errno)
|
|
+{
|
|
+ /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
|
|
+ teardown_trace_fixture(_metadata, self->tracer);
|
|
+ self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
|
|
+ true);
|
|
+
|
|
+ /* Tracer should skip the open syscall, resulting in ESRCH. */
|
|
+ EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
|
|
+}
|
|
+
|
|
+TEST_F(TRACE_syscall, ptrace_syscall_faked)
|
|
{
|
|
/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
|
|
teardown_trace_fixture(_metadata, self->tracer);
|
|
self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
|
|
true);
|
|
|
|
- /* Tracer should skip the open syscall, resulting in EPERM. */
|
|
- EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
|
|
+ /* Tracer should skip the gettid syscall, resulting fake pid. */
|
|
+ EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
|
|
}
|
|
|
|
TEST_F(TRACE_syscall, syscall_allowed)
|
|
@@ -1838,7 +1867,21 @@ TEST_F(TRACE_syscall, syscall_redirected)
|
|
EXPECT_NE(self->mypid, syscall(__NR_getpid));
|
|
}
|
|
|
|
-TEST_F(TRACE_syscall, syscall_dropped)
|
|
+TEST_F(TRACE_syscall, syscall_errno)
|
|
+{
|
|
+ long ret;
|
|
+
|
|
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
|
|
+ ASSERT_EQ(0, ret);
|
|
+
|
|
+ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
|
|
+ ASSERT_EQ(0, ret);
|
|
+
|
|
+ /* openat has been skipped and an errno return. */
|
|
+ EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
|
|
+}
|
|
+
|
|
+TEST_F(TRACE_syscall, syscall_faked)
|
|
{
|
|
long ret;
|
|
|
|
@@ -1849,8 +1892,7 @@ TEST_F(TRACE_syscall, syscall_dropped)
|
|
ASSERT_EQ(0, ret);
|
|
|
|
/* gettid has been skipped and an altered return value stored. */
|
|
- EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
|
|
- EXPECT_NE(self->mytid, syscall(__NR_gettid));
|
|
+ EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
|
|
}
|
|
|
|
TEST_F(TRACE_syscall, skip_after_RET_TRACE)
|
|
diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/bpf/Makefile
|
|
index dc92eb271d9a1..be5a5e5428043 100644
|
|
--- a/tools/testing/selftests/tc-testing/bpf/Makefile
|
|
+++ b/tools/testing/selftests/tc-testing/bpf/Makefile
|
|
@@ -4,6 +4,7 @@ APIDIR := ../../../../include/uapi
|
|
TEST_GEN_FILES = action.o
|
|
|
|
top_srcdir = ../../../../..
|
|
+KSFT_KHDR_INSTALL := 1
|
|
include ../../lib.mk
|
|
|
|
CLANG ?= clang
|
|
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
|
|
index 637ea0219617f..0da3545cabdb6 100644
|
|
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
|
|
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
|
|
@@ -17,7 +17,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -41,7 +41,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -65,7 +65,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -89,7 +89,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -113,7 +113,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -137,7 +137,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 2",
|
|
- "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -161,7 +161,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 90",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -185,7 +185,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
|
|
"expExitCode": "255",
|
|
"verifyCmd": "$TC actions get action ife index 90",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
|
|
"matchCount": "0",
|
|
"teardown": []
|
|
},
|
|
@@ -207,7 +207,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -231,7 +231,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -255,7 +255,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -279,7 +279,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -303,7 +303,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -327,7 +327,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 9",
|
|
- "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -351,7 +351,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 99",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -375,7 +375,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
|
|
"expExitCode": "255",
|
|
"verifyCmd": "$TC actions get action ife index 99",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
|
|
"matchCount": "0",
|
|
"teardown": []
|
|
},
|
|
@@ -397,7 +397,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -421,7 +421,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -445,7 +445,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -469,7 +469,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -493,7 +493,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 77",
|
|
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -517,7 +517,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 77",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -541,7 +541,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 77",
|
|
- "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -565,7 +565,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -589,7 +589,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
|
|
"expExitCode": "255",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
|
|
"matchCount": "0",
|
|
"teardown": []
|
|
},
|
|
@@ -611,7 +611,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -635,7 +635,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -659,7 +659,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 11",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -683,7 +683,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -707,7 +707,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 21",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -731,7 +731,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 21",
|
|
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -739,7 +739,7 @@
|
|
},
|
|
{
|
|
"id": "fac3",
|
|
- "name": "Create valid ife encode action with index at 32-bit maximnum",
|
|
+ "name": "Create valid ife encode action with index at 32-bit maximum",
|
|
"category": [
|
|
"actions",
|
|
"ife"
|
|
@@ -755,7 +755,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 4294967295",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -779,7 +779,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife decode pass index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -803,7 +803,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife decode pipe index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -827,7 +827,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife decode continue index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -851,7 +851,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife decode drop index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -875,7 +875,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -899,7 +899,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 1",
|
|
- "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
|
|
+ "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -923,7 +923,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
|
|
"expExitCode": "255",
|
|
"verifyCmd": "$TC actions get action ife index 4294967295999",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
|
|
"matchCount": "0",
|
|
"teardown": []
|
|
},
|
|
@@ -945,7 +945,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
|
|
"expExitCode": "255",
|
|
"verifyCmd": "$TC actions get action ife index 4",
|
|
- "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
|
|
"matchCount": "0",
|
|
"teardown": []
|
|
},
|
|
@@ -967,7 +967,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action ife index 4",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action ife"
|
|
@@ -991,7 +991,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
|
|
"expExitCode": "255",
|
|
"verifyCmd": "$TC actions get action ife index 4",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
|
|
"matchCount": "0",
|
|
"teardown": []
|
|
},
|
|
@@ -1013,7 +1013,7 @@
|
|
"cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
|
|
"expExitCode": "255",
|
|
"verifyCmd": "$TC actions get action ife index 4",
|
|
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
|
|
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
|
|
"matchCount": "0",
|
|
"teardown": []
|
|
},
|
|
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
|
|
index 10b2d894e4362..e7e15a7336b6d 100644
|
|
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
|
|
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
|
|
@@ -81,35 +81,6 @@
|
|
]
|
|
]
|
|
},
|
|
- {
|
|
- "id": "ba4e",
|
|
- "name": "Add tunnel_key set action with missing mandatory id parameter",
|
|
- "category": [
|
|
- "actions",
|
|
- "tunnel_key"
|
|
- ],
|
|
- "setup": [
|
|
- [
|
|
- "$TC actions flush action tunnel_key",
|
|
- 0,
|
|
- 1,
|
|
- 255
|
|
- ]
|
|
- ],
|
|
- "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
|
|
- "expExitCode": "255",
|
|
- "verifyCmd": "$TC actions list action tunnel_key",
|
|
- "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
|
|
- "matchCount": "0",
|
|
- "teardown": [
|
|
- [
|
|
- "$TC actions flush action tunnel_key",
|
|
- 0,
|
|
- 1,
|
|
- 255
|
|
- ]
|
|
- ]
|
|
- },
|
|
{
|
|
"id": "a5e0",
|
|
"name": "Add tunnel_key set action with invalid src_ip parameter",
|
|
@@ -634,7 +605,7 @@
|
|
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
|
|
"expExitCode": "0",
|
|
"verifyCmd": "$TC actions get action tunnel_key index 4",
|
|
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
|
|
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
|
|
"matchCount": "1",
|
|
"teardown": [
|
|
"$TC actions flush action tunnel_key"
|
|
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
|
|
index 6e67e726e5a5d..e13eb6cc89018 100644
|
|
--- a/tools/testing/selftests/vm/Makefile
|
|
+++ b/tools/testing/selftests/vm/Makefile
|
|
@@ -25,6 +25,7 @@ TEST_GEN_FILES += virtual_address_range
|
|
|
|
TEST_PROGS := run_vmtests
|
|
|
|
+KSFT_KHDR_INSTALL := 1
|
|
include ../lib.mk
|
|
|
|
$(OUTPUT)/userfaultfd: LDLIBS += -lpthread
|
|
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
|
|
index 460b4bdf4c1ed..5d546dcdbc805 100644
|
|
--- a/tools/testing/selftests/x86/protection_keys.c
|
|
+++ b/tools/testing/selftests/x86/protection_keys.c
|
|
@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
|
|
pkey_assert(err);
|
|
}
|
|
|
|
+void become_child(void)
|
|
+{
|
|
+ pid_t forkret;
|
|
+
|
|
+ forkret = fork();
|
|
+ pkey_assert(forkret >= 0);
|
|
+ dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
|
|
+
|
|
+ if (!forkret) {
|
|
+ /* in the child */
|
|
+ return;
|
|
+ }
|
|
+ exit(0);
|
|
+}
|
|
+
|
|
/* Assumes that all pkeys other than 'pkey' are unallocated */
|
|
void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
|
|
{
|
|
@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
|
|
int nr_allocated_pkeys = 0;
|
|
int i;
|
|
|
|
- for (i = 0; i < NR_PKEYS*2; i++) {
|
|
+ for (i = 0; i < NR_PKEYS*3; i++) {
|
|
int new_pkey;
|
|
dprintf1("%s() alloc loop: %d\n", __func__, i);
|
|
new_pkey = alloc_pkey();
|
|
@@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
|
|
if ((new_pkey == -1) && (errno == ENOSPC)) {
|
|
dprintf2("%s() failed to allocate pkey after %d tries\n",
|
|
__func__, nr_allocated_pkeys);
|
|
- break;
|
|
+ } else {
|
|
+ /*
|
|
+ * Ensure the number of successes never
|
|
+ * exceeds the number of keys supported
|
|
+ * in the hardware.
|
|
+ */
|
|
+ pkey_assert(nr_allocated_pkeys < NR_PKEYS);
|
|
+ allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
|
|
}
|
|
- pkey_assert(nr_allocated_pkeys < NR_PKEYS);
|
|
- allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
|
|
+
|
|
+ /*
|
|
+ * Make sure that allocation state is properly
|
|
+ * preserved across fork().
|
|
+ */
|
|
+ if (i == NR_PKEYS*2)
|
|
+ become_child();
|
|
}
|
|
|
|
dprintf3("%s()::%d\n", __func__, __LINE__);
|
|
|
|
- /*
|
|
- * ensure it did not reach the end of the loop without
|
|
- * failure:
|
|
- */
|
|
- pkey_assert(i < NR_PKEYS*2);
|
|
-
|
|
/*
|
|
* There are 16 pkeys supported in hardware. Three are
|
|
* allocated by the time we get here:
|
|
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
|
|
index 23774970c9df6..abcd29db2d7a9 100644
|
|
--- a/virt/kvm/arm/arm.c
|
|
+++ b/virt/kvm/arm/arm.c
|
|
@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
|
|
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
|
|
static u32 kvm_next_vmid;
|
|
static unsigned int kvm_vmid_bits __read_mostly;
|
|
-static DEFINE_RWLOCK(kvm_vmid_lock);
|
|
+static DEFINE_SPINLOCK(kvm_vmid_lock);
|
|
|
|
static bool vgic_present;
|
|
|
|
@@ -484,7 +484,9 @@ void force_vm_exit(const cpumask_t *mask)
|
|
*/
|
|
static bool need_new_vmid_gen(struct kvm *kvm)
|
|
{
|
|
- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
|
|
+ u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
|
|
+ smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
|
|
+ return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
|
|
}
|
|
|
|
/**
|
|
@@ -499,16 +501,11 @@ static void update_vttbr(struct kvm *kvm)
|
|
{
|
|
phys_addr_t pgd_phys;
|
|
u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
|
|
- bool new_gen;
|
|
|
|
- read_lock(&kvm_vmid_lock);
|
|
- new_gen = need_new_vmid_gen(kvm);
|
|
- read_unlock(&kvm_vmid_lock);
|
|
-
|
|
- if (!new_gen)
|
|
+ if (!need_new_vmid_gen(kvm))
|
|
return;
|
|
|
|
- write_lock(&kvm_vmid_lock);
|
|
+ spin_lock(&kvm_vmid_lock);
|
|
|
|
/*
|
|
* We need to re-check the vmid_gen here to ensure that if another vcpu
|
|
@@ -516,7 +513,7 @@ static void update_vttbr(struct kvm *kvm)
|
|
* use the same vmid.
|
|
*/
|
|
if (!need_new_vmid_gen(kvm)) {
|
|
- write_unlock(&kvm_vmid_lock);
|
|
+ spin_unlock(&kvm_vmid_lock);
|
|
return;
|
|
}
|
|
|
|
@@ -539,7 +536,6 @@ static void update_vttbr(struct kvm *kvm)
|
|
kvm_call_hyp(__kvm_flush_vm_context);
|
|
}
|
|
|
|
- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
|
|
kvm->arch.vmid = kvm_next_vmid;
|
|
kvm_next_vmid++;
|
|
kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
|
|
@@ -550,7 +546,10 @@ static void update_vttbr(struct kvm *kvm)
|
|
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
|
|
kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
|
|
|
|
- write_unlock(&kvm_vmid_lock);
|
|
+ smp_wmb();
|
|
+ WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
|
|
+
|
|
+ spin_unlock(&kvm_vmid_lock);
|
|
}
|
|
|
|
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
|
|
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
|
|
index dac7ceb1a6777..08443a15e6be8 100644
|
|
--- a/virt/kvm/arm/mmio.c
|
|
+++ b/virt/kvm/arm/mmio.c
|
|
@@ -117,6 +117,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
|
|
}
|
|
|
|
+ /*
|
|
+ * The MMIO instruction is emulated and should not be re-executed
|
|
+ * in the guest.
|
|
+ */
|
|
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -144,11 +150,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
|
|
vcpu->arch.mmio_decode.sign_extend = sign_extend;
|
|
vcpu->arch.mmio_decode.rt = rt;
|
|
|
|
- /*
|
|
- * The MMIO instruction is emulated and should not be re-executed
|
|
- * in the guest.
|
|
- */
|
|
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
|
|
index f56ff1cf52ec2..ceeda7e04a4d9 100644
|
|
--- a/virt/kvm/arm/vgic/vgic-mmio.c
|
|
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
|
|
@@ -313,36 +313,30 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
|
|
|
|
spin_lock_irqsave(&irq->irq_lock, flags);
|
|
|
|
- /*
|
|
- * If this virtual IRQ was written into a list register, we
|
|
- * have to make sure the CPU that runs the VCPU thread has
|
|
- * synced back the LR state to the struct vgic_irq.
|
|
- *
|
|
- * As long as the conditions below are true, we know the VCPU thread
|
|
- * may be on its way back from the guest (we kicked the VCPU thread in
|
|
- * vgic_change_active_prepare) and still has to sync back this IRQ,
|
|
- * so we release and re-acquire the spin_lock to let the other thread
|
|
- * sync back the IRQ.
|
|
- *
|
|
- * When accessing VGIC state from user space, requester_vcpu is
|
|
- * NULL, which is fine, because we guarantee that no VCPUs are running
|
|
- * when accessing VGIC state from user space so irq->vcpu->cpu is
|
|
- * always -1.
|
|
- */
|
|
- while (irq->vcpu && /* IRQ may have state in an LR somewhere */
|
|
- irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
|
|
- irq->vcpu->cpu != -1) /* VCPU thread is running */
|
|
- cond_resched_lock(&irq->irq_lock);
|
|
-
|
|
if (irq->hw) {
|
|
vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
|
|
} else {
|
|
u32 model = vcpu->kvm->arch.vgic.vgic_model;
|
|
+ u8 active_source;
|
|
|
|
irq->active = active;
|
|
+
|
|
+ /*
|
|
+ * The GICv2 architecture indicates that the source CPUID for
|
|
+ * an SGI should be provided during an EOI which implies that
|
|
+ * the active state is stored somewhere, but at the same time
|
|
+ * this state is not architecturally exposed anywhere and we
|
|
+ * have no way of knowing the right source.
|
|
+ *
|
|
+ * This may lead to a VCPU not being able to receive
|
|
+ * additional instances of a particular SGI after migration
|
|
+ * for a GICv2 VM on some GIC implementations. Oh well.
|
|
+ */
|
|
+ active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;
|
|
+
|
|
if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
|
|
active && vgic_irq_is_sgi(irq->intid))
|
|
- irq->active_source = requester_vcpu->vcpu_id;
|
|
+ irq->active_source = active_source;
|
|
}
|
|
|
|
if (irq->active)
|
|
@@ -368,14 +362,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
|
|
*/
|
|
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
|
|
{
|
|
- if (intid > VGIC_NR_PRIVATE_IRQS)
|
|
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
|
|
+ intid > VGIC_NR_PRIVATE_IRQS)
|
|
kvm_arm_halt_guest(vcpu->kvm);
|
|
}
|
|
|
|
/* See vgic_change_active_prepare */
|
|
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
|
|
{
|
|
- if (intid > VGIC_NR_PRIVATE_IRQS)
|
|
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
|
|
+ intid > VGIC_NR_PRIVATE_IRQS)
|
|
kvm_arm_resume_guest(vcpu->kvm);
|
|
}
|
|
|
|
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
|
|
index 7cfdfbc910e0c..f884a54b2601a 100644
|
|
--- a/virt/kvm/arm/vgic/vgic.c
|
|
+++ b/virt/kvm/arm/vgic/vgic.c
|
|
@@ -103,13 +103,13 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
|
|
{
|
|
/* SGIs and PPIs */
|
|
if (intid <= VGIC_MAX_PRIVATE) {
|
|
- intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
|
|
+ intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
|
|
return &vcpu->arch.vgic_cpu.private_irqs[intid];
|
|
}
|
|
|
|
/* SPIs */
|
|
- if (intid <= VGIC_MAX_SPI) {
|
|
- intid = array_index_nospec(intid, VGIC_MAX_SPI);
|
|
+ if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
|
|
+ intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
|
|
return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
|
|
}
|
|
|
|
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
|
|
index 2679e476b6c39..9fa05ed539445 100644
|
|
--- a/virt/kvm/kvm_main.c
|
|
+++ b/virt/kvm/kvm_main.c
|
|
@@ -1965,7 +1965,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
|
|
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
|
|
|
|
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
|
|
- void *data, int offset, unsigned long len)
|
|
+ void *data, unsigned int offset,
|
|
+ unsigned long len)
|
|
{
|
|
struct kvm_memslots *slots = kvm_memslots(kvm);
|
|
int r;
|
|
@@ -2918,8 +2919,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
|
|
if (ops->init)
|
|
ops->init(dev);
|
|
|
|
+ kvm_get_kvm(kvm);
|
|
ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
|
|
if (ret < 0) {
|
|
+ kvm_put_kvm(kvm);
|
|
mutex_lock(&kvm->lock);
|
|
list_del(&dev->vm_node);
|
|
mutex_unlock(&kvm->lock);
|
|
@@ -2927,7 +2930,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
|
|
return ret;
|
|
}
|
|
|
|
- kvm_get_kvm(kvm);
|
|
cd->fd = ret;
|
|
return 0;
|
|
}
|