From f15bf42e2b5c6dd48dc1632fbbf68174ba4c5525 Mon Sep 17 00:00:00 2001 From: 5kft <5kft@users.noreply.github.com> Date: Thu, 29 Oct 2020 07:27:36 -0700 Subject: [PATCH] [ sunxi-dev ] add upstream patches --- config/kernel/linux-sunxi-dev.config | 4 +- config/kernel/linux-sunxi64-dev.config | 4 +- .../sunxi-dev/patch-5.9.1-2-modified.patch | 29706 ++++++++++++++++ 3 files changed, 29712 insertions(+), 2 deletions(-) create mode 100644 patch/kernel/sunxi-dev/patch-5.9.1-2-modified.patch diff --git a/config/kernel/linux-sunxi-dev.config b/config/kernel/linux-sunxi-dev.config index 38152422c..5a6955be0 100644 --- a/config/kernel/linux-sunxi-dev.config +++ b/config/kernel/linux-sunxi-dev.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm 5.9.1 Kernel Configuration +# Linux/arm 5.9.2 Kernel Configuration # CONFIG_CC_VERSION_TEXT="arm-none-linux-gnueabihf-gcc (GNU Toolchain for the A-profile Architecture 9.2-2019.12 (arm-9.10)) 9.2.1 20191025" CONFIG_CC_IS_GCC=y @@ -7010,6 +7010,7 @@ CONFIG_NFSD_PNFS=y CONFIG_NFSD_BLOCKLAYOUT=y CONFIG_NFSD_SCSILAYOUT=y CONFIG_NFSD_FLEXFILELAYOUT=y +# CONFIG_NFSD_V4_2_INTER_SSC is not set CONFIG_NFSD_V4_SECURITY_LABEL=y CONFIG_GRACE_PERIOD=m CONFIG_LOCKD=m @@ -7814,6 +7815,7 @@ CONFIG_TEST_VMALLOC=m CONFIG_TEST_MEMCAT_P=m # CONFIG_TEST_STACKINIT is not set # CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set # CONFIG_MEMTEST is not set # end of Kernel Testing and Coverage # end of Kernel hacking diff --git a/config/kernel/linux-sunxi64-dev.config b/config/kernel/linux-sunxi64-dev.config index a03fbf1b6..81495cbed 100644 --- a/config/kernel/linux-sunxi64-dev.config +++ b/config/kernel/linux-sunxi64-dev.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 5.9.1 Kernel Configuration +# Linux/arm64 5.9.2 Kernel Configuration # CONFIG_CC_VERSION_TEXT="aarch64-none-linux-gnu-gcc (GNU Toolchain for the A-profile Architecture 9.2-2019.12 (arm-9.10)) 9.2.1 20191025" CONFIG_CC_IS_GCC=y @@ -6764,6 +6764,7 @@ CONFIG_NFSD_PNFS=y CONFIG_NFSD_BLOCKLAYOUT=y CONFIG_NFSD_SCSILAYOUT=y CONFIG_NFSD_FLEXFILELAYOUT=y +# CONFIG_NFSD_V4_2_INTER_SSC is not set CONFIG_NFSD_V4_SECURITY_LABEL=y CONFIG_GRACE_PERIOD=y CONFIG_LOCKD=y @@ -7512,6 +7513,7 @@ CONFIG_TEST_BLACKHOLE_DEV=m CONFIG_TEST_MEMCAT_P=m CONFIG_TEST_STACKINIT=m # CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set # CONFIG_MEMTEST is not set # end of Kernel Testing and Coverage # end of Kernel hacking diff --git a/patch/kernel/sunxi-dev/patch-5.9.1-2-modified.patch b/patch/kernel/sunxi-dev/patch-5.9.1-2-modified.patch new file mode 100644 index 000000000..1cfe0b165 --- /dev/null +++ b/patch/kernel/sunxi-dev/patch-5.9.1-2-modified.patch @@ -0,0 +1,29706 @@ +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index a1068742a6df1..ffe864390c5ac 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -577,7 +577,7 @@ + loops can be debugged more effectively on production + systems. + +- clearcpuid=BITNUM [X86] ++ clearcpuid=BITNUM[,BITNUM...] [X86] + Disable CPUID feature X for the kernel. See + arch/x86/include/asm/cpufeatures.h for the valid bit + numbers. 
Note the Linux specific bits are not necessarily +diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml +index fc823572bcff2..90c6d039b91b0 100644 +--- a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml ++++ b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml +@@ -23,8 +23,7 @@ properties: + - items: + - const: allwinner,sun7i-a20-crypto + - const: allwinner,sun4i-a10-crypto +- - items: +- - const: allwinner,sun8i-a33-crypto ++ - const: allwinner,sun8i-a33-crypto + + reg: + maxItems: 1 +@@ -59,7 +58,9 @@ if: + properties: + compatible: + contains: +- const: allwinner,sun6i-a31-crypto ++ enum: ++ - allwinner,sun6i-a31-crypto ++ - allwinner,sun8i-a33-crypto + + then: + required: +diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt +index 9d6c9feb12ff1..a3c1dffaa4bb4 100644 +--- a/Documentation/devicetree/bindings/net/socionext-netsec.txt ++++ b/Documentation/devicetree/bindings/net/socionext-netsec.txt +@@ -30,7 +30,9 @@ Optional properties: (See ethernet.txt file in the same directory) + - max-frame-size: See ethernet.txt in the same directory. + + The MAC address will be determined using the optional properties +-defined in ethernet.txt. ++defined in ethernet.txt. The 'phy-mode' property is required, but may ++be set to the empty string if the PHY configuration is programmed by ++the firmware or set by hardware straps, and needs to be preserved. + + Example: + eth0: ethernet@522d0000 { +diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst +index 837d51f9e1fab..25e6673a085a0 100644 +--- a/Documentation/networking/ip-sysctl.rst ++++ b/Documentation/networking/ip-sysctl.rst +@@ -1142,13 +1142,15 @@ icmp_ratelimit - INTEGER + icmp_msgs_per_sec - INTEGER + Limit maximal number of ICMP packets sent per second from this host. + Only messages whose type matches icmp_ratemask (see below) are +- controlled by this limit. ++ controlled by this limit. For security reasons, the precise count ++ of messages per second is randomized. + + Default: 1000 + + icmp_msgs_burst - INTEGER + icmp_msgs_per_sec controls number of ICMP packets sent per second, + while icmp_msgs_burst controls the burst size of these packets. ++ For security reasons, the precise burst size is randomized. 
+ + Default: 50 + +diff --git a/Makefile b/Makefile +index d600b38144f42..53e7f4ee2557e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 9 +-SUBLEVEL = 1 ++SUBLEVEL = 2 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig +index ce81018345184..6b5c54576f54d 100644 +--- a/arch/arc/plat-hsdk/Kconfig ++++ b/arch/arc/plat-hsdk/Kconfig +@@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK + select ARC_HAS_ACCL_REGS + select ARC_IRQ_NO_AUTOSAVE + select CLK_HSDK ++ select RESET_CONTROLLER + select RESET_HSDK + select HAVE_PCI +diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi +index 1c7180f285393..91a8c54d5e113 100644 +--- a/arch/arm/boot/dts/imx6sl.dtsi ++++ b/arch/arm/boot/dts/imx6sl.dtsi +@@ -939,8 +939,10 @@ + }; + + rngb: rngb@21b4000 { ++ compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb"; + reg = <0x021b4000 0x4000>; + interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clks IMX6SL_CLK_DUMMY>; + }; + + weim: weim@21b8000 { +diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi +index ebbe1518ef8a6..63cafd220dba1 100644 +--- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi ++++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi +@@ -57,7 +57,7 @@ + + lvds-receiver { + compatible = "ti,ds90cf384a", "lvds-decoder"; +- powerdown-gpios = <&gpio7 25 GPIO_ACTIVE_LOW>; ++ power-supply = <&vcc_3v3_tft1>; + + ports { + #address-cells = <1>; +@@ -81,6 +81,7 @@ + panel { + compatible = "edt,etm0700g0dh6"; + backlight = <&lcd_backlight>; ++ power-supply = <&vcc_3v3_tft1>; + + port { + panel_in: endpoint { +@@ -113,6 +114,17 @@ + }; + }; + ++ vcc_3v3_tft1: regulator-panel { ++ compatible = "regulator-fixed"; ++ ++ regulator-name = "vcc-3v3-tft1"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ enable-active-high; ++ startup-delay-us = <500>; ++ gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>; ++ }; ++ + vcc_sdhi1: regulator-vcc-sdhi1 { + compatible = "regulator-fixed"; + +@@ -207,6 +219,7 @@ + reg = <0x38>; + interrupt-parent = <&gpio2>; + interrupts = <12 IRQ_TYPE_EDGE_FALLING>; ++ vcc-supply = <&vcc_3v3_tft1>; + }; + }; + +diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi +index 277c0bb104534..04688e8abce2c 100644 +--- a/arch/arm/boot/dts/meson8.dtsi ++++ b/arch/arm/boot/dts/meson8.dtsi +@@ -240,8 +240,6 @@ + , + , + , +- , +- , + , + , + , +diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi +index 5ceb6cc4451d2..1dbe4e8b38ac7 100644 +--- a/arch/arm/boot/dts/owl-s500.dtsi ++++ b/arch/arm/boot/dts/owl-s500.dtsi +@@ -84,21 +84,21 @@ + global_timer: timer@b0020200 { + compatible = "arm,cortex-a9-global-timer"; + reg = <0xb0020200 0x100>; +- interrupts = ; ++ interrupts = ; + status = "disabled"; + }; + + twd_timer: timer@b0020600 { + compatible = "arm,cortex-a9-twd-timer"; + reg = <0xb0020600 0x20>; +- interrupts = ; ++ interrupts = ; + status = "disabled"; + }; + + twd_wdt: wdt@b0020620 { + compatible = "arm,cortex-a9-twd-wdt"; + reg = <0xb0020620 0xe0>; +- interrupts = ; ++ interrupts = ; + status = "disabled"; + }; + +diff --git a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts +index 5700e6b700d36..b85025d009437 100644 +--- a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts ++++ b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts +@@ -121,8 +121,6 @@ + reset-gpios = <&gpiog 0 GPIO_ACTIVE_LOW>; /* ETH_RST# */ + interrupt-parent = 
<&gpioa>; + interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* ETH_MDINT# */ +- rxc-skew-ps = <1860>; +- txc-skew-ps = <1860>; + reset-assert-us = <10000>; + reset-deassert-us = <300>; + micrel,force-master; +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi +index 7c4bd615b3115..e4e3c92eb30d3 100644 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi +@@ -11,7 +11,6 @@ + serial0 = &uart4; + serial1 = &usart3; + serial2 = &uart8; +- ethernet0 = ðernet0; + }; + + chosen { +@@ -26,23 +25,13 @@ + + display_bl: display-bl { + compatible = "pwm-backlight"; +- pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>; ++ pwms = <&pwm2 3 500000 PWM_POLARITY_INVERTED>; + brightness-levels = <0 16 22 30 40 55 75 102 138 188 255>; + default-brightness-level = <8>; + enable-gpios = <&gpioi 0 GPIO_ACTIVE_HIGH>; + status = "okay"; + }; + +- ethernet_vio: vioregulator { +- compatible = "regulator-fixed"; +- regulator-name = "vio"; +- regulator-min-microvolt = <3300000>; +- regulator-max-microvolt = <3300000>; +- gpio = <&gpiog 3 GPIO_ACTIVE_LOW>; +- regulator-always-on; +- regulator-boot-on; +- }; +- + gpio-keys-polled { + compatible = "gpio-keys-polled"; + #size-cells = <0>; +@@ -141,28 +130,6 @@ + status = "okay"; + }; + +-ðernet0 { +- status = "okay"; +- pinctrl-0 = <ðernet0_rmii_pins_a>; +- pinctrl-1 = <ðernet0_rmii_sleep_pins_a>; +- pinctrl-names = "default", "sleep"; +- phy-mode = "rmii"; +- max-speed = <100>; +- phy-handle = <&phy0>; +- st,eth-ref-clk-sel; +- phy-reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>; +- +- mdio0 { +- #address-cells = <1>; +- #size-cells = <0>; +- compatible = "snps,dwmac-mdio"; +- +- phy0: ethernet-phy@1 { +- reg = <1>; +- }; +- }; +-}; +- + &i2c2 { /* Header X22 */ + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins_a>; +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi +index ba905196fb549..a87ebc4843963 100644 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi +@@ -9,6 +9,10 @@ + #include + + / { ++ aliases { ++ ethernet0 = ðernet0; ++ }; ++ + memory@c0000000 { + device_type = "memory"; + reg = <0xC0000000 0x40000000>; +@@ -55,6 +59,16 @@ + no-map; + }; + }; ++ ++ ethernet_vio: vioregulator { ++ compatible = "regulator-fixed"; ++ regulator-name = "vio"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ gpio = <&gpiog 3 GPIO_ACTIVE_LOW>; ++ regulator-always-on; ++ regulator-boot-on; ++ }; + }; + + &adc { +@@ -94,6 +108,28 @@ + status = "okay"; + }; + ++ðernet0 { ++ status = "okay"; ++ pinctrl-0 = <ðernet0_rmii_pins_a>; ++ pinctrl-1 = <ðernet0_rmii_sleep_pins_a>; ++ pinctrl-names = "default", "sleep"; ++ phy-mode = "rmii"; ++ max-speed = <100>; ++ phy-handle = <&phy0>; ++ st,eth-ref-clk-sel; ++ phy-reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>; ++ ++ mdio0 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "snps,dwmac-mdio"; ++ ++ phy0: ethernet-phy@1 { ++ reg = <1>; ++ }; ++ }; ++}; ++ + &i2c4 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c4_pins_a>; +@@ -249,7 +285,7 @@ + compatible = "ti,tsc2004"; + reg = <0x49>; + vio-supply = <&v3v3>; +- interrupts-extended = <&gpioh 3 IRQ_TYPE_EDGE_FALLING>; ++ interrupts-extended = <&gpioh 15 IRQ_TYPE_EDGE_FALLING>; + }; + + eeprom@50 { +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi +index 930202742a3f6..905cd7bb98cf0 
100644 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi +@@ -295,9 +295,9 @@ + + &sdmmc2 { + pinctrl-names = "default", "opendrain", "sleep"; +- pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_b>; +- pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_b>; +- pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_b>; ++ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_c>; ++ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_c>; ++ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_c>; + bus-width = <8>; + mmc-ddr-1_8v; + no-sd; +diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts +index 42d62d1ba1dc7..ea15073f0c79c 100644 +--- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts ++++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts +@@ -223,16 +223,16 @@ + }; + + ®_dc1sw { +- regulator-min-microvolt = <3000000>; +- regulator-max-microvolt = <3000000>; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; + regulator-name = "vcc-gmac-phy"; + }; + + ®_dcdc1 { + regulator-always-on; +- regulator-min-microvolt = <3000000>; +- regulator-max-microvolt = <3000000>; +- regulator-name = "vcc-3v0"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ regulator-name = "vcc-3v3"; + }; + + ®_dcdc2 { +diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c +index 2aab043441e8f..eae8aaaadc3bf 100644 +--- a/arch/arm/mach-at91/pm.c ++++ b/arch/arm/mach-at91/pm.c +@@ -800,6 +800,7 @@ static void __init at91_pm_init(void (*pm_idle)(void)) + + pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id); + soc_pm.data.pmc = of_iomap(pmc_np, 0); ++ of_node_put(pmc_np); + if (!soc_pm.data.pmc) { + pr_err("AT91: PM not supported, PMC not found\n"); + return; +diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c +index 6f5f89711f256..a92d277f81a08 100644 +--- a/arch/arm/mach-omap2/cpuidle44xx.c ++++ b/arch/arm/mach-omap2/cpuidle44xx.c +@@ -174,8 +174,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, + */ + if (mpuss_can_lose_context) { + error = cpu_cluster_pm_enter(); +- if (error) ++ if (error) { ++ omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON); + goto cpu_cluster_pm_out; ++ } + } + } + +diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c +index 58c5ef3cf1d7e..2d370f7f75fa2 100644 +--- a/arch/arm/mach-s3c24xx/mach-at2440evb.c ++++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c +@@ -143,7 +143,7 @@ static struct gpiod_lookup_table at2440evb_mci_gpio_table = { + .dev_id = "s3c2410-sdi", + .table = { + /* Card detect S3C2410_GPG(10) */ +- GPIO_LOOKUP("GPG", 10, "cd", GPIO_ACTIVE_LOW), ++ GPIO_LOOKUP("GPIOG", 10, "cd", GPIO_ACTIVE_LOW), + { }, + }, + }; +diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c +index f4710052843ac..3601c7abe69dc 100644 +--- a/arch/arm/mach-s3c24xx/mach-h1940.c ++++ b/arch/arm/mach-s3c24xx/mach-h1940.c +@@ -468,9 +468,9 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = { + .dev_id = "s3c2410-sdi", + .table = { + /* Card detect S3C2410_GPF(5) */ +- GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW), ++ GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW), + /* Write protect S3C2410_GPH(8) */ +- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW), ++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW), + { }, + }, + }; +diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c 
b/arch/arm/mach-s3c24xx/mach-mini2440.c +index 2357494483118..5729bf07a6232 100644 +--- a/arch/arm/mach-s3c24xx/mach-mini2440.c ++++ b/arch/arm/mach-s3c24xx/mach-mini2440.c +@@ -244,9 +244,9 @@ static struct gpiod_lookup_table mini2440_mmc_gpio_table = { + .dev_id = "s3c2410-sdi", + .table = { + /* Card detect S3C2410_GPG(8) */ +- GPIO_LOOKUP("GPG", 8, "cd", GPIO_ACTIVE_LOW), ++ GPIO_LOOKUP("GPIOG", 8, "cd", GPIO_ACTIVE_LOW), + /* Write protect S3C2410_GPH(8) */ +- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_HIGH), ++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_HIGH), + { }, + }, + }; +diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c +index 998ccff3c174b..ed993bc666351 100644 +--- a/arch/arm/mach-s3c24xx/mach-n30.c ++++ b/arch/arm/mach-s3c24xx/mach-n30.c +@@ -389,9 +389,9 @@ static struct gpiod_lookup_table n30_mci_gpio_table = { + .dev_id = "s3c2410-sdi", + .table = { + /* Card detect S3C2410_GPF(1) */ +- GPIO_LOOKUP("GPF", 1, "cd", GPIO_ACTIVE_LOW), ++ GPIO_LOOKUP("GPIOF", 1, "cd", GPIO_ACTIVE_LOW), + /* Write protect S3C2410_GPG(10) */ +- GPIO_LOOKUP("GPG", 10, "wp", GPIO_ACTIVE_LOW), ++ GPIO_LOOKUP("GPIOG", 10, "wp", GPIO_ACTIVE_LOW), + { }, + }, + }; +diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c +index fde98b175c752..c0a06f123cfea 100644 +--- a/arch/arm/mach-s3c24xx/mach-rx1950.c ++++ b/arch/arm/mach-s3c24xx/mach-rx1950.c +@@ -571,9 +571,9 @@ static struct gpiod_lookup_table rx1950_mmc_gpio_table = { + .dev_id = "s3c2410-sdi", + .table = { + /* Card detect S3C2410_GPF(5) */ +- GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW), ++ GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW), + /* Write protect S3C2410_GPH(8) */ +- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW), ++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW), + { }, + }, + }; +diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c +index 12c26eb88afbc..43d91bfd23600 100644 +--- a/arch/arm/mm/cache-l2x0.c ++++ b/arch/arm/mm/cache-l2x0.c +@@ -1249,20 +1249,28 @@ static void __init l2c310_of_parse(const struct device_node *np, + + ret = of_property_read_u32(np, "prefetch-data", &val); + if (ret == 0) { +- if (val) ++ if (val) { + prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH; +- else ++ *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH; ++ } else { + prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; ++ *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; ++ } ++ *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; + } else if (ret != -EINVAL) { + pr_err("L2C-310 OF prefetch-data property value is missing\n"); + } + + ret = of_property_read_u32(np, "prefetch-instr", &val); + if (ret == 0) { +- if (val) ++ if (val) { + prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH; +- else ++ *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH; ++ } else { + prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; ++ *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; ++ } ++ *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; + } else if (ret != -EINVAL) { + pr_err("L2C-310 OF prefetch-instr property value is missing\n"); + } +diff --git a/arch/arm64/boot/dts/actions/s700.dtsi b/arch/arm64/boot/dts/actions/s700.dtsi +index 2006ad5424fa6..f8eb72bb41254 100644 +--- a/arch/arm64/boot/dts/actions/s700.dtsi ++++ b/arch/arm64/boot/dts/actions/s700.dtsi +@@ -231,7 +231,7 @@ + + pinctrl: pinctrl@e01b0000 { + compatible = "actions,s700-pinctrl"; +- reg = <0x0 0xe01b0000 0x0 0x1000>; ++ reg = <0x0 0xe01b0000 0x0 0x100>; + clocks = <&cmu CLK_GPIO>; + gpio-controller; + gpio-ranges = <&pinctrl 0 0 136>; +diff --git 
a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi +index 94f75b4465044..73783692e30ee 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi +@@ -41,13 +41,13 @@ + + led-white { + label = "vim3:white:sys"; +- gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>; ++ gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + + led-red { + label = "vim3:red"; +- gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>; ++ gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>; + }; + }; + +diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi +index 561fa792fe5a9..58c08398d4ba7 100644 +--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi ++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi +@@ -617,6 +617,7 @@ + gpc: gpc@303a0000 { + compatible = "fsl,imx8mq-gpc"; + reg = <0x303a0000 0x10000>; ++ interrupts = ; + interrupt-parent = <&gic>; + interrupt-controller; + #interrupt-cells = <3>; +diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi +index a5a12b2599a4a..44a0346133cde 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi +@@ -5,6 +5,7 @@ + + #include + #include ++#include + #include + #include "mt8173.dtsi" + +@@ -294,7 +295,8 @@ + regulator-max-microamp = <4400000>; + regulator-ramp-delay = <10000>; + regulator-always-on; +- regulator-allowed-modes = <0 1>; ++ regulator-allowed-modes = ; + }; + + da9211_vgpu_reg: BUCKB { +@@ -431,12 +433,11 @@ + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&nor_gpio1_pins>; +- bus-width = <8>; +- max-frequency = <50000000>; +- non-removable; ++ + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; ++ spi-max-frequency = <50000000>; + }; + }; + +diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi +index 67cae5f9e47e6..75687442d5827 100644 +--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi +@@ -229,14 +229,14 @@ + }; + + thermal-zones { +- cpu0_1-thermal { ++ cpu0-1-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&tsens 5>; + + trips { +- cpu0_1_alert0: trip-point@0 { ++ cpu0_1_alert0: trip-point0 { + temperature = <75000>; + hysteresis = <2000>; + type = "passive"; +@@ -259,7 +259,7 @@ + }; + }; + +- cpu2_3-thermal { ++ cpu2-3-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + +@@ -1052,7 +1052,7 @@ + reg-names = "mdp_phys"; + + interrupt-parent = <&mdss>; +- interrupts = <0 0>; ++ interrupts = <0>; + + clocks = <&gcc GCC_MDSS_AHB_CLK>, + <&gcc GCC_MDSS_AXI_CLK>, +@@ -1084,7 +1084,7 @@ + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; +- interrupts = <4 0>; ++ interrupts = <4>; + + assigned-clocks = <&gcc BYTE0_CLK_SRC>, + <&gcc PCLK0_CLK_SRC>; +diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi +index 188fff2095f11..8626b3a50eda7 100644 +--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi +@@ -335,7 +335,7 @@ + blsp2_uart2: serial@f995e000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0xf995e000 0x1000>; +- interrupt = ; ++ interrupts = ; + clock-names = "core", "iface"; + clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>, + <&gcc GCC_BLSP2_AHB_CLK>; +diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi 
b/arch/arm64/boot/dts/qcom/pm8916.dtsi +index 0bcdf04711079..adf9a5988cdc2 100644 +--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi ++++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi +@@ -119,7 +119,7 @@ + + wcd_codec: codec@f000 { + compatible = "qcom,pm8916-wcd-analog-codec"; +- reg = <0xf000 0x200>; ++ reg = <0xf000>; + reg-names = "pmic-codec-core"; + clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>; + clock-names = "mclk"; +diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi +index d46b3833e52fd..a6be72d8f6fde 100644 +--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi ++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi +@@ -2618,7 +2618,7 @@ + + system-cache-controller@9200000 { + compatible = "qcom,sc7180-llcc"; +- reg = <0 0x09200000 0 0x200000>, <0 0x09600000 0 0x50000>; ++ reg = <0 0x09200000 0 0x50000>, <0 0x09600000 0 0x50000>; + reg-names = "llcc_base", "llcc_broadcast_base"; + interrupts = ; + }; +@@ -2785,7 +2785,7 @@ + power-domains = <&rpmhpd SC7180_CX>; + + interrupt-parent = <&mdss>; +- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; ++ interrupts = <0>; + + status = "disabled"; + +@@ -2833,7 +2833,7 @@ + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; +- interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; ++ interrupts = <4>; + + clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>, + <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>, +diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +index a2a98680ccf53..99d33955270ec 100644 +--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts ++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +@@ -451,16 +451,16 @@ + port@0 { + reg = <0>; + +- lt9611_out: endpoint { +- remote-endpoint = <&hdmi_con>; ++ lt9611_a: endpoint { ++ remote-endpoint = <&dsi0_out>; + }; + }; + +- port@1 { +- reg = <1>; ++ port@2 { ++ reg = <2>; + +- lt9611_a: endpoint { +- remote-endpoint = <&dsi0_out>; ++ lt9611_out: endpoint { ++ remote-endpoint = <&hdmi_con>; + }; + }; + }; +diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi +index 2884577dcb777..eca81cffd2c19 100644 +--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi +@@ -1093,8 +1093,8 @@ + qup_opp_table: qup-opp-table { + compatible = "operating-points-v2"; + +- opp-19200000 { +- opp-hz = /bits/ 64 <19200000>; ++ opp-50000000 { ++ opp-hz = /bits/ 64 <50000000>; + required-opps = <&rpmhpd_opp_min_svs>; + }; + +@@ -1107,6 +1107,11 @@ + opp-hz = /bits/ 64 <100000000>; + required-opps = <&rpmhpd_opp_svs>; + }; ++ ++ opp-128000000 { ++ opp-hz = /bits/ 64 <128000000>; ++ required-opps = <&rpmhpd_opp_nom>; ++ }; + }; + + qupv3_id_0: geniqup@8c0000 { +diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi +index b86a7ead30067..ab8680c6672e4 100644 +--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi ++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi +@@ -767,7 +767,7 @@ + + usb_1_hsphy: phy@88e2000 { + compatible = "qcom,sm8150-usb-hs-phy", +- "qcom,usb-snps-hs-7nm-phy"; ++ "qcom,usb-snps-hs-7nm-phy"; + reg = <0 0x088e2000 0 0x400>; + status = "disabled"; + #phy-cells = <0>; +@@ -833,7 +833,7 @@ + + assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_PRIM_MASTER_CLK>; +- assigned-clock-rates = <19200000>, <150000000>; ++ assigned-clock-rates = <19200000>, <200000000>; + + interrupts = , + , +diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts +index 6894f8490dae7..6e2f7ae1d6211 100644 +--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts ++++ 
b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts +@@ -17,7 +17,7 @@ + compatible = "qcom,sm8250-mtp"; + + aliases { +- serial0 = &uart2; ++ serial0 = &uart12; + }; + + chosen { +@@ -371,7 +371,7 @@ + gpio-reserved-ranges = <28 4>, <40 4>; + }; + +-&uart2 { ++&uart12 { + status = "okay"; + }; + +diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi +index 377172e8967b7..e7d139e1a6cec 100644 +--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi ++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi +@@ -935,11 +935,13 @@ + status = "disabled"; + }; + +- uart2: serial@a90000 { ++ uart12: serial@a90000 { + compatible = "qcom,geni-debug-uart"; + reg = <0x0 0x00a90000 0x0 0x4000>; + clock-names = "se"; + clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&qup_uart12_default>; + interrupts = ; + status = "disabled"; + }; +@@ -1880,6 +1882,13 @@ + bias-disable; + }; + }; ++ ++ qup_uart12_default: qup-uart12-default { ++ mux { ++ pins = "gpio34", "gpio35"; ++ function = "qup12"; ++ }; ++ }; + }; + + adsp: remoteproc@17300000 { +diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi +index 42171190cce46..065e8fe3a071c 100644 +--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi ++++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi +@@ -1214,9 +1214,8 @@ + reg = <0 0xe6ea0000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 210>; +- dmas = <&dmac1 0x43>, <&dmac1 0x42>, +- <&dmac2 0x43>, <&dmac2 0x42>; +- dma-names = "tx", "rx", "tx", "rx"; ++ dmas = <&dmac0 0x43>, <&dmac0 0x42>; ++ dma-names = "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 210>; + #address-cells = <1>; +diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi +index 1991bdc36792f..27f74df8efbde 100644 +--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi ++++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi +@@ -1192,9 +1192,8 @@ + reg = <0 0xe6ea0000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 210>; +- dmas = <&dmac1 0x43>, <&dmac1 0x42>, +- <&dmac2 0x43>, <&dmac2 0x42>; +- dma-names = "tx", "rx", "tx", "rx"; ++ dmas = <&dmac0 0x43>, <&dmac0 0x42>; ++ dma-names = "tx", "rx"; + power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; + resets = <&cpg 210>; + #address-cells = <1>; +diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts +index e8fc01d97adad..6f7490efc438b 100644 +--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts ++++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts +@@ -404,11 +404,12 @@ + }; + + &serdes_ln_ctrl { +- idle-states = , , +- , , +- , , +- , , +- , , , ; ++ idle-states = , , ++ , , ++ , , ++ , , ++ , , ++ , ; + }; + + &serdes_wiz3 { +diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +index 12ceea9b3c9ae..63d221aee9bc0 100644 +--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi ++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +@@ -6,7 +6,7 @@ + */ + #include + #include +-#include ++#include + + &cbass_main { + msmc_ram: sram@70000000 { +@@ -38,11 +38,12 @@ + <0x40b0 0x3>, <0x40b4 0x3>, /* SERDES3 lane0/1 select */ + <0x40c0 0x3>, <0x40c4 0x3>, <0x40c8 0x3>, <0x40cc 0x3>; + /* SERDES4 lane0/1/2/3 select */ +- idle-states = , , +- , , +- , , +- , , +- , , , ; ++ idle-states = , , ++ , , ++ , , ++ , , ++ , , ++ , ; + }; + + usb_serdes_mux: mux-controller@4000 { +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi 
b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +index 3ec99f13c259e..a6d869727a92e 100644 +--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi ++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +@@ -501,7 +501,7 @@ + }; + + i2c0: i2c@ff020000 { +- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10"; ++ compatible = "cdns,i2c-r1p14"; + status = "disabled"; + interrupt-parent = <&gic>; + interrupts = <0 17 4>; +@@ -512,7 +512,7 @@ + }; + + i2c1: i2c@ff030000 { +- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10"; ++ compatible = "cdns,i2c-r1p14"; + status = "disabled"; + interrupt-parent = <&gic>; + interrupts = <0 18 4>; +diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h +index 0bc46149e4917..4b39293d0f72d 100644 +--- a/arch/arm64/include/asm/insn.h ++++ b/arch/arm64/include/asm/insn.h +@@ -359,9 +359,13 @@ __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) + __AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000) + __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) + __AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000) ++__AARCH64_INSN_FUNCS(br_auth, 0xFEFFF800, 0xD61F0800) + __AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000) ++__AARCH64_INSN_FUNCS(blr_auth, 0xFEFFF800, 0xD63F0800) + __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000) ++__AARCH64_INSN_FUNCS(ret_auth, 0xFFFFFBFF, 0xD65F0BFF) + __AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0) ++__AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF) + __AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000) + __AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F) + __AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000) +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h +index afa722504bfde..1ded73189874d 100644 +--- a/arch/arm64/include/asm/memory.h ++++ b/arch/arm64/include/asm/memory.h +@@ -164,7 +164,6 @@ + extern u64 vabits_actual; + #define PAGE_END (_PAGE_END(vabits_actual)) + +-extern s64 physvirt_offset; + extern s64 memstart_addr; + /* PHYS_OFFSET - the physical address of the start of memory. 
*/ + #define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; }) +@@ -240,7 +239,7 @@ static inline const void *__tag_set(const void *addr, u8 tag) + */ + #define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1))) + +-#define __lm_to_phys(addr) (((addr) + physvirt_offset)) ++#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET) + #define __kimg_to_phys(addr) ((addr) - kimage_voffset) + + #define __virt_to_phys_nodebug(x) ({ \ +@@ -258,7 +257,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); + #define __phys_addr_symbol(x) __pa_symbol_nodebug(x) + #endif /* CONFIG_DEBUG_VIRTUAL */ + +-#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset)) ++#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET) + #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset)) + + /* +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index d5d3fbe739534..88233d42d9c29 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -23,6 +23,8 @@ + #define VMALLOC_START (MODULES_END) + #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) + ++#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) ++ + #define FIRST_USER_ADDRESS 0UL + + #ifndef __ASSEMBLY__ +@@ -33,8 +35,6 @@ + #include + #include + +-extern struct page *vmemmap; +- + extern void __pte_error(const char *file, int line, unsigned long val); + extern void __pmd_error(const char *file, int line, unsigned long val); + extern void __pud_error(const char *file, int line, unsigned long val); +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c +index 560ba69e13c11..fe3a7695a4202 100644 +--- a/arch/arm64/kernel/cpu_errata.c ++++ b/arch/arm64/kernel/cpu_errata.c +@@ -234,14 +234,17 @@ static int detect_harden_bp_fw(void) + smccc_end = NULL; + break; + +-#if IS_ENABLED(CONFIG_KVM) + case SMCCC_CONDUIT_SMC: + cb = call_smc_arch_workaround_1; ++#if IS_ENABLED(CONFIG_KVM) + smccc_start = __smccc_workaround_1_smc; + smccc_end = __smccc_workaround_1_smc + + __SMCCC_WORKAROUND_1_SMC_SZ; +- break; ++#else ++ smccc_start = NULL; ++ smccc_end = NULL; + #endif ++ break; + + default: + return -1; +diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c +index a107375005bc9..ccc8c9e22b258 100644 +--- a/arch/arm64/kernel/insn.c ++++ b/arch/arm64/kernel/insn.c +@@ -176,7 +176,7 @@ bool __kprobes aarch64_insn_uses_literal(u32 insn) + + bool __kprobes aarch64_insn_is_branch(u32 insn) + { +- /* b, bl, cb*, tb*, b.cond, br, blr */ ++ /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */ + + return aarch64_insn_is_b(insn) || + aarch64_insn_is_bl(insn) || +@@ -185,8 +185,11 @@ bool __kprobes aarch64_insn_is_branch(u32 insn) + aarch64_insn_is_tbz(insn) || + aarch64_insn_is_tbnz(insn) || + aarch64_insn_is_ret(insn) || ++ aarch64_insn_is_ret_auth(insn) || + aarch64_insn_is_br(insn) || ++ aarch64_insn_is_br_auth(insn) || + aarch64_insn_is_blr(insn) || ++ aarch64_insn_is_blr_auth(insn) || + aarch64_insn_is_bcond(insn); + } + +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c +index 462f9a9cc44be..481d48e3872b8 100644 +--- a/arch/arm64/kernel/perf_event.c ++++ b/arch/arm64/kernel/perf_event.c +@@ -532,6 +532,11 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event) + + static inline void armv8pmu_enable_counter(u32 mask) + { ++ /* ++ * Make sure event configuration register writes are visible before we ++ * enable the counter. 
++ * */ ++ isb(); + write_sysreg(mask, pmcntenset_el0); + } + +diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c +index 263d5fba4c8a3..c541fb48886e3 100644 +--- a/arch/arm64/kernel/probes/decode-insn.c ++++ b/arch/arm64/kernel/probes/decode-insn.c +@@ -29,7 +29,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn) + aarch64_insn_is_msr_imm(insn) || + aarch64_insn_is_msr_reg(insn) || + aarch64_insn_is_exception(insn) || +- aarch64_insn_is_eret(insn)) ++ aarch64_insn_is_eret(insn) || ++ aarch64_insn_is_eret_auth(insn)) + return false; + + /* +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c +index 481d22c32a2e7..324f0e0894f6e 100644 +--- a/arch/arm64/mm/init.c ++++ b/arch/arm64/mm/init.c +@@ -54,12 +54,6 @@ + s64 memstart_addr __ro_after_init = -1; + EXPORT_SYMBOL(memstart_addr); + +-s64 physvirt_offset __ro_after_init; +-EXPORT_SYMBOL(physvirt_offset); +- +-struct page *vmemmap __ro_after_init; +-EXPORT_SYMBOL(vmemmap); +- + /* + * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of + * memory as some devices, namely the Raspberry Pi 4, have peripherals with +@@ -290,20 +284,6 @@ void __init arm64_memblock_init(void) + memstart_addr = round_down(memblock_start_of_DRAM(), + ARM64_MEMSTART_ALIGN); + +- physvirt_offset = PHYS_OFFSET - PAGE_OFFSET; +- +- vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)); +- +- /* +- * If we are running with a 52-bit kernel VA config on a system that +- * does not support it, we have to offset our vmemmap and physvirt_offset +- * s.t. we avoid the 52-bit portion of the direct linear map +- */ +- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) { +- vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT; +- physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48); +- } +- + /* + * Remove the memory that we will not be able to cover with the + * linear mapping. Take care not to clip the kernel which may be +@@ -318,6 +298,16 @@ void __init arm64_memblock_init(void) + memblock_remove(0, memstart_addr); + } + ++ /* ++ * If we are running with a 52-bit kernel VA config on a system that ++ * does not support it, we have to place the available physical ++ * memory in the 48-bit addressable part of the linear region, i.e., ++ * we have to move it upward. Since memstart_addr represents the ++ * physical address of PAGE_OFFSET, we have to *subtract* from it. ++ */ ++ if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) ++ memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52); ++ + /* + * Apply the memory limit if it was set. 
Since the kernel may be loaded
+	 * high up in memory, add back the kernel region that must be accessible
+diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
+index 9ef4ec0aea008..59f7dfe50a4d0 100644
+--- a/arch/m68k/coldfire/device.c
++++ b/arch/m68k/coldfire/device.c
+@@ -554,7 +554,7 @@ static struct platform_device mcf_edma = {
+ };
+ #endif /* IS_ENABLED(CONFIG_MCF_EDMA) */
+ 
+-#if IS_ENABLED(CONFIG_MMC)
++#ifdef MCFSDHC_BASE
+ static struct mcf_esdhc_platform_data mcf_esdhc_data = {
+ 	.max_bus_width = 4,
+ 	.cd_type = ESDHC_CD_NONE,
+@@ -579,7 +579,7 @@ static struct platform_device mcf_esdhc = {
+ 	.resource = mcf_esdhc_resources,
+ 	.dev.platform_data = &mcf_esdhc_data,
+ };
+-#endif /* IS_ENABLED(CONFIG_MMC) */
++#endif /* MCFSDHC_BASE */
+ 
+ static struct platform_device *mcf_devices[] __initdata = {
+ 	&mcf_uart,
+@@ -613,7 +613,7 @@ static struct platform_device *mcf_devices[] __initdata = {
+ #if IS_ENABLED(CONFIG_MCF_EDMA)
+ 	&mcf_edma,
+ #endif
+-#if IS_ENABLED(CONFIG_MMC)
++#ifdef MCFSDHC_BASE
+ 	&mcf_esdhc,
+ #endif
+ };
+diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
+index 2e87a9b6d312f..63bce836b9f10 100644
+--- a/arch/microblaze/include/asm/Kbuild
++++ b/arch/microblaze/include/asm/Kbuild
+@@ -1,7 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ generated-y += syscall_table.h
+ generic-y += extable.h
+-generic-y += hw_irq.h
+ generic-y += kvm_para.h
+ generic-y += local64.h
+ generic-y += mcs_spinlock.h
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 787e829b6f25c..997da0221780b 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -980,7 +980,7 @@ config PPC_MEM_KEYS
+ config PPC_SECURE_BOOT
+ 	prompt "Enable secure boot support"
+ 	bool
+-	depends on PPC_POWERNV
++	depends on PPC_POWERNV || PPC_PSERIES
+ 	depends on IMA_ARCH_POLICY
+ 	imply IMA_SECURE_AND_OR_TRUSTED_BOOT
+ 	help
+diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
+index de14b1a34d568..9652756b0694c 100644
+--- a/arch/powerpc/include/asm/asm-prototypes.h
++++ b/arch/powerpc/include/asm/asm-prototypes.h
+@@ -144,7 +144,9 @@ void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+ 
+ /* Patch sites */
+-extern s32 patch__call_flush_branch_caches;
++extern s32 patch__call_flush_branch_caches1;
++extern s32 patch__call_flush_branch_caches2;
++extern s32 patch__call_flush_branch_caches3;
+ extern s32 patch__flush_count_cache_return;
+ extern s32 patch__flush_link_stack_return;
+ extern s32 patch__call_kvm_flush_link_stack;
+diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+index 082b988087011..b3ca542f871ec 100644
+--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
++++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+@@ -13,20 +13,19 @@
+  */
+ #define MAX_EA_BITS_PER_CONTEXT		46
+ 
+-#define REGION_SHIFT		(MAX_EA_BITS_PER_CONTEXT - 2)
+ 
+ /*
+- * Our page table limit us to 64TB. Hence for the kernel mapping,
+- * each MAP area is limited to 16 TB.
+- * The four map areas are: linear mapping, vmap, IO and vmemmap
++ * Our page table limits us to 64TB. For 64TB physical memory, we only need 64GB
++ * of vmemmap space. To better support sparse memory layout, we use 61TB
++ * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmemmap. 
+ */ ++#define REGION_SHIFT (40) + #define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT) + + /* +- * Define the address range of the kernel non-linear virtual area +- * 16TB ++ * Define the address range of the kernel non-linear virtual area (61TB) + */ +-#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000) ++#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000) + + #ifndef __ASSEMBLY__ + #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE) +diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h +index b392384a3b150..86173bfc39feb 100644 +--- a/arch/powerpc/include/asm/book3s/64/mmu.h ++++ b/arch/powerpc/include/asm/book3s/64/mmu.h +@@ -85,7 +85,7 @@ extern unsigned int mmu_base_pid; + /* + * memory block size used with radix translation. + */ +-extern unsigned int __ro_after_init radix_mem_block_size; ++extern unsigned long __ro_after_init radix_mem_block_size; + + #define PRTB_SIZE_SHIFT (mmu_pid_bits + 4) + #define PRTB_ENTRIES (1ul << mmu_pid_bits) +diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h +index 32a15dc49e8ca..ade681c1d4095 100644 +--- a/arch/powerpc/include/asm/cputable.h ++++ b/arch/powerpc/include/asm/cputable.h +@@ -483,7 +483,7 @@ static inline void cpu_feature_keys_init(void) { } + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ + CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ +- CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \ ++ CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \ + CPU_FTR_DAWR | CPU_FTR_DAWR1) + #define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ +diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h +index 17ccc6474ab6f..030a19d922132 100644 +--- a/arch/powerpc/include/asm/drmem.h ++++ b/arch/powerpc/include/asm/drmem.h +@@ -8,14 +8,13 @@ + #ifndef _ASM_POWERPC_LMB_H + #define _ASM_POWERPC_LMB_H + ++#include ++ + struct drmem_lmb { + u64 base_addr; + u32 drc_index; + u32 aa_index; + u32 flags; +-#ifdef CONFIG_MEMORY_HOTPLUG +- int nid; +-#endif + }; + + struct drmem_lmb_info { +@@ -26,8 +25,22 @@ struct drmem_lmb_info { + + extern struct drmem_lmb_info *drmem_info; + ++static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb, ++ const struct drmem_lmb *start) ++{ ++ /* ++ * DLPAR code paths can take several milliseconds per element ++ * when interacting with firmware. Ensure that we don't ++ * unfairly monopolize the CPU. 
++ */ ++ if (((++lmb - start) % 16) == 0) ++ cond_resched(); ++ ++ return lmb; ++} ++ + #define for_each_drmem_lmb_in_range(lmb, start, end) \ +- for ((lmb) = (start); (lmb) < (end); (lmb)++) ++ for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start)) + + #define for_each_drmem_lmb(lmb) \ + for_each_drmem_lmb_in_range((lmb), \ +@@ -105,22 +118,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb) + lmb->aa_index = 0xffffffff; + } + +-#ifdef CONFIG_MEMORY_HOTPLUG +-static inline void lmb_set_nid(struct drmem_lmb *lmb) +-{ +- lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr); +-} +-static inline void lmb_clear_nid(struct drmem_lmb *lmb) +-{ +- lmb->nid = -1; +-} +-#else +-static inline void lmb_set_nid(struct drmem_lmb *lmb) +-{ +-} +-static inline void lmb_clear_nid(struct drmem_lmb *lmb) +-{ +-} +-#endif +- + #endif /* _ASM_POWERPC_LMB_H */ +diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h +index db206a7f38e24..9b68eafebf439 100644 +--- a/arch/powerpc/include/asm/hw_breakpoint.h ++++ b/arch/powerpc/include/asm/hw_breakpoint.h +@@ -42,6 +42,7 @@ struct arch_hw_breakpoint { + #else + #define HW_BREAKPOINT_SIZE 0x8 + #endif ++#define HW_BREAKPOINT_SIZE_QUADWORD 0x10 + + #define DABR_MAX_LEN 8 + #define DAWR_MAX_LEN 512 +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h +index 88fb88491fe9f..5647006ed373e 100644 +--- a/arch/powerpc/include/asm/reg.h ++++ b/arch/powerpc/include/asm/reg.h +@@ -817,7 +817,7 @@ + #define THRM1_TIN (1 << 31) + #define THRM1_TIV (1 << 30) + #define THRM1_THRES(x) ((x&0x7f)<<23) +-#define THRM3_SITV(x) ((x&0x3fff)<<1) ++#define THRM3_SITV(x) ((x & 0x1fff) << 1) + #define THRM1_TID (1<<2) + #define THRM1_TIE (1<<1) + #define THRM1_V (1<<0) +diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h +index 85580b30aba48..7546402d796af 100644 +--- a/arch/powerpc/include/asm/svm.h ++++ b/arch/powerpc/include/asm/svm.h +@@ -15,6 +15,8 @@ static inline bool is_secure_guest(void) + return mfmsr() & MSR_S; + } + ++void __init svm_swiotlb_init(void); ++ + void dtl_cache_ctor(void *addr); + #define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL) + +@@ -25,6 +27,8 @@ static inline bool is_secure_guest(void) + return false; + } + ++static inline void svm_swiotlb_init(void) {} ++ + #define get_dtl_cache_ctor() NULL + + #endif /* CONFIG_PPC_SVM */ +diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h +index fbc6f3002f236..d97f061fecac0 100644 +--- a/arch/powerpc/include/asm/tlb.h ++++ b/arch/powerpc/include/asm/tlb.h +@@ -66,19 +66,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm) + return false; + return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)); + } +-static inline void mm_reset_thread_local(struct mm_struct *mm) +-{ +- WARN_ON(atomic_read(&mm->context.copros) > 0); +- /* +- * It's possible for mm_access to take a reference on mm_users to +- * access the remote mm from another thread, but it's not allowed +- * to set mm_cpumask, so mm_users may be > 1 here. 
+-	 */
+-	WARN_ON(current->mm != mm);
+-	atomic_set(&mm->context.active_cpus, 1);
+-	cpumask_clear(mm_cpumask(mm));
+-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+-}
+ #else /* CONFIG_PPC_BOOK3S_64 */
+ static inline int mm_is_thread_local(struct mm_struct *mm)
+ {
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index 2aa89c6b28967..0d704f1e07739 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -120,9 +120,16 @@ extern void __restore_cpu_e6500(void);
+ 				 PPC_FEATURE2_DARN | \
+ 				 PPC_FEATURE2_SCV)
+ #define COMMON_USER_POWER10	COMMON_USER_POWER9
+-#define COMMON_USER2_POWER10	(COMMON_USER2_POWER9 | \
+-				 PPC_FEATURE2_ARCH_3_1 | \
+-				 PPC_FEATURE2_MMA)
++#define COMMON_USER2_POWER10	(PPC_FEATURE2_ARCH_3_1 | \
++				 PPC_FEATURE2_MMA | \
++				 PPC_FEATURE2_ARCH_3_00 | \
++				 PPC_FEATURE2_HAS_IEEE128 | \
++				 PPC_FEATURE2_DARN | \
++				 PPC_FEATURE2_SCV | \
++				 PPC_FEATURE2_ARCH_2_07 | \
++				 PPC_FEATURE2_DSCR | \
++				 PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
++				 PPC_FEATURE2_VEC_CRYPTO)
+ 
+ #ifdef CONFIG_PPC_BOOK3E_64
+ #define COMMON_USER_BOOKE	(COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 733e40eba4ebe..2f3846192ec7d 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -430,7 +430,11 @@ _ASM_NOKPROBE_SYMBOL(save_nvgprs);
+ 
+ #define FLUSH_COUNT_CACHE	\
+ 1:	nop;			\
+-	patch_site 1b, patch__call_flush_branch_caches
++	patch_site 1b, patch__call_flush_branch_caches1; \
++1:	nop;			\
++	patch_site 1b, patch__call_flush_branch_caches2; \
++1:	nop;			\
++	patch_site 1b, patch__call_flush_branch_caches3
+ 
+ .macro nops number
+ 	.rept \number
+@@ -512,7 +516,7 @@ _GLOBAL(_switch)
+ 
+ 	kuap_check_amr r9, r10
+ 
+-	FLUSH_COUNT_CACHE
++	FLUSH_COUNT_CACHE	/* Clobbers r9, ctr */
+ 
+ 	/*
+ 	 * On SMP kernels, care must be taken because a task may be
+diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
+index 1f4a1efa00744..f6b24838ca3c0 100644
+--- a/arch/powerpc/kernel/hw_breakpoint.c
++++ b/arch/powerpc/kernel/hw_breakpoint.c
+@@ -520,9 +520,17 @@ static bool ea_hw_range_overlaps(unsigned long ea, int size,
+ 				 struct arch_hw_breakpoint *info)
+ {
+ 	unsigned long hw_start_addr, hw_end_addr;
++	unsigned long align_size = HW_BREAKPOINT_SIZE;
+ 
+-	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
+-	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
++	/*
++	 * On p10 predecessors, quadword is handled differently than
++	 * other instructions. 
++ */ ++ if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16) ++ align_size = HW_BREAKPOINT_SIZE_QUADWORD; ++ ++ hw_start_addr = ALIGN_DOWN(info->address, align_size); ++ hw_end_addr = ALIGN(info->address + info->len, align_size); + + return ((ea < hw_end_addr) && (ea + size > hw_start_addr)); + } +@@ -636,6 +644,8 @@ static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, + if (*type == CACHEOP) { + *size = cache_op_size(); + *ea &= ~(*size - 1); ++ } else if (*type == LOAD_VMX || *type == STORE_VMX) { ++ *ea &= ~(*size - 1); + } + } + +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c +index bf21ebd361900..3fdad93368858 100644 +--- a/arch/powerpc/kernel/irq.c ++++ b/arch/powerpc/kernel/irq.c +@@ -214,7 +214,7 @@ void replay_soft_interrupts(void) + struct pt_regs regs; + + ppc_save_regs(®s); +- regs.softe = IRQS_ALL_DISABLED; ++ regs.softe = IRQS_ENABLED; + + again: + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) +@@ -368,6 +368,12 @@ notrace void arch_local_irq_restore(unsigned long mask) + } + } + ++ /* ++ * Disable preempt here, so that the below preempt_enable will ++ * perform resched if required (a replayed interrupt may set ++ * need_resched). ++ */ ++ preempt_disable(); + irq_soft_mask_set(IRQS_ALL_DISABLED); + trace_hardirqs_off(); + +@@ -377,6 +383,7 @@ notrace void arch_local_irq_restore(unsigned long mask) + trace_hardirqs_on(); + irq_soft_mask_set(IRQS_ENABLED); + __hard_irq_enable(); ++ preempt_enable(); + } + EXPORT_SYMBOL(arch_local_irq_restore); + +diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c +index 697c7e4b5877f..8bd8d8de5c40b 100644 +--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c ++++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c +@@ -219,6 +219,7 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf + brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE); + brk.type = HW_BRK_TYPE_TRANSLATE; + brk.len = DABR_MAX_LEN; ++ brk.hw_len = DABR_MAX_LEN; + if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) + brk.type |= HW_BRK_TYPE_READ; + if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c +index c9876aab31421..e4e1a94ccf6a6 100644 +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -430,30 +430,44 @@ device_initcall(stf_barrier_debugfs_init); + + static void update_branch_cache_flush(void) + { ++ u32 *site; ++ + #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE ++ site = &patch__call_kvm_flush_link_stack; + // This controls the branch from guest_exit_cont to kvm_flush_link_stack + if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { +- patch_instruction_site(&patch__call_kvm_flush_link_stack, +- ppc_inst(PPC_INST_NOP)); ++ patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); + } else { + // Could use HW flush, but that could also flush count cache +- patch_branch_site(&patch__call_kvm_flush_link_stack, +- (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); ++ patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); + } + #endif + ++ // Patch out the bcctr first, then nop the rest ++ site = &patch__call_flush_branch_caches3; ++ patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); ++ site = &patch__call_flush_branch_caches2; ++ patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); ++ site = &patch__call_flush_branch_caches1; ++ patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); ++ + // This controls the branch from _switch to 
flush_branch_caches + if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE && + link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { +- patch_instruction_site(&patch__call_flush_branch_caches, +- ppc_inst(PPC_INST_NOP)); ++ // Nothing to be done ++ + } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW && + link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) { +- patch_instruction_site(&patch__call_flush_branch_caches, +- ppc_inst(PPC_INST_BCCTR_FLUSH)); ++ // Patch in the bcctr last ++ site = &patch__call_flush_branch_caches1; ++ patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff ++ site = &patch__call_flush_branch_caches2; ++ patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9 ++ site = &patch__call_flush_branch_caches3; ++ patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH)); ++ + } else { +- patch_branch_site(&patch__call_flush_branch_caches, +- (u64)&flush_branch_caches, BRANCH_SET_LINK); ++ patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK); + + // If we just need to flush the link stack, early return + if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) { +diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c +index e2ab8a111b693..0b4694b8d2482 100644 +--- a/arch/powerpc/kernel/tau_6xx.c ++++ b/arch/powerpc/kernel/tau_6xx.c +@@ -13,13 +13,14 @@ + */ + + #include +-#include + #include + #include + #include + #include + #include + #include ++#include ++#include + + #include + #include +@@ -39,9 +40,7 @@ static struct tau_temp + unsigned char grew; + } tau[NR_CPUS]; + +-struct timer_list tau_timer; +- +-#undef DEBUG ++static bool tau_int_enable; + + /* TODO: put these in a /proc interface, with some sanity checks, and maybe + * dynamic adjustment to minimize # of interrupts */ +@@ -50,72 +49,49 @@ struct timer_list tau_timer; + #define step_size 2 /* step size when temp goes out of range */ + #define window_expand 1 /* expand the window by this much */ + /* configurable values for shrinking the window */ +-#define shrink_timer 2*HZ /* period between shrinking the window */ ++#define shrink_timer 2000 /* period between shrinking the window */ + #define min_window 2 /* minimum window size, degrees C */ + + static void set_thresholds(unsigned long cpu) + { +-#ifdef CONFIG_TAU_INT +- /* +- * setup THRM1, +- * threshold, valid bit, enable interrupts, interrupt when below threshold +- */ +- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); ++ u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0; + +- /* setup THRM2, +- * threshold, valid bit, enable interrupts, interrupt when above threshold +- */ +- mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); +-#else +- /* same thing but don't enable interrupts */ +- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID); +- mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V); +-#endif ++ /* setup THRM1, threshold, valid bit, interrupt when below threshold */ ++ mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID); ++ ++ /* setup THRM2, threshold, valid bit, interrupt when above threshold */ ++ mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie); + } + + static void TAUupdate(int cpu) + { +- unsigned thrm; +- +-#ifdef DEBUG +- printk("TAUupdate "); +-#endif ++ u32 thrm; ++ u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V; + + /* if both thresholds are crossed, the step_sizes cancel out + * and the window winds up getting expanded twice. 
*/ +- if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */ +- if(thrm & THRM1_TIN){ /* crossed low threshold */ +- if (tau[cpu].low >= step_size){ +- tau[cpu].low -= step_size; +- tau[cpu].high -= (step_size - window_expand); +- } +- tau[cpu].grew = 1; +-#ifdef DEBUG +- printk("low threshold crossed "); +-#endif ++ thrm = mfspr(SPRN_THRM1); ++ if ((thrm & bits) == bits) { ++ mtspr(SPRN_THRM1, 0); ++ ++ if (tau[cpu].low >= step_size) { ++ tau[cpu].low -= step_size; ++ tau[cpu].high -= (step_size - window_expand); + } ++ tau[cpu].grew = 1; ++ pr_debug("%s: low threshold crossed\n", __func__); + } +- if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */ +- if(thrm & THRM1_TIN){ /* crossed high threshold */ +- if (tau[cpu].high <= 127-step_size){ +- tau[cpu].low += (step_size - window_expand); +- tau[cpu].high += step_size; +- } +- tau[cpu].grew = 1; +-#ifdef DEBUG +- printk("high threshold crossed "); +-#endif ++ thrm = mfspr(SPRN_THRM2); ++ if ((thrm & bits) == bits) { ++ mtspr(SPRN_THRM2, 0); ++ ++ if (tau[cpu].high <= 127 - step_size) { ++ tau[cpu].low += (step_size - window_expand); ++ tau[cpu].high += step_size; + } ++ tau[cpu].grew = 1; ++ pr_debug("%s: high threshold crossed\n", __func__); + } +- +-#ifdef DEBUG +- printk("grew = %d\n", tau[cpu].grew); +-#endif +- +-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */ +- set_thresholds(cpu); +-#endif +- + } + + #ifdef CONFIG_TAU_INT +@@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs) + static void tau_timeout(void * info) + { + int cpu; +- unsigned long flags; + int size; + int shrink; + +- /* disabling interrupts *should* be okay */ +- local_irq_save(flags); + cpu = smp_processor_id(); + +-#ifndef CONFIG_TAU_INT +- TAUupdate(cpu); +-#endif ++ if (!tau_int_enable) ++ TAUupdate(cpu); ++ ++ /* Stop thermal sensor comparisons and interrupts */ ++ mtspr(SPRN_THRM3, 0); + + size = tau[cpu].high - tau[cpu].low; + if (size > min_window && ! tau[cpu].grew) { +@@ -173,32 +148,26 @@ static void tau_timeout(void * info) + + set_thresholds(cpu); + +- /* +- * Do the enable every time, since otherwise a bunch of (relatively) +- * complex sleep code needs to be added. One mtspr every time +- * tau_timeout is called is probably not a big deal. +- * +- * Enable thermal sensor and set up sample interval timer +- * need 20 us to do the compare.. until a nice 'cpu_speed' function +- * call is implemented, just assume a 500 mhz clock. It doesn't really +- * matter if we take too long for a compare since it's all interrupt +- * driven anyway. +- * +- * use a extra long time.. (60 us @ 500 mhz) ++ /* Restart thermal sensor comparisons and interrupts. ++ * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet" ++ * recommends that "the maximum value be set in THRM3 under all ++ * conditions." 
+ */ +- mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E); +- +- local_irq_restore(flags); ++ mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E); + } + +-static void tau_timeout_smp(struct timer_list *unused) +-{ ++static struct workqueue_struct *tau_workq; + +- /* schedule ourselves to be run again */ +- mod_timer(&tau_timer, jiffies + shrink_timer) ; ++static void tau_work_func(struct work_struct *work) ++{ ++ msleep(shrink_timer); + on_each_cpu(tau_timeout, NULL, 0); ++ /* schedule ourselves to be run again */ ++ queue_work(tau_workq, work); + } + ++DECLARE_WORK(tau_work, tau_work_func); ++ + /* + * setup the TAU + * +@@ -231,21 +200,19 @@ static int __init TAU_init(void) + return 1; + } + ++ tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) && ++ !strcmp(cur_cpu_spec->platform, "ppc750"); + +- /* first, set up the window shrinking timer */ +- timer_setup(&tau_timer, tau_timeout_smp, 0); +- tau_timer.expires = jiffies + shrink_timer; +- add_timer(&tau_timer); ++ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0); ++ if (!tau_workq) ++ return -ENOMEM; + + on_each_cpu(TAU_init_smp, NULL, 0); + +- printk("Thermal assist unit "); +-#ifdef CONFIG_TAU_INT +- printk("using interrupts, "); +-#else +- printk("using timers, "); +-#endif +- printk("shrink_timer: %d jiffies\n", shrink_timer); ++ queue_work(tau_workq, &tau_work); ++ ++ pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n", ++ tau_int_enable ? "interrupts" : "workqueue", shrink_timer); + tau_initialized = 1; + + return 0; +diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c +index d5f0c10d752a3..aae8550379bae 100644 +--- a/arch/powerpc/mm/book3s64/radix_pgtable.c ++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c +@@ -34,7 +34,7 @@ + + unsigned int mmu_pid_bits; + unsigned int mmu_base_pid; +-unsigned int radix_mem_block_size __ro_after_init; ++unsigned long radix_mem_block_size __ro_after_init; + + static __ref void *early_alloc_pgtable(unsigned long size, int nid, + unsigned long region_start, unsigned long region_end) +diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c +index 0d233763441fd..143b4fd396f08 100644 +--- a/arch/powerpc/mm/book3s64/radix_tlb.c ++++ b/arch/powerpc/mm/book3s64/radix_tlb.c +@@ -645,19 +645,29 @@ static void do_exit_flush_lazy_tlb(void *arg) + struct mm_struct *mm = arg; + unsigned long pid = mm->context.id; + ++ /* ++ * A kthread could have done a mmget_not_zero() after the flushing CPU ++ * checked mm_is_singlethreaded, and be in the process of ++ * kthread_use_mm when interrupted here. In that case, current->mm will ++ * be set to mm, because kthread_use_mm() setting ->mm and switching to ++ * the mm is done with interrupts off. ++ */ + if (current->mm == mm) +- return; /* Local CPU */ ++ goto out_flush; + + if (current->active_mm == mm) { +- /* +- * Must be a kernel thread because sender is single-threaded. 
+- */ +- BUG_ON(current->mm); ++ WARN_ON_ONCE(current->mm != NULL); ++ /* Is a kernel thread and is using mm as the lazy tlb */ + mmgrab(&init_mm); +- switch_mm(mm, &init_mm, current); + current->active_mm = &init_mm; ++ switch_mm_irqs_off(mm, &init_mm, current); + mmdrop(mm); + } ++ ++ atomic_dec(&mm->context.active_cpus); ++ cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm)); ++ ++out_flush: + _tlbiel_pid(pid, RIC_FLUSH_ALL); + } + +@@ -672,7 +682,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm) + */ + smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb, + (void *)mm, 1); +- mm_reset_thread_local(mm); + } + + void radix__flush_tlb_mm(struct mm_struct *mm) +diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c +index b2eeea39684ca..9af3832c9d8dc 100644 +--- a/arch/powerpc/mm/drmem.c ++++ b/arch/powerpc/mm/drmem.c +@@ -389,10 +389,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop) + if (!drmem_info->lmbs) + return; + +- for_each_drmem_lmb(lmb) { ++ for_each_drmem_lmb(lmb) + read_drconf_v1_cell(lmb, &prop); +- lmb_set_nid(lmb); +- } + } + + static void __init init_drmem_v2_lmbs(const __be32 *prop) +@@ -437,8 +435,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop) + + lmb->aa_index = dr_cell.aa_index; + lmb->flags = dr_cell.flags; +- +- lmb_set_nid(lmb); + } + } + } +diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c +index fb294046e00e4..929716ea21e9c 100644 +--- a/arch/powerpc/mm/kasan/kasan_init_32.c ++++ b/arch/powerpc/mm/kasan/kasan_init_32.c +@@ -127,8 +127,7 @@ void __init kasan_mmu_init(void) + { + int ret; + +- if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) || +- IS_ENABLED(CONFIG_KASAN_VMALLOC)) { ++ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) { + ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); + + if (ret) +@@ -139,11 +138,11 @@ void __init kasan_mmu_init(void) + void __init kasan_init(void) + { + struct memblock_region *reg; ++ int ret; + + for_each_memblock(memory, reg) { + phys_addr_t base = reg->base; + phys_addr_t top = min(base + reg->size, total_lowmem); +- int ret; + + if (base >= top) + continue; +@@ -153,6 +152,13 @@ void __init kasan_init(void) + panic("kasan: kasan_init_region() failed"); + } + ++ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { ++ ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); ++ ++ if (ret) ++ panic("kasan: kasan_init_shadow_page_tables() failed"); ++ } ++ + kasan_remap_early_shadow_ro(); + + clear_page(kasan_early_shadow_page); +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c +index 42e25874f5a8f..ddc32cc1b6cfc 100644 +--- a/arch/powerpc/mm/mem.c ++++ b/arch/powerpc/mm/mem.c +@@ -49,6 +49,7 @@ + #include + #include + #include ++#include + + #include + +@@ -282,7 +283,10 @@ void __init mem_init(void) + * back to to-down. 
+ */ + memblock_set_bottom_up(true); +- swiotlb_init(0); ++ if (is_secure_guest()) ++ svm_swiotlb_init(); ++ else ++ swiotlb_init(0); + #endif + + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); +diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h +index e608f9db12ddc..8965b4463d433 100644 +--- a/arch/powerpc/perf/hv-gpci-requests.h ++++ b/arch/powerpc/perf/hv-gpci-requests.h +@@ -95,7 +95,7 @@ REQUEST(__field(0, 8, partition_id) + + #define REQUEST_NAME system_performance_capabilities + #define REQUEST_NUM 0x40 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff" ++#define REQUEST_IDX_KIND "starting_index=0xffffffff" + #include I(REQUEST_BEGIN) + REQUEST(__field(0, 1, perf_collect_privileged) + __field(0x1, 1, capability_mask) +@@ -223,7 +223,7 @@ REQUEST(__field(0, 2, partition_id) + + #define REQUEST_NAME system_hypervisor_times + #define REQUEST_NUM 0xF0 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff" ++#define REQUEST_IDX_KIND "starting_index=0xffffffff" + #include I(REQUEST_BEGIN) + REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors) + __count(0x8, 8, time_spent_processing_virtual_processor_timers) +@@ -234,7 +234,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors) + + #define REQUEST_NAME system_tlbie_count_and_time + #define REQUEST_NUM 0xF4 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff" ++#define REQUEST_IDX_KIND "starting_index=0xffffffff" + #include I(REQUEST_BEGIN) + REQUEST(__count(0, 8, tlbie_instructions_issued) + /* +diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c +index 964437adec185..2848904df6383 100644 +--- a/arch/powerpc/perf/isa207-common.c ++++ b/arch/powerpc/perf/isa207-common.c +@@ -288,6 +288,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) + + mask |= CNST_PMC_MASK(pmc); + value |= CNST_PMC_VAL(pmc); ++ ++ /* ++ * PMC5 and PMC6 are used to count cycles and instructions and ++ * they do not support most of the constraint bits. Add a check ++ * to exclude PMC5/6 from most of the constraints except for ++ * EBB/BHRB. ++ */ ++ if (pmc >= 5) ++ goto ebb_bhrb; + } + + if (pmc <= 4) { +@@ -357,6 +366,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) + } + } + ++ebb_bhrb: + if (!pmc && ebb) + /* EBB events must specify the PMC */ + return -1; +diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig +index fb7515b4fa9c6..b439b027a42f1 100644 +--- a/arch/powerpc/platforms/Kconfig ++++ b/arch/powerpc/platforms/Kconfig +@@ -223,12 +223,11 @@ config TAU + temperature within 2-4 degrees Celsius. This option shows the current + on-die temperature in /proc/cpuinfo if the cpu supports it. + +- Unfortunately, on some chip revisions, this sensor is very inaccurate +- and in many cases, does not work at all, so don't assume the cpu +- temp is actually what /proc/cpuinfo says it is. ++ Unfortunately, this sensor is very inaccurate when uncalibrated, so ++ don't assume the cpu temp is actually what /proc/cpuinfo says it is. + + config TAU_INT +- bool "Interrupt driven TAU driver (DANGEROUS)" ++ bool "Interrupt driven TAU driver (EXPERIMENTAL)" + depends on TAU + help + The TAU supports an interrupt driven mode which causes an interrupt +@@ -236,12 +235,7 @@ config TAU_INT + to get notified the temp has exceeded a range. With this option off, + a timer is used to re-check the temperature periodically. 
+ +- However, on some cpus it appears that the TAU interrupt hardware +- is buggy and can cause a situation which would lead unexplained hard +- lockups. +- +- Unless you are extending the TAU driver, or enjoy kernel/hardware +- debugging, leave this option off. ++ If in doubt, say N here. + + config TAU_AVERAGE + bool "Average high and low temp" +diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c +index 543c816fa99ef..0e6693bacb7e7 100644 +--- a/arch/powerpc/platforms/powernv/opal-dump.c ++++ b/arch/powerpc/platforms/powernv/opal-dump.c +@@ -318,15 +318,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj, + return count; + } + +-static struct dump_obj *create_dump_obj(uint32_t id, size_t size, +- uint32_t type) ++static void create_dump_obj(uint32_t id, size_t size, uint32_t type) + { + struct dump_obj *dump; + int rc; + + dump = kzalloc(sizeof(*dump), GFP_KERNEL); + if (!dump) +- return NULL; ++ return; + + dump->kobj.kset = dump_kset; + +@@ -346,21 +345,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size, + rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id); + if (rc) { + kobject_put(&dump->kobj); +- return NULL; ++ return; + } + ++ /* ++ * As soon as the sysfs file for this dump is created/activated there is ++ * a chance the opal_errd daemon (or any userspace) might read and ++ * acknowledge the dump before kobject_uevent() is called. If that ++ * happens then there is a potential race between ++ * dump_ack_store->kobject_put() and kobject_uevent() which leads to a ++ * use-after-free of a kernfs object resulting in a kernel crash. ++ * ++ * To avoid that, we need to take a reference on behalf of the bin file, ++ * so that our reference remains valid while we call kobject_uevent(). ++ * We then drop our reference before exiting the function, leaving the ++ * bin file to drop the last reference (if it hasn't already). ++ */ ++ ++ /* Take a reference for the bin file */ ++ kobject_get(&dump->kobj); + rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr); +- if (rc) { ++ if (rc == 0) { ++ kobject_uevent(&dump->kobj, KOBJ_ADD); ++ ++ pr_info("%s: New platform dump. ID = 0x%x Size %u\n", ++ __func__, dump->id, dump->size); ++ } else { ++ /* Drop reference count taken for bin file */ + kobject_put(&dump->kobj); +- return NULL; + } + +- pr_info("%s: New platform dump. 
ID = 0x%x Size %u\n", +- __func__, dump->id, dump->size); +- +- kobject_uevent(&dump->kobj, KOBJ_ADD); +- +- return dump; ++ /* Drop our reference */ ++ kobject_put(&dump->kobj); ++ return; + } + + static irqreturn_t process_dump(int irq, void *data) +diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c +index 5d545b78111f9..0ea976d1cac47 100644 +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c +@@ -354,25 +354,32 @@ static int dlpar_add_lmb(struct drmem_lmb *); + + static int dlpar_remove_lmb(struct drmem_lmb *lmb) + { ++ struct memory_block *mem_block; + unsigned long block_sz; + int rc; + + if (!lmb_is_removable(lmb)) + return -EINVAL; + ++ mem_block = lmb_to_memblock(lmb); ++ if (mem_block == NULL) ++ return -EINVAL; ++ + rc = dlpar_offline_lmb(lmb); +- if (rc) ++ if (rc) { ++ put_device(&mem_block->dev); + return rc; ++ } + + block_sz = pseries_memory_block_size(); + +- __remove_memory(lmb->nid, lmb->base_addr, block_sz); ++ __remove_memory(mem_block->nid, lmb->base_addr, block_sz); ++ put_device(&mem_block->dev); + + /* Update memory regions for memory remove */ + memblock_remove(lmb->base_addr, block_sz); + + invalidate_lmb_associativity_index(lmb); +- lmb_clear_nid(lmb); + lmb->flags &= ~DRCONF_MEM_ASSIGNED; + + return 0; +@@ -591,7 +598,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index) + static int dlpar_add_lmb(struct drmem_lmb *lmb) + { + unsigned long block_sz; +- int rc; ++ int nid, rc; + + if (lmb->flags & DRCONF_MEM_ASSIGNED) + return -EINVAL; +@@ -602,11 +609,13 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb) + return rc; + } + +- lmb_set_nid(lmb); + block_sz = memory_block_size_bytes(); + ++ /* Find the node id for this address. */ ++ nid = memory_add_physaddr_to_nid(lmb->base_addr); ++ + /* Add the memory */ +- rc = __add_memory(lmb->nid, lmb->base_addr, block_sz); ++ rc = __add_memory(nid, lmb->base_addr, block_sz); + if (rc) { + invalidate_lmb_associativity_index(lmb); + return rc; +@@ -614,9 +623,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb) + + rc = dlpar_online_lmb(lmb); + if (rc) { +- __remove_memory(lmb->nid, lmb->base_addr, block_sz); ++ __remove_memory(nid, lmb->base_addr, block_sz); + invalidate_lmb_associativity_index(lmb); +- lmb_clear_nid(lmb); + } else { + lmb->flags |= DRCONF_MEM_ASSIGNED; + } +diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c +index a88a707a608aa..27268370dee00 100644 +--- a/arch/powerpc/platforms/pseries/papr_scm.c ++++ b/arch/powerpc/platforms/pseries/papr_scm.c +@@ -785,7 +785,8 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, + static ssize_t perf_stats_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +- int index, rc; ++ int index; ++ ssize_t rc; + struct seq_buf s; + struct papr_scm_perf_stat *stat; + struct papr_scm_perf_stats *stats; +@@ -820,7 +821,7 @@ static ssize_t perf_stats_show(struct device *dev, + + free_stats: + kfree(stats); +- return rc ? rc : seq_buf_used(&s); ++ return rc ? 
rc : (ssize_t)seq_buf_used(&s); + } + DEVICE_ATTR_ADMIN_RO(perf_stats); + +@@ -897,6 +898,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) + p->bus_desc.of_node = p->pdev->dev.of_node; + p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL); + ++ /* Set the dimm command family mask to accept PDSMs */ ++ set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask); ++ + if (!p->bus_desc.provider_name) + return -ENOMEM; + +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c +index 13c86a292c6d7..b2b245b25edba 100644 +--- a/arch/powerpc/platforms/pseries/ras.c ++++ b/arch/powerpc/platforms/pseries/ras.c +@@ -521,18 +521,55 @@ int pSeries_system_reset_exception(struct pt_regs *regs) + return 0; /* need to perform reset */ + } + ++static int mce_handle_err_realmode(int disposition, u8 error_type) ++{ ++#ifdef CONFIG_PPC_BOOK3S_64 ++ if (disposition == RTAS_DISP_NOT_RECOVERED) { ++ switch (error_type) { ++ case MC_ERROR_TYPE_SLB: ++ case MC_ERROR_TYPE_ERAT: ++ /* ++ * Store the old slb content in paca before flushing. ++ * Print this when we go to virtual mode. ++ * There are chances that we may hit MCE again if there ++ * is a parity error on the SLB entry we trying to read ++ * for saving. Hence limit the slb saving to single ++ * level of recursion. ++ */ ++ if (local_paca->in_mce == 1) ++ slb_save_contents(local_paca->mce_faulty_slbs); ++ flush_and_reload_slb(); ++ disposition = RTAS_DISP_FULLY_RECOVERED; ++ break; ++ default: ++ break; ++ } ++ } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) { ++ /* Platform corrected itself but could be degraded */ ++ pr_err("MCE: limited recovery, system may be degraded\n"); ++ disposition = RTAS_DISP_FULLY_RECOVERED; ++ } ++#endif ++ return disposition; ++} + +-static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp) ++static int mce_handle_err_virtmode(struct pt_regs *regs, ++ struct rtas_error_log *errp, ++ struct pseries_mc_errorlog *mce_log, ++ int disposition) + { + struct mce_error_info mce_err = { 0 }; +- unsigned long eaddr = 0, paddr = 0; +- struct pseries_errorlog *pseries_log; +- struct pseries_mc_errorlog *mce_log; +- int disposition = rtas_error_disposition(errp); + int initiator = rtas_error_initiator(errp); + int severity = rtas_error_severity(errp); ++ unsigned long eaddr = 0, paddr = 0; + u8 error_type, err_sub_type; + ++ if (!mce_log) ++ goto out; ++ ++ error_type = mce_log->error_type; ++ err_sub_type = rtas_mc_error_sub_type(mce_log); ++ + if (initiator == RTAS_INITIATOR_UNKNOWN) + mce_err.initiator = MCE_INITIATOR_UNKNOWN; + else if (initiator == RTAS_INITIATOR_CPU) +@@ -571,18 +608,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp) + mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN; + mce_err.error_class = MCE_ECLASS_UNKNOWN; + +- if (!rtas_error_extended(errp)) +- goto out; +- +- pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE); +- if (pseries_log == NULL) +- goto out; +- +- mce_log = (struct pseries_mc_errorlog *)pseries_log->data; +- error_type = mce_log->error_type; +- err_sub_type = rtas_mc_error_sub_type(mce_log); +- +- switch (mce_log->error_type) { ++ switch (error_type) { + case MC_ERROR_TYPE_UE: + mce_err.error_type = MCE_ERROR_TYPE_UE; + mce_common_process_ue(regs, &mce_err); +@@ -682,37 +708,31 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp) + mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN; + break; + } ++out: ++ save_mce_event(regs, disposition == 
RTAS_DISP_FULLY_RECOVERED, ++ &mce_err, regs->nip, eaddr, paddr); ++ return disposition; ++} + +-#ifdef CONFIG_PPC_BOOK3S_64 +- if (disposition == RTAS_DISP_NOT_RECOVERED) { +- switch (error_type) { +- case MC_ERROR_TYPE_SLB: +- case MC_ERROR_TYPE_ERAT: +- /* +- * Store the old slb content in paca before flushing. +- * Print this when we go to virtual mode. +- * There are chances that we may hit MCE again if there +- * is a parity error on the SLB entry we trying to read +- * for saving. Hence limit the slb saving to single +- * level of recursion. +- */ +- if (local_paca->in_mce == 1) +- slb_save_contents(local_paca->mce_faulty_slbs); +- flush_and_reload_slb(); +- disposition = RTAS_DISP_FULLY_RECOVERED; +- break; +- default: +- break; +- } +- } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) { +- /* Platform corrected itself but could be degraded */ +- printk(KERN_ERR "MCE: limited recovery, system may " +- "be degraded\n"); +- disposition = RTAS_DISP_FULLY_RECOVERED; +- } +-#endif ++static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp) ++{ ++ struct pseries_errorlog *pseries_log; ++ struct pseries_mc_errorlog *mce_log = NULL; ++ int disposition = rtas_error_disposition(errp); ++ u8 error_type; ++ ++ if (!rtas_error_extended(errp)) ++ goto out; ++ ++ pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE); ++ if (!pseries_log) ++ goto out; ++ ++ mce_log = (struct pseries_mc_errorlog *)pseries_log->data; ++ error_type = mce_log->error_type; ++ ++ disposition = mce_handle_err_realmode(disposition, error_type); + +-out: + /* + * Enable translation as we will be accessing per-cpu variables + * in save_mce_event() which may fall outside RMO region, also +@@ -723,10 +743,10 @@ out: + * Note: All the realmode handling like flushing SLB entries for + * SLB multihit is done by now. + */ ++out: + mtmsr(mfmsr() | MSR_IR | MSR_DR); +- save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED, +- &mce_err, regs->nip, eaddr, paddr); +- ++ disposition = mce_handle_err_virtmode(regs, errp, mce_log, ++ disposition); + return disposition; + } + +diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c +index bbb97169bf63e..6268545947b83 100644 +--- a/arch/powerpc/platforms/pseries/rng.c ++++ b/arch/powerpc/platforms/pseries/rng.c +@@ -36,6 +36,7 @@ static __init int rng_init(void) + + ppc_md.get_random_seed = pseries_get_random_long; + ++ of_node_put(dn); + return 0; + } + machine_subsys_initcall(pseries, rng_init); +diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c +index e6d7a344d9f22..7b739cc7a8a93 100644 +--- a/arch/powerpc/platforms/pseries/svm.c ++++ b/arch/powerpc/platforms/pseries/svm.c +@@ -7,6 +7,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -35,6 +36,31 @@ static int __init init_svm(void) + } + machine_early_initcall(pseries, init_svm); + ++/* ++ * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it ++ * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have ++ * any addressing limitation, we don't need to allocate it in low addresses. 
++ */ ++void __init svm_swiotlb_init(void) ++{ ++ unsigned char *vstart; ++ unsigned long bytes, io_tlb_nslabs; ++ ++ io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT); ++ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); ++ ++ bytes = io_tlb_nslabs << IO_TLB_SHIFT; ++ ++ vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE); ++ if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false)) ++ return; ++ ++ if (io_tlb_start) ++ memblock_free_early(io_tlb_start, ++ PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); ++ panic("SVM: Cannot allocate SWIOTLB buffer"); ++} ++ + int set_memory_encrypted(unsigned long addr, int numpages) + { + if (!PAGE_ALIGNED(addr)) +diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c +index ad8117148ea3b..21b9d1bf39ff6 100644 +--- a/arch/powerpc/sysdev/xics/icp-hv.c ++++ b/arch/powerpc/sysdev/xics/icp-hv.c +@@ -174,6 +174,7 @@ int icp_hv_init(void) + + icp_ops = &icp_hv_ops; + ++ of_node_put(np); + return 0; + } + +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c +index df7bca00f5ec9..55c43a6c91112 100644 +--- a/arch/powerpc/xmon/xmon.c ++++ b/arch/powerpc/xmon/xmon.c +@@ -969,6 +969,7 @@ static void insert_cpu_bpts(void) + brk.address = dabr[i].address; + brk.type = (dabr[i].enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; + brk.len = 8; ++ brk.hw_len = 8; + __set_breakpoint(i, &brk); + } + } +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c +index be4b8532dd3c4..0a41827928769 100644 +--- a/arch/s390/net/bpf_jit_comp.c ++++ b/arch/s390/net/bpf_jit_comp.c +@@ -50,7 +50,6 @@ struct bpf_jit { + int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ + int tail_call_start; /* Tail call start offset */ + int excnt; /* Number of exception table entries */ +- int labels[1]; /* Labels for local jumps */ + }; + + #define SEEN_MEM BIT(0) /* use mem[] for temporary storage */ +@@ -229,18 +228,18 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) + REG_SET_SEEN(b3); \ + }) + +-#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \ ++#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \ + ({ \ +- int rel = (jit->labels[label] - jit->prg) >> 1; \ ++ unsigned int rel = (int)((target) - jit->prg) / 2; \ + _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \ + (op2) | (mask) << 12); \ + REG_SET_SEEN(b1); \ + REG_SET_SEEN(b2); \ + }) + +-#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \ ++#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \ + ({ \ +- int rel = (jit->labels[label] - jit->prg) >> 1; \ ++ unsigned int rel = (int)((target) - jit->prg) / 2; \ + _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \ + (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \ + REG_SET_SEEN(b1); \ +@@ -1282,7 +1281,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, + EMIT4(0xb9040000, BPF_REG_0, REG_2); + break; + } +- case BPF_JMP | BPF_TAIL_CALL: ++ case BPF_JMP | BPF_TAIL_CALL: { ++ int patch_1_clrj, patch_2_clij, patch_3_brc; ++ + /* + * Implicit input: + * B1: pointer to ctx +@@ -1300,16 +1301,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, + EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, + offsetof(struct bpf_array, map.max_entries)); + /* if ((u32)%b3 >= (u32)%w1) goto out; */ +- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { +- /* clrj %b3,%w1,0xa,label0 */ +- EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3, +- REG_W1, 0, 0xa); +- } else { +- /* clr %b3,%w1 */ +- 
EMIT2(0x1500, BPF_REG_3, REG_W1); +- /* brcl 0xa,label0 */ +- EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]); +- } ++ /* clrj %b3,%w1,0xa,out */ ++ patch_1_clrj = jit->prg; ++ EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa, ++ jit->prg); + + /* + * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT) +@@ -1324,16 +1319,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, + EMIT4_IMM(0xa7080000, REG_W0, 1); + /* laal %w1,%w0,off(%r15) */ + EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off); +- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { +- /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */ +- EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1, +- MAX_TAIL_CALL_CNT, 0, 0x2); +- } else { +- /* clfi %w1,MAX_TAIL_CALL_CNT */ +- EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT); +- /* brcl 0x2,label0 */ +- EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]); +- } ++ /* clij %w1,MAX_TAIL_CALL_CNT,0x2,out */ ++ patch_2_clij = jit->prg; ++ EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT, ++ 2, jit->prg); + + /* + * prog = array->ptrs[index]; +@@ -1348,13 +1337,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, + /* ltg %r1,prog(%b2,%r1) */ + EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2, + REG_1, offsetof(struct bpf_array, ptrs)); +- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { +- /* brc 0x8,label0 */ +- EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]); +- } else { +- /* brcl 0x8,label0 */ +- EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]); +- } ++ /* brc 0x8,out */ ++ patch_3_brc = jit->prg; ++ EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg); + + /* + * Restore registers before calling function +@@ -1371,8 +1356,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, + /* bc 0xf,tail_call_start(%r1) */ + _EMIT4(0x47f01000 + jit->tail_call_start); + /* out: */ +- jit->labels[0] = jit->prg; ++ if (jit->prg_buf) { ++ *(u16 *)(jit->prg_buf + patch_1_clrj + 2) = ++ (jit->prg - patch_1_clrj) >> 1; ++ *(u16 *)(jit->prg_buf + patch_2_clij + 2) = ++ (jit->prg - patch_2_clij) >> 1; ++ *(u16 *)(jit->prg_buf + patch_3_brc + 2) = ++ (jit->prg - patch_3_brc) >> 1; ++ } + break; ++ } + case BPF_JMP | BPF_EXIT: /* return b0 */ + last = (i == fp->len - 1) ? 1 : 0; + if (last) +diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c +index 5967f30141563..c93486a9989bc 100644 +--- a/arch/s390/pci/pci_bus.c ++++ b/arch/s390/pci/pci_bus.c +@@ -197,9 +197,10 @@ void pcibios_bus_add_device(struct pci_dev *pdev) + * With pdev->no_vf_scan the common PCI probing code does not + * perform PF/VF linking. 
+ */ +- if (zdev->vfn) ++ if (zdev->vfn) { + zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn); +- ++ pdev->no_command_memory = 1; ++ } + } + + static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev) +diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c +index 8735c468230a5..555203e3e7b45 100644 +--- a/arch/um/drivers/vector_kern.c ++++ b/arch/um/drivers/vector_kern.c +@@ -1403,7 +1403,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev, + kfree(vp->bpf->filter); + vp->bpf->filter = NULL; + } else { +- vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL); ++ vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC); + if (vp->bpf == NULL) { + netdev_err(dev, "failed to allocate memory for firmware\n"); + goto flash_fail; +@@ -1415,7 +1415,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev, + if (request_firmware(&fw, efl->data, &vdevice->pdev.dev)) + goto flash_fail; + +- vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_KERNEL); ++ vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC); + if (!vp->bpf->filter) + goto free_buffer; + +diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c +index 25eaa6a0c6583..c07436e89e599 100644 +--- a/arch/um/kernel/time.c ++++ b/arch/um/kernel/time.c +@@ -70,13 +70,17 @@ static void time_travel_handle_message(struct um_timetravel_msg *msg, + * read of the message and write of the ACK. + */ + if (mode != TTMH_READ) { ++ bool disabled = irqs_disabled(); ++ ++ BUG_ON(mode == TTMH_IDLE && !disabled); ++ ++ if (disabled) ++ local_irq_enable(); + while (os_poll(1, &time_travel_ext_fd) != 0) { +- if (mode == TTMH_IDLE) { +- BUG_ON(!irqs_disabled()); +- local_irq_enable(); +- local_irq_disable(); +- } ++ /* nothing */ + } ++ if (disabled) ++ local_irq_disable(); + } + + ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg)); +diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c +index c8862696a47b9..7d0394f4ebf97 100644 +--- a/arch/x86/boot/compressed/pgtable_64.c ++++ b/arch/x86/boot/compressed/pgtable_64.c +@@ -5,15 +5,6 @@ + #include "pgtable.h" + #include "../string.h" + +-/* +- * __force_order is used by special_insns.h asm code to force instruction +- * serialization. +- * +- * It is not referenced from the code, but GCC < 5 with -fPIE would fail +- * due to an undefined symbol. Define it to make these ancient GCCs work. 
+- */ +-unsigned long __force_order; +- + #define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */ + #define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */ + +diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c +index fb616203ce427..be50ef8572cce 100644 +--- a/arch/x86/events/amd/iommu.c ++++ b/arch/x86/events/amd/iommu.c +@@ -379,7 +379,7 @@ static __init int _init_events_attrs(void) + while (amd_iommu_v2_event_descs[i].attr.attr.name) + i++; + +- attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL); ++ attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL); + if (!attrs) + return -ENOMEM; + +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c +index 1cbf57dc2ac89..11bbc6590f904 100644 +--- a/arch/x86/events/core.c ++++ b/arch/x86/events/core.c +@@ -1087,8 +1087,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, + + cpuc->event_list[n] = event; + n++; +- if (is_counter_pair(&event->hw)) ++ if (is_counter_pair(&event->hw)) { + cpuc->n_pair++; ++ cpuc->n_txn_pair++; ++ } + } + return n; + } +@@ -1962,6 +1964,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) + + perf_pmu_disable(pmu); + __this_cpu_write(cpu_hw_events.n_txn, 0); ++ __this_cpu_write(cpu_hw_events.n_txn_pair, 0); + } + + /* +@@ -1987,6 +1990,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu) + */ + __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); + __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); ++ __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair)); + perf_pmu_enable(pmu); + } + +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c +index 86848c57b55ed..404315df1e167 100644 +--- a/arch/x86/events/intel/ds.c ++++ b/arch/x86/events/intel/ds.c +@@ -670,9 +670,7 @@ unlock: + + static inline void intel_pmu_drain_pebs_buffer(void) + { +- struct pt_regs regs; +- +- x86_pmu.drain_pebs(®s); ++ x86_pmu.drain_pebs(NULL); + } + + /* +@@ -1737,6 +1735,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, + struct x86_perf_regs perf_regs; + struct pt_regs *regs = &perf_regs.regs; + void *at = get_next_pebs_record_by_bit(base, top, bit); ++ struct pt_regs dummy_iregs; + + if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { + /* +@@ -1749,6 +1748,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event, + } else if (!intel_pmu_save_and_restart(event)) + return; + ++ if (!iregs) ++ iregs = &dummy_iregs; ++ + while (count > 1) { + setup_sample(event, iregs, at, &data, regs); + perf_event_output(event, &data, regs); +@@ -1758,16 +1760,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event, + } + + setup_sample(event, iregs, at, &data, regs); +- +- /* +- * All but the last records are processed. +- * The last one is left to be able to call the overflow handler. +- */ +- if (perf_event_overflow(event, &data, regs)) { +- x86_pmu_stop(event, 0); +- return; ++ if (iregs == &dummy_iregs) { ++ /* ++ * The PEBS records may be drained in the non-overflow context, ++ * e.g., large PEBS + context switch. Perf should treat the ++ * last record the same as other PEBS records, and doesn't ++ * invoke the generic overflow handler. ++ */ ++ perf_event_output(event, &data, regs); ++ } else { ++ /* ++ * All but the last records are processed. ++ * The last one is left to be able to call the overflow handler. 
++ */ ++ if (perf_event_overflow(event, &data, regs)) ++ x86_pmu_stop(event, 0); + } +- + } + + static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) +diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c +index 6a4ca27b2c9e1..4aa735694e030 100644 +--- a/arch/x86/events/intel/uncore_snb.c ++++ b/arch/x86/events/intel/uncore_snb.c +@@ -126,6 +126,10 @@ + #define ICL_UNC_CBO_0_PER_CTR0 0x702 + #define ICL_UNC_CBO_MSR_OFFSET 0x8 + ++/* ICL ARB register */ ++#define ICL_UNC_ARB_PER_CTR 0x3b1 ++#define ICL_UNC_ARB_PERFEVTSEL 0x3b3 ++ + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); + DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); + DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +@@ -313,15 +317,21 @@ void skl_uncore_cpu_init(void) + snb_uncore_arb.ops = &skl_uncore_msr_ops; + } + ++static struct intel_uncore_ops icl_uncore_msr_ops = { ++ .disable_event = snb_uncore_msr_disable_event, ++ .enable_event = snb_uncore_msr_enable_event, ++ .read_counter = uncore_msr_read_counter, ++}; ++ + static struct intel_uncore_type icl_uncore_cbox = { + .name = "cbox", +- .num_counters = 4, ++ .num_counters = 2, + .perf_ctr_bits = 44, + .perf_ctr = ICL_UNC_CBO_0_PER_CTR0, + .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, + .event_mask = SNB_UNC_RAW_EVENT_MASK, + .msr_offset = ICL_UNC_CBO_MSR_OFFSET, +- .ops = &skl_uncore_msr_ops, ++ .ops = &icl_uncore_msr_ops, + .format_group = &snb_uncore_format_group, + }; + +@@ -350,13 +360,25 @@ static struct intel_uncore_type icl_uncore_clockbox = { + .single_fixed = 1, + .event_mask = SNB_UNC_CTL_EV_SEL_MASK, + .format_group = &icl_uncore_clock_format_group, +- .ops = &skl_uncore_msr_ops, ++ .ops = &icl_uncore_msr_ops, + .event_descs = icl_uncore_events, + }; + ++static struct intel_uncore_type icl_uncore_arb = { ++ .name = "arb", ++ .num_counters = 1, ++ .num_boxes = 1, ++ .perf_ctr_bits = 44, ++ .perf_ctr = ICL_UNC_ARB_PER_CTR, ++ .event_ctl = ICL_UNC_ARB_PERFEVTSEL, ++ .event_mask = SNB_UNC_RAW_EVENT_MASK, ++ .ops = &icl_uncore_msr_ops, ++ .format_group = &snb_uncore_format_group, ++}; ++ + static struct intel_uncore_type *icl_msr_uncores[] = { + &icl_uncore_cbox, +- &snb_uncore_arb, ++ &icl_uncore_arb, + &icl_uncore_clockbox, + NULL, + }; +@@ -374,7 +396,6 @@ void icl_uncore_cpu_init(void) + { + uncore_msr_uncores = icl_msr_uncores; + icl_uncore_cbox.num_boxes = icl_get_cbox_num(); +- snb_uncore_arb.ops = &skl_uncore_msr_ops; + } + + enum { +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c +index 62e88ad919ffc..4f5e78a4003be 100644 +--- a/arch/x86/events/intel/uncore_snbep.c ++++ b/arch/x86/events/intel/uncore_snbep.c +@@ -3749,7 +3749,9 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type) + + ret = skx_iio_get_topology(type); + if (ret) +- return ret; ++ goto clear_attr_update; ++ ++ ret = -ENOMEM; + + /* One more for NULL. 
*/ + attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL); +@@ -3781,8 +3783,9 @@ err: + kfree(eas); + kfree(attrs); + kfree(type->topology); ++clear_attr_update: + type->attr_update = NULL; +- return -ENOMEM; ++ return ret; + } + + static void skx_iio_cleanup_mapping(struct intel_uncore_type *type) +@@ -4751,10 +4754,10 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = { + INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"), + + INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"), +- INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"), ++ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), +- INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"), ++ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), + { /* end: all zeroes */ }, + }; +@@ -5212,17 +5215,17 @@ static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = { + INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"), + + INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"), +- INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"), ++ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), +- INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"), ++ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), + + INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"), +- INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "3.814697266e-6"), ++ INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"), +- INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "3.814697266e-6"), ++ INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"), + { /* end: all zeroes */ }, + }; +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h +index 7b68ab5f19e76..0e74235cdac9e 100644 +--- a/arch/x86/events/perf_event.h ++++ b/arch/x86/events/perf_event.h +@@ -210,6 +210,7 @@ struct cpu_hw_events { + they've never been enabled yet */ + int n_txn; /* the # last events in the below arrays; + added in the current transaction */ ++ int n_txn_pair; + int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ + u64 tags[X86_PMC_IDX_MAX]; + +diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h +index 59a3e13204c34..d6e3bb9363d22 100644 +--- a/arch/x86/include/asm/special_insns.h ++++ b/arch/x86/include/asm/special_insns.h +@@ -11,45 +11,47 @@ + #include + + /* +- * Volatile isn't enough to prevent the compiler from reordering the +- * read/write functions for the control registers and messing everything up. +- * A memory clobber would solve the problem, but would prevent reordering of +- * all loads stores around it, which can hurt performance. Solution is to +- * use a variable and mimic reads and writes to it to enforce serialization ++ * The compiler should not reorder volatile asm statements with respect to each ++ * other: they should execute in program order. However GCC 4.9.x and 5.x have ++ * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder ++ * volatile asm. The write functions are not affected since they have memory ++ * clobbers preventing reordering. 
To prevent reads from being reordered with ++ * respect to writes, use a dummy memory operand. + */ +-extern unsigned long __force_order; ++ ++#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL) + + void native_write_cr0(unsigned long val); + + static inline unsigned long native_read_cr0(void) + { + unsigned long val; +- asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); ++ asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER); + return val; + } + + static __always_inline unsigned long native_read_cr2(void) + { + unsigned long val; +- asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); ++ asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER); + return val; + } + + static __always_inline void native_write_cr2(unsigned long val) + { +- asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); ++ asm volatile("mov %0,%%cr2": : "r" (val) : "memory"); + } + + static inline unsigned long __native_read_cr3(void) + { + unsigned long val; +- asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); ++ asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER); + return val; + } + + static inline void native_write_cr3(unsigned long val) + { +- asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); ++ asm volatile("mov %0,%%cr3": : "r" (val) : "memory"); + } + + static inline unsigned long native_read_cr4(void) +@@ -64,10 +66,10 @@ static inline unsigned long native_read_cr4(void) + asm volatile("1: mov %%cr4, %0\n" + "2:\n" + _ASM_EXTABLE(1b, 2b) +- : "=r" (val), "=m" (__force_order) : "0" (0)); ++ : "=r" (val) : "0" (0), __FORCE_ORDER); + #else + /* CR4 always exists on x86_64. */ +- asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); ++ asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER); + #endif + return val; + } +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index c5d6f17d9b9d3..178499f903661 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -359,7 +359,7 @@ void native_write_cr0(unsigned long val) + unsigned long bits_missing = 0; + + set_register: +- asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order)); ++ asm volatile("mov %0,%%cr0": "+r" (val) : : "memory"); + + if (static_branch_likely(&cr_pinning)) { + if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) { +@@ -378,7 +378,7 @@ void native_write_cr4(unsigned long val) + unsigned long bits_changed = 0; + + set_register: +- asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits)); ++ asm volatile("mov %0,%%cr4": "+r" (val) : : "memory"); + + if (static_branch_likely(&cr_pinning)) { + if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) { +diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c +index fc4f8c04bdb56..84eef4fa95990 100644 +--- a/arch/x86/kernel/cpu/mce/core.c ++++ b/arch/x86/kernel/cpu/mce/core.c +@@ -373,42 +373,105 @@ static int msr_to_offset(u32 msr) + return -1; + } + ++__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup, ++ struct pt_regs *regs, int trapnr, ++ unsigned long error_code, ++ unsigned long fault_addr) ++{ ++ pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n", ++ (unsigned int)regs->cx, regs->ip, (void *)regs->ip); ++ ++ show_stack_regs(regs); ++ ++ panic("MCA architectural violation!\n"); ++ ++ while (true) ++ cpu_relax(); ++ ++ return true; ++} ++ + /* MSR access wrappers used for error injection */ +-static u64 mce_rdmsrl(u32 msr) ++static noinstr u64 
mce_rdmsrl(u32 msr) + { +- u64 v; ++ DECLARE_ARGS(val, low, high); + + if (__this_cpu_read(injectm.finished)) { +- int offset = msr_to_offset(msr); ++ int offset; ++ u64 ret; + ++ instrumentation_begin(); ++ ++ offset = msr_to_offset(msr); + if (offset < 0) +- return 0; +- return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); +- } ++ ret = 0; ++ else ++ ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); + +- if (rdmsrl_safe(msr, &v)) { +- WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr); +- /* +- * Return zero in case the access faulted. This should +- * not happen normally but can happen if the CPU does +- * something weird, or if the code is buggy. +- */ +- v = 0; ++ instrumentation_end(); ++ ++ return ret; + } + +- return v; ++ /* ++ * RDMSR on MCA MSRs should not fault. If they do, this is very much an ++ * architectural violation and needs to be reported to hw vendor. Panic ++ * the box to not allow any further progress. ++ */ ++ asm volatile("1: rdmsr\n" ++ "2:\n" ++ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault) ++ : EAX_EDX_RET(val, low, high) : "c" (msr)); ++ ++ ++ return EAX_EDX_VAL(val, low, high); ++} ++ ++__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup, ++ struct pt_regs *regs, int trapnr, ++ unsigned long error_code, ++ unsigned long fault_addr) ++{ ++ pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n", ++ (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax, ++ regs->ip, (void *)regs->ip); ++ ++ show_stack_regs(regs); ++ ++ panic("MCA architectural violation!\n"); ++ ++ while (true) ++ cpu_relax(); ++ ++ return true; + } + +-static void mce_wrmsrl(u32 msr, u64 v) ++static noinstr void mce_wrmsrl(u32 msr, u64 v) + { ++ u32 low, high; ++ + if (__this_cpu_read(injectm.finished)) { +- int offset = msr_to_offset(msr); ++ int offset; ++ ++ instrumentation_begin(); + ++ offset = msr_to_offset(msr); + if (offset >= 0) + *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; ++ ++ instrumentation_end(); ++ + return; + } +- wrmsrl(msr, v); ++ ++ low = (u32)v; ++ high = (u32)(v >> 32); ++ ++ /* See comment in mce_rdmsrl() */ ++ asm volatile("1: wrmsr\n" ++ "2:\n" ++ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault) ++ : : "c" (msr), "a"(low), "d" (high) : "memory"); + } + + /* +diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h +index 6473070b5da49..b122610e9046a 100644 +--- a/arch/x86/kernel/cpu/mce/internal.h ++++ b/arch/x86/kernel/cpu/mce/internal.h +@@ -185,4 +185,14 @@ extern bool amd_filter_mce(struct mce *m); + static inline bool amd_filter_mce(struct mce *m) { return false; }; + #endif + ++__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup, ++ struct pt_regs *regs, int trapnr, ++ unsigned long error_code, ++ unsigned long fault_addr); ++ ++__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup, ++ struct pt_regs *regs, int trapnr, ++ unsigned long error_code, ++ unsigned long fault_addr); ++ + #endif /* __X86_MCE_INTERNAL_H__ */ +diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c +index e1da619add192..567ce09a02868 100644 +--- a/arch/x86/kernel/cpu/mce/severity.c ++++ b/arch/x86/kernel/cpu/mce/severity.c +@@ -9,9 +9,11 @@ + #include + #include + #include +-#include + #include + ++#include ++#include ++ + #include "internal.h" + + /* +@@ -40,9 +42,14 @@ static struct severity { + unsigned char context; + unsigned char excp; + unsigned char covered; 
++ unsigned char cpu_model; ++ unsigned char cpu_minstepping; ++ unsigned char bank_lo, bank_hi; + char *msg; + } severities[] = { + #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c } ++#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h ++#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s + #define KERNEL .context = IN_KERNEL + #define USER .context = IN_USER + #define KERNEL_RECOV .context = IN_KERNEL_RECOV +@@ -97,7 +104,6 @@ static struct severity { + KEEP, "Corrected error", + NOSER, BITCLR(MCI_STATUS_UC) + ), +- + /* + * known AO MCACODs reported via MCE or CMC: + * +@@ -113,6 +119,18 @@ static struct severity { + AO, "Action optional: last level cache writeback error", + SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB) + ), ++ /* ++ * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured ++ * to report uncorrected errors using CMCI with a special signature. ++ * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported ++ * in one of the memory controller banks. ++ * Set severity to "AO" for same action as normal patrol scrub error. ++ */ ++ MCESEV( ++ AO, "Uncorrected Patrol Scrub Error", ++ SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0), ++ MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18) ++ ), + + /* ignore OVER for UCNA */ + MCESEV( +@@ -324,6 +342,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e + continue; + if (s->excp && excp != s->excp) + continue; ++ if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model) ++ continue; ++ if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping) ++ continue; ++ if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi)) ++ continue; + if (msg) + *msg = s->msg; + s->covered = 1; +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c +index 48ce44576947c..ea8d51ec251bb 100644 +--- a/arch/x86/kernel/dumpstack.c ++++ b/arch/x86/kernel/dumpstack.c +@@ -115,7 +115,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl) + unsigned long prologue = regs->ip - PROLOGUE_SIZE; + + if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) { +- printk("%sCode: Bad RIP value.\n", loglvl); ++ printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n", ++ loglvl, prologue); + } else { + printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" + __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes, +diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c +index 61ddc3a5e5c2b..f8ff895aaf7e1 100644 +--- a/arch/x86/kernel/fpu/init.c ++++ b/arch/x86/kernel/fpu/init.c +@@ -243,9 +243,9 @@ static void __init fpu__init_system_ctx_switch(void) + */ + static void __init fpu__init_parse_early_param(void) + { +- char arg[32]; ++ char arg[128]; + char *argptr = arg; +- int bit; ++ int arglen, res, bit; + + #ifdef CONFIG_X86_32 + if (cmdline_find_option_bool(boot_command_line, "no387")) +@@ -268,12 +268,26 @@ static void __init fpu__init_parse_early_param(void) + if (cmdline_find_option_bool(boot_command_line, "noxsaves")) + setup_clear_cpu_cap(X86_FEATURE_XSAVES); + +- if (cmdline_find_option(boot_command_line, "clearcpuid", arg, +- sizeof(arg)) && +- get_option(&argptr, &bit) && +- bit >= 0 && +- bit < NCAPINTS * 32) +- setup_clear_cpu_cap(bit); ++ arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg)); ++ if (arglen <= 0) ++ return; ++ ++ pr_info("Clearing CPUID bits:"); ++ do { ++ res = get_option(&argptr, &bit); ++ if (res == 0 || res == 3) 
++ break; ++ ++ /* If the argument was too long, the last bit may be cut off */ ++ if (res == 1 && arglen >= sizeof(arg)) ++ break; ++ ++ if (bit >= 0 && bit < NCAPINTS * 32) { ++ pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit)); ++ setup_clear_cpu_cap(bit); ++ } ++ } while (res == 2); ++ pr_cont("\n"); + } + + /* +diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c +index 4fc9954a95600..47381666d6a55 100644 +--- a/arch/x86/kernel/nmi.c ++++ b/arch/x86/kernel/nmi.c +@@ -102,7 +102,6 @@ fs_initcall(nmi_warning_debugfs); + + static void nmi_check_duration(struct nmiaction *action, u64 duration) + { +- u64 whole_msecs = READ_ONCE(action->max_duration); + int remainder_ns, decimal_msecs; + + if (duration < nmi_longest_ns || duration < action->max_duration) +@@ -110,12 +109,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration) + + action->max_duration = duration; + +- remainder_ns = do_div(whole_msecs, (1000 * 1000)); ++ remainder_ns = do_div(duration, (1000 * 1000)); + decimal_msecs = remainder_ns / 1000; + + printk_ratelimited(KERN_INFO + "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", +- action->handler, whole_msecs, decimal_msecs); ++ action->handler, duration, decimal_msecs); + } + + static int nmi_handle(unsigned int type, struct pt_regs *regs) +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 2f6510de6b0c0..85111cd0adcd0 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -3606,7 +3606,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt) + u64 tsc_aux = 0; + + if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux)) +- return emulate_gp(ctxt, 0); ++ return emulate_ud(ctxt); + ctxt->dst.val = tsc_aux; + return X86EMUL_CONTINUE; + } +diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c +index d057376bd3d33..698969e18fe35 100644 +--- a/arch/x86/kvm/ioapic.c ++++ b/arch/x86/kvm/ioapic.c +@@ -197,12 +197,9 @@ static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq) + + /* + * If no longer has pending EOI in LAPICs, update +- * EOI for this vetor. ++ * EOI for this vector. 
+ */ + rtc_irq_eoi(ioapic, vcpu, entry->fields.vector); +- kvm_ioapic_update_eoi_one(vcpu, ioapic, +- entry->fields.trig_mode, +- irq); + break; + } + } +diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h +index cfe83d4ae6252..ca0781b41df9d 100644 +--- a/arch/x86/kvm/kvm_cache_regs.h ++++ b/arch/x86/kvm/kvm_cache_regs.h +@@ -7,7 +7,7 @@ + #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS + #define KVM_POSSIBLE_CR4_GUEST_BITS \ + (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ +- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD) ++ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD) + + #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \ + static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\ +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 35cca2e0c8026..8055a486d843d 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -488,6 +488,12 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) + } + } + ++void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec) ++{ ++ apic_clear_irr(vec, vcpu->arch.apic); ++} ++EXPORT_SYMBOL_GPL(kvm_apic_clear_irr); ++ + static inline void apic_set_isr(int vec, struct kvm_lapic *apic) + { + struct kvm_vcpu *vcpu; +@@ -2461,6 +2467,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) + __apic_update_ppr(apic, &ppr); + return apic_has_interrupt_for_ppr(apic, ppr); + } ++EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt); + + int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu) + { +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h +index 754f29beb83e3..4fb86e3a9dd3d 100644 +--- a/arch/x86/kvm/lapic.h ++++ b/arch/x86/kvm/lapic.h +@@ -89,6 +89,7 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len, + bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, + int shorthand, unsigned int dest, int dest_mode); + int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); ++void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec); + bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr); + bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr); + void kvm_apic_update_ppr(struct kvm_vcpu *vcpu); +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c +index 71aa3da2a0b7b..d0ca3ab389520 100644 +--- a/arch/x86/kvm/mmu/mmu.c ++++ b/arch/x86/kvm/mmu/mmu.c +@@ -6376,6 +6376,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) + cond_resched_lock(&kvm->mmu_lock); + } + } ++ kvm_mmu_commit_zap_page(kvm, &invalid_list); + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, rcu_idx); +diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c +index ac830cd508305..381d22daa4acd 100644 +--- a/arch/x86/kvm/svm/avic.c ++++ b/arch/x86/kvm/svm/avic.c +@@ -868,6 +868,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, + * - Tell IOMMU to use legacy mode for this interrupt. + * - Retrieve ga_tag of prior interrupt remapping data. 
+ */ ++ pi.prev_ga_tag = 0; + pi.is_guest_mode = false; + ret = irq_set_vcpu_affinity(host_irq, &pi); + +diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c +index e90bc436f5849..27042c9ea40d6 100644 +--- a/arch/x86/kvm/svm/nested.c ++++ b/arch/x86/kvm/svm/nested.c +@@ -243,7 +243,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb) + } else { + if (!(vmcb->save.cr4 & X86_CR4_PAE) || + !(vmcb->save.cr0 & X86_CR0_PE) || +- (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK)) ++ (vmcb->save.cr3 & MSR_CR3_LONG_MBZ_MASK)) + return false; + } + if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4)) +diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h +index a798e17317094..c0d75b1e06645 100644 +--- a/arch/x86/kvm/svm/svm.h ++++ b/arch/x86/kvm/svm/svm.h +@@ -345,7 +345,7 @@ static inline bool gif_set(struct vcpu_svm *svm) + /* svm.c */ + #define MSR_CR3_LEGACY_RESERVED_MASK 0xfe7U + #define MSR_CR3_LEGACY_PAE_RESERVED_MASK 0x7U +-#define MSR_CR3_LONG_RESERVED_MASK 0xfff0000000000fe7U ++#define MSR_CR3_LONG_MBZ_MASK 0xfff0000000000000U + #define MSR_INVALID 0xffffffffU + + u32 svm_msrpm_offset(u32 msr); +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c +index 1bb6b31eb6466..76ee5553b9d6c 100644 +--- a/arch/x86/kvm/vmx/nested.c ++++ b/arch/x86/kvm/vmx/nested.c +@@ -2408,6 +2408,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) + vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); + vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); + vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); ++ ++ vmx->segment_cache.bitmask = 0; + } + + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & +@@ -3344,8 +3346,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, + prepare_vmcs02_early(vmx, vmcs12); + + if (from_vmentry) { +- if (unlikely(!nested_get_vmcs12_pages(vcpu))) ++ if (unlikely(!nested_get_vmcs12_pages(vcpu))) { ++ vmx_switch_vmcs(vcpu, &vmx->vmcs01); + return NVMX_VMENTRY_KVM_INTERNAL_ERROR; ++ } + + if (nested_vmx_check_vmentry_hw(vcpu)) { + vmx_switch_vmcs(vcpu, &vmx->vmcs01); +@@ -3528,6 +3532,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) + if (unlikely(status != NVMX_VMENTRY_SUCCESS)) + goto vmentry_failed; + ++ /* Emulate processing of posted interrupts on VM-Enter. */ ++ if (nested_cpu_has_posted_intr(vmcs12) && ++ kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { ++ vmx->nested.pi_pending = true; ++ kvm_make_request(KVM_REQ_EVENT, vcpu); ++ kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); ++ } ++ + /* Hide L1D cache contents from the nested guest. 
*/ + vmx->vcpu.arch.l1tf_flush_l1d = true; + +diff --git a/block/blk-core.c b/block/blk-core.c +index 10c08ac506978..0014e7caae3d2 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -803,11 +803,10 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector) + { + char b[BDEVNAME_SIZE]; + +- printk(KERN_INFO "attempt to access beyond end of device\n"); +- printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n", +- bio_devname(bio, b), bio->bi_opf, +- (unsigned long long)bio_end_sector(bio), +- (long long)maxsector); ++ pr_info_ratelimited("attempt to access beyond end of device\n" ++ "%s: rw=%d, want=%llu, limit=%llu\n", ++ bio_devname(bio, b), bio->bi_opf, ++ bio_end_sector(bio), maxsector); + } + + #ifdef CONFIG_FAIL_MAKE_REQUEST +diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c +index 062229395a507..7b52e7657b2d1 100644 +--- a/block/blk-mq-sysfs.c ++++ b/block/blk-mq-sysfs.c +@@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj) + struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, + kobj); + +- cancel_delayed_work_sync(&hctx->run_work); +- + if (hctx->flags & BLK_MQ_F_BLOCKING) + cleanup_srcu_struct(hctx->srcu); + blk_free_flush_queue(hctx->fq); +diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c +index 32d82e23b0953..a1c1e7c611f7b 100644 +--- a/block/blk-mq-tag.c ++++ b/block/blk-mq-tag.c +@@ -59,7 +59,8 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) + static int __blk_mq_get_tag(struct blk_mq_alloc_data *data, + struct sbitmap_queue *bt) + { +- if (!data->q->elevator && !hctx_may_queue(data->hctx, bt)) ++ if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) && ++ !hctx_may_queue(data->hctx, bt)) + return BLK_MQ_NO_TAG; + + if (data->shallow_depth) +diff --git a/block/blk-mq.c b/block/blk-mq.c +index cdced4aca2e81..94a53d779c12b 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -1105,10 +1105,11 @@ static bool __blk_mq_get_driver_tag(struct request *rq) + if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { + bt = &rq->mq_hctx->tags->breserved_tags; + tag_offset = 0; ++ } else { ++ if (!hctx_may_queue(rq->mq_hctx, bt)) ++ return false; + } + +- if (!hctx_may_queue(rq->mq_hctx, bt)) +- return false; + tag = __sbitmap_queue_get(bt); + if (tag == BLK_MQ_NO_TAG) + return false; +@@ -2264,7 +2265,6 @@ queue_exit: + blk_queue_exit(q); + return BLK_QC_T_NONE; + } +-EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */ + + void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, + unsigned int hctx_idx) +diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c +index 7dda709f3ccb6..8c6bafc801dd9 100644 +--- a/block/blk-sysfs.c ++++ b/block/blk-sysfs.c +@@ -934,9 +934,16 @@ static void blk_release_queue(struct kobject *kobj) + + blk_free_queue_stats(q->stats); + +- if (queue_is_mq(q)) ++ if (queue_is_mq(q)) { ++ struct blk_mq_hw_ctx *hctx; ++ int i; ++ + cancel_delayed_work_sync(&q->requeue_work); + ++ queue_for_each_hw_ctx(q, hctx, i) ++ cancel_delayed_work_sync(&hctx->run_work); ++ } ++ + blk_exit_queue(q); + + blk_queue_free_zone_bitmaps(q); +diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c +index 21efa786f09c9..002edfdbb0937 100644 +--- a/crypto/algif_aead.c ++++ b/crypto/algif_aead.c +@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm, + SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); + + skcipher_request_set_sync_tfm(skreq, null_tfm); +- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG, ++ 
skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP, + NULL, NULL); + skcipher_request_set_crypt(skreq, src, dst, len, NULL); + +@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, + areq->outlen = outlen; + + aead_request_set_callback(&areq->cra_u.aead_req, +- CRYPTO_TFM_REQ_MAY_BACKLOG, ++ CRYPTO_TFM_REQ_MAY_SLEEP, + af_alg_async_cb, areq); + err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) : + crypto_aead_decrypt(&areq->cra_u.aead_req); + + /* AIO operation in progress */ +- if (err == -EINPROGRESS || err == -EBUSY) ++ if (err == -EINPROGRESS) + return -EIOCBQUEUED; + + sock_put(sk); + } else { + /* Synchronous operation */ + aead_request_set_callback(&areq->cra_u.aead_req, ++ CRYPTO_TFM_REQ_MAY_SLEEP | + CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &ctx->wait); + err = crypto_wait_req(ctx->enc ? +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index 478f3b8f5bd52..ee8890ee8f332 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -123,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); + + /* AIO operation in progress */ +- if (err == -EINPROGRESS || err == -EBUSY) ++ if (err == -EINPROGRESS) + return -EIOCBQUEUED; + + sock_put(sk); +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index f936530a19b0e..b27b6bf0c1186 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -223,7 +223,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add( + struct binder_work { + struct list_head entry; + +- enum { ++ enum binder_work_type { + BINDER_WORK_TRANSACTION = 1, + BINDER_WORK_TRANSACTION_COMPLETE, + BINDER_WORK_RETURN_ERROR, +@@ -885,27 +885,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked( + return w; + } + +-/** +- * binder_dequeue_work_head() - Dequeues the item at head of list +- * @proc: binder_proc associated with list +- * @list: list to dequeue head +- * +- * Removes the head of the list if there are items on the list +- * +- * Return: pointer dequeued binder_work, NULL if list was empty +- */ +-static struct binder_work *binder_dequeue_work_head( +- struct binder_proc *proc, +- struct list_head *list) +-{ +- struct binder_work *w; +- +- binder_inner_proc_lock(proc); +- w = binder_dequeue_work_head_ilocked(list); +- binder_inner_proc_unlock(proc); +- return w; +-} +- + static void + binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); + static void binder_free_thread(struct binder_thread *thread); +@@ -2344,8 +2323,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, + * file is done when the transaction is torn + * down. + */ +- WARN_ON(failed_at && +- proc->tsk == current->group_leader); + } break; + case BINDER_TYPE_PTR: + /* +@@ -4587,13 +4564,17 @@ static void binder_release_work(struct binder_proc *proc, + struct list_head *list) + { + struct binder_work *w; ++ enum binder_work_type wtype; + + while (1) { +- w = binder_dequeue_work_head(proc, list); ++ binder_inner_proc_lock(proc); ++ w = binder_dequeue_work_head_ilocked(list); ++ wtype = w ? 
w->type : 0; ++ binder_inner_proc_unlock(proc); + if (!w) + return; + +- switch (w->type) { ++ switch (wtype) { + case BINDER_WORK_TRANSACTION: { + struct binder_transaction *t; + +@@ -4627,9 +4608,11 @@ static void binder_release_work(struct binder_proc *proc, + kfree(death); + binder_stats_deleted(BINDER_STAT_DEATH); + } break; ++ case BINDER_WORK_NODE: ++ break; + default: + pr_err("unexpected work type, %d, not freed\n", +- w->type); ++ wtype); + break; + } + } +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c +index b71f9ecddff5d..fff0547c26c53 100644 +--- a/drivers/base/regmap/regmap.c ++++ b/drivers/base/regmap/regmap.c +@@ -711,6 +711,8 @@ struct regmap *__regmap_init(struct device *dev, + if (ret) + goto err_map; + ++ ret = -EINVAL; /* Later error paths rely on this */ ++ + if (config->disable_locking) { + map->lock = map->unlock = regmap_lock_unlock_none; + regmap_debugfs_disable(map); +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 8d2608ddfd087..f88968bcdd6a8 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -2896,6 +2896,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev) + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + kfree(dr); ++ usb_free_urb(urb); + return -ENOMEM; + } + +diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile +index 66e2700c9032a..bc1469778cf87 100644 +--- a/drivers/bus/mhi/core/Makefile ++++ b/drivers/bus/mhi/core/Makefile +@@ -1,3 +1,3 @@ +-obj-$(CONFIG_MHI_BUS) := mhi.o ++obj-$(CONFIG_MHI_BUS) += mhi.o + + mhi-y := init.o main.o pm.o boot.o +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c +index 77b8d551ae7fe..dd559661c15b3 100644 +--- a/drivers/char/ipmi/ipmi_si_intf.c ++++ b/drivers/char/ipmi/ipmi_si_intf.c +@@ -1963,7 +1963,7 @@ static int try_smi_init(struct smi_info *new_smi) + /* Do this early so it's available for logs. 
*/ + if (!new_smi->io.dev) { + pr_err("IPMI interface added with no device\n"); +- rv = EIO; ++ rv = -EIO; + goto out_err; + } + +diff --git a/drivers/char/random.c b/drivers/char/random.c +index d20ba1b104ca3..2a41b21623ae4 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -1277,7 +1277,6 @@ void add_interrupt_randomness(int irq, int irq_flags) + + fast_mix(fast_pool); + add_interrupt_bench(cycles); +- this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]); + + if (unlikely(crng_init == 0)) { + if ((fast_pool->count >= 64) && +diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c +index 5c83e899084ff..cfae2f59df665 100644 +--- a/drivers/clk/at91/clk-main.c ++++ b/drivers/clk/at91/clk-main.c +@@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index) + return -EINVAL; + + regmap_read(regmap, AT91_CKGR_MOR, &tmp); +- tmp &= ~MOR_KEY_MASK; + + if (index && !(tmp & AT91_PMC_MOSCSEL)) +- regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL); ++ tmp = AT91_PMC_MOSCSEL; + else if (!index && (tmp & AT91_PMC_MOSCSEL)) +- regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL); ++ tmp = 0; ++ else ++ return 0; ++ ++ regmap_update_bits(regmap, AT91_CKGR_MOR, ++ AT91_PMC_MOSCSEL | MOR_KEY_MASK, ++ tmp | AT91_PMC_KEY); + + while (!clk_sam9x5_main_ready(regmap)) + cpu_relax(); +diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c +index ab6318c0589e9..3c4c956035954 100644 +--- a/drivers/clk/at91/sam9x60.c ++++ b/drivers/clk/at91/sam9x60.c +@@ -279,7 +279,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) + parent_names[3] = "masterck"; + parent_names[4] = "pllack_divck"; + parent_names[5] = "upllck_divck"; +- for (i = 0; i < 8; i++) { ++ for (i = 0; i < 2; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c +index 3439bc65bb4e3..1ac803e14fa3e 100644 +--- a/drivers/clk/bcm/clk-bcm2835.c ++++ b/drivers/clk/bcm/clk-bcm2835.c +@@ -1338,8 +1338,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, + pll->hw.init = &init; + + ret = devm_clk_hw_register(cprman->dev, &pll->hw); +- if (ret) ++ if (ret) { ++ kfree(pll); + return NULL; ++ } + return &pll->hw; + } + +diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c +index a64aace213c27..7762c5825e77d 100644 +--- a/drivers/clk/imx/clk-imx8mq.c ++++ b/drivers/clk/imx/clk-imx8mq.c +@@ -157,10 +157,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys + "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", }; + + static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", +- "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", }; ++ "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", }; + + static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", +- "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", }; ++ "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", }; + + static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out", + "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; +diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c +index 2ad26cb927fdb..f126b6045afa7 100644 +--- a/drivers/clk/keystone/sci-clk.c 
++++ b/drivers/clk/keystone/sci-clk.c +@@ -522,7 +522,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider) + np = of_find_node_with_property(np, *clk_name); + if (!np) { + clk_name++; +- break; ++ continue; + } + + if (!of_device_is_available(np)) +diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c +index 9766cccf5844c..6e0d3a1667291 100644 +--- a/drivers/clk/mediatek/clk-mt6779.c ++++ b/drivers/clk/mediatek/clk-mt6779.c +@@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = { + "pwm_sel", 19), + GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm", + "pwm_sel", 21), ++ GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0", ++ "uart_sel", 22), + GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1", + "uart_sel", 23), + GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2", +diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c +index 53715e36326c6..9918cb375de30 100644 +--- a/drivers/clk/meson/axg-audio.c ++++ b/drivers/clk/meson/axg-audio.c +@@ -1209,13 +1209,132 @@ static struct clk_hw_onecell_data sm1_audio_hw_onecell_data = { + }; + + +-/* Convenience table to populate regmap in .probe() +- * Note that this table is shared between both AXG and G12A, +- * with spdifout_b clocks being exclusive to G12A. Since those +- * clocks are not declared within the AXG onecell table, we do not +- * feel the need to have separate AXG/G12A regmap tables. +- */ ++/* Convenience table to populate regmap in .probe(). */ + static struct clk_regmap *const axg_clk_regmaps[] = { ++ &ddr_arb, ++ &pdm, ++ &tdmin_a, ++ &tdmin_b, ++ &tdmin_c, ++ &tdmin_lb, ++ &tdmout_a, ++ &tdmout_b, ++ &tdmout_c, ++ &frddr_a, ++ &frddr_b, ++ &frddr_c, ++ &toddr_a, ++ &toddr_b, ++ &toddr_c, ++ &loopback, ++ &spdifin, ++ &spdifout, ++ &resample, ++ &power_detect, ++ &mst_a_mclk_sel, ++ &mst_b_mclk_sel, ++ &mst_c_mclk_sel, ++ &mst_d_mclk_sel, ++ &mst_e_mclk_sel, ++ &mst_f_mclk_sel, ++ &mst_a_mclk_div, ++ &mst_b_mclk_div, ++ &mst_c_mclk_div, ++ &mst_d_mclk_div, ++ &mst_e_mclk_div, ++ &mst_f_mclk_div, ++ &mst_a_mclk, ++ &mst_b_mclk, ++ &mst_c_mclk, ++ &mst_d_mclk, ++ &mst_e_mclk, ++ &mst_f_mclk, ++ &spdifout_clk_sel, ++ &spdifout_clk_div, ++ &spdifout_clk, ++ &spdifin_clk_sel, ++ &spdifin_clk_div, ++ &spdifin_clk, ++ &pdm_dclk_sel, ++ &pdm_dclk_div, ++ &pdm_dclk, ++ &pdm_sysclk_sel, ++ &pdm_sysclk_div, ++ &pdm_sysclk, ++ &mst_a_sclk_pre_en, ++ &mst_b_sclk_pre_en, ++ &mst_c_sclk_pre_en, ++ &mst_d_sclk_pre_en, ++ &mst_e_sclk_pre_en, ++ &mst_f_sclk_pre_en, ++ &mst_a_sclk_div, ++ &mst_b_sclk_div, ++ &mst_c_sclk_div, ++ &mst_d_sclk_div, ++ &mst_e_sclk_div, ++ &mst_f_sclk_div, ++ &mst_a_sclk_post_en, ++ &mst_b_sclk_post_en, ++ &mst_c_sclk_post_en, ++ &mst_d_sclk_post_en, ++ &mst_e_sclk_post_en, ++ &mst_f_sclk_post_en, ++ &mst_a_sclk, ++ &mst_b_sclk, ++ &mst_c_sclk, ++ &mst_d_sclk, ++ &mst_e_sclk, ++ &mst_f_sclk, ++ &mst_a_lrclk_div, ++ &mst_b_lrclk_div, ++ &mst_c_lrclk_div, ++ &mst_d_lrclk_div, ++ &mst_e_lrclk_div, ++ &mst_f_lrclk_div, ++ &mst_a_lrclk, ++ &mst_b_lrclk, ++ &mst_c_lrclk, ++ &mst_d_lrclk, ++ &mst_e_lrclk, ++ &mst_f_lrclk, ++ &tdmin_a_sclk_sel, ++ &tdmin_b_sclk_sel, ++ &tdmin_c_sclk_sel, ++ &tdmin_lb_sclk_sel, ++ &tdmout_a_sclk_sel, ++ &tdmout_b_sclk_sel, ++ &tdmout_c_sclk_sel, ++ &tdmin_a_sclk_pre_en, ++ &tdmin_b_sclk_pre_en, ++ &tdmin_c_sclk_pre_en, ++ &tdmin_lb_sclk_pre_en, ++ &tdmout_a_sclk_pre_en, ++ &tdmout_b_sclk_pre_en, ++ &tdmout_c_sclk_pre_en, ++ &tdmin_a_sclk_post_en, ++ &tdmin_b_sclk_post_en, ++ &tdmin_c_sclk_post_en, ++ &tdmin_lb_sclk_post_en, ++ &tdmout_a_sclk_post_en, 
++ &tdmout_b_sclk_post_en, ++ &tdmout_c_sclk_post_en, ++ &tdmin_a_sclk, ++ &tdmin_b_sclk, ++ &tdmin_c_sclk, ++ &tdmin_lb_sclk, ++ &tdmout_a_sclk, ++ &tdmout_b_sclk, ++ &tdmout_c_sclk, ++ &tdmin_a_lrclk, ++ &tdmin_b_lrclk, ++ &tdmin_c_lrclk, ++ &tdmin_lb_lrclk, ++ &tdmout_a_lrclk, ++ &tdmout_b_lrclk, ++ &tdmout_c_lrclk, ++}; ++ ++static struct clk_regmap *const g12a_clk_regmaps[] = { + &ddr_arb, + &pdm, + &tdmin_a, +@@ -1713,8 +1832,8 @@ static const struct audioclk_data axg_audioclk_data = { + }; + + static const struct audioclk_data g12a_audioclk_data = { +- .regmap_clks = axg_clk_regmaps, +- .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps), ++ .regmap_clks = g12a_clk_regmaps, ++ .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps), + .hw_onecell_data = &g12a_audio_hw_onecell_data, + .reset_offset = AUDIO_SW_RESET, + .reset_num = 26, +diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c +index 9803d44bb1578..b814d44917a5d 100644 +--- a/drivers/clk/meson/g12a.c ++++ b/drivers/clk/meson/g12a.c +@@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = { + &g12a_fclk_div2_div.hw + }, + .num_parents = 1, ++ /* ++ * Similar to fclk_div3, it seems that this clock is used by ++ * the resident firmware and is required by the platform to ++ * operate correctly. ++ * Until the following condition are met, we need this clock to ++ * be marked as critical: ++ * a) Mark the clock used by a firmware resource, if possible ++ * b) CCF has a clock hand-off mechanism to make the sure the ++ * clock stays on until the proper driver comes along ++ */ ++ .flags = CLK_IS_CRITICAL, + }, + }; + +diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c +index f0b47b7d50ca6..31258795e7b86 100644 +--- a/drivers/clk/qcom/gcc-sdm660.c ++++ b/drivers/clk/qcom/gcc-sdm660.c +@@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = { + .cmd_rcgr = 0x48044, + .mnd_width = 0, + .hid_width = 5, +- .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, ++ .parent_map = gcc_parent_map_xo_gpll0, + .freq_tbl = ftbl_hmss_rbcpr_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "hmss_rbcpr_clk_src", +diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c +index bfc4ac02f9ea2..af26e0695b866 100644 +--- a/drivers/clk/qcom/gdsc.c ++++ b/drivers/clk/qcom/gdsc.c +@@ -358,6 +358,14 @@ static int gdsc_init(struct gdsc *sc) + if ((sc->flags & VOTABLE) && on) + gdsc_enable(&sc->pd); + ++ /* ++ * Make sure the retain bit is set if the GDSC is already on, otherwise ++ * we end up turning off the GDSC and destroying all the register ++ * contents that we thought we were saving. 
++ */ ++ if ((sc->flags & RETAIN_FF_ENABLE) && on) ++ gdsc_retain_ff_on(sc); ++ + /* If ALWAYS_ON GDSCs are not ON, turn them ON */ + if (sc->flags & ALWAYS_ON) { + if (!on) +diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c +index b333fc28c94b6..37c858d689e0d 100644 +--- a/drivers/clk/rockchip/clk-half-divider.c ++++ b/drivers/clk/rockchip/clk-half-divider.c +@@ -166,7 +166,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name, + unsigned long flags, + spinlock_t *lock) + { +- struct clk *clk; ++ struct clk *clk = ERR_PTR(-ENOMEM); + struct clk_mux *mux = NULL; + struct clk_gate *gate = NULL; + struct clk_divider *div = NULL; +diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c +index 09aa44cb8a91d..ba04cb381cd3f 100644 +--- a/drivers/clocksource/hyperv_timer.c ++++ b/drivers/clocksource/hyperv_timer.c +@@ -341,7 +341,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg) + return read_hv_clock_tsc(); + } + +-static u64 read_hv_sched_clock_tsc(void) ++static u64 notrace read_hv_sched_clock_tsc(void) + { + return (read_hv_clock_tsc() - hv_sched_clock_offset) * + (NSEC_PER_SEC / HV_CLOCK_HZ); +@@ -404,7 +404,7 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg) + return read_hv_clock_msr(); + } + +-static u64 read_hv_sched_clock_msr(void) ++static u64 notrace read_hv_sched_clock_msr(void) + { + return (read_hv_clock_msr() - hv_sched_clock_offset) * + (NSEC_PER_SEC / HV_CLOCK_HZ); +diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c +index df1c941260d14..b4af4094309b0 100644 +--- a/drivers/cpufreq/armada-37xx-cpufreq.c ++++ b/drivers/cpufreq/armada-37xx-cpufreq.c +@@ -484,6 +484,12 @@ remove_opp: + /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */ + late_initcall(armada37xx_cpufreq_driver_init); + ++static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = { ++ { .compatible = "marvell,armada-3700-nb-pm" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match); ++ + MODULE_AUTHOR("Gregory CLEMENT "); + MODULE_DESCRIPTION("Armada 37xx cpufreq driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c +index a9af15e994ccf..e439b43c19ebe 100644 +--- a/drivers/cpufreq/powernv-cpufreq.c ++++ b/drivers/cpufreq/powernv-cpufreq.c +@@ -885,12 +885,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) + { + int cpu; +- struct cpufreq_policy cpu_policy; ++ struct cpufreq_policy *cpu_policy; + + rebooting = true; + for_each_online_cpu(cpu) { +- cpufreq_get_policy(&cpu_policy, cpu); +- powernv_cpufreq_target_index(&cpu_policy, get_nominal_index()); ++ cpu_policy = cpufreq_cpu_get(cpu); ++ if (!cpu_policy) ++ continue; ++ powernv_cpufreq_target_index(cpu_policy, get_nominal_index()); ++ cpufreq_cpu_put(cpu_policy); + } + + return NOTIFY_DONE; +diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c +index 3fb044b907a83..47b7d394d2abb 100644 +--- a/drivers/cpufreq/qcom-cpufreq-hw.c ++++ b/drivers/cpufreq/qcom-cpufreq-hw.c +@@ -177,10 +177,15 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, + freq = cpu_hw_rate / 1000; + + if (freq != prev_freq && core_count != LUT_TURBO_IND) { +- table[i].frequency = freq; +- qcom_cpufreq_update_opp(cpu_dev, freq, volt); +- dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i, ++ if 
(!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) { ++ table[i].frequency = freq; ++ dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i, + freq, core_count); ++ } else { ++ dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq); ++ table[i].frequency = CPUFREQ_ENTRY_INVALID; ++ } ++ + } else if (core_count == LUT_TURBO_IND) { + table[i].frequency = CPUFREQ_ENTRY_INVALID; + } +@@ -197,9 +202,13 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, + * as the boost frequency + */ + if (prev->frequency == CPUFREQ_ENTRY_INVALID) { +- prev->frequency = prev_freq; +- prev->flags = CPUFREQ_BOOST_FREQ; +- qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt); ++ if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) { ++ prev->frequency = prev_freq; ++ prev->flags = CPUFREQ_BOOST_FREQ; ++ } else { ++ dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", ++ freq); ++ } + } + + break; +diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig +index 52a9b7cf6576f..ab941cfd27a88 100644 +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -876,6 +876,7 @@ config CRYPTO_DEV_SA2UL + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 ++ select CRYPTO_AUTHENC + select HW_RANDOM + select SG_SPLIT + help +diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +index 138759dc8190e..08ed1ca12baf9 100644 +--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c ++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +@@ -120,7 +120,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name) + /* Be sure all data is written before enabling the task */ + wmb(); + +- v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8; ++ /* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored ++ * on older SoCs, we have no reason to complicate things. ++ */ ++ v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8); + writel(v, ce->base + CE_TLR); + mutex_unlock(&ce->mlock); + +diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig +index bc35aa0ec07ae..84ea7cba5ee5b 100644 +--- a/drivers/crypto/caam/Kconfig ++++ b/drivers/crypto/caam/Kconfig +@@ -101,6 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API + select CRYPTO_AUTHENC + select CRYPTO_SKCIPHER + select CRYPTO_LIB_DES ++ select CRYPTO_XTS + help + Selecting this will offload crypto for users of the + scatterlist crypto API (such as the linux native IPSec +@@ -114,6 +115,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI + select CRYPTO_AUTHENC + select CRYPTO_SKCIPHER + select CRYPTO_DES ++ select CRYPTO_XTS + help + Selecting this will use CAAM Queue Interface (QI) for sending + & receiving crypto jobs to/from CAAM. This gives better performance +@@ -165,6 +167,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM + select CRYPTO_AEAD + select CRYPTO_HASH + select CRYPTO_DES ++ select CRYPTO_XTS + help + CAAM driver for QorIQ Data Path Acceleration Architecture 2. 
+ It handles DPSECI DPAA2 objects that sit on the Management Complex +diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c +index 91feda5b63f65..e72aa3e2e0656 100644 +--- a/drivers/crypto/caam/caamalg.c ++++ b/drivers/crypto/caam/caamalg.c +@@ -57,6 +57,8 @@ + #include "key_gen.h" + #include "caamalg_desc.h" + #include ++#include ++#include + + /* + * crypto alg +@@ -114,10 +116,13 @@ struct caam_ctx { + struct alginfo adata; + struct alginfo cdata; + unsigned int authsize; ++ bool xts_key_fallback; ++ struct crypto_skcipher *fallback; + }; + + struct caam_skcipher_req_ctx { + struct skcipher_edesc *edesc; ++ struct skcipher_request fallback_req; + }; + + struct caam_aead_req_ctx { +@@ -830,12 +835,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct device *jrdev = ctx->jrdev; + u32 *desc; ++ int err; + +- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { ++ err = xts_verify_key(skcipher, key, keylen); ++ if (err) { + dev_dbg(jrdev, "key size mismatch\n"); +- return -EINVAL; ++ return err; + } + ++ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) ++ ctx->xts_key_fallback = true; ++ ++ err = crypto_skcipher_setkey(ctx->fallback, key, keylen); ++ if (err) ++ return err; ++ + ctx->cdata.keylen = keylen; + ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; +@@ -1755,6 +1769,14 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq) + return ret; + } + ++static inline bool xts_skcipher_ivsize(struct skcipher_request *req) ++{ ++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); ++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher); ++ ++ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); ++} ++ + static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) + { + struct skcipher_edesc *edesc; +@@ -1765,9 +1787,30 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) + u32 *desc; + int ret = 0; + +- if (!req->cryptlen) ++ /* ++ * XTS is expected to return an error even for input length = 0 ++ * Note that the case input length < block size will be caught during ++ * HW offloading and return an error. ++ */ ++ if (!req->cryptlen && !ctx->fallback) + return 0; + ++ if (ctx->fallback && (xts_skcipher_ivsize(req) || ++ ctx->xts_key_fallback)) { ++ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); ++ ++ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); ++ skcipher_request_set_callback(&rctx->fallback_req, ++ req->base.flags, ++ req->base.complete, ++ req->base.data); ++ skcipher_request_set_crypt(&rctx->fallback_req, req->src, ++ req->dst, req->cryptlen, req->iv); ++ ++ return encrypt ? 
crypto_skcipher_encrypt(&rctx->fallback_req) : ++ crypto_skcipher_decrypt(&rctx->fallback_req); ++ } ++ + /* allocate extended descriptor */ + edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); + if (IS_ERR(edesc)) +@@ -1905,6 +1948,7 @@ static struct caam_skcipher_alg driver_algs[] = { + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-caam", ++ .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = xts_skcipher_setkey, +@@ -3344,13 +3388,35 @@ static int caam_cra_init(struct crypto_skcipher *tfm) + struct caam_skcipher_alg *caam_alg = + container_of(alg, typeof(*caam_alg), skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); +- +- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx)); ++ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; ++ int ret = 0; + + ctx->enginectx.op.do_one_request = skcipher_do_one_req; + +- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam, +- false); ++ if (alg_aai == OP_ALG_AAI_XTS) { ++ const char *tfm_name = crypto_tfm_alg_name(&tfm->base); ++ struct crypto_skcipher *fallback; ++ ++ fallback = crypto_alloc_skcipher(tfm_name, 0, ++ CRYPTO_ALG_NEED_FALLBACK); ++ if (IS_ERR(fallback)) { ++ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n", ++ tfm_name, PTR_ERR(fallback)); ++ return PTR_ERR(fallback); ++ } ++ ++ ctx->fallback = fallback; ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) + ++ crypto_skcipher_reqsize(fallback)); ++ } else { ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx)); ++ } ++ ++ ret = caam_init_common(ctx, &caam_alg->caam, false); ++ if (ret && ctx->fallback) ++ crypto_free_skcipher(ctx->fallback); ++ ++ return ret; + } + + static int caam_aead_init(struct crypto_aead *tfm) +@@ -3378,7 +3444,11 @@ static void caam_exit_common(struct caam_ctx *ctx) + + static void caam_cra_exit(struct crypto_skcipher *tfm) + { +- caam_exit_common(crypto_skcipher_ctx(tfm)); ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); ++ ++ if (ctx->fallback) ++ crypto_free_skcipher(ctx->fallback); ++ caam_exit_common(ctx); + } + + static void caam_aead_exit(struct crypto_aead *tfm) +@@ -3412,8 +3482,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); +- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | +- CRYPTO_ALG_KERN_DRIVER_ONLY; ++ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | ++ CRYPTO_ALG_KERN_DRIVER_ONLY); + + alg->init = caam_cra_init; + alg->exit = caam_cra_exit; +diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c +index bb1c0106a95c3..efcc7cb050fc7 100644 +--- a/drivers/crypto/caam/caamalg_qi.c ++++ b/drivers/crypto/caam/caamalg_qi.c +@@ -18,6 +18,8 @@ + #include "qi.h" + #include "jr.h" + #include "caamalg_desc.h" ++#include ++#include + + /* + * crypto alg +@@ -67,6 +69,12 @@ struct caam_ctx { + struct device *qidev; + spinlock_t lock; /* Protects multiple init of driver context */ + struct caam_drv_ctx *drv_ctx[NUM_OP]; ++ bool xts_key_fallback; ++ struct crypto_skcipher *fallback; ++}; ++ ++struct caam_skcipher_req_ctx { ++ struct skcipher_request fallback_req; + }; + + static int aead_set_sh_desc(struct crypto_aead *aead) +@@ -726,12 +734,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + struct caam_ctx *ctx 
= crypto_skcipher_ctx(skcipher); + struct device *jrdev = ctx->jrdev; + int ret = 0; ++ int err; + +- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { ++ err = xts_verify_key(skcipher, key, keylen); ++ if (err) { + dev_dbg(jrdev, "key size mismatch\n"); +- return -EINVAL; ++ return err; + } + ++ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) ++ ctx->xts_key_fallback = true; ++ ++ err = crypto_skcipher_setkey(ctx->fallback, key, keylen); ++ if (err) ++ return err; ++ + ctx->cdata.keylen = keylen; + ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; +@@ -1373,6 +1390,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, + return edesc; + } + ++static inline bool xts_skcipher_ivsize(struct skcipher_request *req) ++{ ++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); ++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher); ++ ++ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); ++} ++ + static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) + { + struct skcipher_edesc *edesc; +@@ -1380,9 +1405,30 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + int ret; + +- if (!req->cryptlen) ++ /* ++ * XTS is expected to return an error even for input length = 0 ++ * Note that the case input length < block size will be caught during ++ * HW offloading and return an error. ++ */ ++ if (!req->cryptlen && !ctx->fallback) + return 0; + ++ if (ctx->fallback && (xts_skcipher_ivsize(req) || ++ ctx->xts_key_fallback)) { ++ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); ++ ++ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); ++ skcipher_request_set_callback(&rctx->fallback_req, ++ req->base.flags, ++ req->base.complete, ++ req->base.data); ++ skcipher_request_set_crypt(&rctx->fallback_req, req->src, ++ req->dst, req->cryptlen, req->iv); ++ ++ return encrypt ? 
crypto_skcipher_encrypt(&rctx->fallback_req) : ++ crypto_skcipher_decrypt(&rctx->fallback_req); ++ } ++ + if (unlikely(caam_congested)) + return -EAGAIN; + +@@ -1507,6 +1553,7 @@ static struct caam_skcipher_alg driver_algs[] = { + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-caam-qi", ++ .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = xts_skcipher_setkey, +@@ -2440,9 +2487,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm) + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct caam_skcipher_alg *caam_alg = + container_of(alg, typeof(*caam_alg), skcipher); ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); ++ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; ++ int ret = 0; ++ ++ if (alg_aai == OP_ALG_AAI_XTS) { ++ const char *tfm_name = crypto_tfm_alg_name(&tfm->base); ++ struct crypto_skcipher *fallback; ++ ++ fallback = crypto_alloc_skcipher(tfm_name, 0, ++ CRYPTO_ALG_NEED_FALLBACK); ++ if (IS_ERR(fallback)) { ++ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n", ++ tfm_name, PTR_ERR(fallback)); ++ return PTR_ERR(fallback); ++ } ++ ++ ctx->fallback = fallback; ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) + ++ crypto_skcipher_reqsize(fallback)); ++ } ++ ++ ret = caam_init_common(ctx, &caam_alg->caam, false); ++ if (ret && ctx->fallback) ++ crypto_free_skcipher(ctx->fallback); + +- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam, +- false); ++ return ret; + } + + static int caam_aead_init(struct crypto_aead *tfm) +@@ -2468,7 +2538,11 @@ static void caam_exit_common(struct caam_ctx *ctx) + + static void caam_cra_exit(struct crypto_skcipher *tfm) + { +- caam_exit_common(crypto_skcipher_ctx(tfm)); ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); ++ ++ if (ctx->fallback) ++ crypto_free_skcipher(ctx->fallback); ++ caam_exit_common(ctx); + } + + static void caam_aead_exit(struct crypto_aead *tfm) +@@ -2502,8 +2576,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); +- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | +- CRYPTO_ALG_KERN_DRIVER_ONLY; ++ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | ++ CRYPTO_ALG_KERN_DRIVER_ONLY); + + alg->init = caam_cra_init; + alg->exit = caam_cra_exit; +diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c +index 66ae1d5811689..d7622edb31610 100644 +--- a/drivers/crypto/caam/caamalg_qi2.c ++++ b/drivers/crypto/caam/caamalg_qi2.c +@@ -19,6 +19,8 @@ + #include + #include + #include ++#include ++#include + + #define CAAM_CRA_PRIORITY 2000 + +@@ -80,6 +82,8 @@ struct caam_ctx { + struct alginfo adata; + struct alginfo cdata; + unsigned int authsize; ++ bool xts_key_fallback; ++ struct crypto_skcipher *fallback; + }; + + static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv, +@@ -1056,12 +1060,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + struct device *dev = ctx->dev; + struct caam_flc *flc; + u32 *desc; ++ int err; + +- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { ++ err = xts_verify_key(skcipher, key, keylen); ++ if (err) { + dev_dbg(dev, "key size mismatch\n"); +- return -EINVAL; ++ return err; + } + ++ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) ++ ctx->xts_key_fallback = 
true; ++ ++ err = crypto_skcipher_setkey(ctx->fallback, key, keylen); ++ if (err) ++ return err; ++ + ctx->cdata.keylen = keylen; + ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; +@@ -1443,6 +1456,14 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status) + skcipher_request_complete(req, ecode); + } + ++static inline bool xts_skcipher_ivsize(struct skcipher_request *req) ++{ ++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); ++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher); ++ ++ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); ++} ++ + static int skcipher_encrypt(struct skcipher_request *req) + { + struct skcipher_edesc *edesc; +@@ -1451,9 +1472,27 @@ static int skcipher_encrypt(struct skcipher_request *req) + struct caam_request *caam_req = skcipher_request_ctx(req); + int ret; + +- if (!req->cryptlen) ++ /* ++ * XTS is expected to return an error even for input length = 0 ++ * Note that the case input length < block size will be caught during ++ * HW offloading and return an error. ++ */ ++ if (!req->cryptlen && !ctx->fallback) + return 0; + ++ if (ctx->fallback && (xts_skcipher_ivsize(req) || ++ ctx->xts_key_fallback)) { ++ skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); ++ skcipher_request_set_callback(&caam_req->fallback_req, ++ req->base.flags, ++ req->base.complete, ++ req->base.data); ++ skcipher_request_set_crypt(&caam_req->fallback_req, req->src, ++ req->dst, req->cryptlen, req->iv); ++ ++ return crypto_skcipher_encrypt(&caam_req->fallback_req); ++ } ++ + /* allocate extended descriptor */ + edesc = skcipher_edesc_alloc(req); + if (IS_ERR(edesc)) +@@ -1482,8 +1521,27 @@ static int skcipher_decrypt(struct skcipher_request *req) + struct caam_request *caam_req = skcipher_request_ctx(req); + int ret; + +- if (!req->cryptlen) ++ /* ++ * XTS is expected to return an error even for input length = 0 ++ * Note that the case input length < block size will be caught during ++ * HW offloading and return an error. 
++ */ ++ if (!req->cryptlen && !ctx->fallback) + return 0; ++ ++ if (ctx->fallback && (xts_skcipher_ivsize(req) || ++ ctx->xts_key_fallback)) { ++ skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); ++ skcipher_request_set_callback(&caam_req->fallback_req, ++ req->base.flags, ++ req->base.complete, ++ req->base.data); ++ skcipher_request_set_crypt(&caam_req->fallback_req, req->src, ++ req->dst, req->cryptlen, req->iv); ++ ++ return crypto_skcipher_decrypt(&caam_req->fallback_req); ++ } ++ + /* allocate extended descriptor */ + edesc = skcipher_edesc_alloc(req); + if (IS_ERR(edesc)) +@@ -1537,9 +1595,34 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm) + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct caam_skcipher_alg *caam_alg = + container_of(alg, typeof(*caam_alg), skcipher); ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); ++ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; ++ int ret = 0; ++ ++ if (alg_aai == OP_ALG_AAI_XTS) { ++ const char *tfm_name = crypto_tfm_alg_name(&tfm->base); ++ struct crypto_skcipher *fallback; ++ ++ fallback = crypto_alloc_skcipher(tfm_name, 0, ++ CRYPTO_ALG_NEED_FALLBACK); ++ if (IS_ERR(fallback)) { ++ dev_err(ctx->dev, "Failed to allocate %s fallback: %ld\n", ++ tfm_name, PTR_ERR(fallback)); ++ return PTR_ERR(fallback); ++ } + +- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request)); +- return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false); ++ ctx->fallback = fallback; ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) + ++ crypto_skcipher_reqsize(fallback)); ++ } else { ++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request)); ++ } ++ ++ ret = caam_cra_init(ctx, &caam_alg->caam, false); ++ if (ret && ctx->fallback) ++ crypto_free_skcipher(ctx->fallback); ++ ++ return ret; + } + + static int caam_cra_init_aead(struct crypto_aead *tfm) +@@ -1562,7 +1645,11 @@ static void caam_exit_common(struct caam_ctx *ctx) + + static void caam_cra_exit(struct crypto_skcipher *tfm) + { +- caam_exit_common(crypto_skcipher_ctx(tfm)); ++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); ++ ++ if (ctx->fallback) ++ crypto_free_skcipher(ctx->fallback); ++ caam_exit_common(ctx); + } + + static void caam_cra_exit_aead(struct crypto_aead *tfm) +@@ -1665,6 +1752,7 @@ static struct caam_skcipher_alg driver_algs[] = { + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-caam-qi2", ++ .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = xts_skcipher_setkey, +@@ -2912,8 +3000,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); +- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | +- CRYPTO_ALG_KERN_DRIVER_ONLY; ++ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | ++ CRYPTO_ALG_KERN_DRIVER_ONLY); + + alg->init = caam_cra_init_skcipher; + alg->exit = caam_cra_exit; +diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h +index f29cb7bd7dd36..d35253407ade4 100644 +--- a/drivers/crypto/caam/caamalg_qi2.h ++++ b/drivers/crypto/caam/caamalg_qi2.h +@@ -13,6 +13,7 @@ + #include + #include "dpseci.h" + #include "desc_constr.h" ++#include + + #define DPAA2_CAAM_STORE_SIZE 16 + /* NAPI weight *must* be a multiple of the store size. 
*/ +@@ -186,6 +187,7 @@ struct caam_request { + void (*cbk)(void *ctx, u32 err); + void *ctx; + void *edesc; ++ struct skcipher_request fallback_req; + }; + + /** +diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c +index bd270e66185e9..40869ea1ed20f 100644 +--- a/drivers/crypto/ccp/ccp-ops.c ++++ b/drivers/crypto/ccp/ccp-ops.c +@@ -1744,7 +1744,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) + break; + default: + ret = -EINVAL; +- goto e_ctx; ++ goto e_data; + } + } else { + /* Stash the context */ +diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c +index d39e1664fc7ed..3c65bf070c908 100644 +--- a/drivers/crypto/ccree/cc_pm.c ++++ b/drivers/crypto/ccree/cc_pm.c +@@ -65,8 +65,12 @@ const struct dev_pm_ops ccree_pm = { + int cc_pm_get(struct device *dev) + { + int rc = pm_runtime_get_sync(dev); ++ if (rc < 0) { ++ pm_runtime_put_noidle(dev); ++ return rc; ++ } + +- return (rc == 1 ? 0 : rc); ++ return 0; + } + + void cc_pm_put_suspend(struct device *dev) +diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c +index 05520dccd9065..ec4f79049a061 100644 +--- a/drivers/crypto/chelsio/chtls/chtls_cm.c ++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c +@@ -92,11 +92,13 @@ static void chtls_sock_release(struct kref *ref) + static struct net_device *chtls_find_netdev(struct chtls_dev *cdev, + struct sock *sk) + { ++ struct adapter *adap = pci_get_drvdata(cdev->pdev); + struct net_device *ndev = cdev->ports[0]; + #if IS_ENABLED(CONFIG_IPV6) + struct net_device *temp; + int addr_type; + #endif ++ int i; + + switch (sk->sk_family) { + case PF_INET: +@@ -127,8 +129,12 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev, + return NULL; + + if (is_vlan_dev(ndev)) +- return vlan_dev_real_dev(ndev); +- return ndev; ++ ndev = vlan_dev_real_dev(ndev); ++ ++ for_each_port(adap, i) ++ if (cdev->ports[i] == ndev) ++ return ndev; ++ return NULL; + } + + static void assign_rxopt(struct sock *sk, unsigned int opt) +@@ -477,7 +483,6 @@ void chtls_destroy_sock(struct sock *sk) + chtls_purge_write_queue(sk); + free_tls_keyid(sk); + kref_put(&csk->kref, chtls_sock_release); +- csk->cdev = NULL; + if (sk->sk_family == AF_INET) + sk->sk_prot = &tcp_prot; + #if IS_ENABLED(CONFIG_IPV6) +@@ -736,14 +741,13 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk) + + #if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == PF_INET6) { +- struct chtls_sock *csk; ++ struct net_device *ndev = chtls_find_netdev(cdev, sk); + int addr_type = 0; + +- csk = rcu_dereference_sk_user_data(sk); + addr_type = ipv6_addr_type((const struct in6_addr *) + &sk->sk_v6_rcv_saddr); + if (addr_type != IPV6_ADDR_ANY) +- cxgb4_clip_release(csk->egress_dev, (const u32 *) ++ cxgb4_clip_release(ndev, (const u32 *) + &sk->sk_v6_rcv_saddr, 1); + } + #endif +@@ -1157,6 +1161,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + ndev = n->dev; + if (!ndev) + goto free_dst; ++ if (is_vlan_dev(ndev)) ++ ndev = vlan_dev_real_dev(ndev); ++ + port_id = cxgb4_port_idx(ndev); + + csk = chtls_sock_create(cdev); +diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c +index 2e9acae1cba3b..9fb5ca6682ea2 100644 +--- a/drivers/crypto/chelsio/chtls/chtls_io.c ++++ b/drivers/crypto/chelsio/chtls/chtls_io.c +@@ -902,9 +902,9 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk, + return 0; + } + +-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk) ++static bool 
csk_mem_free(struct chtls_dev *cdev, struct sock *sk) + { +- return (cdev->max_host_sndbuf - sk->sk_wmem_queued); ++ return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0); + } + + static int csk_wait_memory(struct chtls_dev *cdev, +@@ -1240,6 +1240,7 @@ int chtls_sendpage(struct sock *sk, struct page *page, + copied = 0; + csk = rcu_dereference_sk_user_data(sk); + cdev = csk->cdev; ++ lock_sock(sk); + timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); + + err = sk_stream_wait_connect(sk, &timeo); +diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c +index 497969ae8b230..b9973d152a24a 100644 +--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c ++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c +@@ -342,11 +342,14 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx, + ret = sec_alloc_pbuf_resource(dev, res); + if (ret) { + dev_err(dev, "fail to alloc pbuf dma resource!\n"); +- goto alloc_fail; ++ goto alloc_pbuf_fail; + } + } + + return 0; ++alloc_pbuf_fail: ++ if (ctx->alg_type == SEC_AEAD) ++ sec_free_mac_resource(dev, qp_ctx->res); + alloc_fail: + sec_free_civ_resource(dev, res); + +@@ -457,8 +460,10 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) + ctx->fake_req_limit = QM_Q_DEPTH >> 1; + ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), + GFP_KERNEL); +- if (!ctx->qp_ctx) +- return -ENOMEM; ++ if (!ctx->qp_ctx) { ++ ret = -ENOMEM; ++ goto err_destroy_qps; ++ } + + for (i = 0; i < sec->ctx_q_num; i++) { + ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0); +@@ -467,12 +472,15 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) + } + + return 0; ++ + err_sec_release_qp_ctx: + for (i = i - 1; i >= 0; i--) + sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); + +- sec_destroy_qps(ctx->qps, sec->ctx_q_num); + kfree(ctx->qp_ctx); ++err_destroy_qps: ++ sec_destroy_qps(ctx->qps, sec->ctx_q_num); ++ + return ret; + } + +diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c +index f478bb0a566af..276012e7c482f 100644 +--- a/drivers/crypto/ixp4xx_crypto.c ++++ b/drivers/crypto/ixp4xx_crypto.c +@@ -528,7 +528,7 @@ static void release_ixp_crypto(struct device *dev) + + if (crypt_virt) { + dma_free_coherent(dev, +- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl), ++ NPE_QLEN * sizeof(struct crypt_ctl), + crypt_virt, crypt_phys); + } + } +diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c +index 7e3ad085b5bdd..efce3a83b35a8 100644 +--- a/drivers/crypto/mediatek/mtk-platform.c ++++ b/drivers/crypto/mediatek/mtk-platform.c +@@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp) + static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) + { + struct mtk_ring **ring = cryp->ring; +- int i, err = ENOMEM; ++ int i; + + for (i = 0; i < MTK_RING_MAX; i++) { + ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL); +@@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) + return 0; + + err_cleanup: +- for (; i--; ) { ++ do { + dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, + ring[i]->res_base, ring[i]->res_dma); + dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, + ring[i]->cmd_base, ring[i]->cmd_dma); + kfree(ring[i]); +- } +- return err; ++ } while (i--); ++ return -ENOMEM; + } + + static int mtk_crypto_probe(struct platform_device *pdev) +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c +index 954d703f29811..89ed055f21bf4 100644 +--- a/drivers/crypto/omap-sham.c ++++ b/drivers/crypto/omap-sham.c +@@ -456,6 +456,9 @@ static void 
omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + u32 val, mask; + ++ if (likely(ctx->digcnt)) ++ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); ++ + /* + * Setting ALGO_CONST only for the first iteration and + * CLOSE_HASH only for the last one. Note that flags mode bits +diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c +index dac6eb37fff93..fb34bf92861d1 100644 +--- a/drivers/crypto/picoxcell_crypto.c ++++ b/drivers/crypto/picoxcell_crypto.c +@@ -1685,11 +1685,6 @@ static int spacc_probe(struct platform_device *pdev) + goto err_clk_put; + } + +- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); +- if (ret) +- goto err_clk_disable; +- +- + /* + * Use an IRQ threshold of 50% as a default. This seems to be a + * reasonable trade off of latency against throughput but can be +@@ -1697,6 +1692,10 @@ static int spacc_probe(struct platform_device *pdev) + */ + engine->stat_irq_thresh = (engine->fifo_sz / 2); + ++ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); ++ if (ret) ++ goto err_clk_disable; ++ + /* + * Configure the interrupts. We only use the STAT_CNT interrupt as we + * only submit a new packet for processing when we complete another in +diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c +index 5bc099052bd20..039579b7cc818 100644 +--- a/drivers/crypto/sa2ul.c ++++ b/drivers/crypto/sa2ul.c +@@ -1148,12 +1148,10 @@ static int sa_run(struct sa_req *req) + ret = sg_split(req->dst, mapped_dst_nents, 0, 1, + &split_size, &dst, &dst_nents, + gfp_flags); +- if (ret) { +- dst_nents = dst_nents; ++ if (ret) + dst = req->dst; +- } else { ++ else + rxd->split_dst_sg = dst; +- } + } + } + +@@ -2333,7 +2331,7 @@ static int sa_ul_probe(struct platform_device *pdev) + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); +- if (ret) { ++ if (ret < 0) { + dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__, + ret); + return ret; +diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig +index 4ef3eb11361c2..4a4c3284ae1f3 100644 +--- a/drivers/crypto/stm32/Kconfig ++++ b/drivers/crypto/stm32/Kconfig +@@ -3,6 +3,7 @@ config CRYPTO_DEV_STM32_CRC + tristate "Support for STM32 crc accelerators" + depends on ARCH_STM32 + select CRYPTO_HASH ++ select CRC32 + help + This enables support for the CRC32 hw accelerator which can be found + on STMicroelectronics STM32 SOC. 
+diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c +index 3ba41148c2a46..2c13f5214d2cf 100644 +--- a/drivers/crypto/stm32/stm32-crc32.c ++++ b/drivers/crypto/stm32/stm32-crc32.c +@@ -6,6 +6,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -147,7 +148,6 @@ static int burst_update(struct shash_desc *desc, const u8 *d8, + struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct stm32_crc *crc; +- unsigned long flags; + + crc = stm32_crc_get_next_crc(); + if (!crc) +@@ -155,7 +155,15 @@ static int burst_update(struct shash_desc *desc, const u8 *d8, + + pm_runtime_get_sync(crc->dev); + +- spin_lock_irqsave(&crc->lock, flags); ++ if (!spin_trylock(&crc->lock)) { ++ /* Hardware is busy, calculate crc32 by software */ ++ if (mctx->poly == CRC32_POLY_LE) ++ ctx->partial = crc32_le(ctx->partial, d8, length); ++ else ++ ctx->partial = __crc32c_le(ctx->partial, d8, length); ++ ++ goto pm_out; ++ } + + /* + * Restore previously calculated CRC for this context as init value +@@ -195,8 +203,9 @@ static int burst_update(struct shash_desc *desc, const u8 *d8, + /* Store partial result */ + ctx->partial = readl_relaxed(crc->regs + CRC_DR); + +- spin_unlock_irqrestore(&crc->lock, flags); ++ spin_unlock(&crc->lock); + ++pm_out: + pm_runtime_mark_last_busy(crc->dev); + pm_runtime_put_autosuspend(crc->dev); + +diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c +index a819611b8892c..146c3f39f576b 100644 +--- a/drivers/dma/dmatest.c ++++ b/drivers/dma/dmatest.c +@@ -1249,15 +1249,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp) + add_threaded_test(info); + + /* Check if channel was added successfully */ +- dtc = list_last_entry(&info->channels, struct dmatest_chan, node); +- +- if (dtc->chan) { ++ if (!list_empty(&info->channels)) { + /* + * if new channel was not successfully added, revert the + * "test_channel" string to the name of the last successfully + * added channel. exception for when users issues empty string + * to channel parameter. + */ ++ dtc = list_last_entry(&info->channels, struct dmatest_chan, node); + if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0) + && (strcmp("", strim(test_channel)) != 0)) { + ret = -EINVAL; +diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c +index 4700f2e87a627..d9333ee14527e 100644 +--- a/drivers/dma/dw/core.c ++++ b/drivers/dma/dw/core.c +@@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param) + if (dws->dma_dev != chan->device->dev) + return false; + ++ /* permit channels in accordance with the channels mask */ ++ if (dws->channels && !(dws->channels & dwc->mask)) ++ return false; ++ + /* We have to copy data since dws can be temporary storage */ + memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave)); + +diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c +index 7a085b3c1854c..d9810980920a1 100644 +--- a/drivers/dma/dw/dw.c ++++ b/drivers/dma/dw/dw.c +@@ -14,7 +14,7 @@ + static void dw_dma_initialize_chan(struct dw_dma_chan *dwc) + { + struct dw_dma *dw = to_dw_dma(dwc->chan.device); +- u32 cfghi = DWC_CFGH_FIFO_MODE; ++ u32 cfghi = is_slave_direction(dwc->direction) ? 
0 : DWC_CFGH_FIFO_MODE; + u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); + bool hs_polarity = dwc->dws.hs_polarity; + +diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c +index 1474b3817ef4f..c1cf7675b9d10 100644 +--- a/drivers/dma/dw/of.c ++++ b/drivers/dma/dw/of.c +@@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, + }; + dma_cap_mask_t cap; + +- if (dma_spec->args_count != 3) ++ if (dma_spec->args_count < 3 || dma_spec->args_count > 4) + return NULL; + + slave.src_id = dma_spec->args[0]; + slave.dst_id = dma_spec->args[0]; + slave.m_master = dma_spec->args[1]; + slave.p_master = dma_spec->args[2]; ++ if (dma_spec->args_count >= 4) ++ slave.channels = dma_spec->args[3]; + + if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS || + slave.dst_id >= DW_DMA_MAX_NR_REQUESTS || + slave.m_master >= dw->pdata->nr_masters || +- slave.p_master >= dw->pdata->nr_masters)) ++ slave.p_master >= dw->pdata->nr_masters || ++ slave.channels >= BIT(dw->pdata->nr_channels))) + return NULL; + + dma_cap_zero(cap); +diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c +index a814b200299bf..07296171e2bbc 100644 +--- a/drivers/dma/ioat/dma.c ++++ b/drivers/dma/ioat/dma.c +@@ -389,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) + struct ioat_descs *descs = &ioat_chan->descs[i]; + + descs->virt = dma_alloc_coherent(to_dev(ioat_chan), +- SZ_2M, &descs->hw, flags); ++ IOAT_CHUNK_SIZE, &descs->hw, flags); + if (!descs->virt) { + int idx; + +diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c +index 3a5d33ea5ebe7..a367584f0d7b3 100644 +--- a/drivers/dma/ti/k3-udma-glue.c ++++ b/drivers/dma/ti/k3-udma-glue.c +@@ -378,17 +378,11 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn); + + int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) + { +- u32 txrt_ctl; +- +- txrt_ctl = UDMA_PEER_RT_EN_ENABLE; + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, +- txrt_ctl); ++ UDMA_PEER_RT_EN_ENABLE); + +- txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx, +- UDMA_CHAN_RT_CTL_REG); +- txrt_ctl |= UDMA_CHAN_RT_CTL_EN; + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, +- txrt_ctl); ++ UDMA_CHAN_RT_CTL_EN); + + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en"); + return 0; +@@ -579,8 +573,8 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, + + /* request and cfg rings */ + ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc, +- flow_cfg->ring_rxq_id, + flow_cfg->ring_rxfdq0_id, ++ flow_cfg->ring_rxq_id, + &flow->ringrxfdq, + &flow->ringrx); + if (ret) { +@@ -1058,19 +1052,14 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable); + + int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) + { +- u32 rxrt_ctl; +- + if (rx_chn->remote) + return -EINVAL; + + if (rx_chn->flows_ready < rx_chn->flow_num) + return -EINVAL; + +- rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx, +- UDMA_CHAN_RT_CTL_REG); +- rxrt_ctl |= UDMA_CHAN_RT_CTL_EN; + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, +- rxrt_ctl); ++ UDMA_CHAN_RT_CTL_EN); + + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); +diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c +index b194658b8b5c9..fbec28dc661d7 100644 +--- a/drivers/edac/aspeed_edac.c ++++ b/drivers/edac/aspeed_edac.c +@@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev) + /* register interrupt handler */ + irq = 
platform_get_irq(pdev, 0); + dev_dbg(&pdev->dev, "got irq %d\n", irq); +- if (!irq) +- return -ENODEV; ++ if (irq < 0) ++ return irq; + + rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH, + DRV_NAME, ctx); +diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c +index 191aa7c19ded7..324a46b8479b0 100644 +--- a/drivers/edac/i5100_edac.c ++++ b/drivers/edac/i5100_edac.c +@@ -1061,16 +1061,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) + PCI_DEVICE_ID_INTEL_5100_19, 0); + if (!einj) { + ret = -ENODEV; +- goto bail_einj; ++ goto bail_mc_free; + } + + rc = pci_enable_device(einj); + if (rc < 0) { + ret = rc; +- goto bail_disable_einj; ++ goto bail_einj; + } + +- + mci->pdev = &pdev->dev; + + priv = mci->pvt_info; +@@ -1136,14 +1135,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) + bail_scrub: + priv->scrub_enable = 0; + cancel_delayed_work_sync(&(priv->i5100_scrubbing)); +- edac_mc_free(mci); +- +-bail_disable_einj: + pci_disable_device(einj); + + bail_einj: + pci_dev_put(einj); + ++bail_mc_free: ++ edac_mc_free(mci); ++ + bail_disable_ch1: + pci_disable_device(ch1mm); + +diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c +index 8be3e89a510e4..d7419a90a2f5b 100644 +--- a/drivers/edac/ti_edac.c ++++ b/drivers/edac/ti_edac.c +@@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev) + + /* add EMIF ECC error handler */ + error_irq = platform_get_irq(pdev, 0); +- if (!error_irq) { ++ if (error_irq < 0) { ++ ret = error_irq; + edac_printk(KERN_ERR, EDAC_MOD_NAME, + "EMIF irq number not defined.\n"); + goto err; +diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c +index 6998dc86b5ce8..b797a713c3313 100644 +--- a/drivers/firmware/arm_scmi/mailbox.c ++++ b/drivers/firmware/arm_scmi/mailbox.c +@@ -110,7 +110,7 @@ static int mailbox_chan_free(int id, void *p, void *data) + struct scmi_chan_info *cinfo = p; + struct scmi_mailbox *smbox = cinfo->transport_info; + +- if (!IS_ERR(smbox->chan)) { ++ if (smbox && !IS_ERR(smbox->chan)) { + mbox_free_channel(smbox->chan); + cinfo->transport_info = NULL; + smbox->chan = NULL; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +index 913c8f0513bd3..5b7dc1d1b44c7 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +@@ -806,8 +806,8 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) + } + adev->atif = atif; + +- if (atif->notifications.brightness_change) { + #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) ++ if (atif->notifications.brightness_change) { + if (amdgpu_device_has_dc_support(adev)) { + #if defined(CONFIG_DRM_AMD_DC) + struct amdgpu_display_manager *dm = &adev->dm; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +index 71e005cf29522..479735c448478 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +@@ -1691,13 +1691,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, + uint64_t max_entries; + uint64_t addr, last; + ++ max_entries = mapping->last - start + 1; + if (nodes) { + addr = nodes->start << PAGE_SHIFT; +- max_entries = (nodes->size - pfn) * +- AMDGPU_GPU_PAGES_IN_CPU_PAGE; ++ max_entries = min((nodes->size - pfn) * ++ AMDGPU_GPU_PAGES_IN_CPU_PAGE, max_entries); + } else { + addr = 0; +- max_entries = S64_MAX; + } + + if 
(pages_addr) { +@@ -1727,7 +1727,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, + addr += pfn << PAGE_SHIFT; + } + +- last = min((uint64_t)mapping->last, start + max_entries - 1); ++ last = start + max_entries - 1; + r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv, + start, last, flags, addr, + dma_addr, fence); +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index a717a4904268e..5474f7e4c75b1 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -8217,8 +8217,7 @@ static int dm_update_plane_state(struct dc *dc, + dm_old_plane_state->dc_state, + dm_state->context)) { + +- ret = EINVAL; +- return ret; ++ return -EINVAL; + } + + +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index 92eb1ca1634fc..95ec8ae5a7739 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -2295,6 +2295,7 @@ static void commit_planes_for_stream(struct dc *dc, + enum surface_update_type update_type, + struct dc_state *context) + { ++ bool mpcc_disconnected = false; + int i, j; + struct pipe_ctx *top_pipe_to_program = NULL; + +@@ -2325,6 +2326,15 @@ static void commit_planes_for_stream(struct dc *dc, + context_clock_trace(dc, context); + } + ++ if (update_type != UPDATE_TYPE_FAST && dc->hwss.interdependent_update_lock && ++ dc->hwss.disconnect_pipes && dc->hwss.wait_for_pending_cleared){ ++ dc->hwss.interdependent_update_lock(dc, context, true); ++ mpcc_disconnected = dc->hwss.disconnect_pipes(dc, context); ++ dc->hwss.interdependent_update_lock(dc, context, false); ++ if (mpcc_disconnected) ++ dc->hwss.wait_for_pending_cleared(dc, context); ++ } ++ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + +@@ -2621,7 +2631,7 @@ void dc_commit_updates_for_stream(struct dc *dc, + + copy_stream_update_to_stream(dc, context, stream, stream_update); + +- if (update_type > UPDATE_TYPE_FAST) { ++ if (update_type >= UPDATE_TYPE_FULL) { + if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { + DC_ERROR("Mode validation failed for stream update!\n"); + dc_release_state(context); +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c +index 43781e77be431..f9456ff6845b6 100644 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c +@@ -75,7 +75,7 @@ static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *d + else + bl_pwm &= 0xFFFF; + +- current_backlight = bl_pwm << (1 + bl_int_count); ++ current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count); + + if (bl_period == 0) + bl_period = 0xFFFF; +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +index fa643ec5a8760..4bbfd8a26a606 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +@@ -2769,6 +2769,152 @@ static struct pipe_ctx *dcn10_find_top_pipe_for_stream( + return NULL; + } + ++bool dcn10_disconnect_pipes( ++ struct dc *dc, ++ struct dc_state *context) ++{ ++ bool found_stream = false; ++ int i, j; ++ struct dce_hwseq *hws = dc->hwseq; ++ struct dc_state *old_ctx = dc->current_state; ++ bool mpcc_disconnected = false; ++ 
struct pipe_ctx *old_pipe; ++ struct pipe_ctx *new_pipe; ++ DC_LOGGER_INIT(dc->ctx->logger); ++ ++ /* Set pipe update flags and lock pipes */ ++ for (i = 0; i < dc->res_pool->pipe_count; i++) { ++ old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; ++ new_pipe = &context->res_ctx.pipe_ctx[i]; ++ new_pipe->update_flags.raw = 0; ++ ++ if (!old_pipe->plane_state && !new_pipe->plane_state) ++ continue; ++ ++ if (old_pipe->plane_state && !new_pipe->plane_state) ++ new_pipe->update_flags.bits.disable = 1; ++ ++ /* Check for scl update */ ++ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data))) ++ new_pipe->update_flags.bits.scaler = 1; ++ ++ /* Check for vp update */ ++ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect)) ++ || memcmp(&old_pipe->plane_res.scl_data.viewport_c, ++ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect))) ++ new_pipe->update_flags.bits.viewport = 1; ++ ++ } ++ ++ if (!IS_DIAG_DC(dc->ctx->dce_environment)) { ++ /* Disconnect mpcc here only if losing pipe split*/ ++ for (i = 0; i < dc->res_pool->pipe_count; i++) { ++ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && ++ old_ctx->res_ctx.pipe_ctx[i].top_pipe) { ++ ++ /* Find the top pipe in the new ctx for the bottom pipe that we ++ * want to remove by comparing the streams. If both pipes are being ++ * disabled then do it in the regular pipe programming sequence ++ */ ++ for (j = 0; j < dc->res_pool->pipe_count; j++) { ++ if (old_ctx->res_ctx.pipe_ctx[i].top_pipe->stream == context->res_ctx.pipe_ctx[j].stream && ++ !context->res_ctx.pipe_ctx[j].top_pipe && ++ !context->res_ctx.pipe_ctx[j].update_flags.bits.disable) { ++ found_stream = true; ++ break; ++ } ++ } ++ ++ // Disconnect if the top pipe lost it's pipe split ++ if (found_stream && !context->res_ctx.pipe_ctx[j].bottom_pipe) { ++ hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); ++ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); ++ mpcc_disconnected = true; ++ } ++ } ++ found_stream = false; ++ } ++ } ++ ++ if (mpcc_disconnected) { ++ for (i = 0; i < dc->res_pool->pipe_count; i++) { ++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; ++ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; ++ struct dc_plane_state *plane_state = pipe_ctx->plane_state; ++ struct hubp *hubp = pipe_ctx->plane_res.hubp; ++ ++ if (!pipe_ctx || !plane_state || !pipe_ctx->stream) ++ continue; ++ ++ // Only update scaler and viewport here if we lose a pipe split. ++ // This is to prevent half the screen from being black when we ++ // unlock after disconnecting MPCC. 
++ if (!(old_pipe && !pipe_ctx->top_pipe && ++ !pipe_ctx->bottom_pipe && old_pipe->bottom_pipe)) ++ continue; ++ ++ if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) { ++ if (pipe_ctx->update_flags.bits.scaler || ++ plane_state->update_flags.bits.scaling_change || ++ plane_state->update_flags.bits.position_change || ++ plane_state->update_flags.bits.per_pixel_alpha_change || ++ pipe_ctx->stream->update_flags.bits.scaling) { ++ ++ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; ++ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP); ++ /* scaler configuration */ ++ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler( ++ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); ++ } ++ ++ if (pipe_ctx->update_flags.bits.viewport || ++ (context == dc->current_state && plane_state->update_flags.bits.position_change) || ++ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || ++ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { ++ ++ hubp->funcs->mem_program_viewport( ++ hubp, ++ &pipe_ctx->plane_res.scl_data.viewport, ++ &pipe_ctx->plane_res.scl_data.viewport_c); ++ } ++ } ++ } ++ } ++ return mpcc_disconnected; ++} ++ ++void dcn10_wait_for_pending_cleared(struct dc *dc, ++ struct dc_state *context) ++{ ++ struct pipe_ctx *pipe_ctx; ++ struct timing_generator *tg; ++ int i; ++ ++ for (i = 0; i < dc->res_pool->pipe_count; i++) { ++ pipe_ctx = &context->res_ctx.pipe_ctx[i]; ++ tg = pipe_ctx->stream_res.tg; ++ ++ /* ++ * Only wait for top pipe's tg penindg bit ++ * Also skip if pipe is disabled. ++ */ ++ if (pipe_ctx->top_pipe || ++ !pipe_ctx->stream || !pipe_ctx->plane_state || ++ !tg->funcs->is_tg_enabled(tg)) ++ continue; ++ ++ /* ++ * Wait for VBLANK then VACTIVE to ensure we get VUPDATE. ++ * For some reason waiting for OTG_UPDATE_PENDING cleared ++ * seems to not trigger the update right away, and if we ++ * lock again before VUPDATE then we don't get a separated ++ * operation. 
++ */ ++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); ++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); ++ } ++} ++ + void dcn10_apply_ctx_for_surface( + struct dc *dc, + const struct dc_stream_state *stream, +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +index 6d891166da8a4..e5691e4990231 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +@@ -194,6 +194,12 @@ void dcn10_get_surface_visual_confirm_color( + void dcn10_get_hdr_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color); ++bool dcn10_disconnect_pipes( ++ struct dc *dc, ++ struct dc_state *context); ++ ++void dcn10_wait_for_pending_cleared(struct dc *dc, ++ struct dc_state *context); + void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx); + void dcn10_verify_allow_pstate_change_high(struct dc *dc); + +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +index 5c98b71c1d47a..a1d1559bb5d73 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +@@ -34,6 +34,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = { + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, + .post_unlock_program_front_end = dcn10_post_unlock_program_front_end, ++ .disconnect_pipes = dcn10_disconnect_pipes, ++ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .update_plane_addr = dcn10_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +index 3dde6f26de474..966e1790b9bfd 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +@@ -34,6 +34,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = { + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, ++ .disconnect_pipes = dcn10_disconnect_pipes, ++ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +index f31f48dd0da29..aaf9a99f9f045 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +@@ -3209,6 +3209,9 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc, + context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive = + dc->debug.enable_dram_clock_change_one_display_vactive; + ++ /*Unsafe due to current pipe merge and split logic*/ ++ ASSERT(context != dc->current_state); ++ + if (fast_validate) { + return dcn20_validate_bandwidth_internal(dc, context, true); + } +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +index b187f71afa652..2ba880c3943c3 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c 
+@@ -35,6 +35,8 @@ static const struct hw_sequencer_funcs dcn21_funcs = { + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, ++ .disconnect_pipes = dcn10_disconnect_pipes, ++ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +index 88d41a385add8..a4f37d83d5cc9 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +@@ -1184,6 +1184,9 @@ bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, + + BW_VAL_TRACE_COUNT(); + ++ /*Unsafe due to current pipe merge and split logic*/ ++ ASSERT(context != dc->current_state); ++ + out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel); + + if (pipe_cnt == 0) +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +index 9afee71604902..19daa456e3bfe 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +@@ -35,6 +35,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = { + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, ++ .disconnect_pipes = dcn10_disconnect_pipes, ++ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, +diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +index 3c986717dcd56..64c1be818b0e8 100644 +--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h ++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +@@ -67,6 +67,10 @@ struct hw_sequencer_funcs { + int num_planes, struct dc_state *context); + void (*program_front_end_for_ctx)(struct dc *dc, + struct dc_state *context); ++ bool (*disconnect_pipes)(struct dc *dc, ++ struct dc_state *context); ++ void (*wait_for_pending_cleared)(struct dc *dc, ++ struct dc_state *context); + void (*post_unlock_program_front_end)(struct dc *dc, + struct dc_state *context); + void (*update_plane_addr)(const struct dc *dc, +diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c +index ab45ac445045a..351a85088d0ec 100644 +--- a/drivers/gpu/drm/arm/malidp_planes.c ++++ b/drivers/gpu/drm/arm/malidp_planes.c +@@ -346,7 +346,7 @@ static bool malidp_check_pages_threshold(struct malidp_plane_state *ms, + if (cma_obj->sgt) + sgt = cma_obj->sgt; + else +- sgt = obj->dev->driver->gem_prime_get_sg_table(obj); ++ sgt = obj->funcs->get_sg_table(obj); + + if (!sgt) + return false; +diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c +index 5d67a41f7c3a8..3dd70d813f694 100644 +--- a/drivers/gpu/drm/drm_debugfs_crc.c ++++ b/drivers/gpu/drm/drm_debugfs_crc.c +@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf, + source[len - 1] = '\0'; + + ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt); +- if (ret) ++ if (ret) { ++ kfree(source); + return ret; ++ } + + 
spin_lock_irq(&crc->lock); + +diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c +index 3296ed3df3580..8b65ca164bf4b 100644 +--- a/drivers/gpu/drm/drm_gem_vram_helper.c ++++ b/drivers/gpu/drm/drm_gem_vram_helper.c +@@ -167,6 +167,10 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo, + } + } + ++/* ++ * Note that on error, drm_gem_vram_init will free the buffer object. ++ */ ++ + static int drm_gem_vram_init(struct drm_device *dev, + struct drm_gem_vram_object *gbo, + size_t size, unsigned long pg_align) +@@ -176,15 +180,19 @@ static int drm_gem_vram_init(struct drm_device *dev, + int ret; + size_t acc_size; + +- if (WARN_ONCE(!vmm, "VRAM MM not initialized")) ++ if (WARN_ONCE(!vmm, "VRAM MM not initialized")) { ++ kfree(gbo); + return -EINVAL; ++ } + bdev = &vmm->bdev; + + gbo->bo.base.funcs = &drm_gem_vram_object_funcs; + + ret = drm_gem_object_init(dev, &gbo->bo.base, size); +- if (ret) ++ if (ret) { ++ kfree(gbo); + return ret; ++ } + + acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo)); + +@@ -195,13 +203,13 @@ static int drm_gem_vram_init(struct drm_device *dev, + &gbo->placement, pg_align, false, acc_size, + NULL, NULL, ttm_buffer_object_destroy); + if (ret) +- goto err_drm_gem_object_release; ++ /* ++ * A failing ttm_bo_init will call ttm_buffer_object_destroy ++ * to release gbo->bo.base and kfree gbo. ++ */ ++ return ret; + + return 0; +- +-err_drm_gem_object_release: +- drm_gem_object_release(&gbo->bo.base); +- return ret; + } + + /** +@@ -235,13 +243,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, + + ret = drm_gem_vram_init(dev, gbo, size, pg_align); + if (ret < 0) +- goto err_kfree; ++ return ERR_PTR(ret); + + return gbo; +- +-err_kfree: +- kfree(gbo); +- return ERR_PTR(ret); + } + EXPORT_SYMBOL(drm_gem_vram_create); + +diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c +index f41cbb753bb46..720a767118c9c 100644 +--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c ++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c +@@ -2078,7 +2078,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev + intel_dp->dpcd, + sizeof(intel_dp->dpcd)); + cdv_intel_edp_panel_vdd_off(gma_encoder); +- if (ret == 0) { ++ if (ret <= 0) { + /* if this fails, presume the device is a ghost */ + DRM_INFO("failed to retrieve link info, disabling eDP\n"); + drm_encoder_cleanup(encoder); +diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +index cc70e836522f0..8758958e16893 100644 +--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c ++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +@@ -160,37 +160,6 @@ static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = { + .atomic_update = hibmc_plane_atomic_update, + }; + +-static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv) +-{ +- struct drm_device *dev = priv->dev; +- struct drm_plane *plane; +- int ret = 0; +- +- plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL); +- if (!plane) { +- DRM_ERROR("failed to alloc memory when init plane\n"); +- return ERR_PTR(-ENOMEM); +- } +- /* +- * plane init +- * TODO: Now only support primary plane, overlay planes +- * need to do. 
+- */ +- ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs, +- channel_formats1, +- ARRAY_SIZE(channel_formats1), +- NULL, +- DRM_PLANE_TYPE_PRIMARY, +- NULL); +- if (ret) { +- DRM_ERROR("failed to init plane: %d\n", ret); +- return ERR_PTR(ret); +- } +- +- drm_plane_helper_add(plane, &hibmc_plane_helper_funcs); +- return plane; +-} +- + static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms) + { + struct hibmc_drm_private *priv = crtc->dev->dev_private; +@@ -537,22 +506,24 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = { + int hibmc_de_init(struct hibmc_drm_private *priv) + { + struct drm_device *dev = priv->dev; +- struct drm_crtc *crtc; +- struct drm_plane *plane; ++ struct drm_crtc *crtc = &priv->crtc; ++ struct drm_plane *plane = &priv->primary_plane; + int ret; + +- plane = hibmc_plane_init(priv); +- if (IS_ERR(plane)) { +- DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane)); +- return PTR_ERR(plane); +- } ++ ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs, ++ channel_formats1, ++ ARRAY_SIZE(channel_formats1), ++ NULL, ++ DRM_PLANE_TYPE_PRIMARY, ++ NULL); + +- crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL); +- if (!crtc) { +- DRM_ERROR("failed to alloc memory when init crtc\n"); +- return -ENOMEM; ++ if (ret) { ++ DRM_ERROR("failed to init plane: %d\n", ret); ++ return ret; + } + ++ drm_plane_helper_add(plane, &hibmc_plane_helper_funcs); ++ + ret = drm_crtc_init_with_planes(dev, crtc, plane, + NULL, &hibmc_crtc_funcs, NULL); + if (ret) { +diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +index 609768748de65..0a74ba220cac5 100644 +--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h ++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +@@ -29,6 +29,8 @@ struct hibmc_drm_private { + + /* drm */ + struct drm_device *dev; ++ struct drm_plane primary_plane; ++ struct drm_crtc crtc; + struct drm_encoder encoder; + struct drm_connector connector; + bool mode_config_initialized; +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +index 4d29568be3f53..ac038572164d3 100644 +--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c ++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +@@ -481,7 +481,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc) + mbox_flush(mtk_crtc->cmdq_client->chan, 2000); + cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE); + cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); +- cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event); ++ cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false); + mtk_crtc_ddp_config(crtc, cmdq_handle); + cmdq_pkt_finalize(cmdq_handle); + cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +index 66a95e22b7b3d..456d729c81c39 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +@@ -1048,6 +1048,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) + { + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; ++ struct adreno_platform_config *config = pdev->dev.platform_data; ++ const struct adreno_info *info; + struct device_node *node; + struct a6xx_gpu *a6xx_gpu; + struct adreno_gpu *adreno_gpu; +@@ -1064,7 +1066,14 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) + adreno_gpu->registers = NULL; + adreno_gpu->reg_offsets = 
a6xx_register_offsets; + +- if (adreno_is_a650(adreno_gpu)) ++ /* ++ * We need to know the platform type before calling into adreno_gpu_init ++ * so that the hw_apriv flag can be correctly set. Snoop into the info ++ * and grab the revision number ++ */ ++ info = adreno_info(config->rev); ++ ++ if (info && info->revn == 650) + adreno_gpu->base.hw_apriv = true; + + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +index b12f5b4a1bea9..e9ede19193b0e 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +@@ -875,7 +875,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu, + int i; + + a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count, +- sizeof(a6xx_state->indexed_regs)); ++ sizeof(*a6xx_state->indexed_regs)); + if (!a6xx_state->indexed_regs) + return; + +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +index 862dd35b27d3d..6e8bef1a9ea25 100644 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +@@ -189,10 +189,16 @@ struct msm_gem_address_space * + adreno_iommu_create_address_space(struct msm_gpu *gpu, + struct platform_device *pdev) + { +- struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type); +- struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu); ++ struct iommu_domain *iommu; ++ struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; + ++ iommu = iommu_domain_alloc(&platform_bus_type); ++ if (!iommu) ++ return NULL; ++ ++ mmu = msm_iommu_new(&pdev->dev, iommu); ++ + aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, + 0xffffffff - SZ_16M); + +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +index c2729f71e2fa7..f9cb1e0da1a59 100644 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +@@ -881,7 +881,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_plane *plane; + struct drm_display_mode *mode; + +- int cnt = 0, rc = 0, mixer_width, i, z_pos; ++ int cnt = 0, rc = 0, mixer_width = 0, i, z_pos; + + struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2]; + int multirect_count = 0; +@@ -914,9 +914,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, + + memset(pipe_staged, 0, sizeof(pipe_staged)); + +- mixer_width = mode->hdisplay / cstate->num_mixers; ++ if (cstate->num_mixers) { ++ mixer_width = mode->hdisplay / cstate->num_mixers; + +- _dpu_crtc_setup_lm_bounds(crtc, state); ++ _dpu_crtc_setup_lm_bounds(crtc, state); ++ } + + crtc_rect.x2 = mode->hdisplay; + crtc_rect.y2 = mode->vdisplay; +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c +index 508764fccd27d..27ccfa531d31f 100644 +--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c ++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -92,8 +93,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb) + clk_disable_unprepare(mxsfb->clk_axi); + } + ++static struct drm_framebuffer * ++mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv, ++ const struct drm_mode_fb_cmd2 *mode_cmd) ++{ ++ const struct drm_format_info *info; ++ ++ info = drm_get_format_info(dev, mode_cmd); ++ if (!info) ++ return ERR_PTR(-EINVAL); ++ ++ if (mode_cmd->width * info->cpp[0] != 
mode_cmd->pitches[0]) { ++ dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n"); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ return drm_gem_fb_create(dev, file_priv, mode_cmd); ++} ++ + static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = { +- .fb_create = drm_gem_fb_create, ++ .fb_create = mxsfb_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, + }; +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c +index cb6550d37e858..eabc9e41d92b4 100644 +--- a/drivers/gpu/drm/panel/panel-simple.c ++++ b/drivers/gpu/drm/panel/panel-simple.c +@@ -2941,12 +2941,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = { + static const struct panel_desc ortustech_com43h4m85ulc = { + .modes = &ortustech_com43h4m85ulc_mode, + .num_modes = 1, +- .bpc = 8, ++ .bpc = 6, + .size = { + .width = 56, + .height = 93, + }, +- .bus_format = MEDIA_BUS_FMT_RGB888_1X24, ++ .bus_format = MEDIA_BUS_FMT_RGB666_1X18, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, + }; +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h +index c30c719a80594..3c4a85213c15f 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_device.h ++++ b/drivers/gpu/drm/panfrost/panfrost_device.h +@@ -69,6 +69,9 @@ struct panfrost_compatible { + int num_pm_domains; + /* Only required if num_pm_domains > 1. */ + const char * const *pm_domain_names; ++ ++ /* Vendor implementation quirks callback */ ++ void (*vendor_quirk)(struct panfrost_device *pfdev); + }; + + struct panfrost_device { +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c +index ada51df9a7a32..f6d5d03201fad 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c ++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c +@@ -667,7 +667,18 @@ static const struct panfrost_compatible default_data = { + .pm_domain_names = NULL, + }; + ++static const struct panfrost_compatible amlogic_data = { ++ .num_supplies = ARRAY_SIZE(default_supplies), ++ .supply_names = default_supplies, ++ .vendor_quirk = panfrost_gpu_amlogic_quirk, ++}; ++ + static const struct of_device_id dt_match[] = { ++ /* Set first to probe before the generic compatibles */ ++ { .compatible = "amlogic,meson-gxm-mali", ++ .data = &amlogic_data, }, ++ { .compatible = "amlogic,meson-g12a-mali", ++ .data = &amlogic_data, }, + { .compatible = "arm,mali-t604", .data = &default_data, }, + { .compatible = "arm,mali-t624", .data = &default_data, }, + { .compatible = "arm,mali-t628", .data = &default_data, }, +diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c +index f2c1ddc41a9bf..165403878ad9b 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c ++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c +@@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) + return 0; + } + ++void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev) ++{ ++ /* ++ * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 needs ++ * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order ++ * to operate correctly. 
++ */ ++ gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK); ++ gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16)); ++} ++ + static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) + { + u32 quirks = 0; +@@ -135,6 +146,10 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) + + if (quirks) + gpu_write(pfdev, GPU_JM_CONFIG, quirks); ++ ++ /* Here goes platform specific quirks */ ++ if (pfdev->comp->vendor_quirk) ++ pfdev->comp->vendor_quirk(pfdev); + } + + #define MAX_HW_REVS 6 +@@ -304,16 +319,18 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) + int ret; + u32 val; + ++ panfrost_gpu_init_quirks(pfdev); ++ + /* Just turn on everything for now */ + gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present); + ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO, +- val, val == pfdev->features.l2_present, 100, 1000); ++ val, val == pfdev->features.l2_present, 100, 20000); + if (ret) + dev_err(pfdev->dev, "error powering up gpu L2"); + + gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present); + ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO, +- val, val == pfdev->features.shader_present, 100, 1000); ++ val, val == pfdev->features.shader_present, 100, 20000); + if (ret) + dev_err(pfdev->dev, "error powering up gpu shader"); + +diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h +index 4112412087b27..468c51e7e46db 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h ++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h +@@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev); + void panfrost_gpu_power_on(struct panfrost_device *pfdev); + void panfrost_gpu_power_off(struct panfrost_device *pfdev); + ++void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev); ++ + #endif +diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h +index ea38ac60581c6..eddaa62ad8b0e 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_regs.h ++++ b/drivers/gpu/drm/panfrost/panfrost_regs.h +@@ -51,6 +51,10 @@ + #define GPU_STATUS 0x34 + #define GPU_STATUS_PRFCNT_ACTIVE BIT(2) + #define GPU_LATEST_FLUSH_ID 0x38 ++#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */ ++#define GPU_PWR_KEY_UNLOCK 0x2968A819 ++#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */ ++#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */ + #define GPU_FAULT_STATUS 0x3C + #define GPU_FAULT_ADDRESS_LO 0x40 + #define GPU_FAULT_ADDRESS_HI 0x44 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +index f1a81c9b184d4..fa09b3ae8b9d4 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -341,6 +342,13 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = { + .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state, + }; + ++static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res) ++{ ++ struct rcar_du_vsp *vsp = res; ++ ++ put_device(vsp->vsp); ++} ++ + int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, + unsigned int crtcs) + { +@@ -357,6 +365,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, + + vsp->vsp = &pdev->dev; + ++ ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp); ++ if (ret < 0) ++ return ret; ++ + ret = vsp1_du_init(vsp->vsp); + if (ret < 0) + return ret; 
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c +index 6d8fa6118fc1a..eaad187c41f07 100644 +--- a/drivers/gpu/drm/vc4/vc4_crtc.c ++++ b/drivers/gpu/drm/vc4/vc4_crtc.c +@@ -723,11 +723,18 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc, + + void vc4_crtc_reset(struct drm_crtc *crtc) + { ++ struct vc4_crtc_state *vc4_crtc_state; ++ + if (crtc->state) + vc4_crtc_destroy_state(crtc, crtc->state); +- crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); +- if (crtc->state) +- __drm_atomic_helper_crtc_reset(crtc, crtc->state); ++ ++ vc4_crtc_state = kzalloc(sizeof(*vc4_crtc_state), GFP_KERNEL); ++ if (!vc4_crtc_state) { ++ crtc->state = NULL; ++ return; ++ } ++ ++ __drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base); + } + + static const struct drm_crtc_funcs vc4_crtc_funcs = { +diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c +index a775feda1cc73..313339bbff901 100644 +--- a/drivers/gpu/drm/vgem/vgem_drv.c ++++ b/drivers/gpu/drm/vgem/vgem_drv.c +@@ -471,8 +471,8 @@ static int __init vgem_init(void) + + out_put: + drm_dev_put(&vgem_device->drm); ++ platform_device_unregister(vgem_device->platform); + return ret; +- + out_unregister: + platform_device_unregister(vgem_device->platform); + out_free: +diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c +index 4d944a0dff3e9..fdd7671a7b126 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_kms.c ++++ b/drivers/gpu/drm/virtio/virtgpu_kms.c +@@ -80,8 +80,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev, + vgdev->capsets[i].id > 0, 5 * HZ); + if (ret == 0) { + DRM_ERROR("timed out waiting for cap set %d\n", i); ++ spin_lock(&vgdev->display_info_lock); + kfree(vgdev->capsets); + vgdev->capsets = NULL; ++ spin_unlock(&vgdev->display_info_lock); + return; + } + DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n", +diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c +index 53af60d484a44..9d2abdbd865a7 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_vq.c ++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c +@@ -684,9 +684,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev, + int i = le32_to_cpu(cmd->capset_index); + + spin_lock(&vgdev->display_info_lock); +- vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); +- vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); +- vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); ++ if (vgdev->capsets) { ++ vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); ++ vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); ++ vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); ++ } else { ++ DRM_ERROR("invalid capset memory."); ++ } + spin_unlock(&vgdev->display_info_lock); + wake_up(&vgdev->resp_wq); + } +diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c +index 4af2f19480f4f..b8b060354667e 100644 +--- a/drivers/gpu/drm/vkms/vkms_composer.c ++++ b/drivers/gpu/drm/vkms/vkms_composer.c +@@ -33,7 +33,7 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer) + + (i * composer->pitch) + + (j * composer->cpp); + /* XRGB format ignores Alpha channel */ +- memset(vaddr_out + src_offset + 24, 0, 8); ++ bitmap_clear(vaddr_out + src_offset, 24, 8); + crc = crc32_le(crc, vaddr_out + src_offset, + sizeof(u32)); + } +diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c +index 
57a8a397d5e84..83dd5567de8b5 100644 +--- a/drivers/gpu/drm/vkms/vkms_drv.c ++++ b/drivers/gpu/drm/vkms/vkms_drv.c +@@ -190,8 +190,8 @@ static int __init vkms_init(void) + + out_put: + drm_dev_put(&vkms_device->drm); ++ platform_device_unregister(vkms_device->platform); + return ret; +- + out_unregister: + platform_device_unregister(vkms_device->platform); + out_free: +diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +index 26328c76305be..8e69303aad3f7 100644 +--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c ++++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +@@ -111,7 +111,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) + /* Initialize mode config, vblank and the KMS poll helper. */ + ret = drmm_mode_config_init(drm); + if (ret < 0) +- goto err_dev_put; ++ return ret; + + drm->mode_config.funcs = &zynqmp_dpsub_mode_config_funcs; + drm->mode_config.min_width = 0; +@@ -121,7 +121,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) + + ret = drm_vblank_init(drm, 1); + if (ret) +- goto err_dev_put; ++ return ret; + + drm->irq_enabled = 1; + +@@ -154,8 +154,6 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) + + err_poll_fini: + drm_kms_helper_poll_fini(drm); +-err_dev_put: +- drm_dev_put(drm); + return ret; + } + +@@ -208,27 +206,16 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev) + int ret; + + /* Allocate private data. */ +- dpsub = kzalloc(sizeof(*dpsub), GFP_KERNEL); +- if (!dpsub) +- return -ENOMEM; ++ dpsub = devm_drm_dev_alloc(&pdev->dev, &zynqmp_dpsub_drm_driver, ++ struct zynqmp_dpsub, drm); ++ if (IS_ERR(dpsub)) ++ return PTR_ERR(dpsub); + + dpsub->dev = &pdev->dev; + platform_set_drvdata(pdev, dpsub); + + dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT)); + +- /* +- * Initialize the DRM device early, as the DRM core mandates usage of +- * the managed memory helpers tied to the DRM device. +- */ +- ret = drm_dev_init(&dpsub->drm, &zynqmp_dpsub_drm_driver, &pdev->dev); +- if (ret < 0) { +- kfree(dpsub); +- return ret; +- } +- +- drmm_add_final_kfree(&dpsub->drm, dpsub); +- + /* Try the reserved memory. Proceed if there's none. 
*/ + of_reserved_mem_device_init(&pdev->dev); + +@@ -286,8 +273,6 @@ static int zynqmp_dpsub_remove(struct platform_device *pdev) + clk_disable_unprepare(dpsub->apb_clk); + of_reserved_mem_device_release(&pdev->dev); + +- drm_dev_put(drm); +- + return 0; + } + +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 74fc1df6e3c27..79495e218b7fc 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -727,6 +727,7 @@ + #define USB_DEVICE_ID_LENOVO_TP10UBKBD 0x6062 + #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 + #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 ++#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019 + #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e +@@ -1123,6 +1124,7 @@ + #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819 + #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 + #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 ++#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5 + #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 + + #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index 88e19996427e6..9770db624bfaf 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -797,7 +797,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + case 0x3b: /* Battery Strength */ + hidinput_setup_battery(device, HID_INPUT_REPORT, field); + usage->type = EV_PWR; +- goto ignore; ++ return; + + case 0x3c: /* Invert */ + map_key_clear(BTN_TOOL_RUBBER); +@@ -1059,7 +1059,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + case HID_DC_BATTERYSTRENGTH: + hidinput_setup_battery(device, HID_INPUT_REPORT, field); + usage->type = EV_PWR; +- goto ignore; ++ return; + } + goto unknown; + +diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c +index 6c55682c59740..044a93f3c1178 100644 +--- a/drivers/hid/hid-ite.c ++++ b/drivers/hid/hid-ite.c +@@ -44,6 +44,10 @@ static const struct hid_device_id ite_devices[] = { + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, ++ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. 
*/ ++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, ++ USB_VENDOR_ID_SYNAPTICS, ++ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) }, + { } + }; + MODULE_DEVICE_TABLE(hid, ite_devices); +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index e3152155c4b85..99f041afd5c0c 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -1973,6 +1973,12 @@ static const struct hid_device_id mt_devices[] = { + HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC, + USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) }, + ++ /* Lenovo X1 TAB Gen 3 */ ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, ++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, ++ USB_VENDOR_ID_LENOVO, ++ USB_DEVICE_ID_LENOVO_X1_TAB3) }, ++ + /* MosArt panels */ + { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, + MT_USB_DEVICE(USB_VENDOR_ID_ASUS, +diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c +index 2ff4c8e366ff2..1ca64481145ee 100644 +--- a/drivers/hid/hid-roccat-kone.c ++++ b/drivers/hid/hid-roccat-kone.c +@@ -294,31 +294,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj, + struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev)); + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); + int retval = 0, difference, old_profile; ++ struct kone_settings *settings = (struct kone_settings *)buf; + + /* I need to get my data in one piece */ + if (off != 0 || count != sizeof(struct kone_settings)) + return -EINVAL; + + mutex_lock(&kone->kone_lock); +- difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings)); ++ difference = memcmp(settings, &kone->settings, ++ sizeof(struct kone_settings)); + if (difference) { +- retval = kone_set_settings(usb_dev, +- (struct kone_settings const *)buf); +- if (retval) { +- mutex_unlock(&kone->kone_lock); +- return retval; ++ if (settings->startup_profile < 1 || ++ settings->startup_profile > 5) { ++ retval = -EINVAL; ++ goto unlock; + } + ++ retval = kone_set_settings(usb_dev, settings); ++ if (retval) ++ goto unlock; ++ + old_profile = kone->settings.startup_profile; +- memcpy(&kone->settings, buf, sizeof(struct kone_settings)); ++ memcpy(&kone->settings, settings, sizeof(struct kone_settings)); + + kone_profile_activated(kone, kone->settings.startup_profile); + + if (kone->settings.startup_profile != old_profile) + kone_profile_report(kone, kone->settings.startup_profile); + } ++unlock: + mutex_unlock(&kone->kone_lock); + ++ if (retval) ++ return retval; ++ + return sizeof(struct kone_settings); + } + static BIN_ATTR(settings, 0660, kone_sysfs_read_settings, +diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c +index 94698cae04971..3e1d56585b91a 100644 +--- a/drivers/hwmon/bt1-pvt.c ++++ b/drivers/hwmon/bt1-pvt.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -476,6 +477,7 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + long *val) + { + struct pvt_cache *cache = &pvt->cache[type]; ++ unsigned long timeout; + u32 data; + int ret; + +@@ -499,7 +501,14 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0); + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN); + +- wait_for_completion(&cache->conversion); ++ /* ++ * Wait with timeout since in case if the sensor is suddenly powered ++ * down the request won't be completed and the caller will hang up on ++ * this procedure until the power is back up again. 
Multiply the ++ * timeout by the factor of two to prevent a false timeout. ++ */ ++ timeout = 2 * usecs_to_jiffies(ktime_to_us(pvt->timeout)); ++ ret = wait_for_completion_timeout(&cache->conversion, timeout); + + pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0); + pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, +@@ -509,6 +518,9 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type, + + mutex_unlock(&pvt->iface_mtx); + ++ if (!ret) ++ return -ETIMEDOUT; ++ + if (type == PVT_TEMP) + *val = pvt_calc_poly(&poly_N_to_temp, data); + else +@@ -654,44 +666,16 @@ static int pvt_write_trim(struct pvt_hwmon *pvt, long val) + + static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val) + { +- unsigned long rate; +- ktime_t kt; +- u32 data; +- +- rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk); +- if (!rate) +- return -ENODEV; +- +- /* +- * Don't bother with mutex here, since we just read data from MMIO. +- * We also have to scale the ticks timeout up to compensate the +- * ms-ns-data translations. +- */ +- data = readl(pvt->regs + PVT_TTIMEOUT) + 1; ++ int ret; + +- /* +- * Calculate ref-clock based delay (Ttotal) between two consecutive +- * data samples of the same sensor. So we first must calculate the +- * delay introduced by the internal ref-clock timer (Tref * Fclk). +- * Then add the constant timeout cuased by each conversion latency +- * (Tmin). The basic formulae for each conversion is following: +- * Ttotal = Tref * Fclk + Tmin +- * Note if alarms are enabled the sensors are polled one after +- * another, so in order to have the delay being applicable for each +- * sensor the requested value must be equally redistirbuted. +- */ +-#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS) +- kt = ktime_set(PVT_SENSORS_NUM * (u64)data, 0); +- kt = ktime_divns(kt, rate); +- kt = ktime_add_ns(kt, PVT_SENSORS_NUM * PVT_TOUT_MIN); +-#else +- kt = ktime_set(data, 0); +- kt = ktime_divns(kt, rate); +- kt = ktime_add_ns(kt, PVT_TOUT_MIN); +-#endif ++ ret = mutex_lock_interruptible(&pvt->iface_mtx); ++ if (ret) ++ return ret; + + /* Return the result in msec as hwmon sysfs interface requires. */ +- *val = ktime_to_ms(kt); ++ *val = ktime_to_ms(pvt->timeout); ++ ++ mutex_unlock(&pvt->iface_mtx); + + return 0; + } +@@ -699,7 +683,7 @@ static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val) + static int pvt_write_timeout(struct pvt_hwmon *pvt, long val) + { + unsigned long rate; +- ktime_t kt; ++ ktime_t kt, cache; + u32 data; + int ret; + +@@ -712,7 +696,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val) + * between all available sensors to have the requested delay + * applicable to each individual sensor. + */ +- kt = ms_to_ktime(val); ++ cache = kt = ms_to_ktime(val); + #if defined(CONFIG_SENSORS_BT1_PVT_ALARMS) + kt = ktime_divns(kt, PVT_SENSORS_NUM); + #endif +@@ -741,6 +725,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val) + return ret; + + pvt_set_tout(pvt, data); ++ pvt->timeout = cache; + + mutex_unlock(&pvt->iface_mtx); + +@@ -982,10 +967,52 @@ static int pvt_request_clks(struct pvt_hwmon *pvt) + return 0; + } + +-static void pvt_init_iface(struct pvt_hwmon *pvt) ++static int pvt_check_pwr(struct pvt_hwmon *pvt) + { ++ unsigned long tout; ++ int ret = 0; ++ u32 data; ++ ++ /* ++ * Test out the sensor conversion functionality. If it is not done on ++ * time then the domain must have been unpowered and we won't be able ++ * to use the device later in this driver. 
++ * Note If the power source is lost during the normal driver work the ++ * data read procedure will either return -ETIMEDOUT (for the ++ * alarm-less driver configuration) or just stop the repeated ++ * conversion. In the later case alas we won't be able to detect the ++ * problem. ++ */ ++ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_ALL, PVT_INTR_ALL); ++ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN); ++ pvt_set_tout(pvt, 0); ++ readl(pvt->regs + PVT_DATA); ++ ++ tout = PVT_TOUT_MIN / NSEC_PER_USEC; ++ usleep_range(tout, 2 * tout); ++ ++ data = readl(pvt->regs + PVT_DATA); ++ if (!(data & PVT_DATA_VALID)) { ++ ret = -ENODEV; ++ dev_err(pvt->dev, "Sensor is powered down\n"); ++ } ++ ++ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0); ++ ++ return ret; ++} ++ ++static int pvt_init_iface(struct pvt_hwmon *pvt) ++{ ++ unsigned long rate; + u32 trim, temp; + ++ rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk); ++ if (!rate) { ++ dev_err(pvt->dev, "Invalid reference clock rate\n"); ++ return -ENODEV; ++ } ++ + /* + * Make sure all interrupts and controller are disabled so not to + * accidentally have ISR executed before the driver data is fully +@@ -1000,12 +1027,37 @@ static void pvt_init_iface(struct pvt_hwmon *pvt) + pvt_set_mode(pvt, pvt_info[pvt->sensor].mode); + pvt_set_tout(pvt, PVT_TOUT_DEF); + ++ /* ++ * Preserve the current ref-clock based delay (Ttotal) between the ++ * sensors data samples in the driver data so not to recalculate it ++ * each time on the data requests and timeout reads. It consists of the ++ * delay introduced by the internal ref-clock timer (N / Fclk) and the ++ * constant timeout caused by each conversion latency (Tmin): ++ * Ttotal = N / Fclk + Tmin ++ * If alarms are enabled the sensors are polled one after another and ++ * in order to get the next measurement of a particular sensor the ++ * caller will have to wait for at most until all the others are ++ * polled. In that case the formulae will look a bit different: ++ * Ttotal = 5 * (N / Fclk + Tmin) ++ */ ++#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS) ++ pvt->timeout = ktime_set(PVT_SENSORS_NUM * PVT_TOUT_DEF, 0); ++ pvt->timeout = ktime_divns(pvt->timeout, rate); ++ pvt->timeout = ktime_add_ns(pvt->timeout, PVT_SENSORS_NUM * PVT_TOUT_MIN); ++#else ++ pvt->timeout = ktime_set(PVT_TOUT_DEF, 0); ++ pvt->timeout = ktime_divns(pvt->timeout, rate); ++ pvt->timeout = ktime_add_ns(pvt->timeout, PVT_TOUT_MIN); ++#endif ++ + trim = PVT_TRIM_DEF; + if (!of_property_read_u32(pvt->dev->of_node, + "baikal,pvt-temp-offset-millicelsius", &temp)) + trim = pvt_calc_trim(temp); + + pvt_set_trim(pvt, trim); ++ ++ return 0; + } + + static int pvt_request_irq(struct pvt_hwmon *pvt) +@@ -1109,7 +1161,13 @@ static int pvt_probe(struct platform_device *pdev) + if (ret) + return ret; + +- pvt_init_iface(pvt); ++ ret = pvt_check_pwr(pvt); ++ if (ret) ++ return ret; ++ ++ ret = pvt_init_iface(pvt); ++ if (ret) ++ return ret; + + ret = pvt_request_irq(pvt); + if (ret) +diff --git a/drivers/hwmon/bt1-pvt.h b/drivers/hwmon/bt1-pvt.h +index 5eac73e948854..93b8dd5e7c944 100644 +--- a/drivers/hwmon/bt1-pvt.h ++++ b/drivers/hwmon/bt1-pvt.h +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -201,6 +202,7 @@ struct pvt_cache { + * if alarms are disabled). + * @sensor: current PVT sensor the data conversion is being performed for. + * @cache: data cache descriptor. ++ * @timeout: conversion timeout cache. 
+ */ + struct pvt_hwmon { + struct device *dev; +@@ -214,6 +216,7 @@ struct pvt_hwmon { + struct mutex iface_mtx; + enum pvt_sensor_type sensor; + struct pvt_cache cache[PVT_SENSORS_NUM]; ++ ktime_t timeout; + }; + + /* +diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c +index 18b4e071067f7..de04dff28945b 100644 +--- a/drivers/hwmon/pmbus/max34440.c ++++ b/drivers/hwmon/pmbus/max34440.c +@@ -388,7 +388,6 @@ static struct pmbus_driver_info max34440_info[] = { + .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, +- .read_byte_data = max34440_read_byte_data, + .read_word_data = max34440_read_word_data, + .write_word_data = max34440_write_word_data, + }, +@@ -419,7 +418,6 @@ static struct pmbus_driver_info max34440_info[] = { + .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, +- .read_byte_data = max34440_read_byte_data, + .read_word_data = max34440_read_word_data, + .write_word_data = max34440_write_word_data, + }, +@@ -455,7 +453,6 @@ static struct pmbus_driver_info max34440_info[] = { + .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, +- .read_byte_data = max34440_read_byte_data, + .read_word_data = max34440_read_word_data, + .write_word_data = max34440_write_word_data, + }, +diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c +index 5a5120121e507..3964ceab2817c 100644 +--- a/drivers/hwmon/w83627ehf.c ++++ b/drivers/hwmon/w83627ehf.c +@@ -1951,8 +1951,12 @@ static int w83627ehf_probe(struct platform_device *pdev) + data, + &w83627ehf_chip_info, + w83627ehf_groups); ++ if (IS_ERR(hwmon_dev)) { ++ err = PTR_ERR(hwmon_dev); ++ goto exit_release; ++ } + +- return PTR_ERR_OR_ZERO(hwmon_dev); ++ return 0; + + exit_release: + release_region(res->start, IOREGION_LENGTH); +diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c +index 3ccc703dc9409..167fbc2e7033f 100644 +--- a/drivers/hwtracing/coresight/coresight-cti.c ++++ b/drivers/hwtracing/coresight/coresight-cti.c +@@ -86,22 +86,16 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata) + CS_LOCK(drvdata->base); + } + +-static void cti_enable_hw_smp_call(void *info) +-{ +- struct cti_drvdata *drvdata = info; +- +- cti_write_all_hw_regs(drvdata); +-} +- + /* write regs to hardware and enable */ + static int cti_enable_hw(struct cti_drvdata *drvdata) + { + struct cti_config *config = &drvdata->config; + struct device *dev = &drvdata->csdev->dev; ++ unsigned long flags; + int rc = 0; + + pm_runtime_get_sync(dev->parent); +- spin_lock(&drvdata->spinlock); ++ spin_lock_irqsave(&drvdata->spinlock, flags); + + /* no need to do anything if enabled or unpowered*/ + if (config->hw_enabled || !config->hw_powered) +@@ -112,19 +106,11 @@ static int cti_enable_hw(struct cti_drvdata *drvdata) + if (rc) + goto cti_err_not_enabled; + +- if (drvdata->ctidev.cpu >= 0) { +- rc = smp_call_function_single(drvdata->ctidev.cpu, +- cti_enable_hw_smp_call, +- drvdata, 1); +- if (rc) +- goto cti_err_not_enabled; +- } else { +- cti_write_all_hw_regs(drvdata); +- } ++ cti_write_all_hw_regs(drvdata); + + config->hw_enabled = true; + atomic_inc(&drvdata->config.enable_req_count); +- spin_unlock(&drvdata->spinlock); ++ 
spin_unlock_irqrestore(&drvdata->spinlock, flags); + return rc; + + cti_state_unchanged: +@@ -132,7 +118,7 @@ cti_state_unchanged: + + /* cannot enable due to error */ + cti_err_not_enabled: +- spin_unlock(&drvdata->spinlock); ++ spin_unlock_irqrestore(&drvdata->spinlock, flags); + pm_runtime_put(dev->parent); + return rc; + } +@@ -141,9 +127,7 @@ cti_err_not_enabled: + static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata) + { + struct cti_config *config = &drvdata->config; +- struct device *dev = &drvdata->csdev->dev; + +- pm_runtime_get_sync(dev->parent); + spin_lock(&drvdata->spinlock); + config->hw_powered = true; + +@@ -163,7 +147,6 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata) + /* did not re-enable due to no claim / no request */ + cti_hp_not_enabled: + spin_unlock(&drvdata->spinlock); +- pm_runtime_put(dev->parent); + } + + /* disable hardware */ +@@ -511,12 +494,15 @@ static bool cti_add_sysfs_link(struct cti_drvdata *drvdata, + return !link_err; + } + +-static void cti_remove_sysfs_link(struct cti_trig_con *tc) ++static void cti_remove_sysfs_link(struct cti_drvdata *drvdata, ++ struct cti_trig_con *tc) + { + struct coresight_sysfs_link link_info; + ++ link_info.orig = drvdata->csdev; + link_info.orig_name = tc->con_dev_name; + link_info.target = tc->con_dev; ++ link_info.target_name = dev_name(&drvdata->csdev->dev); + coresight_remove_sysfs_link(&link_info); + } + +@@ -606,8 +592,8 @@ void cti_remove_assoc_from_csdev(struct coresight_device *csdev) + ctidrv = csdev_to_cti_drvdata(csdev->ect_dev); + ctidev = &ctidrv->ctidev; + list_for_each_entry(tc, &ctidev->trig_cons, node) { +- if (tc->con_dev == csdev->ect_dev) { +- cti_remove_sysfs_link(tc); ++ if (tc->con_dev == csdev) { ++ cti_remove_sysfs_link(ctidrv, tc); + tc->con_dev = NULL; + break; + } +@@ -651,7 +637,7 @@ static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata) + if (tc->con_dev) { + coresight_set_assoc_ectdev_mutex(tc->con_dev, + NULL); +- cti_remove_sysfs_link(tc); ++ cti_remove_sysfs_link(drvdata, tc); + tc->con_dev = NULL; + } + } +@@ -742,7 +728,8 @@ static int cti_dying_cpu(unsigned int cpu) + + spin_lock(&drvdata->spinlock); + drvdata->config.hw_powered = false; +- coresight_disclaim_device(drvdata->base); ++ if (drvdata->config.hw_enabled) ++ coresight_disclaim_device(drvdata->base); + spin_unlock(&drvdata->spinlock); + return 0; + } +diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c +index 1a3169e69bb19..be591b557df94 100644 +--- a/drivers/hwtracing/coresight/coresight-etm-perf.c ++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c +@@ -126,10 +126,10 @@ static void free_sink_buffer(struct etm_event_data *event_data) + cpumask_t *mask = &event_data->mask; + struct coresight_device *sink; + +- if (WARN_ON(cpumask_empty(mask))) ++ if (!event_data->snk_config) + return; + +- if (!event_data->snk_config) ++ if (WARN_ON(cpumask_empty(mask))) + return; + + cpu = cpumask_first(mask); +@@ -321,6 +321,16 @@ static void etm_event_start(struct perf_event *event, int flags) + if (!event_data) + goto fail; + ++ /* ++ * Check if this ETM is allowed to trace, as decided ++ * at etm_setup_aux(). This could be due to an unreachable ++ * sink from this ETM. We can't do much in this case if ++ * the sink was specified or hinted to the driver. For ++ * now, simply don't record anything on this ETM. 
++ */ ++ if (!cpumask_test_cpu(cpu, &event_data->mask)) ++ goto fail_end_stop; ++ + path = etm_event_cpu_path(event_data, cpu); + /* We need a sink, no need to continue without one */ + sink = coresight_get_sink(path); +diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +index b673e738bc9a8..a588cd6de01c7 100644 +--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c ++++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +@@ -206,7 +206,7 @@ static ssize_t reset_store(struct device *dev, + * each trace run. + */ + config->vinst_ctrl = BIT(0); +- if (drvdata->nr_addr_cmp == true) { ++ if (drvdata->nr_addr_cmp > 0) { + config->mode |= ETM_MODE_VIEWINST_STARTSTOP; + /* SSSTATUS, bit[9] */ + config->vinst_ctrl |= BIT(9); +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c +index 96425e818fc20..fd678792b755d 100644 +--- a/drivers/hwtracing/coresight/coresight-etm4x.c ++++ b/drivers/hwtracing/coresight/coresight-etm4x.c +@@ -48,12 +48,11 @@ module_param(pm_save_enable, int, 0444); + MODULE_PARM_DESC(pm_save_enable, + "Save/restore state on power down: 1 = never, 2 = self-hosted"); + +-/* The number of ETMv4 currently registered */ +-static int etm4_count; + static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; + static void etm4_set_default_config(struct etmv4_config *config); + static int etm4_set_event_filters(struct etmv4_drvdata *drvdata, + struct perf_event *event); ++static u64 etm4_get_access_type(struct etmv4_config *config); + + static enum cpuhp_state hp_online; + +@@ -785,6 +784,22 @@ static void etm4_init_arch_data(void *info) + CS_LOCK(drvdata->base); + } + ++/* Set ELx trace filter access in the TRCVICTLR register */ ++static void etm4_set_victlr_access(struct etmv4_config *config) ++{ ++ u64 access_type; ++ ++ config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK | ETM_EXLEVEL_NS_VICTLR_MASK); ++ ++ /* ++ * TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering ++ * bits in vinst_ctrl, same bit pattern as TRCACATRn values returned by ++ * etm4_get_access_type() but with a relative shift in this register. 
++ */ ++ access_type = etm4_get_access_type(config) << ETM_EXLEVEL_LSHIFT_TRCVICTLR; ++ config->vinst_ctrl |= (u32)access_type; ++} ++ + static void etm4_set_default_config(struct etmv4_config *config) + { + /* disable all events tracing */ +@@ -802,6 +817,9 @@ static void etm4_set_default_config(struct etmv4_config *config) + + /* TRCVICTLR::EVENT = 0x01, select the always on logic */ + config->vinst_ctrl = BIT(0); ++ ++ /* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */ ++ etm4_set_victlr_access(config); + } + + static u64 etm4_get_ns_access_type(struct etmv4_config *config) +@@ -1066,7 +1084,7 @@ out: + + void etm4_config_trace_mode(struct etmv4_config *config) + { +- u32 addr_acc, mode; ++ u32 mode; + + mode = config->mode; + mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER); +@@ -1078,15 +1096,7 @@ void etm4_config_trace_mode(struct etmv4_config *config) + if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER)) + return; + +- addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP]; +- /* clear default config */ +- addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS | +- ETM_EXLEVEL_NS_HYP); +- +- addr_acc |= etm4_get_ns_access_type(config); +- +- config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc; +- config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; ++ etm4_set_victlr_access(config); + } + + static int etm4_online_cpu(unsigned int cpu) +@@ -1183,7 +1193,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata) + state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR); + state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR); + +- for (i = 0; i < drvdata->nrseqstate; i++) ++ for (i = 0; i < drvdata->nrseqstate - 1; i++) + state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i)); + + state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR); +@@ -1227,7 +1237,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata) + state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1); + + state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0); +- state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR1); ++ state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1); + + state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR); + +@@ -1288,7 +1298,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) + writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR); + writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR); + +- for (i = 0; i < drvdata->nrseqstate; i++) ++ for (i = 0; i < drvdata->nrseqstate - 1; i++) + writel_relaxed(state->trcseqevr[i], + drvdata->base + TRCSEQEVRn(i)); + +@@ -1337,7 +1347,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) + writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1); + + writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0); +- writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR1); ++ writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1); + + writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET); + +@@ -1397,28 +1407,25 @@ static struct notifier_block etm4_cpu_pm_nb = { + .notifier_call = etm4_cpu_pm_notify, + }; + +-/* Setup PM. Called with cpus locked. Deals with error conditions and counts */ +-static int etm4_pm_setup_cpuslocked(void) ++/* Setup PM. 
Deals with error conditions and counts */ ++static int __init etm4_pm_setup(void) + { + int ret; + +- if (etm4_count++) +- return 0; +- + ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb); + if (ret) +- goto reduce_count; ++ return ret; + +- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING, +- "arm/coresight4:starting", +- etm4_starting_cpu, etm4_dying_cpu); ++ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING, ++ "arm/coresight4:starting", ++ etm4_starting_cpu, etm4_dying_cpu); + + if (ret) + goto unregister_notifier; + +- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, +- "arm/coresight4:online", +- etm4_online_cpu, NULL); ++ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, ++ "arm/coresight4:online", ++ etm4_online_cpu, NULL); + + /* HP dyn state ID returned in ret on success */ + if (ret > 0) { +@@ -1427,21 +1434,15 @@ static int etm4_pm_setup_cpuslocked(void) + } + + /* failed dyn state - remove others */ +- cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING); ++ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); + + unregister_notifier: + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); +- +-reduce_count: +- --etm4_count; + return ret; + } + +-static void etm4_pm_clear(void) ++static void __init etm4_pm_clear(void) + { +- if (--etm4_count != 0) +- return; +- + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); + if (hp_online) { +@@ -1497,22 +1498,12 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) + if (!desc.name) + return -ENOMEM; + +- cpus_read_lock(); + etmdrvdata[drvdata->cpu] = drvdata; + + if (smp_call_function_single(drvdata->cpu, + etm4_init_arch_data, drvdata, 1)) + dev_err(dev, "ETM arch init failed\n"); + +- ret = etm4_pm_setup_cpuslocked(); +- cpus_read_unlock(); +- +- /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */ +- if (ret) { +- etmdrvdata[drvdata->cpu] = NULL; +- return ret; +- } +- + if (etm4_arch_supported(drvdata->arch) == false) { + ret = -EINVAL; + goto err_arch_supported; +@@ -1559,7 +1550,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) + + err_arch_supported: + etmdrvdata[drvdata->cpu] = NULL; +- etm4_pm_clear(); + return ret; + } + +@@ -1597,4 +1587,23 @@ static struct amba_driver etm4x_driver = { + .probe = etm4_probe, + .id_table = etm4_ids, + }; +-builtin_amba_driver(etm4x_driver); ++ ++static int __init etm4x_init(void) ++{ ++ int ret; ++ ++ ret = etm4_pm_setup(); ++ ++ /* etm4_pm_setup() does its own cleanup - exit on error */ ++ if (ret) ++ return ret; ++ ++ ret = amba_driver_register(&etm4x_driver); ++ if (ret) { ++ pr_err("Error registering etm4x driver\n"); ++ etm4_pm_clear(); ++ } ++ ++ return ret; ++} ++device_initcall(etm4x_init); +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h +index b8283e1d6d88c..5259f96fd28a0 100644 +--- a/drivers/hwtracing/coresight/coresight-etm4x.h ++++ b/drivers/hwtracing/coresight/coresight-etm4x.h +@@ -192,6 +192,9 @@ + #define ETM_EXLEVEL_NS_HYP BIT(14) + #define ETM_EXLEVEL_NS_NA BIT(15) + ++/* access level control in TRCVICTLR - same bits as TRCACATRn but shifted */ ++#define ETM_EXLEVEL_LSHIFT_TRCVICTLR 8 ++ + /* secure / non secure masks - TRCVICTLR, IDR3 */ + #define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16) + /* NS MON (EL3) mode never implemented */ +diff --git a/drivers/hwtracing/coresight/coresight-platform.c 
b/drivers/hwtracing/coresight/coresight-platform.c +index bfd44231d7ad5..227e234a24701 100644 +--- a/drivers/hwtracing/coresight/coresight-platform.c ++++ b/drivers/hwtracing/coresight/coresight-platform.c +@@ -711,11 +711,11 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev, + return dir; + + if (dir == ACPI_CORESIGHT_LINK_MASTER) { +- if (ptr->outport > pdata->nr_outport) +- pdata->nr_outport = ptr->outport; ++ if (ptr->outport >= pdata->nr_outport) ++ pdata->nr_outport = ptr->outport + 1; + ptr++; + } else { +- WARN_ON(pdata->nr_inport == ptr->child_port); ++ WARN_ON(pdata->nr_inport == ptr->child_port + 1); + /* + * We do not track input port connections for a device. + * However we need the highest port number described, +@@ -723,8 +723,8 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev, + * record for an output connection. Hence, do not move + * the ptr for input connections + */ +- if (ptr->child_port > pdata->nr_inport) +- pdata->nr_inport = ptr->child_port; ++ if (ptr->child_port >= pdata->nr_inport) ++ pdata->nr_inport = ptr->child_port + 1; + } + } + +diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c +index e9c90f2de34ac..cdcb1917216fd 100644 +--- a/drivers/hwtracing/coresight/coresight.c ++++ b/drivers/hwtracing/coresight/coresight.c +@@ -1188,7 +1188,6 @@ static void coresight_device_release(struct device *dev) + { + struct coresight_device *csdev = to_coresight_device(dev); + +- cti_remove_assoc_from_csdev(csdev); + fwnode_handle_put(csdev->dev.fwnode); + kfree(csdev->refcnt); + kfree(csdev); +@@ -1522,6 +1521,7 @@ void coresight_unregister(struct coresight_device *csdev) + { + etm_perf_del_symlink_sink(csdev); + /* Remove references of that device in the topology */ ++ cti_remove_assoc_from_csdev(csdev); + coresight_remove_conns(csdev); + coresight_clear_default_sink(csdev); + coresight_release_platform_data(csdev, csdev->pdata); +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig +index 293e7a0760e77..7ccbfbcb02e9a 100644 +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -1181,6 +1181,7 @@ config I2C_RCAR + tristate "Renesas R-Car I2C Controller" + depends on ARCH_RENESAS || COMPILE_TEST + select I2C_SLAVE ++ select RESET_CONTROLLER if ARCH_RCAR_GEN3 + help + If you say yes to this option, support will be included for the + R-Car I2C controller. 
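[Editor's illustration; not part of the patch.] The coresight-platform hunk above fixes an off-by-one: the ACPI graph entries carry zero-based port indices, so the number of port slots a device needs is the highest index seen plus one, not the highest index itself. A minimal standalone sketch of the corrected counting pattern — the helper name and the demo data are hypothetical, only the max-index-plus-one logic is taken from the patch:

    #include <stdio.h>

    /*
     * Hypothetical stand-in for the pdata->nr_outport/nr_inport bookkeeping:
     * given zero-based port indices from firmware, track how many port slots
     * must be allocated. The pre-fix code kept the maximum index instead,
     * under-counting by one whenever the highest-numbered port was in use.
     */
    static unsigned int update_port_count(unsigned int nr_ports, unsigned int port)
    {
    	/* port is a zero-based index, so index N requires N + 1 slots */
    	if (port >= nr_ports)
    		nr_ports = port + 1;
    	return nr_ports;
    }

    int main(void)
    {
    	unsigned int ports[] = { 0, 2, 1 };
    	unsigned int nr = 0;
    	size_t i;

    	for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
    		nr = update_port_count(nr, ports[i]);

    	printf("need %u port slots\n", nr);	/* prints 3, not 2 */
    	return 0;
    }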
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c +index e627d7b2790f7..37c510d9347a7 100644 +--- a/drivers/i2c/i2c-core-acpi.c ++++ b/drivers/i2c/i2c-core-acpi.c +@@ -264,6 +264,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level, + void i2c_acpi_register_devices(struct i2c_adapter *adap) + { + acpi_status status; ++ acpi_handle handle; + + if (!has_acpi_companion(&adap->dev)) + return; +@@ -274,6 +275,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap) + adap, NULL); + if (ACPI_FAILURE(status)) + dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); ++ ++ if (!adap->dev.parent) ++ return; ++ ++ handle = ACPI_HANDLE(adap->dev.parent); ++ if (!handle) ++ return; ++ ++ acpi_walk_dep_device_list(handle); + } + + static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = { +@@ -719,7 +729,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter) + return -ENOMEM; + } + +- acpi_walk_dep_device_list(handle); + return 0; + } + +diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c +index 97f2e29265da7..cc7564446ccd2 100644 +--- a/drivers/i3c/master.c ++++ b/drivers/i3c/master.c +@@ -1782,6 +1782,21 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master) + i3c_master_detach_free_devs(master); + } + ++static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev) ++{ ++ struct i3c_master_controller *master = i3cdev->common.master; ++ struct i3c_dev_boardinfo *i3cboardinfo; ++ ++ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) { ++ if (i3cdev->info.pid != i3cboardinfo->pid) ++ continue; ++ ++ i3cdev->boardinfo = i3cboardinfo; ++ i3cdev->info.static_addr = i3cboardinfo->static_addr; ++ return; ++ } ++} ++ + static struct i3c_dev_desc * + i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev) + { +@@ -1837,10 +1852,10 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, + if (ret) + goto err_detach_dev; + ++ i3c_master_attach_boardinfo(newdev); ++ + olddev = i3c_master_search_i3c_dev_duplicate(newdev); + if (olddev) { +- newdev->boardinfo = olddev->boardinfo; +- newdev->info.static_addr = olddev->info.static_addr; + newdev->dev = olddev->dev; + if (newdev->dev) + newdev->dev->desc = newdev; +diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c +index 3fee8bd7fe20b..3f2226928fe05 100644 +--- a/drivers/i3c/master/i3c-master-cdns.c ++++ b/drivers/i3c/master/i3c-master-cdns.c +@@ -1635,8 +1635,10 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) + master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots, + sizeof(*master->ibi.slots), + GFP_KERNEL); +- if (!master->ibi.slots) ++ if (!master->ibi.slots) { ++ ret = -ENOMEM; + goto err_disable_sysclk; ++ } + + writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL); + writel(MST_INT_IBIR_THR, master->regs + MST_IER); +diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c +index 0e2068ec068b8..358636954619d 100644 +--- a/drivers/iio/adc/stm32-adc-core.c ++++ b/drivers/iio/adc/stm32-adc-core.c +@@ -794,6 +794,13 @@ static int stm32_adc_core_runtime_resume(struct device *dev) + { + return stm32_adc_core_hw_start(dev); + } ++ ++static int stm32_adc_core_runtime_idle(struct device *dev) ++{ ++ pm_runtime_mark_last_busy(dev); ++ ++ return 0; ++} + #endif + + static const struct dev_pm_ops stm32_adc_core_pm_ops = { +@@ -801,7 +808,7 @@ static const struct dev_pm_ops stm32_adc_core_pm_ops = { + pm_runtime_force_resume) + 
SET_RUNTIME_PM_OPS(stm32_adc_core_runtime_suspend, + stm32_adc_core_runtime_resume, +- NULL) ++ stm32_adc_core_runtime_idle) + }; + + static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = { +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index 5888311b21198..baf0b6ae7a8bb 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -68,6 +68,9 @@ static const char * const cma_events[] = { + [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", + }; + ++static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr, ++ union ib_gid *mgid); ++ + const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) + { + size_t index = event; +@@ -345,13 +348,10 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev) + + struct cma_multicast { + struct rdma_id_private *id_priv; +- union { +- struct ib_sa_multicast *ib; +- } multicast; ++ struct ib_sa_multicast *sa_mc; + struct list_head list; + void *context; + struct sockaddr_storage addr; +- struct kref mcref; + u8 join_state; + }; + +@@ -363,18 +363,6 @@ struct cma_work { + struct rdma_cm_event event; + }; + +-struct cma_ndev_work { +- struct work_struct work; +- struct rdma_id_private *id; +- struct rdma_cm_event event; +-}; +- +-struct iboe_mcast_work { +- struct work_struct work; +- struct rdma_id_private *id; +- struct cma_multicast *mc; +-}; +- + union cma_ip_addr { + struct in6_addr ip6; + struct { +@@ -483,14 +471,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv, + rdma_start_port(cma_dev->device)]; + } + +-static inline void release_mc(struct kref *kref) +-{ +- struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref); +- +- kfree(mc->multicast.ib); +- kfree(mc); +-} +- + static void cma_release_dev(struct rdma_id_private *id_priv) + { + mutex_lock(&lock); +@@ -1783,19 +1763,30 @@ static void cma_release_port(struct rdma_id_private *id_priv) + mutex_unlock(&lock); + } + +-static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv, +- struct cma_multicast *mc) ++static void destroy_mc(struct rdma_id_private *id_priv, ++ struct cma_multicast *mc) + { +- struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; +- struct net_device *ndev = NULL; ++ if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) ++ ib_sa_free_multicast(mc->sa_mc); + +- if (dev_addr->bound_dev_if) +- ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); +- if (ndev) { +- cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false); +- dev_put(ndev); ++ if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { ++ struct rdma_dev_addr *dev_addr = ++ &id_priv->id.route.addr.dev_addr; ++ struct net_device *ndev = NULL; ++ ++ if (dev_addr->bound_dev_if) ++ ndev = dev_get_by_index(dev_addr->net, ++ dev_addr->bound_dev_if); ++ if (ndev) { ++ union ib_gid mgid; ++ ++ cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr, ++ &mgid); ++ cma_igmp_send(ndev, &mgid, false); ++ dev_put(ndev); ++ } + } +- kref_put(&mc->mcref, release_mc); ++ kfree(mc); + } + + static void cma_leave_mc_groups(struct rdma_id_private *id_priv) +@@ -1803,16 +1794,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv) + struct cma_multicast *mc; + + while (!list_empty(&id_priv->mc_list)) { +- mc = container_of(id_priv->mc_list.next, +- struct cma_multicast, list); ++ mc = list_first_entry(&id_priv->mc_list, struct cma_multicast, ++ list); + list_del(&mc->list); +- if (rdma_cap_ib_mcast(id_priv->cma_dev->device, +- 
id_priv->id.port_num)) { +- ib_sa_free_multicast(mc->multicast.ib); +- kfree(mc); +- } else { +- cma_leave_roce_mc_group(id_priv, mc); +- } ++ destroy_mc(id_priv, mc); + } + } + +@@ -2647,32 +2632,14 @@ static void cma_work_handler(struct work_struct *_work) + struct rdma_id_private *id_priv = work->id; + + mutex_lock(&id_priv->handler_mutex); +- if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) ++ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || ++ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) + goto out_unlock; +- +- if (cma_cm_event_handler(id_priv, &work->event)) { +- cma_id_put(id_priv); +- destroy_id_handler_unlock(id_priv); +- goto out_free; ++ if (work->old_state != 0 || work->new_state != 0) { ++ if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) ++ goto out_unlock; + } + +-out_unlock: +- mutex_unlock(&id_priv->handler_mutex); +- cma_id_put(id_priv); +-out_free: +- kfree(work); +-} +- +-static void cma_ndev_work_handler(struct work_struct *_work) +-{ +- struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); +- struct rdma_id_private *id_priv = work->id; +- +- mutex_lock(&id_priv->handler_mutex); +- if (id_priv->state == RDMA_CM_DESTROYING || +- id_priv->state == RDMA_CM_DEVICE_REMOVAL) +- goto out_unlock; +- + if (cma_cm_event_handler(id_priv, &work->event)) { + cma_id_put(id_priv); + destroy_id_handler_unlock(id_priv); +@@ -2683,6 +2650,8 @@ out_unlock: + mutex_unlock(&id_priv->handler_mutex); + cma_id_put(id_priv); + out_free: ++ if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN) ++ rdma_destroy_ah_attr(&work->event.param.ud.ah_attr); + kfree(work); + } + +@@ -4299,63 +4268,66 @@ out: + } + EXPORT_SYMBOL(rdma_disconnect); + +-static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) ++static void cma_make_mc_event(int status, struct rdma_id_private *id_priv, ++ struct ib_sa_multicast *multicast, ++ struct rdma_cm_event *event, ++ struct cma_multicast *mc) + { +- struct rdma_id_private *id_priv; +- struct cma_multicast *mc = multicast->context; +- struct rdma_cm_event event = {}; +- int ret = 0; +- +- id_priv = mc->id_priv; +- mutex_lock(&id_priv->handler_mutex); +- if (id_priv->state != RDMA_CM_ADDR_BOUND && +- id_priv->state != RDMA_CM_ADDR_RESOLVED) +- goto out; ++ struct rdma_dev_addr *dev_addr; ++ enum ib_gid_type gid_type; ++ struct net_device *ndev; + + if (!status) + status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); + else + pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n", + status); +- mutex_lock(&id_priv->qp_mutex); +- if (!status && id_priv->id.qp) { +- status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, +- be16_to_cpu(multicast->rec.mlid)); +- if (status) +- pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. 
status %d\n", +- status); ++ ++ event->status = status; ++ event->param.ud.private_data = mc->context; ++ if (status) { ++ event->event = RDMA_CM_EVENT_MULTICAST_ERROR; ++ return; + } +- mutex_unlock(&id_priv->qp_mutex); + +- event.status = status; +- event.param.ud.private_data = mc->context; +- if (!status) { +- struct rdma_dev_addr *dev_addr = +- &id_priv->id.route.addr.dev_addr; +- struct net_device *ndev = +- dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); +- enum ib_gid_type gid_type = +- id_priv->cma_dev->default_gid_type[id_priv->id.port_num - +- rdma_start_port(id_priv->cma_dev->device)]; +- +- event.event = RDMA_CM_EVENT_MULTICAST_JOIN; +- ret = ib_init_ah_from_mcmember(id_priv->id.device, +- id_priv->id.port_num, +- &multicast->rec, +- ndev, gid_type, +- &event.param.ud.ah_attr); +- if (ret) +- event.event = RDMA_CM_EVENT_MULTICAST_ERROR; ++ dev_addr = &id_priv->id.route.addr.dev_addr; ++ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); ++ gid_type = ++ id_priv->cma_dev ++ ->default_gid_type[id_priv->id.port_num - ++ rdma_start_port( ++ id_priv->cma_dev->device)]; ++ ++ event->event = RDMA_CM_EVENT_MULTICAST_JOIN; ++ if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, ++ &multicast->rec, ndev, gid_type, ++ &event->param.ud.ah_attr)) { ++ event->event = RDMA_CM_EVENT_MULTICAST_ERROR; ++ goto out; ++ } + +- event.param.ud.qp_num = 0xFFFFFF; +- event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); +- if (ndev) +- dev_put(ndev); +- } else +- event.event = RDMA_CM_EVENT_MULTICAST_ERROR; ++ event->param.ud.qp_num = 0xFFFFFF; ++ event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey); + +- ret = cma_cm_event_handler(id_priv, &event); ++out: ++ if (ndev) ++ dev_put(ndev); ++} + ++static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) ++{ ++ struct cma_multicast *mc = multicast->context; ++ struct rdma_id_private *id_priv = mc->id_priv; ++ struct rdma_cm_event event = {}; ++ int ret = 0; ++ ++ mutex_lock(&id_priv->handler_mutex); ++ if (id_priv->state != RDMA_CM_ADDR_BOUND && ++ id_priv->state != RDMA_CM_ADDR_RESOLVED) ++ goto out; ++ ++ cma_make_mc_event(status, id_priv, multicast, &event, mc); ++ ret = cma_cm_event_handler(id_priv, &event); + rdma_destroy_ah_attr(&event.param.ud.ah_attr); + if (ret) { + destroy_id_handler_unlock(id_priv); +@@ -4445,23 +4417,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv, + IB_SA_MCMEMBER_REC_MTU | + IB_SA_MCMEMBER_REC_HOP_LIMIT; + +- mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, +- id_priv->id.port_num, &rec, +- comp_mask, GFP_KERNEL, +- cma_ib_mc_handler, mc); +- return PTR_ERR_OR_ZERO(mc->multicast.ib); +-} +- +-static void iboe_mcast_work_handler(struct work_struct *work) +-{ +- struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); +- struct cma_multicast *mc = mw->mc; +- struct ib_sa_multicast *m = mc->multicast.ib; +- +- mc->multicast.ib->context = mc; +- cma_ib_mc_handler(0, m); +- kref_put(&mc->mcref, release_mc); +- kfree(mw); ++ mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device, ++ id_priv->id.port_num, &rec, comp_mask, ++ GFP_KERNEL, cma_ib_mc_handler, mc); ++ return PTR_ERR_OR_ZERO(mc->sa_mc); + } + + static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, +@@ -4496,52 +4455,47 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, + static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, + struct cma_multicast *mc) + { +- struct 
iboe_mcast_work *work; ++ struct cma_work *work; + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; + int err = 0; + struct sockaddr *addr = (struct sockaddr *)&mc->addr; + struct net_device *ndev = NULL; ++ struct ib_sa_multicast ib; + enum ib_gid_type gid_type; + bool send_only; + + send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); + +- if (cma_zero_addr((struct sockaddr *)&mc->addr)) ++ if (cma_zero_addr(addr)) + return -EINVAL; + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (!work) + return -ENOMEM; + +- mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL); +- if (!mc->multicast.ib) { +- err = -ENOMEM; +- goto out1; +- } +- + gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - + rdma_start_port(id_priv->cma_dev->device)]; +- cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type); ++ cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type); + +- mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff); ++ ib.rec.pkey = cpu_to_be16(0xffff); + if (id_priv->id.ps == RDMA_PS_UDP) +- mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); ++ ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); + + if (dev_addr->bound_dev_if) + ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); + if (!ndev) { + err = -ENODEV; +- goto out2; ++ goto err_free; + } +- mc->multicast.ib->rec.rate = iboe_get_rate(ndev); +- mc->multicast.ib->rec.hop_limit = 1; +- mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu); ++ ib.rec.rate = iboe_get_rate(ndev); ++ ib.rec.hop_limit = 1; ++ ib.rec.mtu = iboe_get_mtu(ndev->mtu); + + if (addr->sa_family == AF_INET) { + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { +- mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; ++ ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; + if (!send_only) { +- err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, ++ err = cma_igmp_send(ndev, &ib.rec.mgid, + true); + } + } +@@ -4550,24 +4504,22 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, + err = -ENOTSUPP; + } + dev_put(ndev); +- if (err || !mc->multicast.ib->rec.mtu) { ++ if (err || !ib.rec.mtu) { + if (!err) + err = -EINVAL; +- goto out2; ++ goto err_free; + } + rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, +- &mc->multicast.ib->rec.port_gid); ++ &ib.rec.port_gid); + work->id = id_priv; +- work->mc = mc; +- INIT_WORK(&work->work, iboe_mcast_work_handler); +- kref_get(&mc->mcref); ++ INIT_WORK(&work->work, cma_work_handler); ++ cma_make_mc_event(0, id_priv, &ib, &work->event, mc); ++ /* Balances with cma_id_put() in cma_work_handler */ ++ cma_id_get(id_priv); + queue_work(cma_wq, &work->work); +- + return 0; + +-out2: +- kfree(mc->multicast.ib); +-out1: ++err_free: + kfree(work); + return err; + } +@@ -4579,6 +4531,10 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, + struct cma_multicast *mc; + int ret; + ++ /* Not supported for kernel QPs */ ++ if (WARN_ON(id->qp)) ++ return -EINVAL; ++ + if (!id->device) + return -EINVAL; + +@@ -4587,7 +4543,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, + !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) + return -EINVAL; + +- mc = kmalloc(sizeof *mc, GFP_KERNEL); ++ mc = kzalloc(sizeof(*mc), GFP_KERNEL); + if (!mc) + return -ENOMEM; + +@@ -4597,7 +4553,6 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, + mc->join_state = join_state; + + if (rdma_protocol_roce(id->device, id->port_num)) { +- kref_init(&mc->mcref); + ret = cma_iboe_join_multicast(id_priv, mc); + if (ret) + goto out_err; 
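[Editor's illustration; not part of the patch.] The cma.c rework above folds the dedicated iboe_mcast_work/kref machinery into the generic cma_work_handler path; note the added comment "Balances with cma_id_put() in cma_work_handler": a reference is taken before the work is queued so the id cannot be freed while the work is still pending, and the handler drops it when done. A minimal sketch of that take-before-queue / drop-in-handler pattern — the object and helpers are hypothetical, and the handler is called inline here where the kernel would run it from a workqueue:

    #include <stdio.h>

    /* Toy refcounted object standing in for rdma_id_private. */
    struct obj {
    	int refcount;
    };

    static void obj_get(struct obj *o)
    {
    	o->refcount++;
    }

    static void obj_put(struct obj *o)
    {
    	if (--o->refcount == 0)
    		printf("object freed\n");
    }

    static void work_handler(struct obj *o)
    {
    	printf("handling deferred work, refcount=%d\n", o->refcount);
    	obj_put(o);	/* balances the obj_get() taken at queue time */
    }

    int main(void)
    {
    	struct obj o = { .refcount = 1 };	/* caller's reference */

    	obj_get(&o);		/* taken on behalf of the queued work */
    	work_handler(&o);	/* stands in for the workqueue running later */
    	obj_put(&o);		/* caller drops its own reference */
    	return 0;
    }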
+@@ -4629,25 +4584,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) + id_priv = container_of(id, struct rdma_id_private, id); + spin_lock_irq(&id_priv->lock); + list_for_each_entry(mc, &id_priv->mc_list, list) { +- if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) { +- list_del(&mc->list); +- spin_unlock_irq(&id_priv->lock); +- +- if (id->qp) +- ib_detach_mcast(id->qp, +- &mc->multicast.ib->rec.mgid, +- be16_to_cpu(mc->multicast.ib->rec.mlid)); +- +- BUG_ON(id_priv->cma_dev->device != id->device); +- +- if (rdma_cap_ib_mcast(id->device, id->port_num)) { +- ib_sa_free_multicast(mc->multicast.ib); +- kfree(mc); +- } else if (rdma_protocol_roce(id->device, id->port_num)) { +- cma_leave_roce_mc_group(id_priv, mc); +- } +- return; +- } ++ if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0) ++ continue; ++ list_del(&mc->list); ++ spin_unlock_irq(&id_priv->lock); ++ ++ WARN_ON(id_priv->cma_dev->device != id->device); ++ destroy_mc(id_priv, mc); ++ return; + } + spin_unlock_irq(&id_priv->lock); + } +@@ -4656,7 +4600,7 @@ EXPORT_SYMBOL(rdma_leave_multicast); + static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) + { + struct rdma_dev_addr *dev_addr; +- struct cma_ndev_work *work; ++ struct cma_work *work; + + dev_addr = &id_priv->id.route.addr.dev_addr; + +@@ -4669,7 +4613,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id + if (!work) + return -ENOMEM; + +- INIT_WORK(&work->work, cma_ndev_work_handler); ++ INIT_WORK(&work->work, cma_work_handler); + work->id = id_priv; + work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; + cma_id_get(id_priv); +diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c +index a92fc3f90bb5b..19e36e52181be 100644 +--- a/drivers/infiniband/core/cq.c ++++ b/drivers/infiniband/core/cq.c +@@ -197,24 +197,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private) + } + + /** +- * __ib_alloc_cq_user - allocate a completion queue ++ * __ib_alloc_cq allocate a completion queue + * @dev: device to allocate the CQ for + * @private: driver private data, accessible from cq->cq_context + * @nr_cqe: number of CQEs to allocate + * @comp_vector: HCA completion vectors for this CQ + * @poll_ctx: context to poll the CQ from. + * @caller: module owner name. +- * @udata: Valid user data or NULL for kernel object + * + * This is the proper interface to allocate a CQ for in-kernel users. A + * CQ allocated with this interface will automatically be polled from the + * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id + * to use this CQ abstraction. 
+ */ +-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, +- int nr_cqe, int comp_vector, +- enum ib_poll_context poll_ctx, +- const char *caller, struct ib_udata *udata) ++struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe, ++ int comp_vector, enum ib_poll_context poll_ctx, ++ const char *caller) + { + struct ib_cq_init_attr cq_attr = { + .cqe = nr_cqe, +@@ -277,7 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, + out_destroy_cq: + rdma_dim_destroy(cq); + rdma_restrack_del(&cq->res); +- cq->device->ops.destroy_cq(cq, udata); ++ cq->device->ops.destroy_cq(cq, NULL); + out_free_wc: + kfree(cq->wc); + out_free_cq: +@@ -285,7 +283,7 @@ out_free_cq: + trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret); + return ERR_PTR(ret); + } +-EXPORT_SYMBOL(__ib_alloc_cq_user); ++EXPORT_SYMBOL(__ib_alloc_cq); + + /** + * __ib_alloc_cq_any - allocate a completion queue +@@ -310,18 +308,19 @@ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, + atomic_inc_return(&counter) % + min_t(int, dev->num_comp_vectors, num_online_cpus()); + +- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, +- caller, NULL); ++ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx, ++ caller); + } + EXPORT_SYMBOL(__ib_alloc_cq_any); + + /** +- * ib_free_cq_user - free a completion queue ++ * ib_free_cq - free a completion queue + * @cq: completion queue to free. +- * @udata: User data or NULL for kernel object + */ +-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata) ++void ib_free_cq(struct ib_cq *cq) + { ++ int ret; ++ + if (WARN_ON_ONCE(atomic_read(&cq->usecnt))) + return; + if (WARN_ON_ONCE(cq->cqe_used)) +@@ -343,12 +342,13 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata) + + rdma_dim_destroy(cq); + trace_cq_free(cq); ++ ret = cq->device->ops.destroy_cq(cq, NULL); ++ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail"); + rdma_restrack_del(&cq->res); +- cq->device->ops.destroy_cq(cq, udata); + kfree(cq->wc); + kfree(cq); + } +-EXPORT_SYMBOL(ib_free_cq_user); ++EXPORT_SYMBOL(ib_free_cq); + + void ib_cq_pool_init(struct ib_device *dev) + { +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c +index 1d184ea05eba1..6f42ff8f2ec57 100644 +--- a/drivers/infiniband/core/ucma.c ++++ b/drivers/infiniband/core/ucma.c +@@ -586,6 +586,7 @@ static int ucma_free_ctx(struct ucma_context *ctx) + list_move_tail(&uevent->list, &list); + } + list_del(&ctx->list); ++ events_reported = ctx->events_reported; + mutex_unlock(&ctx->file->mut); + + list_for_each_entry_safe(uevent, tmp, &list, list) { +@@ -595,7 +596,6 @@ static int ucma_free_ctx(struct ucma_context *ctx) + kfree(uevent); + } + +- events_reported = ctx->events_reported; + mutex_destroy(&ctx->mutex); + kfree(ctx); + return events_reported; +@@ -1512,7 +1512,9 @@ static ssize_t ucma_process_join(struct ucma_file *file, + return 0; + + err3: ++ mutex_lock(&ctx->mutex); + rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); ++ mutex_unlock(&ctx->mutex); + ucma_cleanup_mc_events(mc); + err2: + xa_erase(&multicast_table, mc->id); +@@ -1678,7 +1680,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, + + cur_file = ctx->file; + if (cur_file == new_file) { ++ mutex_lock(&cur_file->mut); + resp.events_reported = ctx->events_reported; ++ mutex_unlock(&cur_file->mut); + goto response; + } + +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c +index 
831bff8d52e54..1d0599997d0fb 100644 +--- a/drivers/infiniband/core/umem.c ++++ b/drivers/infiniband/core/umem.c +@@ -151,13 +151,24 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, + dma_addr_t mask; + int i; + ++ /* rdma_for_each_block() has a bug if the page size is smaller than the ++ * page size used to build the umem. For now prevent smaller page sizes ++ * from being returned. ++ */ ++ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT); ++ + /* At minimum, drivers must support PAGE_SIZE or smaller */ + if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0)))) + return 0; + + va = virt; +- /* max page size not to exceed MR length */ +- mask = roundup_pow_of_two(umem->length); ++ /* The best result is the smallest page size that results in the minimum ++ * number of required pages. Compute the largest page size that could ++ * work based on VA address bits that don't change. ++ */ ++ mask = pgsz_bitmap & ++ GENMASK(BITS_PER_LONG - 1, ++ bits_per((umem->length - 1 + virt) ^ virt)); + /* offset into first SGL */ + pgoff = umem->address & ~PAGE_MASK; + +diff --git a/drivers/infiniband/core/uverbs_std_types_wq.c b/drivers/infiniband/core/uverbs_std_types_wq.c +index cad842ede077d..f2e6a625724a4 100644 +--- a/drivers/infiniband/core/uverbs_std_types_wq.c ++++ b/drivers/infiniband/core/uverbs_std_types_wq.c +@@ -16,7 +16,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject, + container_of(uobject, struct ib_uwq_object, uevent.uobject); + int ret; + +- ret = ib_destroy_wq(wq, &attrs->driver_udata); ++ ret = ib_destroy_wq_user(wq, &attrs->driver_udata); + if (ib_is_destroy_retryable(ret, why, uobject)) + return ret; + +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c +index 307886737646e..6653f92f2df99 100644 +--- a/drivers/infiniband/core/verbs.c ++++ b/drivers/infiniband/core/verbs.c +@@ -2011,16 +2011,21 @@ EXPORT_SYMBOL(rdma_set_cq_moderation); + + int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata) + { ++ int ret; ++ + if (WARN_ON_ONCE(cq->shared)) + return -EOPNOTSUPP; + + if (atomic_read(&cq->usecnt)) + return -EBUSY; + ++ ret = cq->device->ops.destroy_cq(cq, udata); ++ if (ret) ++ return ret; ++ + rdma_restrack_del(&cq->res); +- cq->device->ops.destroy_cq(cq, udata); + kfree(cq); +- return 0; ++ return ret; + } + EXPORT_SYMBOL(ib_destroy_cq_user); + +@@ -2328,13 +2333,17 @@ EXPORT_SYMBOL(ib_alloc_xrcd_user); + */ + int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata) + { ++ int ret; ++ + if (atomic_read(&xrcd->usecnt)) + return -EBUSY; + + WARN_ON(!xa_empty(&xrcd->tgt_qps)); +- xrcd->device->ops.dealloc_xrcd(xrcd, udata); ++ ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata); ++ if (ret) ++ return ret; + kfree(xrcd); +- return 0; ++ return ret; + } + EXPORT_SYMBOL(ib_dealloc_xrcd_user); + +@@ -2378,25 +2387,28 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd, + EXPORT_SYMBOL(ib_create_wq); + + /** +- * ib_destroy_wq - Destroys the specified user WQ. ++ * ib_destroy_wq_user - Destroys the specified user WQ. + * @wq: The WQ to destroy. 
+ * @udata: Valid user data + */ +-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata) ++int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata) + { + struct ib_cq *cq = wq->cq; + struct ib_pd *pd = wq->pd; ++ int ret; + + if (atomic_read(&wq->usecnt)) + return -EBUSY; + +- wq->device->ops.destroy_wq(wq, udata); ++ ret = wq->device->ops.destroy_wq(wq, udata); ++ if (ret) ++ return ret; ++ + atomic_dec(&pd->usecnt); + atomic_dec(&cq->usecnt); +- +- return 0; ++ return ret; + } +-EXPORT_SYMBOL(ib_destroy_wq); ++EXPORT_SYMBOL(ib_destroy_wq_user); + + /** + * ib_modify_wq - Modifies the specified WQ. +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +index 1d7a9ca5240c5..e0d06899ad4f4 100644 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +@@ -2800,7 +2800,7 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr, + } + + /* Completion Queues */ +-void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) ++int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) + { + struct bnxt_re_cq *cq; + struct bnxt_qplib_nq *nq; +@@ -2816,6 +2816,7 @@ void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) + atomic_dec(&rdev->cq_count); + nq->budget--; + kfree(cq->cql); ++ return 0; + } + + int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h +index 1daeb30e06fda..f1d98540fede5 100644 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h +@@ -193,7 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, + const struct ib_recv_wr **bad_recv_wr); + int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata); +-void bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); ++int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); + int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); + int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); + struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags); +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c +index 352b8af1998a5..28349ed508854 100644 +--- a/drivers/infiniband/hw/cxgb4/cq.c ++++ b/drivers/infiniband/hw/cxgb4/cq.c +@@ -967,7 +967,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) + return !err || err == -ENODATA ? npolled : err; + } + +-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) ++int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) + { + struct c4iw_cq *chp; + struct c4iw_ucontext *ucontext; +@@ -985,6 +985,7 @@ void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) + ucontext ? 
&ucontext->uctx : &chp->cq.rdev->uctx, + chp->destroy_skb, chp->wr_waitp); + c4iw_put_wr_wait(chp->wr_waitp); ++ return 0; + } + + int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, +diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +index 2b2b009b371af..a5975119b0d4c 100644 +--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h ++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +@@ -992,7 +992,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, + struct ib_udata *udata); + struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc); + int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); +-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); ++int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); + int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata); + int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); +diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h +index 1889dd172a252..05f593940e7b0 100644 +--- a/drivers/infiniband/hw/efa/efa.h ++++ b/drivers/infiniband/hw/efa/efa.h +@@ -139,7 +139,7 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); + struct ib_qp *efa_create_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); ++int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); + int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata); + struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, +diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c +index 9e201f1692892..61520521baccd 100644 +--- a/drivers/infiniband/hw/efa/efa_verbs.c ++++ b/drivers/infiniband/hw/efa/efa_verbs.c +@@ -843,7 +843,7 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx) + return efa_com_destroy_cq(&dev->edev, ¶ms); + } + +-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) ++int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) + { + struct efa_dev *dev = to_edev(ibcq->device); + struct efa_cq *cq = to_ecq(ibcq); +@@ -856,6 +856,7 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) + efa_destroy_cq_idx(dev, cq->cq_idx); + efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, + DMA_FROM_DEVICE); ++ return 0; + } + + static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq, +diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c +index e87d616f79882..c5acf3332519b 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_cq.c ++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c +@@ -311,7 +311,7 @@ err_cq_buf: + return ret; + } + +-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) ++int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) + { + struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); + struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); +@@ -322,6 +322,7 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) + free_cq_buf(hr_dev, hr_cq); + free_cq_db(hr_dev, hr_cq, udata); + free_cqc(hr_dev, hr_cq); ++ return 0; + } + + void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn) +diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h +index 6edcbdcd8f432..6dc07bfb4daad 100644 +--- 
a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -930,7 +930,7 @@ struct hns_roce_hw {
+ 	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ 	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+ 			struct ib_udata *udata);
+-	void (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
++	int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
+ 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+ 	int (*init_eq)(struct hns_roce_dev *hr_dev);
+ 	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
+@@ -1247,7 +1247,7 @@ int to_hr_qp_type(int qp_type);
+ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ 		       struct ib_udata *udata);
+ 
+-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
++int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
+ 			 struct ib_udata *udata, unsigned long virt,
+ 			 struct hns_roce_db *db);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+index aeb3a6fa7d472..eac971c663791 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -271,7 +271,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
+ 			ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
+ 			break;
+ 		case IB_WR_LOCAL_INV:
+-			break;
+ 		case IB_WR_ATOMIC_CMP_AND_SWP:
+ 		case IB_WR_ATOMIC_FETCH_AND_ADD:
+ 		case IB_WR_LSO:
+@@ -3572,7 +3571,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 	return 0;
+ }
+ 
+-static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
+ 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+@@ -3603,6 +3602,7 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 		}
+ 		wait_time++;
+ 	}
++	return 0;
+ }
+ 
+ static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 4cda95ed1fbe2..cee140920c579 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1770,9 +1770,9 @@ static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
+ 		       int *buf_page_size, int *bt_page_size, u32 hem_type)
+ {
+ 	u64 obj_per_chunk;
+-	int bt_chunk_size = 1 << PAGE_SHIFT;
+-	int buf_chunk_size = 1 << PAGE_SHIFT;
+-	int obj_per_chunk_default = buf_chunk_size / obj_size;
++	u64 bt_chunk_size = PAGE_SIZE;
++	u64 buf_chunk_size = PAGE_SIZE;
++	u64 obj_per_chunk_default = buf_chunk_size / obj_size;
+ 
+ 	*buf_page_size = 0;
+ 	*bt_page_size = 0;
+@@ -3641,9 +3641,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
+ 			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
+ 	}
+ 
+-	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
+-		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
+-
+ 	roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
+ 
+ 	hr_qp->access_flags = attr->qp_access_flags;
+@@ -3954,6 +3951,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 	dma_addr_t trrl_ba;
+ 	dma_addr_t irrl_ba;
+ 	enum ib_mtu mtu;
++	u8 lp_pktn_ini;
+ 	u8 port_num;
+ 	u64 *mtts;
+ 	u8 *dmac;
+@@ -4061,13 +4059,21 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 	}
+ 
+ #define MAX_LP_MSG_LEN 65536
+-	/* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
++	/* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
++	lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
++
+ 	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
+-		       V2_QPC_BYTE_56_LP_PKTN_INI_S,
+-		       ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
++		       V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
+ 	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
+ 		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
+ 
++	/* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
++	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
++		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
++	roce_set_field(qpc_mask->byte_172_sq_psn,
++		       V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
++		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
++
+ 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+ 		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
+ 	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
+@@ -4259,11 +4265,19 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ 			       V2_QPC_BYTE_28_FL_S, 0);
+ 	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ 	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
++
++	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
++	if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
++		ibdev_err(ibdev,
++			  "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
++			  hr_qp->sl, MAX_SERVICE_LEVEL);
++		return -EINVAL;
++	}
++
+ 	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+-		       V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
++		       V2_QPC_BYTE_28_SL_S, hr_qp->sl);
+ 	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ 		       V2_QPC_BYTE_28_SL_S, 0);
+-	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ 
+ 	return 0;
+ }
+@@ -4759,7 +4773,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ 	qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
+ 					    V2_QPC_BYTE_212_RETRY_CNT_M,
+ 					    V2_QPC_BYTE_212_RETRY_CNT_S);
+-	qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
++	qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
++					    V2_QPC_BYTE_244_RNR_CNT_M,
++					    V2_QPC_BYTE_244_RNR_CNT_S);
+ 
+ done:
+ 	qp_attr->cur_qp_state = qp_attr->qp_state;
+@@ -4775,6 +4791,7 @@ done:
+ 	}
+ 
+ 	qp_init_attr->cap = qp_attr->cap;
++	qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
+ 
+ out:
+ 	mutex_unlock(&hr_qp->mutex);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index ac29be43b6bd5..17f35f91f4ad2 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1941,6 +1941,8 @@ struct hns_roce_eq_context {
+ #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
+ #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
+ 
++#define MAX_SERVICE_LEVEL 0x7
++
+ struct hns_roce_wqe_atomic_seg {
+ 	__le64	fetchadd_swap_data;
+ 	__le64	cmp_data;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index c063c450c715f..975281f034685 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -1161,8 +1161,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 
+ 	mutex_lock(&hr_qp->mutex);
+ 
+-	cur_state = attr_mask & IB_QP_CUR_STATE ?
+-		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
++	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
++		goto out;
++
++	cur_state = hr_qp->state;
+ 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+ 
+ 	if (ibqp->uobject &&
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index 25747b85a79c7..832b80de004fb 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -409,8 +409,8 @@ static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
+ }
+ 
+ /* i40iw.c */
+-void i40iw_add_ref(struct ib_qp *);
+-void i40iw_rem_ref(struct ib_qp *);
++void i40iw_qp_add_ref(struct ib_qp *ibqp);
++void i40iw_qp_rem_ref(struct ib_qp *ibqp);
+ struct ib_qp *i40iw_get_qp(struct ib_device *, int);
+ 
+ void i40iw_flush_wqes(struct i40iw_device *iwdev,
+@@ -554,9 +554,8 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+ 					  bool wait);
+ void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
+ void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
+-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+-			     struct i40iw_qp *iwqp,
+-			     u32 qp_num);
++void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
++
+ enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
+ 					     struct i40iw_dma_mem *memptr,
+ 					     u32 size, u32 mask);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index a3b95805c154e..3053c345a5a34 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -2322,7 +2322,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
+ 	iwqp = cm_node->iwqp;
+ 	if (iwqp) {
+ 		iwqp->cm_node = NULL;
+-		i40iw_rem_ref(&iwqp->ibqp);
++		i40iw_qp_rem_ref(&iwqp->ibqp);
+ 		cm_node->iwqp = NULL;
+ 	} else if (cm_node->qhash_set) {
+ 		i40iw_get_addr_info(cm_node, &nfo);
+@@ -3452,7 +3452,7 @@ void i40iw_cm_disconn(struct i40iw_qp *iwqp)
+ 		kfree(work);
+ 		return;
+ 	}
+-	i40iw_add_ref(&iwqp->ibqp);
++	i40iw_qp_add_ref(&iwqp->ibqp);
+ 	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+ 
+ 	work->iwqp = iwqp;
+@@ -3623,7 +3623,7 @@ static void i40iw_disconnect_worker(struct work_struct *work)
+ 
+ 	kfree(dwork);
+ 	i40iw_cm_disconn_true(iwqp);
+-	i40iw_rem_ref(&iwqp->ibqp);
++	i40iw_qp_rem_ref(&iwqp->ibqp);
+ }
+ 
+ /**
+@@ -3745,7 +3745,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	cm_node->lsmm_size = accept.size + conn_param->private_data_len;
+ 	i40iw_cm_init_tsa_conn(iwqp, cm_node);
+ 	cm_id->add_ref(cm_id);
+-	i40iw_add_ref(&iwqp->ibqp);
++	i40iw_qp_add_ref(&iwqp->ibqp);
+ 
+ 	attr.qp_state = IB_QPS_RTS;
+ 	cm_node->qhash_set = false;
+@@ -3908,7 +3908,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	iwqp->cm_node = cm_node;
+ 	cm_node->iwqp = iwqp;
+ 	iwqp->cm_id = cm_id;
+-	i40iw_add_ref(&iwqp->ibqp);
++	i40iw_qp_add_ref(&iwqp->ibqp);
+ 
+ 	if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
+ 		cm_node->state = I40IW_CM_STATE_SYN_SENT;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+index e1085634b8d9d..56fdc161f6f8e 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+@@ -313,7 +313,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
+ 				  __func__, info->qp_cq_id);
+ 			continue;
+ 		}
+-		i40iw_add_ref(&iwqp->ibqp);
++		i40iw_qp_add_ref(&iwqp->ibqp);
+ 		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+ 		qp = &iwqp->sc_qp;
+ 		spin_lock_irqsave(&iwqp->lock, flags);
+@@ -426,7 +426,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
+ 			break;
+ 		}
+ 		if (info->qp)
+-			i40iw_rem_ref(&iwqp->ibqp);
++			i40iw_qp_rem_ref(&iwqp->ibqp);
+ 	} while (1);
+ 
+ 	if (aeqcnt)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index e07fb37af0865..5e196bd49a583 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -477,25 +477,6 @@ void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
+ 	}
+ }
+ 
+-/**
+- * i40iw_free_qp - callback after destroy cqp completes
+- * @cqp_request: cqp request for destroy qp
+- * @num: not used
+- */
+-static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
+-{
+-	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
+-	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+-	struct i40iw_device *iwdev;
+-	u32 qp_num = iwqp->ibqp.qp_num;
+-
+-	iwdev = iwqp->iwdev;
+-
+-	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+-	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+-	i40iw_rem_devusecount(iwdev);
+-}
+-
+ /**
+  * i40iw_wait_event - wait for completion
+  * @iwdev: iwarp device
+@@ -616,26 +597,23 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
+ }
+ 
+ /**
+- * i40iw_add_ref - add refcount for qp
++ * i40iw_qp_add_ref - add refcount for qp
+  * @ibqp: iqarp qp
+  */
+-void i40iw_add_ref(struct ib_qp *ibqp)
++void i40iw_qp_add_ref(struct ib_qp *ibqp)
+ {
+ 	struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
+ 
+-	atomic_inc(&iwqp->refcount);
++	refcount_inc(&iwqp->refcount);
+ }
+ 
+ /**
+- * i40iw_rem_ref - rem refcount for qp and free if 0
++ * i40iw_qp_rem_ref - rem refcount for qp and free if 0
+  * @ibqp: iqarp qp
+  */
+-void i40iw_rem_ref(struct ib_qp *ibqp)
++void i40iw_qp_rem_ref(struct ib_qp *ibqp)
+ {
+ 	struct i40iw_qp *iwqp;
+-	enum i40iw_status_code status;
+-	struct i40iw_cqp_request *cqp_request;
+-	struct cqp_commands_info *cqp_info;
+ 	struct i40iw_device *iwdev;
+ 	u32 qp_num;
+ 	unsigned long flags;
+@@ -643,7 +621,7 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
+ 	iwqp = to_iwqp(ibqp);
+ 	iwdev = iwqp->iwdev;
+ 	spin_lock_irqsave(&iwdev->qptable_lock, flags);
+-	if (!atomic_dec_and_test(&iwqp->refcount)) {
++	if (!refcount_dec_and_test(&iwqp->refcount)) {
+ 		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+ 		return;
+ 	}
+@@ -651,25 +629,8 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
+ 	qp_num = iwqp->ibqp.qp_num;
+ 	iwdev->qp_table[qp_num] = NULL;
+ 	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+-	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+-	if (!cqp_request)
+-		return;
+-
+-	cqp_request->callback_fcn = i40iw_free_qp;
+-	cqp_request->param = (void *)&iwqp->sc_qp;
+-	cqp_info = &cqp_request->info;
+-	cqp_info->cqp_cmd = OP_QP_DESTROY;
+-	cqp_info->post_sq = 1;
+-	cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
+-	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+-	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+-	status = i40iw_handle_cqp_op(iwdev, cqp_request);
+-	if (!status)
+-		return;
++	complete(&iwqp->free_qp);
+ 
+-	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+-	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+-	i40iw_rem_devusecount(iwdev);
+ }
+ 
+ /**
+@@ -936,7 +897,7 @@ static void i40iw_terminate_timeout(struct timer_list *t)
+ 	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
+ 
+ 	i40iw_terminate_done(qp, 1);
+-	i40iw_rem_ref(&iwqp->ibqp);
++	i40iw_qp_rem_ref(&iwqp->ibqp);
+ }
+ 
+ /**
+@@ -948,7 +909,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
+ 	struct i40iw_qp *iwqp;
+ 
+ 	iwqp = (struct i40iw_qp *)qp->back_qp;
+-	i40iw_add_ref(&iwqp->ibqp);
++	i40iw_qp_add_ref(&iwqp->ibqp);
+ 	timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
+ 	iwqp->terminate_timer.expires = jiffies + HZ;
+ 	add_timer(&iwqp->terminate_timer);
+@@ -964,7 +925,7 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
+ 
+ 	iwqp = (struct i40iw_qp *)qp->back_qp;
+ 	if (del_timer(&iwqp->terminate_timer))
+-		i40iw_rem_ref(&iwqp->ibqp);
++		i40iw_qp_rem_ref(&iwqp->ibqp);
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index b51339328a51e..09caad228aa4f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -363,11 +363,11 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
+  * @iwqp: qp ptr (user or kernel)
+  * @qp_num: qp number assigned
+  */
+-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+-			     struct i40iw_qp *iwqp,
+-			     u32 qp_num)
++void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
+ {
+ 	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
++	struct i40iw_device *iwdev = iwqp->iwdev;
++	u32 qp_num = iwqp->ibqp.qp_num;
+ 
+ 	i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
+ 	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
+@@ -401,6 +401,10 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
+ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ {
+ 	struct i40iw_qp *iwqp = to_iwqp(ibqp);
++	struct ib_qp_attr attr;
++	struct i40iw_device *iwdev = iwqp->iwdev;
++
++	memset(&attr, 0, sizeof(attr));
+ 
+ 	iwqp->destroyed = 1;
+ 
+@@ -415,7 +419,15 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 		}
+ 	}
+ 
+-	i40iw_rem_ref(&iwqp->ibqp);
++	attr.qp_state = IB_QPS_ERR;
++	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
++	i40iw_qp_rem_ref(&iwqp->ibqp);
++	wait_for_completion(&iwqp->free_qp);
++	i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
++	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
++	i40iw_free_qp_resources(iwqp);
++	i40iw_rem_devusecount(iwdev);
++
+ 	return 0;
+ }
+ 
+@@ -576,6 +588,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ 	qp->back_qp = (void *)iwqp;
+ 	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+ 
++	iwqp->iwdev = iwdev;
+ 	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
+ 
+ 	if (i40iw_allocate_dma_mem(dev->hw,
+@@ -600,7 +613,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ 		goto error;
+ 	}
+ 
+-	iwqp->iwdev = iwdev;
+ 	iwqp->iwpd = iwpd;
+ 	iwqp->ibqp.qp_num = qp_num;
+ 	qp = &iwqp->sc_qp;
+@@ -714,7 +726,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ 		goto error;
+ 	}
+ 
+-	i40iw_add_ref(&iwqp->ibqp);
++	refcount_set(&iwqp->refcount, 1);
+ 	spin_lock_init(&iwqp->lock);
+ 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ 	iwdev->qp_table[qp_num] = iwqp;
+@@ -736,10 +748,11 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ 	}
+ 	init_completion(&iwqp->sq_drained);
+ 	init_completion(&iwqp->rq_drained);
++	init_completion(&iwqp->free_qp);
+ 
+ 	return &iwqp->ibqp;
+ error:
+-	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
++	i40iw_free_qp_resources(iwqp);
+ 	return ERR_PTR(err_code);
+ }
+ 
+@@ -1052,7 +1065,7 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+  * @ib_cq: cq pointer
+  * @udata: user data or NULL for kernel object
+  */
+-static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
++static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ {
+ 	struct i40iw_cq *iwcq;
+ 	struct i40iw_device *iwdev;
+@@ -1064,6 +1077,7 @@ static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ 	i40iw_cq_wq_destroy(iwdev, cq);
+ 	cq_free_resources(iwdev, iwcq);
+ 	i40iw_rem_devusecount(iwdev);
++	return 0;
+ }
+ 
+ /**
+@@ -2636,13 +2650,13 @@ static const struct ib_device_ops i40iw_dev_ops = {
+ 	.get_hw_stats = i40iw_get_hw_stats,
+ 	.get_port_immutable = i40iw_port_immutable,
+ 	.iw_accept = i40iw_accept,
+-	.iw_add_ref = i40iw_add_ref,
++	.iw_add_ref = i40iw_qp_add_ref,
+ 	.iw_connect = i40iw_connect,
+ 	.iw_create_listen = i40iw_create_listen,
+ 	.iw_destroy_listen = i40iw_destroy_listen,
+ 	.iw_get_qp = i40iw_get_qp,
+ 	.iw_reject = i40iw_reject,
+-	.iw_rem_ref = i40iw_rem_ref,
++	.iw_rem_ref = i40iw_qp_rem_ref,
+ 	.map_mr_sg = i40iw_map_mr_sg,
+ 	.mmap = i40iw_mmap,
+ 	.modify_qp = i40iw_modify_qp,
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+index 331bc21cbcc73..bab71f3e56374 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+@@ -139,7 +139,7 @@ struct i40iw_qp {
+ 	struct i40iw_qp_host_ctx_info ctx_info;
+ 	struct i40iwarp_offload_info iwarp_info;
+ 	void *allocated_buffer;
+-	atomic_t refcount;
++	refcount_t refcount;
+ 	struct iw_cm_id *cm_id;
+ 	void *cm_node;
+ 	struct ib_mr *lsmm_mr;
+@@ -174,5 +174,6 @@ struct i40iw_qp {
+ 	struct i40iw_dma_mem ietf_mem;
+ 	struct completion sq_drained;
+ 	struct completion rq_drained;
++	struct completion free_qp;
+ };
+ #endif
+diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
+index b591861934b3c..81d6a3460b55d 100644
+--- a/drivers/infiniband/hw/mlx4/cm.c
++++ b/drivers/infiniband/hw/mlx4/cm.c
+@@ -280,6 +280,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
+ 	if (!sriov->is_going_down && !id->scheduled_delete) {
+ 		id->scheduled_delete = 1;
+ 		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
++	} else if (id->scheduled_delete) {
++		/* Adjust timeout if already scheduled */
++		mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ 	}
+ 	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
+ 	spin_unlock(&sriov->id_map_lock);
+diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
+index 8a3436994f809..ee50dd823a8e8 100644
+--- a/drivers/infiniband/hw/mlx4/cq.c
++++ b/drivers/infiniband/hw/mlx4/cq.c
+@@ -475,7 +475,7 @@ out:
+ 	return err;
+ }
+ 
+-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
++int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ {
+ 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
+ 	struct mlx4_ib_cq *mcq = to_mcq(cq);
+@@ -495,6 +495,7 @@ void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ 		mlx4_db_free(dev->dev, &mcq->db);
+ 	}
+ 	ib_umem_release(mcq->umem);
++	return 0;
+ }
+ 
+ static void dump_cqe(void *cqe)
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index abe68708d6d6e..2cbdba4da9dfe 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -1299,6 +1299,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
+ 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+ }
+ 
++static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
++{
++	unsigned long flags;
++	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
++	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
++
++	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
++	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
++		queue_work(ctx->wi_wq, &ctx->work);
++	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
++}
++
+ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
+ 				  struct mlx4_ib_demux_pv_qp *tun_qp,
+ 				  int index)
+@@ -2001,7 +2013,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
+ 		cq_size *= 2;
+ 
+ 	cq_attr.cqe = cq_size;
+-	ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
++	ctx->cq = ib_create_cq(ctx->ib_dev,
++			       create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
+ 			       NULL, ctx, &cq_attr);
+ 	if (IS_ERR(ctx->cq)) {
+ 		ret = PTR_ERR(ctx->cq);
+@@ -2038,6 +2051,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
+ 		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
+ 
+ 	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
++	ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
+ 
+ 	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
+ 	if (ret) {
+@@ -2181,7 +2195,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ 		goto err_mcg;
+ 	}
+ 
+-	snprintf(name, sizeof name, "mlx4_ibt%d", port);
++	snprintf(name, sizeof(name), "mlx4_ibt%d", port);
+ 	ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ 	if (!ctx->wq) {
+ 		pr_err("Failed to create tunnelling WQ for port %d\n", port);
+@@ -2189,7 +2203,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ 		goto err_wq;
+ 	}
+ 
+-	snprintf(name, sizeof name, "mlx4_ibud%d", port);
++	snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
++	ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
++	if (!ctx->wi_wq) {
++		pr_err("Failed to create wire WQ for port %d\n", port);
++		ret = -ENOMEM;
++		goto err_wiwq;
++	}
++
++	snprintf(name, sizeof(name), "mlx4_ibud%d", port);
+ 	ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ 	if (!ctx->ud_wq) {
+ 		pr_err("Failed to create up/down WQ for port %d\n", port);
+@@ -2200,6 +2222,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ 	return 0;
+ 
+ err_udwq:
++	destroy_workqueue(ctx->wi_wq);
++	ctx->wi_wq = NULL;
++
++err_wiwq:
+ 	destroy_workqueue(ctx->wq);
+ 	ctx->wq = NULL;
+ 
+@@ -2247,12 +2273,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
+ 				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
+ 		}
+ 		flush_workqueue(ctx->wq);
++		flush_workqueue(ctx->wi_wq);
+ 		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+ 			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
+ 			free_pv_object(dev, i, ctx->port);
+ 		}
+ 		kfree(ctx->tun);
+ 		destroy_workqueue(ctx->ud_wq);
++		destroy_workqueue(ctx->wi_wq);
+ 		destroy_workqueue(ctx->wq);
+ 	}
+ }
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index bd4f975e7f9ac..d22bf9a4b53e2 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -1256,11 +1256,12 @@ err2:
+ 	return err;
+ }
+ 
+-static void mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
++static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+ {
+ 	ib_destroy_cq(to_mxrcd(xrcd)->cq);
+ 	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
+ 	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
++	return 0;
+ }
+ 
+ static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
+diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+index 38e87a700a2a2..bb64f6d9421c2 100644
+--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+@@ -454,6 +454,7 @@ struct mlx4_ib_demux_pv_ctx {
+ 	struct ib_pd *pd;
+ 	struct work_struct work;
+ 	struct workqueue_struct *wq;
++	struct workqueue_struct *wi_wq;
+ 	struct mlx4_ib_demux_pv_qp qp[2];
+ };
+ 
+@@ -461,6 +462,7 @@ struct mlx4_ib_demux_ctx {
+ 	struct ib_device *ib_dev;
+ 	int port;
+ 	struct workqueue_struct *wq;
++	struct workqueue_struct *wi_wq;
+ 	struct workqueue_struct *ud_wq;
+ 	spinlock_t ud_lock;
+ 	atomic64_t subnet_prefix;
+@@ -736,7 +738,7 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
+ int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		      struct ib_udata *udata);
+-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
++int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
+@@ -890,7 +892,7 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
+ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
+ 				struct ib_wq_init_attr *init_attr,
+ 				struct ib_udata *udata);
+-void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
++int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+ int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ 		      u32 wq_attr_mask, struct ib_udata *udata);
+ 
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 2975f350b9fd1..b7a0c3f977131 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -4327,7 +4327,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
+ 	return err;
+ }
+ 
+-void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
++int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+ {
+ 	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
+ 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
+@@ -4338,6 +4338,7 @@ void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+ 	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
+ 
+ 	kfree(qp);
++	return 0;
+ }
+ 
+ struct ib_rwq_ind_table
+diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
+index 145f3cb40ccba..aeeb14ecb3ee7 100644
+--- a/drivers/infiniband/hw/mlx5/counters.c
++++ b/drivers/infiniband/hw/mlx5/counters.c
+@@ -456,12 +456,12 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
+ 		cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
+ 		num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
+ 	}
+-	cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
++	cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
+ 	if (!cnts->names)
+ 		return -ENOMEM;
+ 
+ 	cnts->offsets = kcalloc(num_counters,
+-				sizeof(cnts->offsets), GFP_KERNEL);
++				sizeof(*cnts->offsets), GFP_KERNEL);
+ 	if (!cnts->offsets)
+ 		goto err_names;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index dceb0eb2bed16..35e5bbb44d3d8 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -168,7 +168,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
+ {
+ 	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
+ 	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+-	struct mlx5_ib_srq *srq;
++	struct mlx5_ib_srq *srq = NULL;
+ 	struct mlx5_ib_wq *wq;
+ 	u16 wqe_ctr;
+ 	u8 roce_packet_type;
+@@ -180,7 +180,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
+ 
+ 		if (qp->ibqp.xrcd) {
+ 			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
+-			srq = to_mibsrq(msrq);
++			if (msrq)
++				srq = to_mibsrq(msrq);
+ 		} else {
+ 			srq = to_msrq(qp->ibqp.srq);
+ 		}
+@@ -1023,16 +1024,21 @@ err_cqb:
+ 	return err;
+ }
+ 
+-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
++int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
+ 	struct mlx5_ib_cq *mcq = to_mcq(cq);
++	int ret;
++
++	ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
++	if (ret)
++		return ret;
+ 
+-	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+ 	if (udata)
+ 		destroy_cq_user(mcq, udata);
+ 	else
+ 		destroy_cq_kernel(dev, mcq);
++	return 0;
+ }
+ 
+ static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index d60d63221b14d..b805cc8124657 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -840,7 +840,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 			/* We support 'Gappy' memory registration too */
+ 			props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
+ 		}
+-		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
++		/* IB_WR_REG_MR always requires changing the entity size with UMR */
++		if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
++			props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ 		if (MLX5_CAP_GEN(mdev, sho)) {
+ 			props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
+ 			/* At this stage no support for signature handover */
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 5287fc8686627..884cc7c731253 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -1148,7 +1148,7 @@ int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ 			 size_t buflen, size_t *bc);
+ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		      struct ib_udata *udata);
+-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
++int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+@@ -1193,7 +1193,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ 			const struct ib_mad *in, struct ib_mad *out,
+ 			size_t *out_mad_size, u16 *out_mad_pkey_index);
+ int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+-void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
++int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
+ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
+ int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+@@ -1238,7 +1238,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
+ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+ 				struct ib_wq_init_attr *init_attr,
+ 				struct ib_udata *udata);
+-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
++int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ 		      u32 wq_attr_mask, struct ib_udata *udata);
+ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 3e6f2f9c66555..6eb40b33e1ea8 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -50,6 +50,29 @@ enum {
+ static void
+ create_mkey_callback(int status, struct mlx5_async_work *context);
+ 
++static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
++					  struct ib_pd *pd)
++{
++	struct mlx5_ib_dev *dev = to_mdev(pd->device);
++
++	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
++	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
++	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
++	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
++	MLX5_SET(mkc, mkc, lr, 1);
++
++	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
++		MLX5_SET(mkc, mkc, relaxed_ordering_write,
++			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
++	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
++		MLX5_SET(mkc, mkc, relaxed_ordering_read,
++			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
++
++	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
++	MLX5_SET(mkc, mkc, qpn, 0xffffff);
++	MLX5_SET64(mkc, mkc, start_addr, start_addr);
++}
++
+ static void
+ assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
+ 		    u32 *in)
+@@ -152,12 +175,12 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
+ 	mr->cache_ent = ent;
+ 	mr->dev = ent->dev;
+ 
++	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
+ 	MLX5_SET(mkc, mkc, free, 1);
+ 	MLX5_SET(mkc, mkc, umr_en, 1);
+ 	MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
+ 	MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
+ 
+-	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ 	MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
+ 	MLX5_SET(mkc, mkc, log_page_size, ent->page);
+ 	return mr;
+@@ -774,29 +797,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+ 	return 0;
+ }
+ 
+-static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
+-					  struct ib_pd *pd)
+-{
+-	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+-
+-	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+-	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+-	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+-	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+-	MLX5_SET(mkc, mkc, lr, 1);
+-
+-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+-		MLX5_SET(mkc, mkc, relaxed_ordering_write,
+-			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+-		MLX5_SET(mkc, mkc, relaxed_ordering_read,
+-			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+-
+-	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+-	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+-	MLX5_SET64(mkc, mkc, start_addr, start_addr);
+-}
+-
+ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+@@ -1190,29 +1190,17 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
+ 	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
+ 
+ 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
++	set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
++				      populate ? pd : dev->umrc.pd);
+ 	MLX5_SET(mkc, mkc, free, !populate);
+ 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+-		MLX5_SET(mkc, mkc, relaxed_ordering_write,
+-			 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+-		MLX5_SET(mkc, mkc, relaxed_ordering_read,
+-			 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+-	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+-	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+-	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+-	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
+-	MLX5_SET(mkc, mkc, lr, 1);
+ 	MLX5_SET(mkc, mkc, umr_en, 1);
+ 
+-	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
+ 	MLX5_SET64(mkc, mkc, len, length);
+-	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ 	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ 	MLX5_SET(mkc, mkc, translations_octword_size,
+ 		 get_octo_len(virt_addr, length, page_shift));
+ 	MLX5_SET(mkc, mkc, log_page_size, page_shift);
+-	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ 	if (populate) {
+ 		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ 			 get_octo_len(virt_addr, length, page_shift));
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 5758dbe640451..7a3e8e6598d34 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4716,12 +4716,12 @@ int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+ 	return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
+ }
+ 
+-void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
++int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
+ 	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
+ 
+-	mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
++	return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
+ }
+ 
+ static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
+@@ -5056,14 +5056,18 @@ err:
+ 	return ERR_PTR(err);
+ }
+ 
+-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
++int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
+ 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
++	int ret;
+ 
+-	mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
++	ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
++	if (ret)
++		return ret;
+ 	destroy_user_rq(dev, wq->pd, rwq, udata);
+ 	kfree(rwq);
++	return 0;
+ }
+ 
+ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
+index ba899df44c5b4..5d4e140db99ce 100644
+--- a/drivers/infiniband/hw/mlx5/qp.h
++++ b/drivers/infiniband/hw/mlx5/qp.h
+@@ -26,8 +26,8 @@ int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ 
+ int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
+ 
+-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+-				  struct mlx5_core_qp *rq);
++int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
++				 struct mlx5_core_qp *rq);
+ int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ 				struct mlx5_core_qp *sq);
+ void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
+diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
+index 7c3968ef9cd10..c683d7000168d 100644
+--- a/drivers/infiniband/hw/mlx5/qpc.c
++++ b/drivers/infiniband/hw/mlx5/qpc.c
+@@ -576,11 +576,12 @@ err_destroy_rq:
+ 	return err;
+ }
+ 
+-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+-				  struct mlx5_core_qp *rq)
++int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
++				 struct mlx5_core_qp *rq)
+ {
+ 	destroy_resource_common(dev, rq);
+ 	destroy_rq_tracked(dev, rq->qpn, rq->uid);
++	return 0;
+ }
+ 
+ static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
+diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
+index 9fa2f9164a47b..2ad15adf304e5 100644
+--- a/drivers/infiniband/hw/mthca/mthca_provider.c
++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
+@@ -789,7 +789,7 @@ out:
+ 	return ret;
+ }
+ 
+-static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
++static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ {
+ 	if (udata) {
+ 		struct mthca_ucontext *context =
+@@ -808,6 +808,7 @@ static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ 				    to_mcq(cq)->set_ci_db_index);
+ 	}
+ 	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
++	return 0;
+ }
+ 
+ static inline u32 convert_access(int acc)
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index c1751c9a0f625..4ef5298247fcf 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -1056,7 +1056,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
+ 	spin_unlock_irqrestore(&cq->cq_lock, flags);
+ }
+ 
+-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ 	struct ocrdma_eq *eq = NULL;
+@@ -1081,6 +1081,7 @@ void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 				ocrdma_get_db_addr(dev, pdid),
+ 				dev->nic_info.db_page_size);
+ 	}
++	return 0;
+ }
+ 
+ static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+index df8e3b923a440..4322b5d792608 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+@@ -72,7 +72,7 @@ void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+ int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		     struct ib_udata *udata);
+ int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
++int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+ 
+ struct ib_qp *ocrdma_create_qp(struct ib_pd *,
+ 			       struct ib_qp_init_attr *attrs,
+diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
+index d85f992bac299..8e1365951fb6a 100644
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -602,7 +602,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
+ 	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
+ 
+ 	/* Part 2 - check capabilities */
+-	page_size = ~dev->attr.page_size_caps + 1;
++	page_size = ~qed_attr->page_size_caps + 1;
+ 	if (page_size > PAGE_SIZE) {
+ 		DP_ERR(dev,
+ 		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index 97fc7dd353b04..c7169d2c69e5b 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -736,7 +736,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	struct qedr_dev *dev = ep->dev;
+ 	struct qedr_qp *qp;
+ 	struct qed_iwarp_accept_in params;
+-	int rc = 0;
++	int rc;
+ 
+ 	DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
+ 
+@@ -759,8 +759,10 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	params.ord = conn_param->ord;
+ 
+ 	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+-			     &qp->iwarp_cm_flags))
++			     &qp->iwarp_cm_flags)) {
++		rc = -EINVAL;
+ 		goto err; /* QP already destroyed */
++	}
+ 
+ 	rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
+ 	if (rc) {
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index b49bef94637e5..10536cce120e8 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -999,7 +999,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 	/* Generate doorbell address. */
+ 	cq->db.data.icid = cq->icid;
+ 	cq->db_addr = dev->db_addr + db_offset;
+-	cq->db.data.params = DB_AGG_CMD_SET <<
++	cq->db.data.params = DB_AGG_CMD_MAX <<
+ 			     RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
+ 
+ 	/* point to the very last element, passing it we will toggle */
+@@ -1051,7 +1051,7 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
+ #define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
+ #define QEDR_DESTROY_CQ_ITER_DURATION		(10)
+ 
+-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ 	struct qed_rdma_destroy_cq_out_params oparams;
+@@ -1066,7 +1066,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 	/* GSIs CQs are handled by driver, so they don't exist in the FW */
+ 	if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
+ 		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
+-		return;
++		return 0;
+ 	}
+ 
+ 	iparams.icid = cq->icid;
+@@ -1114,6 +1114,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 	 * Since the destroy CQ ramrod has also been received on the EQ we can
+ 	 * be certain that there's no event handler in process.
+ 	 */
++	return 0;
+ }
+ 
+ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
+@@ -2112,6 +2113,28 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
+ 	return rc;
+ }
+ 
++static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
++				  struct ib_udata *udata)
++{
++	struct qedr_ucontext *ctx =
++		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
++					  ibucontext);
++	int rc;
++
++	if (qp->qp_type != IB_QPT_GSI) {
++		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
++		if (rc)
++			return rc;
++	}
++
++	if (qp->create_type == QEDR_QP_CREATE_USER)
++		qedr_cleanup_user(dev, ctx, qp);
++	else
++		qedr_cleanup_kernel(dev, qp);
++
++	return 0;
++}
++
+ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+ 			     struct ib_qp_init_attr *attrs,
+ 			     struct ib_udata *udata)
+@@ -2158,19 +2181,21 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+ 		rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
+ 
+ 	if (rc)
+-		goto err;
++		goto out_free_qp;
+ 
+ 	qp->ibqp.qp_num = qp->qp_id;
+ 
+ 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ 		rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
+ 		if (rc)
+-			goto err;
++			goto out_free_qp_resources;
+ 	}
+ 
+ 	return &qp->ibqp;
+ 
+-err:
++out_free_qp_resources:
++	qedr_free_qp_resources(dev, qp, udata);
++out_free_qp:
+ 	kfree(qp);
+ 
+ 	return ERR_PTR(-EFAULT);
+@@ -2636,7 +2661,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
+ 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+ 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+-	qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
++	qp_attr->cap.max_inline_data = dev->attr.max_inline;
+ 	qp_init_attr->cap = qp_attr->cap;
+ 
+ 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+@@ -2671,28 +2696,6 @@ err:
+ 	return rc;
+ }
+ 
+-static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
+-				  struct ib_udata *udata)
+-{
+-	struct qedr_ucontext *ctx =
+-		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+-					  ibucontext);
+-	int rc;
+-
+-	if (qp->qp_type != IB_QPT_GSI) {
+-		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	if (qp->create_type == QEDR_QP_CREATE_USER)
+-		qedr_cleanup_user(dev, ctx, qp);
+-	else
+-		qedr_cleanup_kernel(dev, qp);
+-
+-	return 0;
+-}
+-
+ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ {
+ 	struct qedr_qp *qp = get_qedr_qp(ibqp);
+@@ -2752,6 +2755,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 
+ 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ 		qedr_iw_qp_rem_ref(&qp->ibqp);
++	else
++		kfree(qp);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
+index 39dd6286ba395..b6d09f5376d81 100644
+--- a/drivers/infiniband/hw/qedr/verbs.h
++++ b/drivers/infiniband/hw/qedr/verbs.h
+@@ -52,7 +52,7 @@ void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		   struct ib_udata *udata);
+ int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
++int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+ int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+ struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
+ 			     struct ib_udata *);
+diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+index b8a77ce115908..586ff16be1bb3 100644
+--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+@@ -596,9 +596,9 @@ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 	return 0;
+ }
+ 
+-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
++int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ {
+-	return;
++	return 0;
+ }
+ 
+ struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
+diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+index 2aedf78c13cf2..f13b08c59b9a3 100644
+--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+@@ -60,7 +60,7 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 		       int attr_mask, struct ib_udata *udata);
+ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		       struct ib_udata *udata);
+-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
++int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+ struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
+ 			      u64 virt_addr, int access_flags,
+ 			      struct ib_udata *udata);
+diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+index 4f6cc0de7ef95..6d3e6389e47da 100644
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+@@ -235,7 +235,7 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
+  * @cq: the completion queue to destroy.
+  * @udata: user data or null for kernel object
+  */
+-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
++int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ {
+ 	struct pvrdma_cq *vcq = to_vcq(cq);
+ 	union pvrdma_cmd_req req;
+@@ -261,6 +261,7 @@ void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ 
+ 	pvrdma_free_cq(dev, vcq);
+ 	atomic_dec(&dev->num_cqs);
++	return 0;
+ }
+ 
+ static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
+diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+index 699b20849a7ef..61b8425d92c5e 100644
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+@@ -411,7 +411,7 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ 		     int sg_nents, unsigned int *sg_offset);
+ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		     struct ib_udata *udata);
+-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
++int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+ int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+ int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
+index 04d2e72017fed..19248be140933 100644
+--- a/drivers/infiniband/sw/rdmavt/cq.c
++++ b/drivers/infiniband/sw/rdmavt/cq.c
+@@ -315,7 +315,7 @@ bail_wc:
+  *
+  * Called by ib_destroy_cq() in the generic verbs code.
+  */
+-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ 	struct rvt_dev_info *rdi = cq->rdi;
+@@ -328,6 +328,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 		kref_put(&cq->ip->ref, rvt_release_mmap_info);
+ 	else
+ 		vfree(cq->kqueue);
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
+index 5e26a2eb19a4c..feb01e7ee0044 100644
+--- a/drivers/infiniband/sw/rdmavt/cq.h
++++ b/drivers/infiniband/sw/rdmavt/cq.h
+@@ -53,7 +53,7 @@
+ 
+ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		  struct ib_udata *udata);
+-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
++int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+ int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
+ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
+index f904bb34477ae..2d534c450f3c8 100644
+--- a/drivers/infiniband/sw/rdmavt/vt.c
++++ b/drivers/infiniband/sw/rdmavt/vt.c
+@@ -95,9 +95,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
+ 	if (!rdi)
+ 		return rdi;
+ 
+-	rdi->ports = kcalloc(nports,
+-			     sizeof(struct rvt_ibport **),
+-			     GFP_KERNEL);
++	rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
+ 	if (!rdi->ports)
+ 		ib_dealloc_device(&rdi->ibdev);
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
+index 7e123d3c4d09b..2da4187db80c9 100644
+--- a/drivers/infiniband/sw/rxe/rxe_recv.c
++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
+@@ -260,6 +260,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ 	struct rxe_mc_elem *mce;
+ 	struct rxe_qp *qp;
+ 	union ib_gid dgid;
++	struct sk_buff *per_qp_skb;
++	struct rxe_pkt_info *per_qp_pkt;
+ 	int err;
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+@@ -288,21 +290,29 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ 		if (err)
+ 			continue;
+ 
+-		/* if *not* the last qp in the list
+-		 * increase the users of the skb then post to the next qp
++		/* for all but the last qp create a new clone of the
++		 * skb and pass to the qp.
+ 		 */
+ 		if (mce->qp_list.next != &mcg->qp_list)
+-			skb_get(skb);
++			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
++		else
++			per_qp_skb = skb;
++
++		if (unlikely(!per_qp_skb))
++			continue;
+ 
+-		pkt->qp = qp;
++		per_qp_pkt = SKB_TO_PKT(per_qp_skb);
++		per_qp_pkt->qp = qp;
+ 		rxe_add_ref(qp);
+-		rxe_rcv_pkt(pkt, skb);
++		rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
+ 	}
+ 
+ 	spin_unlock_bh(&mcg->mcg_lock);
+ 
+ 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
+ 
++	return;
++
+ err1:
+ 	kfree_skb(skb);
+ }
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 8522e9a3e9140..cfe115d64cb88 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -803,13 +803,14 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 	return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
+ }
+ 
+-static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct rxe_cq *cq = to_rcq(ibcq);
+ 
+ 	rxe_cq_disable(cq);
+ 
+ 	rxe_drop_ref(cq);
++	return 0;
+ }
+ 
+ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index adafa1b8bebe3..60271c30e7de5 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -1055,7 +1055,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
+ 	return rv > 0 ? 0 : rv;
+ }
+ 
+-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
++int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
+ {
+ 	struct siw_cq *cq = to_siw_cq(base_cq);
+ 	struct siw_device *sdev = to_siw_dev(base_cq->device);
+@@ -1073,6 +1073,7 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
+ 	atomic_dec(&sdev->num_cq);
+ 
+ 	vfree(cq->queue);
++	return 0;
+ }
+ 
+ /*
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
+index d9572275a6b69..476e9283fce25 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.h
++++ b/drivers/infiniband/sw/siw/siw_verbs.h
+@@ -62,7 +62,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
+ 		  const struct ib_send_wr **bad_wr);
+ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
+ 		     const struct ib_recv_wr **bad_wr);
+-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
++int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
+ int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc);
+ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags);
+ struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len,
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index f772fe8c5b663..abfab89423f41 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -2480,6 +2480,8 @@ static struct net_device *ipoib_add_port(const char *format,
+ 	/* call event handler to ensure pkey in sync */
+ 	queue_work(ipoib_workqueue, &priv->flush_heavy);
+ 
++	ndev->rtnl_link_ops = ipoib_get_link_ops();
++
+ 	result = register_netdev(ndev);
+ 	if (result) {
+ 		pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+index 38c984d16996d..d5a90a66b45cf 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+@@ -144,6 +144,16 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
+ 	return 0;
+ }
+ 
++static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
++{
++	struct ipoib_dev_priv *priv = ipoib_priv(dev);
++
++	if (!priv->parent)
++		return;
++
++	unregister_netdevice_queue(dev, head);
++}
++
+ static size_t ipoib_get_size(const struct net_device *dev)
+ {
+ 	return nla_total_size(2) +	/* IFLA_IPOIB_PKEY */
+@@ -158,6 +168,7 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+ 	.priv_size	= sizeof(struct ipoib_dev_priv),
+ 	.setup		= ipoib_setup_common,
+ 	.newlink	= ipoib_new_child_link,
++	.dellink	= ipoib_del_child_link,
+ 	.changelink	= ipoib_changelink,
+ 	.get_size	= ipoib_get_size,
+ 	.fill_info	= ipoib_fill_info,
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+index 30865605e0980..4c50a87ed7cc2 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+@@ -195,6 +195,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+ 	}
+ 	priv = ipoib_priv(ndev);
+ 
++	ndev->rtnl_link_ops = ipoib_get_link_ops();
++
+ 	result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
+ 
+ 	if (result && ndev->reg_state == NETREG_UNINITIALIZED)
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 28f6414dfa3dc..d6f93601712e4 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -16,6 +16,7 @@
+ #include "rtrs-srv.h"
+ #include "rtrs-log.h"
+ #include <rdma/ib_cm.h>
++#include <rdma/ib_verbs.h>
+ 
+ MODULE_DESCRIPTION("RDMA Transport Server");
+ MODULE_LICENSE("GPL");
+@@ -31,6 +32,7 @@ MODULE_LICENSE("GPL");
+ static struct rtrs_rdma_dev_pd dev_pd;
+ static mempool_t *chunk_pool;
+ struct class *rtrs_dev_class;
++static struct rtrs_srv_ib_ctx ib_ctx;
+ 
+ static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
+ static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
+@@ -2042,6 +2044,70 @@ static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
+ 	kfree(ctx);
+ }
+ 
++static int rtrs_srv_add_one(struct ib_device *device)
++{
++	struct rtrs_srv_ctx *ctx;
++	int ret = 0;
++
++	mutex_lock(&ib_ctx.ib_dev_mutex);
++	if (ib_ctx.ib_dev_count)
++		goto out;
++
++	/*
++	 * Since our CM IDs are NOT bound to any ib device we will create them
++	 * only once
++	 */
++	ctx = ib_ctx.srv_ctx;
++	ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
++	if (ret) {
++		/*
++		 * We errored out here.
++		 * According to the ib code, if we encounter an error here then the
++		 * error code is ignored, and no more calls to our ops are made.
++		 */
++		pr_err("Failed to initialize RDMA connection");
++		goto err_out;
++	}
++
++out:
++	/*
++	 * Keep a track on the number of ib devices added
++	 */
++	ib_ctx.ib_dev_count++;
++
++err_out:
++	mutex_unlock(&ib_ctx.ib_dev_mutex);
++	return ret;
++}
++
++static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
++{
++	struct rtrs_srv_ctx *ctx;
++
++	mutex_lock(&ib_ctx.ib_dev_mutex);
++	ib_ctx.ib_dev_count--;
++
++	if (ib_ctx.ib_dev_count)
++		goto out;
++
++	/*
++	 * Since our CM IDs are NOT bound to any ib device we will remove them
++	 * only once, when the last device is removed
++	 */
++	ctx = ib_ctx.srv_ctx;
++	rdma_destroy_id(ctx->cm_id_ip);
++	rdma_destroy_id(ctx->cm_id_ib);
++
++out:
++	mutex_unlock(&ib_ctx.ib_dev_mutex);
++}
++
++static struct ib_client rtrs_srv_client = {
++	.name	= "rtrs_server",
++	.add	= rtrs_srv_add_one,
++	.remove	= rtrs_srv_remove_one
++};
++
+ /**
+  * rtrs_srv_open() - open RTRS server context
+  * @ops:		callback functions
+@@ -2060,7 +2126,11 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
+ 	if (!ctx)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	err = rtrs_srv_rdma_init(ctx, port);
++	mutex_init(&ib_ctx.ib_dev_mutex);
++	ib_ctx.srv_ctx = ctx;
++	ib_ctx.port = port;
++
++	err = ib_register_client(&rtrs_srv_client);
+ 	if (err) {
+ 		free_srv_ctx(ctx);
+ 		return ERR_PTR(err);
+@@ -2099,8 +2169,8 @@ static void close_ctx(struct rtrs_srv_ctx *ctx)
+  */
+ void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
+ {
+-	rdma_destroy_id(ctx->cm_id_ip);
+-	rdma_destroy_id(ctx->cm_id_ib);
++	ib_unregister_client(&rtrs_srv_client);
++	mutex_destroy(&ib_ctx.ib_dev_mutex);
+ 	close_ctx(ctx);
+ 	free_srv_ctx(ctx);
+ }
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+index dc95b0932f0df..08b0b8a6eebe6 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+@@ -118,6 +118,13 @@ struct rtrs_srv_ctx {
+ 	struct list_head srv_list;
+ };
+ 
++struct rtrs_srv_ib_ctx {
++	struct rtrs_srv_ctx	*srv_ctx;
++	u16			port;
++	struct mutex		ib_dev_mutex;
++	int			ib_dev_count;
++};
++
+ extern struct class *rtrs_dev_class;
+ 
+ void close_sess(struct rtrs_srv_sess *sess);
+diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
+index 7c70492d9d6b5..f831f01501d58 100644
+--- a/drivers/input/keyboard/ep93xx_keypad.c
++++ b/drivers/input/keyboard/ep93xx_keypad.c
+@@ -250,8 +250,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	keypad->irq = platform_get_irq(pdev, 0);
+-	if (!keypad->irq) {
+-		err = -ENXIO;
++	if (keypad->irq < 0) {
++		err = keypad->irq;
+ 		goto failed_free;
+ 	}
+ 
+diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
+index 94c94d7f5155f..d6c924032aaa8 100644
+--- a/drivers/input/keyboard/omap4-keypad.c
++++ b/drivers/input/keyboard/omap4-keypad.c
+@@ -240,10 +240,8 @@ static int omap4_keypad_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (!irq) {
+-		dev_err(&pdev->dev, "no keyboard irq assigned\n");
+-		return -EINVAL;
+-	}
++	if (irq < 0)
++		return irq;
+ 
+ 	keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
+ 	if (!keypad_data) {
+diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
+index af3a6824f1a4d..77e0743a3cf85 100644
+--- a/drivers/input/keyboard/twl4030_keypad.c
++++ b/drivers/input/keyboard/twl4030_keypad.c
+@@ -50,7 +50,7 @@ struct twl4030_keypad {
+ 	bool		autorepeat;
+ 	unsigned int	n_rows;
+ 	unsigned int	n_cols;
+-	unsigned int	irq;
++	int		irq;
+ 
+ 	struct device *dbg_dev;
+ 	struct input_dev *input;
+@@ -376,10 +376,8 @@ static int twl4030_kp_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	kp->irq = platform_get_irq(pdev, 0);
+-	if (!kp->irq) {
+-		dev_err(&pdev->dev, "no keyboard irq assigned\n");
+-		return -EINVAL;
+-	}
++	if (kp->irq < 0)
++		return kp->irq;
+ 
+ 	error = matrix_keypad_build_keymap(keymap_data, NULL,
+ 					   TWL4030_MAX_ROWS,
+diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
+index a681a2c04e399..f15ed3dcdb9b2 100644
+--- a/drivers/input/serio/sun4i-ps2.c
++++ b/drivers/input/serio/sun4i-ps2.c
+@@ -211,7 +211,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
+ 	struct sun4i_ps2data *drvdata;
+ 	struct serio *serio;
+ 	struct device *dev = &pdev->dev;
+-	unsigned int irq;
+ 	int error;
+ 
+ 	drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL);
+@@ -264,14 +263,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
+ 	writel(0, drvdata->reg_base + PS2_REG_GCTL);
+ 
+ 	/* Get IRQ for the device */
+-	irq = platform_get_irq(pdev, 0);
+-	if (!irq) {
+-		dev_err(dev, "no IRQ found\n");
+-		error = -ENXIO;
++	drvdata->irq = platform_get_irq(pdev, 0);
++	if (drvdata->irq < 0) {
++		error = drvdata->irq;
+ 		goto err_disable_clk;
+ 	}
+ 
+-	drvdata->irq = irq;
+ 	drvdata->serio = serio;
+ 	drvdata->dev = dev;
+ 
+diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
+index b0bd5bb079bec..75b39ef39b743 100644
+--- a/drivers/input/touchscreen/elants_i2c.c
++++ b/drivers/input/touchscreen/elants_i2c.c
+@@ -90,7 +90,7 @@
+ /* FW read command, 0x53 0x?? 0x0, 0x01 */
+ #define E_ELAN_INFO_FW_VER	0x00
+ #define E_ELAN_INFO_BC_VER	0x10
+-#define E_ELAN_INFO_REK	0xE0
++#define E_ELAN_INFO_REK	0xD0
+ #define E_ELAN_INFO_TEST_VER	0xE0
+ #define E_ELAN_INFO_FW_ID	0xF0
+ #define E_INFO_OSR	0xD6
+diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
+index 9ed258854349b..5e6ba5c4eca2a 100644
+--- a/drivers/input/touchscreen/imx6ul_tsc.c
++++ b/drivers/input/touchscreen/imx6ul_tsc.c
+@@ -530,20 +530,25 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
+ 
+ 	mutex_lock(&input_dev->mutex);
+ 
+-	if (input_dev->users) {
+-		retval = clk_prepare_enable(tsc->adc_clk);
+-		if (retval)
+-			goto out;
+-
+-		retval = clk_prepare_enable(tsc->tsc_clk);
+-		if (retval) {
+-			clk_disable_unprepare(tsc->adc_clk);
+-			goto out;
+-		}
++	if (!input_dev->users)
++		goto out;
+ 
+-		retval = imx6ul_tsc_init(tsc);
++	retval = clk_prepare_enable(tsc->adc_clk);
++	if (retval)
++		goto out;
++
++	retval = clk_prepare_enable(tsc->tsc_clk);
++	if (retval) {
++		clk_disable_unprepare(tsc->adc_clk);
++		goto out;
+ 	}
+ 
++	retval = imx6ul_tsc_init(tsc);
++	if (retval) {
++		clk_disable_unprepare(tsc->tsc_clk);
++		clk_disable_unprepare(tsc->adc_clk);
++		goto out;
++	}
+ out:
+ 	mutex_unlock(&input_dev->mutex);
+ 	return retval;
+diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
+index df946869d4cd1..9a64e1dbc04ad 100644
+--- a/drivers/input/touchscreen/stmfts.c
++++ b/drivers/input/touchscreen/stmfts.c
+@@ -479,7 +479,7 @@ static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev,
+ 
+ 	mutex_lock(&sdata->mutex);
+ 
+-	if (value & sdata->hover_enabled)
++	if (value && sdata->hover_enabled)
+ 		goto out;
+ 
+ 	if (sdata->running)
+diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+index af6bec3ace007..ef3dd32aa6d97 100644
+--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c ++++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c +@@ -584,8 +584,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) + * index into qcom_iommu->ctxs: + */ + if (WARN_ON(asid < 1) || +- WARN_ON(asid > qcom_iommu->num_ctxs)) ++ WARN_ON(asid > qcom_iommu->num_ctxs)) { ++ put_device(&iommu_pdev->dev); + return -EINVAL; ++ } + + if (!dev_iommu_priv_get(dev)) { + dev_iommu_priv_set(dev, qcom_iommu); +@@ -594,8 +596,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) + * multiple different iommu devices. Multiple context + * banks are ok, but multiple devices are not: + */ +- if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) ++ if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) { ++ put_device(&iommu_pdev->dev); + return -EINVAL; ++ } + } + + return iommu_fwspec_add_ids(dev, &asid, 1); +diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c +index d4e97605456bb..05bf94b87b938 100644 +--- a/drivers/irqchip/irq-ti-sci-inta.c ++++ b/drivers/irqchip/irq-ti-sci-inta.c +@@ -175,8 +175,8 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom + struct irq_fwspec parent_fwspec; + struct device_node *parent_node; + unsigned int parent_virq; +- u16 vint_id, p_hwirq; +- int ret; ++ int p_hwirq, ret; ++ u16 vint_id; + + vint_id = ti_sci_get_free_resource(inta->vint); + if (vint_id == TI_SCI_RESOURCE_NULL) +diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c +index cbc1758228d9e..85a72b56177cf 100644 +--- a/drivers/irqchip/irq-ti-sci-intr.c ++++ b/drivers/irqchip/irq-ti-sci-intr.c +@@ -137,8 +137,8 @@ static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain, + struct ti_sci_intr_irq_domain *intr = domain->host_data; + struct device_node *parent_node; + struct irq_fwspec fwspec; +- u16 out_irq, p_hwirq; +- int err = 0; ++ int p_hwirq, err = 0; ++ u16 out_irq; + + out_irq = ti_sci_get_free_resource(intr->out_irqs); + if (out_irq == TI_SCI_RESOURCE_NULL) +diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c +index fe78bf0fdce54..c1bcac71008c6 100644 +--- a/drivers/lightnvm/core.c ++++ b/drivers/lightnvm/core.c +@@ -1311,8 +1311,9 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg) + strlcpy(info->bmname, "gennvm", sizeof(info->bmname)); + i++; + +- if (i > 31) { +- pr_err("max 31 devices can be reported.\n"); ++ if (i >= ARRAY_SIZE(devices->info)) { ++ pr_err("max %zd devices can be reported.\n", ++ ARRAY_SIZE(devices->info)); + break; + } + } +diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c +index 0b821a5b2db84..3e7d4b20ab34f 100644 +--- a/drivers/mailbox/mailbox.c ++++ b/drivers/mailbox/mailbox.c +@@ -82,10 +82,12 @@ + exit: + spin_unlock_irqrestore(&chan->lock, flags); + ++ /* kick start the timer immediately to avoid delays */ + if (!err && (chan->txdone_method & TXDONE_BY_POLL)) { + if (!timekeeping_suspended) { +- /* kick start the timer immediately to avoid delays */ +- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); ++ /* but only if not already active */ ++ if (!hrtimer_active(&chan->mbox->poll_hrt)) ++ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); + } + } + } + + static void tx_tick(struct mbox_chan *chan, int r) +@@ -122,11 +125,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer) + struct mbox_chan *chan = &mbox->chans[i]; + + if (chan->active_req && chan->cl) { ++ resched = true; + txdone = 
chan->mbox->ops->last_tx_done(chan); + if (txdone) + tx_tick(chan, 0); +- else +- resched = true; + } + } + +diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c +index 484d4438cd835..5665b6ea8119f 100644 +--- a/drivers/mailbox/mtk-cmdq-mailbox.c ++++ b/drivers/mailbox/mtk-cmdq-mailbox.c +@@ -69,7 +69,7 @@ struct cmdq_task { + struct cmdq { + struct mbox_controller mbox; + void __iomem *base; +- u32 irq; ++ int irq; + u32 thread_nr; + u32 irq_mask; + struct cmdq_thread *thread; +@@ -525,10 +525,8 @@ static int cmdq_probe(struct platform_device *pdev) + } + + cmdq->irq = platform_get_irq(pdev, 0); +- if (!cmdq->irq) { +- dev_err(dev, "failed to get irq\n"); +- return -EINVAL; +- } ++ if (cmdq->irq < 0) ++ return cmdq->irq; + + plat_data = (struct gce_plat *)of_device_get_match_data(dev); + if (!plat_data) { +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 6ed05ca65a0f8..9b005e144014f 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -1744,17 +1744,11 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, + } + + /* +- * If in ->submit_bio we need to use blk_queue_split(), otherwise +- * queue_limits for abnormal requests (e.g. discard, writesame, etc) +- * won't be imposed. +- * If called from dm_wq_work() for deferred bio processing, bio +- * was already handled by following code with previous ->submit_bio. ++ * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) ++ * otherwise associated queue_limits won't be imposed. + */ +- if (current->bio_list) { +- if (is_abnormal_io(bio)) +- blk_queue_split(&bio); +- /* regular IO is split by __split_and_process_bio */ +- } ++ if (is_abnormal_io(bio)) ++ blk_queue_split(&bio); + + if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) + return __process_bio(md, map, bio, ti); +@@ -1768,18 +1762,6 @@ static blk_qc_t dm_submit_bio(struct bio *bio) + int srcu_idx; + struct dm_table *map; + +- if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) { +- /* +- * We are called with a live reference on q_usage_counter, but +- * that one will be released as soon as we return. Grab an +- * extra one as blk_mq_submit_bio expects to be able to consume +- * a reference (which lives until the request is freed in case a +- * request is allocated). 
+- */ +- percpu_ref_get(&bio->bi_disk->queue->q_usage_counter); +- return blk_mq_submit_bio(bio); +- } +- + map = dm_get_live_table(md, &srcu_idx); + + /* if we're suspended, we have to queue this io for later */ +@@ -1849,6 +1831,7 @@ static int next_free_minor(int *minor) + } + + static const struct block_device_operations dm_blk_dops; ++static const struct block_device_operations dm_rq_blk_dops; + static const struct dax_operations dm_dax_ops; + + static void dm_wq_work(struct work_struct *work); +@@ -2248,9 +2231,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) + + switch (type) { + case DM_TYPE_REQUEST_BASED: ++ md->disk->fops = &dm_rq_blk_dops; + r = dm_mq_init_request_queue(md, t); + if (r) { +- DMERR("Cannot initialize queue for request-based dm-mq mapped device"); ++ DMERR("Cannot initialize queue for request-based dm mapped device"); + return r; + } + break; +@@ -2461,29 +2445,19 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state) + */ + static void dm_wq_work(struct work_struct *work) + { +- struct mapped_device *md = container_of(work, struct mapped_device, +- work); +- struct bio *c; +- int srcu_idx; +- struct dm_table *map; +- +- map = dm_get_live_table(md, &srcu_idx); ++ struct mapped_device *md = container_of(work, struct mapped_device, work); ++ struct bio *bio; + + while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { + spin_lock_irq(&md->deferred_lock); +- c = bio_list_pop(&md->deferred); ++ bio = bio_list_pop(&md->deferred); + spin_unlock_irq(&md->deferred_lock); + +- if (!c) ++ if (!bio) + break; + +- if (dm_request_based(md)) +- (void) submit_bio_noacct(c); +- else +- (void) dm_process_bio(md, map, c); ++ submit_bio_noacct(bio); + } +- +- dm_put_live_table(md, srcu_idx); + } + + static void dm_queue_flush(struct mapped_device *md) +@@ -3243,6 +3217,15 @@ static const struct block_device_operations dm_blk_dops = { + .owner = THIS_MODULE + }; + ++static const struct block_device_operations dm_rq_blk_dops = { ++ .open = dm_blk_open, ++ .release = dm_blk_close, ++ .ioctl = dm_blk_ioctl, ++ .getgeo = dm_blk_getgeo, ++ .pr_ops = &dm_pr_ops, ++ .owner = THIS_MODULE ++}; ++ + static const struct dax_operations dm_dax_ops = { + .direct_access = dm_dax_direct_access, + .dax_supported = dm_dax_supported, +diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c +index b10c51988c8ee..c61ab86a28b52 100644 +--- a/drivers/md/md-bitmap.c ++++ b/drivers/md/md-bitmap.c +@@ -1949,6 +1949,7 @@ out: + } + EXPORT_SYMBOL_GPL(md_bitmap_load); + ++/* caller need to free returned bitmap with md_bitmap_free() */ + struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot) + { + int rv = 0; +@@ -2012,6 +2013,7 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot, + md_bitmap_unplug(mddev->bitmap); + *low = lo; + *high = hi; ++ md_bitmap_free(bitmap); + + return rv; + } +@@ -2615,4 +2617,3 @@ struct attribute_group md_bitmap_group = { + .name = "bitmap", + .attrs = md_bitmap_attrs, + }; +- +diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c +index d50737ec40394..afbbc552c3275 100644 +--- a/drivers/md/md-cluster.c ++++ b/drivers/md/md-cluster.c +@@ -1166,6 +1166,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz + * can't resize bitmap + */ + goto out; ++ md_bitmap_free(bitmap); + } + + return 0; +diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c +index 3f1ca40b9b987..8a8585261bb80 100644 +--- a/drivers/media/firewire/firedtv-fw.c ++++ 
b/drivers/media/firewire/firedtv-fw.c +@@ -272,8 +272,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) + + name_len = fw_csr_string(unit->directory, CSR_MODEL, + name, sizeof(name)); +- if (name_len < 0) +- return name_len; ++ if (name_len < 0) { ++ err = name_len; ++ goto fail_free; ++ } + for (i = ARRAY_SIZE(model_names); --i; ) + if (strlen(model_names[i]) <= name_len && + strncmp(name, model_names[i], name_len) == 0) +diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c +index de295114ca482..21666d705e372 100644 +--- a/drivers/media/i2c/m5mols/m5mols_core.c ++++ b/drivers/media/i2c/m5mols/m5mols_core.c +@@ -764,7 +764,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable) + + ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies); + if (ret) { +- info->set_power(&client->dev, 0); ++ if (info->set_power) ++ info->set_power(&client->dev, 0); + return ret; + } + +diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c +index 47f280518fdb6..c82c1493e099d 100644 +--- a/drivers/media/i2c/max9286.c ++++ b/drivers/media/i2c/max9286.c +@@ -135,13 +135,19 @@ + #define MAX9286_SRC_PAD 4 + + struct max9286_source { +- struct v4l2_async_subdev asd; + struct v4l2_subdev *sd; + struct fwnode_handle *fwnode; + }; + +-#define asd_to_max9286_source(_asd) \ +- container_of(_asd, struct max9286_source, asd) ++struct max9286_asd { ++ struct v4l2_async_subdev base; ++ struct max9286_source *source; ++}; ++ ++static inline struct max9286_asd *to_max9286_asd(struct v4l2_async_subdev *asd) ++{ ++ return container_of(asd, struct max9286_asd, base); ++} + + struct max9286_priv { + struct i2c_client *client; +@@ -405,10 +411,11 @@ static int max9286_check_config_link(struct max9286_priv *priv, + * to 5 milliseconds. 
+ */ + for (i = 0; i < 10; i++) { +- ret = max9286_read(priv, 0x49) & 0xf0; ++ ret = max9286_read(priv, 0x49); + if (ret < 0) + return -EIO; + ++ ret &= 0xf0; + if (ret == conflink_mask) + break; + +@@ -480,7 +487,7 @@ static int max9286_notify_bound(struct v4l2_async_notifier *notifier, + struct v4l2_async_subdev *asd) + { + struct max9286_priv *priv = sd_to_max9286(notifier->sd); +- struct max9286_source *source = asd_to_max9286_source(asd); ++ struct max9286_source *source = to_max9286_asd(asd)->source; + unsigned int index = to_index(priv, source); + unsigned int src_pad; + int ret; +@@ -544,7 +551,7 @@ static void max9286_notify_unbind(struct v4l2_async_notifier *notifier, + struct v4l2_async_subdev *asd) + { + struct max9286_priv *priv = sd_to_max9286(notifier->sd); +- struct max9286_source *source = asd_to_max9286_source(asd); ++ struct max9286_source *source = to_max9286_asd(asd)->source; + unsigned int index = to_index(priv, source); + + source->sd = NULL; +@@ -569,23 +576,19 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv) + + for_each_source(priv, source) { + unsigned int i = to_index(priv, source); +- +- source->asd.match_type = V4L2_ASYNC_MATCH_FWNODE; +- source->asd.match.fwnode = source->fwnode; +- +- ret = v4l2_async_notifier_add_subdev(&priv->notifier, +- &source->asd); +- if (ret) { +- dev_err(dev, "Failed to add subdev for source %d", i); ++ struct v4l2_async_subdev *asd; ++ ++ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, ++ source->fwnode, ++ sizeof(*asd)); ++ if (IS_ERR(asd)) { ++ dev_err(dev, "Failed to add subdev for source %u: %ld", ++ i, PTR_ERR(asd)); + v4l2_async_notifier_cleanup(&priv->notifier); +- return ret; ++ return PTR_ERR(asd); + } + +- /* +- * Balance the reference counting handled through +- * v4l2_async_notifier_cleanup() +- */ +- fwnode_handle_get(source->fwnode); ++ to_max9286_asd(asd)->source = source; + } + + priv->notifier.ops = &max9286_notify_ops; +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c +index 2fe4a7ac05929..3a4268aa5f023 100644 +--- a/drivers/media/i2c/ov5640.c ++++ b/drivers/media/i2c/ov5640.c +@@ -34,6 +34,8 @@ + #define OV5640_REG_SYS_RESET02 0x3002 + #define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006 + #define OV5640_REG_SYS_CTRL0 0x3008 ++#define OV5640_REG_SYS_CTRL0_SW_PWDN 0x42 ++#define OV5640_REG_SYS_CTRL0_SW_PWUP 0x02 + #define OV5640_REG_CHIP_ID 0x300a + #define OV5640_REG_IO_MIPI_CTRL00 0x300e + #define OV5640_REG_PAD_OUTPUT_ENABLE01 0x3017 +@@ -274,8 +276,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl) + /* YUV422 UYVY VGA@30fps */ + static const struct reg_value ov5640_init_setting_30fps_VGA[] = { + {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0}, +- {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0}, +- {0x3630, 0x36, 0, 0}, ++ {0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0}, + {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0}, + {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0}, + {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0}, +@@ -751,7 +752,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg, + * +->| PLL Root Div | - reg 0x3037, bit 4 + * +-+------------+ + * | +---------+ +- * +->| Bit Div | - reg 0x3035, bits 0-3 ++ * +->| Bit Div | - reg 0x3034, bits 0-3 + * +-+-------+ + * | +-------------+ + * +->| SCLK Div | - reg 0x3108, bits 0-1 +@@ -1120,6 +1121,12 @@ static int ov5640_load_regs(struct ov5640_dev *sensor, + val = regs->val; + mask = 
regs->mask; + ++ /* remain in power down mode for DVP */ ++ if (regs->reg_addr == OV5640_REG_SYS_CTRL0 && ++ val == OV5640_REG_SYS_CTRL0_SW_PWUP && ++ sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY) ++ continue; ++ + if (mask) + ret = ov5640_mod_reg(sensor, reg_addr, mask, val); + else +@@ -1275,31 +1282,9 @@ static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on) + if (ret) + return ret; + +- /* +- * enable VSYNC/HREF/PCLK DVP control lines +- * & D[9:6] DVP data lines +- * +- * PAD OUTPUT ENABLE 01 +- * - 6: VSYNC output enable +- * - 5: HREF output enable +- * - 4: PCLK output enable +- * - [3:0]: D[9:6] output enable +- */ +- ret = ov5640_write_reg(sensor, +- OV5640_REG_PAD_OUTPUT_ENABLE01, +- on ? 0x7f : 0); +- if (ret) +- return ret; +- +- /* +- * enable D[5:0] DVP data lines +- * +- * PAD OUTPUT ENABLE 02 +- * - [7:2]: D[5:0] output enable +- */ +- return ov5640_write_reg(sensor, +- OV5640_REG_PAD_OUTPUT_ENABLE02, +- on ? 0xfc : 0); ++ return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ? ++ OV5640_REG_SYS_CTRL0_SW_PWUP : ++ OV5640_REG_SYS_CTRL0_SW_PWDN); + } + + static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on) +@@ -2001,6 +1986,95 @@ static void ov5640_set_power_off(struct ov5640_dev *sensor) + clk_disable_unprepare(sensor->xclk); + } + ++static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on) ++{ ++ int ret; ++ ++ if (!on) { ++ /* Reset MIPI bus settings to their default values. */ ++ ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58); ++ ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x04); ++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x00); ++ return 0; ++ } ++ ++ /* ++ * Power up MIPI HS Tx and LS Rx; 2 data lanes mode ++ * ++ * 0x300e = 0x40 ++ * [7:5] = 010 : 2 data lanes mode (see FIXME note in ++ * "ov5640_set_stream_mipi()") ++ * [4] = 0 : Power up MIPI HS Tx ++ * [3] = 0 : Power up MIPI LS Rx ++ * [2] = 0 : MIPI interface disabled ++ */ ++ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40); ++ if (ret) ++ return ret; ++ ++ /* ++ * Gate clock and set LP11 in 'no packets mode' (idle) ++ * ++ * 0x4800 = 0x24 ++ * [5] = 1 : Gate clock when 'no packets' ++ * [2] = 1 : MIPI bus in LP11 when 'no packets' ++ */ ++ ret = ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x24); ++ if (ret) ++ return ret; ++ ++ /* ++ * Set data lanes and clock in LP11 when 'sleeping' ++ * ++ * 0x3019 = 0x70 ++ * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping' ++ * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping' ++ * [4] = 1 : MIPI clock lane in LP11 when 'sleeping' ++ */ ++ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x70); ++ if (ret) ++ return ret; ++ ++ /* Give lanes some time to coax into LP11 state. */ ++ usleep_range(500, 1000); ++ ++ return 0; ++} ++ ++static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on) ++{ ++ int ret; ++ ++ if (!on) { ++ /* Reset settings to their default values. 
*/ ++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00); ++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00); ++ return 0; ++ } ++ ++ /* ++ * enable VSYNC/HREF/PCLK DVP control lines ++ * & D[9:6] DVP data lines ++ * ++ * PAD OUTPUT ENABLE 01 ++ * - 6: VSYNC output enable ++ * - 5: HREF output enable ++ * - 4: PCLK output enable ++ * - [3:0]: D[9:6] output enable ++ */ ++ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x7f); ++ if (ret) ++ return ret; ++ ++ /* ++ * enable D[5:0] DVP data lines ++ * ++ * PAD OUTPUT ENABLE 02 ++ * - [7:2]: D[5:0] output enable ++ */ ++ return ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0xfc); ++} ++ + static int ov5640_set_power(struct ov5640_dev *sensor, bool on) + { + int ret = 0; +@@ -2013,67 +2087,17 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on) + ret = ov5640_restore_mode(sensor); + if (ret) + goto power_off; ++ } + +- /* We're done here for DVP bus, while CSI-2 needs setup. */ +- if (sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY) +- return 0; +- +- /* +- * Power up MIPI HS Tx and LS Rx; 2 data lanes mode +- * +- * 0x300e = 0x40 +- * [7:5] = 010 : 2 data lanes mode (see FIXME note in +- * "ov5640_set_stream_mipi()") +- * [4] = 0 : Power up MIPI HS Tx +- * [3] = 0 : Power up MIPI LS Rx +- * [2] = 0 : MIPI interface disabled +- */ +- ret = ov5640_write_reg(sensor, +- OV5640_REG_IO_MIPI_CTRL00, 0x40); +- if (ret) +- goto power_off; +- +- /* +- * Gate clock and set LP11 in 'no packets mode' (idle) +- * +- * 0x4800 = 0x24 +- * [5] = 1 : Gate clock when 'no packets' +- * [2] = 1 : MIPI bus in LP11 when 'no packets' +- */ +- ret = ov5640_write_reg(sensor, +- OV5640_REG_MIPI_CTRL00, 0x24); +- if (ret) +- goto power_off; +- +- /* +- * Set data lanes and clock in LP11 when 'sleeping' +- * +- * 0x3019 = 0x70 +- * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping' +- * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping' +- * [4] = 1 : MIPI clock lane in LP11 when 'sleeping' +- */ +- ret = ov5640_write_reg(sensor, +- OV5640_REG_PAD_OUTPUT00, 0x70); +- if (ret) +- goto power_off; +- +- /* Give lanes some time to coax into LP11 state. */ +- usleep_range(500, 1000); +- +- } else { +- if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) { +- /* Reset MIPI bus settings to their default values. 
*/ +- ov5640_write_reg(sensor, +- OV5640_REG_IO_MIPI_CTRL00, 0x58); +- ov5640_write_reg(sensor, +- OV5640_REG_MIPI_CTRL00, 0x04); +- ov5640_write_reg(sensor, +- OV5640_REG_PAD_OUTPUT00, 0x00); +- } ++ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) ++ ret = ov5640_set_power_mipi(sensor, on); ++ else ++ ret = ov5640_set_power_dvp(sensor, on); ++ if (ret) ++ goto power_off; + ++ if (!on) + ov5640_set_power_off(sensor); +- } + + return 0; + +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c +index dbbab75f135ec..cff99cf61ed4d 100644 +--- a/drivers/media/i2c/tc358743.c ++++ b/drivers/media/i2c/tc358743.c +@@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = { + .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable, + }; + +-static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus, +- bool *handled) ++static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus, ++ bool *handled) + { + struct tc358743_state *state = to_state(sd); + unsigned int cec_rxint, cec_txint; +@@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus, + cec_transmit_attempt_done(state->cec_adap, + CEC_TX_STATUS_ERROR); + } +- *handled = true; ++ if (handled) ++ *handled = true; + } + if ((intstatus & MASK_CEC_RINT) && + (cec_rxint & MASK_CECRIEND)) { +@@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus, + msg.msg[i] = v & 0xff; + } + cec_received_msg(state->cec_adap, &msg); +- *handled = true; ++ if (handled) ++ *handled = true; + } + i2c_wr16(sd, INTSTATUS, + intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)); +@@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled) + + #ifdef CONFIG_VIDEO_TC358743_CEC + if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) { +- tc358743_cec_isr(sd, intstatus, handled); ++ tc358743_cec_handler(sd, intstatus, handled); + i2c_wr16(sd, INTSTATUS, + intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)); + intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT); +@@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled) + static irqreturn_t tc358743_irq_handler(int irq, void *dev_id) + { + struct tc358743_state *state = dev_id; +- bool handled; ++ bool handled = false; + + tc358743_isr(&state->sd, 0, &handled); + +diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c +index 9144f795fb933..b721720f9845a 100644 +--- a/drivers/media/pci/bt8xx/bttv-driver.c ++++ b/drivers/media/pci/bt8xx/bttv-driver.c +@@ -4013,11 +4013,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) + btv->id = dev->device; + if (pci_enable_device(dev)) { + pr_warn("%d: Can't enable device\n", btv->c.nr); +- return -EIO; ++ result = -EIO; ++ goto free_mem; + } + if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) { + pr_warn("%d: No suitable DMA available\n", btv->c.nr); +- return -EIO; ++ result = -EIO; ++ goto free_mem; + } + if (!request_mem_region(pci_resource_start(dev,0), + pci_resource_len(dev,0), +@@ -4025,7 +4027,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) + pr_warn("%d: can't request iomem (0x%llx)\n", + btv->c.nr, + (unsigned long long)pci_resource_start(dev, 0)); +- return -EBUSY; ++ result = -EBUSY; ++ goto free_mem; + } + pci_set_master(dev); + pci_set_command(dev); +@@ -4211,6 +4214,10 @@ fail0: + release_mem_region(pci_resource_start(btv->c.pci,0), + pci_resource_len(btv->c.pci,0)); + pci_disable_device(btv->c.pci); ++ 
++free_mem: ++ bttvs[btv->c.nr] = NULL; ++ kfree(btv); + return result; + } + +diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c +index 79e1afb710758..5cc4ef21f9d37 100644 +--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c ++++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c +@@ -683,7 +683,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value) + { + int err; + +- audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value); ++ audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", ++ (reg << 2) & 0xffffffff, value); + err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR); + if (err < 0) + return err; +diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c +index cde0d254ec1c4..a77c49b185115 100644 +--- a/drivers/media/platform/exynos4-is/fimc-isp.c ++++ b/drivers/media/platform/exynos4-is/fimc-isp.c +@@ -305,8 +305,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on) + + if (on) { + ret = pm_runtime_get_sync(&is->pdev->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put(&is->pdev->dev); + return ret; ++ } + set_bit(IS_ST_PWR_ON, &is->state); + + ret = fimc_is_start_firmware(is); +diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c +index 9c666f663ab43..fdd0d369b1925 100644 +--- a/drivers/media/platform/exynos4-is/fimc-lite.c ++++ b/drivers/media/platform/exynos4-is/fimc-lite.c +@@ -471,7 +471,7 @@ static int fimc_lite_open(struct file *file) + set_bit(ST_FLITE_IN_USE, &fimc->state); + ret = pm_runtime_get_sync(&fimc->pdev->dev); + if (ret < 0) +- goto unlock; ++ goto err_pm; + + ret = v4l2_fh_open(file); + if (ret < 0) +diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c +index 16dd660137a8d..9034f9cf88481 100644 +--- a/drivers/media/platform/exynos4-is/media-dev.c ++++ b/drivers/media/platform/exynos4-is/media-dev.c +@@ -484,8 +484,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd) + return -ENXIO; + + ret = pm_runtime_get_sync(fmd->pmf); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put(fmd->pmf); + return ret; ++ } + + fmd->num_sensors = 0; + +@@ -1268,11 +1270,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd) + if (IS_ERR(pctl->state_default)) + return PTR_ERR(pctl->state_default); + ++ /* PINCTRL_STATE_IDLE is optional */ + pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl, + PINCTRL_STATE_IDLE); +- if (IS_ERR(pctl->state_idle)) +- return PTR_ERR(pctl->state_idle); +- + return 0; + } + +diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c +index 540151bbf58f2..1aac167abb175 100644 +--- a/drivers/media/platform/exynos4-is/mipi-csis.c ++++ b/drivers/media/platform/exynos4-is/mipi-csis.c +@@ -510,8 +510,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable) + if (enable) { + s5pcsis_clear_counters(state); + ret = pm_runtime_get_sync(&state->pdev->dev); +- if (ret && ret != 1) ++ if (ret && ret != 1) { ++ pm_runtime_put_noidle(&state->pdev->dev); + return ret; ++ } + } + + mutex_lock(&state->lock); +diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c +index f96c8b3bf8618..976aa1f4829b8 100644 +--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c ++++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c +@@ -94,7 +94,7 @@ static void mtk_mdp_reset_handler(void *priv) + void 
mtk_mdp_register_component(struct mtk_mdp_dev *mdp, + struct mtk_mdp_comp *comp) + { +- list_add(&mdp->comp_list, &comp->node); ++ list_add(&comp->node, &mdp->comp_list); + } + + void mtk_mdp_unregister_component(struct mtk_mdp_dev *mdp, +diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c +index df78df59da456..08a5473b56104 100644 +--- a/drivers/media/platform/mx2_emmaprp.c ++++ b/drivers/media/platform/mx2_emmaprp.c +@@ -852,8 +852,11 @@ static int emmaprp_probe(struct platform_device *pdev) + platform_set_drvdata(pdev, pcdev); + + irq = platform_get_irq(pdev, 0); +- if (irq < 0) +- return irq; ++ if (irq < 0) { ++ ret = irq; ++ goto rel_vdev; ++ } ++ + ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0, + dev_name(&pdev->dev), pcdev); + if (ret) +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c +index b91e472ee764e..de066757726de 100644 +--- a/drivers/media/platform/omap3isp/isp.c ++++ b/drivers/media/platform/omap3isp/isp.c +@@ -2328,8 +2328,10 @@ static int isp_probe(struct platform_device *pdev) + mem = platform_get_resource(pdev, IORESOURCE_MEM, i); + isp->mmio_base[map_idx] = + devm_ioremap_resource(isp->dev, mem); +- if (IS_ERR(isp->mmio_base[map_idx])) +- return PTR_ERR(isp->mmio_base[map_idx]); ++ if (IS_ERR(isp->mmio_base[map_idx])) { ++ ret = PTR_ERR(isp->mmio_base[map_idx]); ++ goto error; ++ } + } + + ret = isp_get_clocks(isp); +diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c +index 03ef9c5f4774d..85b24054f35e6 100644 +--- a/drivers/media/platform/qcom/camss/camss-csiphy.c ++++ b/drivers/media/platform/qcom/camss/camss-csiphy.c +@@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on) + int ret; + + ret = pm_runtime_get_sync(dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_sync(dev); + return ret; ++ } + + ret = csiphy_set_clock_rates(csiphy); + if (ret < 0) { +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c +index 203c6538044fb..321ad77cb6cf4 100644 +--- a/drivers/media/platform/qcom/venus/core.c ++++ b/drivers/media/platform/qcom/venus/core.c +@@ -224,13 +224,15 @@ static int venus_probe(struct platform_device *pdev) + + ret = dma_set_mask_and_coherent(dev, core->res->dma_mask); + if (ret) +- return ret; ++ goto err_core_put; + + if (!dev->dma_parms) { + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), + GFP_KERNEL); +- if (!dev->dma_parms) +- return -ENOMEM; ++ if (!dev->dma_parms) { ++ ret = -ENOMEM; ++ goto err_core_put; ++ } + } + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + +@@ -242,11 +244,11 @@ static int venus_probe(struct platform_device *pdev) + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "venus", core); + if (ret) +- return ret; ++ goto err_core_put; + + ret = hfi_create(core, &venus_core_ops); + if (ret) +- return ret; ++ goto err_core_put; + + pm_runtime_enable(dev); + +@@ -287,8 +289,10 @@ static int venus_probe(struct platform_device *pdev) + goto err_core_deinit; + + ret = pm_runtime_put_sync(dev); +- if (ret) ++ if (ret) { ++ pm_runtime_get_noresume(dev); + goto err_dev_unregister; ++ } + + return 0; + +@@ -299,9 +303,13 @@ err_core_deinit: + err_venus_shutdown: + venus_shutdown(core); + err_runtime_disable: ++ pm_runtime_put_noidle(dev); + pm_runtime_set_suspended(dev); + pm_runtime_disable(dev); + hfi_destroy(core); ++err_core_put: ++ if (core->pm_ops->core_put) ++ core->pm_ops->core_put(dev); + return ret; + } + +diff 
--git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c +index 7c4c483d54389..76be14efbfb09 100644 +--- a/drivers/media/platform/qcom/venus/vdec.c ++++ b/drivers/media/platform/qcom/venus/vdec.c +@@ -1088,8 +1088,6 @@ static int vdec_stop_capture(struct venus_inst *inst) + break; + } + +- INIT_LIST_HEAD(&inst->registeredbufs); +- + return ret; + } + +@@ -1189,6 +1187,14 @@ static int vdec_buf_init(struct vb2_buffer *vb) + static void vdec_buf_cleanup(struct vb2_buffer *vb) + { + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); ++ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); ++ struct venus_buffer *buf = to_venus_buffer(vbuf); ++ ++ mutex_lock(&inst->lock); ++ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) ++ if (!list_empty(&inst->registeredbufs)) ++ list_del_init(&buf->reg_list); ++ mutex_unlock(&inst->lock); + + inst->buf_count--; + if (!inst->buf_count) +diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c +index 5c6b00737fe75..05c712e00a2a7 100644 +--- a/drivers/media/platform/rcar-fcp.c ++++ b/drivers/media/platform/rcar-fcp.c +@@ -103,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp) + return 0; + + ret = pm_runtime_get_sync(fcp->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(fcp->dev); + return ret; ++ } + + return 0; + } +diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c +index c6cc4f473a077..a16c492b31434 100644 +--- a/drivers/media/platform/rcar-vin/rcar-csi2.c ++++ b/drivers/media/platform/rcar-vin/rcar-csi2.c +@@ -362,7 +362,6 @@ struct rcar_csi2 { + struct media_pad pads[NR_OF_RCAR_CSI2_PAD]; + + struct v4l2_async_notifier notifier; +- struct v4l2_async_subdev asd; + struct v4l2_subdev *remote; + + struct v4l2_mbus_framefmt mf; +@@ -811,6 +810,8 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv, + + static int rcsi2_parse_dt(struct rcar_csi2 *priv) + { ++ struct v4l2_async_subdev *asd; ++ struct fwnode_handle *fwnode; + struct device_node *ep; + struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 }; + int ret; +@@ -834,24 +835,19 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv) + return ret; + } + +- priv->asd.match.fwnode = +- fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep)); +- priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE; +- ++ fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep)); + of_node_put(ep); + +- v4l2_async_notifier_init(&priv->notifier); +- +- ret = v4l2_async_notifier_add_subdev(&priv->notifier, &priv->asd); +- if (ret) { +- fwnode_handle_put(priv->asd.match.fwnode); +- return ret; +- } ++ dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode)); + ++ v4l2_async_notifier_init(&priv->notifier); + priv->notifier.ops = &rcar_csi2_notify_ops; + +- dev_dbg(priv->dev, "Found '%pOF'\n", +- to_of_node(priv->asd.match.fwnode)); ++ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode, ++ sizeof(*asd)); ++ fwnode_handle_put(fwnode); ++ if (IS_ERR(asd)) ++ return PTR_ERR(asd); + + ret = v4l2_async_subdev_notifier_register(&priv->subdev, + &priv->notifier); +diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c +index a5dbb90c5210b..260604dc5791b 100644 +--- a/drivers/media/platform/rcar-vin/rcar-dma.c ++++ b/drivers/media/platform/rcar-vin/rcar-dma.c +@@ -1409,8 +1409,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel) + int ret; + + ret = pm_runtime_get_sync(vin->dev); +- if (ret < 0) ++ if (ret < 0) 
{ ++ pm_runtime_put_noidle(vin->dev); + return ret; ++ } + + /* Make register writes take effect immediately. */ + vnmc = rvin_read(vin, VNMC_REG); +diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c +index 3d2451ac347d7..f318cd4b8086f 100644 +--- a/drivers/media/platform/rcar_drif.c ++++ b/drivers/media/platform/rcar_drif.c +@@ -185,7 +185,6 @@ struct rcar_drif_frame_buf { + /* OF graph endpoint's V4L2 async data */ + struct rcar_drif_graph_ep { + struct v4l2_subdev *subdev; /* Async matched subdev */ +- struct v4l2_async_subdev asd; /* Async sub-device descriptor */ + }; + + /* DMA buffer */ +@@ -1109,12 +1108,6 @@ static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier, + struct rcar_drif_sdr *sdr = + container_of(notifier, struct rcar_drif_sdr, notifier); + +- if (sdr->ep.asd.match.fwnode != +- of_fwnode_handle(subdev->dev->of_node)) { +- rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name); +- return -EINVAL; +- } +- + v4l2_set_subdev_hostdata(subdev, sdr); + sdr->ep.subdev = subdev; + rdrif_dbg(sdr, "bound asd %s\n", subdev->name); +@@ -1218,7 +1211,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr) + { + struct v4l2_async_notifier *notifier = &sdr->notifier; + struct fwnode_handle *fwnode, *ep; +- int ret; ++ struct v4l2_async_subdev *asd; + + v4l2_async_notifier_init(notifier); + +@@ -1227,26 +1220,21 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr) + if (!ep) + return 0; + ++ /* Get the endpoint properties */ ++ rcar_drif_get_ep_properties(sdr, ep); ++ + fwnode = fwnode_graph_get_remote_port_parent(ep); ++ fwnode_handle_put(ep); + if (!fwnode) { + dev_warn(sdr->dev, "bad remote port parent\n"); +- fwnode_handle_put(ep); + return -EINVAL; + } + +- sdr->ep.asd.match.fwnode = fwnode; +- sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE; +- ret = v4l2_async_notifier_add_subdev(notifier, &sdr->ep.asd); +- if (ret) { +- fwnode_handle_put(fwnode); +- return ret; +- } +- +- /* Get the endpoint properties */ +- rcar_drif_get_ep_properties(sdr, ep); +- ++ asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode, ++ sizeof(*asd)); + fwnode_handle_put(fwnode); +- fwnode_handle_put(ep); ++ if (IS_ERR(asd)) ++ return PTR_ERR(asd); + + return 0; + } +diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c +index 36b821ccc1dba..bf9a75b75083b 100644 +--- a/drivers/media/platform/rockchip/rga/rga-buf.c ++++ b/drivers/media/platform/rockchip/rga/rga-buf.c +@@ -81,6 +81,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count) + + ret = pm_runtime_get_sync(rga->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(rga->dev); + rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED); + return ret; + } +diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c +index 92f43c0cbc0c0..422fd549e9c87 100644 +--- a/drivers/media/platform/s3c-camif/camif-core.c ++++ b/drivers/media/platform/s3c-camif/camif-core.c +@@ -464,7 +464,7 @@ static int s3c_camif_probe(struct platform_device *pdev) + + ret = camif_media_dev_init(camif); + if (ret < 0) +- goto err_alloc; ++ goto err_pm; + + ret = camif_register_sensor(camif); + if (ret < 0) +@@ -498,10 +498,9 @@ err_sens: + media_device_unregister(&camif->media_dev); + media_device_cleanup(&camif->media_dev); + camif_unregister_media_entities(camif); +-err_alloc: ++err_pm: + pm_runtime_put(dev); + pm_runtime_disable(dev); +-err_pm: + camif_clk_put(camif); + err_clk: + 
s3c_camif_unregister_subdev(camif); +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c +index 7d52431c2c837..62d2320a72186 100644 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c +@@ -79,8 +79,10 @@ int s5p_mfc_power_on(void) + int i, ret = 0; + + ret = pm_runtime_get_sync(pm->device); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(pm->device); + return ret; ++ } + + /* clock control */ + for (i = 0; i < pm->num_clocks; i++) { +diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +index af2d5eb782cee..e1d150584bdc2 100644 +--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c ++++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +@@ -1371,7 +1371,7 @@ static int bdisp_probe(struct platform_device *pdev) + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "failed to set PM\n"); +- goto err_dbg; ++ goto err_pm; + } + + /* Filters */ +@@ -1399,7 +1399,6 @@ err_filter: + bdisp_hw_free_filters(bdisp->dev); + err_pm: + pm_runtime_put(dev); +-err_dbg: + bdisp_debugfs_remove(bdisp); + err_v4l2: + v4l2_device_unregister(&bdisp->v4l2_dev); +diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c +index 2503224eeee51..c691b3d81549d 100644 +--- a/drivers/media/platform/sti/delta/delta-v4l2.c ++++ b/drivers/media/platform/sti/delta/delta-v4l2.c +@@ -954,8 +954,10 @@ static void delta_run_work(struct work_struct *work) + /* enable the hardware */ + if (!dec->pm) { + ret = delta_get_sync(ctx); +- if (ret) ++ if (ret) { ++ delta_put_autosuspend(ctx); + goto err; ++ } + } + + /* decode this access unit */ +diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c +index 401aaafa17109..43f279e2a6a38 100644 +--- a/drivers/media/platform/sti/hva/hva-hw.c ++++ b/drivers/media/platform/sti/hva/hva-hw.c +@@ -272,6 +272,7 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva) + + if (pm_runtime_get_sync(dev) < 0) { + dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX); ++ pm_runtime_put_noidle(dev); + mutex_unlock(&hva->protect_mutex); + return -EFAULT; + } +@@ -388,7 +389,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva) + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "%s failed to set PM\n", HVA_PREFIX); +- goto err_clk; ++ goto err_pm; + } + + /* check IP hardware version */ +@@ -553,6 +554,7 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s) + + if (pm_runtime_get_sync(dev) < 0) { + seq_puts(s, "Cannot wake up IP\n"); ++ pm_runtime_put_noidle(dev); + mutex_unlock(&hva->protect_mutex); + return; + } +diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c +index b8931490b83b7..fd1c41cba52fc 100644 +--- a/drivers/media/platform/stm32/stm32-dcmi.c ++++ b/drivers/media/platform/stm32/stm32-dcmi.c +@@ -733,7 +733,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count) + if (ret < 0) { + dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n", + __func__, ret); +- goto err_release_buffers; ++ goto err_pm_put; + } + + ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline); +@@ -837,8 +837,6 @@ err_media_pipeline_stop: + + err_pm_put: + pm_runtime_put(dcmi->dev); +- +-err_release_buffers: + spin_lock_irq(&dcmi->irqlock); + /* + * Return all buffers to vb2 in QUEUED state. 
+diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c +index 346f8212791cf..779dd74b82d01 100644 +--- a/drivers/media/platform/ti-vpe/vpe.c ++++ b/drivers/media/platform/ti-vpe/vpe.c +@@ -2475,6 +2475,8 @@ static int vpe_runtime_get(struct platform_device *pdev) + + r = pm_runtime_get_sync(&pdev->dev); + WARN_ON(r < 0); ++ if (r) ++ pm_runtime_put_noidle(&pdev->dev); + return r < 0 ? r : 0; + } + +diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c +index c650e45bb0ad1..dc62533cf32ce 100644 +--- a/drivers/media/platform/vsp1/vsp1_drv.c ++++ b/drivers/media/platform/vsp1/vsp1_drv.c +@@ -562,7 +562,12 @@ int vsp1_device_get(struct vsp1_device *vsp1) + int ret; + + ret = pm_runtime_get_sync(vsp1->dev); +- return ret < 0 ? ret : 0; ++ if (ret < 0) { ++ pm_runtime_put_noidle(vsp1->dev); ++ return ret; ++ } ++ ++ return 0; + } + + /* +@@ -845,12 +850,12 @@ static int vsp1_probe(struct platform_device *pdev) + /* Configure device parameters based on the version register. */ + pm_runtime_enable(&pdev->dev); + +- ret = pm_runtime_get_sync(&pdev->dev); ++ ret = vsp1_device_get(vsp1); + if (ret < 0) + goto done; + + vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION); +- pm_runtime_put_sync(&pdev->dev); ++ vsp1_device_put(vsp1); + + for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) { + if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) == +diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c +index 9cdef17b4793f..c12dda73cdd53 100644 +--- a/drivers/media/rc/ati_remote.c ++++ b/drivers/media/rc/ati_remote.c +@@ -835,6 +835,10 @@ static int ati_remote_probe(struct usb_interface *interface, + err("%s: endpoint_in message size==0? \n", __func__); + return -ENODEV; + } ++ if (!usb_endpoint_is_int_out(endpoint_out)) { ++ err("%s: Unexpected endpoint_out\n", __func__); ++ return -ENODEV; ++ } + + ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL); + rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE); +diff --git a/drivers/media/test-drivers/vivid/vivid-meta-out.c b/drivers/media/test-drivers/vivid/vivid-meta-out.c +index ff8a039aba72e..95835b52b58fc 100644 +--- a/drivers/media/test-drivers/vivid/vivid-meta-out.c ++++ b/drivers/media/test-drivers/vivid/vivid-meta-out.c +@@ -164,10 +164,11 @@ void vivid_meta_out_process(struct vivid_dev *dev, + { + struct vivid_meta_out_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0); + +- tpg_s_brightness(&dev->tpg, meta->brightness); +- tpg_s_contrast(&dev->tpg, meta->contrast); +- tpg_s_saturation(&dev->tpg, meta->saturation); +- tpg_s_hue(&dev->tpg, meta->hue); ++ v4l2_ctrl_s_ctrl(dev->brightness, meta->brightness); ++ v4l2_ctrl_s_ctrl(dev->contrast, meta->contrast); ++ v4l2_ctrl_s_ctrl(dev->saturation, meta->saturation); ++ v4l2_ctrl_s_ctrl(dev->hue, meta->hue); ++ + dprintk(dev, 2, " %s brightness %u contrast %u saturation %u hue %d\n", + __func__, meta->brightness, meta->contrast, + meta->saturation, meta->hue); +diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c +index b6e70fada3fb2..8fb186b25d6af 100644 +--- a/drivers/media/tuners/tuner-simple.c ++++ b/drivers/media/tuners/tuner-simple.c +@@ -500,7 +500,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer) + case TUNER_TENA_9533_DI: + case TUNER_YMEC_TVF_5533MF: + tuner_dbg("This tuner doesn't have FM. 
Most cards have a TEA5767 for FM\n"); +- return 0; ++ return -EINVAL; + case TUNER_PHILIPS_FM1216ME_MK3: + case TUNER_PHILIPS_FM1236_MK3: + case TUNER_PHILIPS_FMD1216ME_MK3: +@@ -702,7 +702,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe, + TUNER_RATIO_SELECT_50; /* 50 kHz step */ + + /* Bandswitch byte */ +- simple_radio_bandswitch(fe, &buffer[0]); ++ if (simple_radio_bandswitch(fe, &buffer[0])) ++ return 0; + + /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps + freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) = +diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c +index e399b9fad7574..a30a8a731eda8 100644 +--- a/drivers/media/usb/uvc/uvc_ctrl.c ++++ b/drivers/media/usb/uvc/uvc_ctrl.c +@@ -773,12 +773,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping, + offset &= 7; + mask = ((1LL << bits) - 1) << offset; + +- for (; bits > 0; data++) { ++ while (1) { + u8 byte = *data & mask; + value |= offset > 0 ? (byte >> offset) : (byte << (-offset)); + bits -= 8 - (offset > 0 ? offset : 0); ++ if (bits <= 0) ++ break; ++ + offset -= 8; + mask = (1 << bits) - 1; ++ data++; + } + + /* Sign-extend the value if needed. */ +diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c +index b4499cddeffe5..ca3a9c2eec271 100644 +--- a/drivers/media/usb/uvc/uvc_entity.c ++++ b/drivers/media/usb/uvc/uvc_entity.c +@@ -73,10 +73,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain, + int ret; + + if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) { ++ u32 function; ++ + v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops); + strscpy(entity->subdev.name, entity->name, + sizeof(entity->subdev.name)); + ++ switch (UVC_ENTITY_TYPE(entity)) { ++ case UVC_VC_SELECTOR_UNIT: ++ function = MEDIA_ENT_F_VID_MUX; ++ break; ++ case UVC_VC_PROCESSING_UNIT: ++ case UVC_VC_EXTENSION_UNIT: ++ /* For lack of a better option. */ ++ function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; ++ break; ++ case UVC_COMPOSITE_CONNECTOR: ++ case UVC_COMPONENT_CONNECTOR: ++ function = MEDIA_ENT_F_CONN_COMPOSITE; ++ break; ++ case UVC_SVIDEO_CONNECTOR: ++ function = MEDIA_ENT_F_CONN_SVIDEO; ++ break; ++ case UVC_ITT_CAMERA: ++ function = MEDIA_ENT_F_CAM_SENSOR; ++ break; ++ case UVC_TT_VENDOR_SPECIFIC: ++ case UVC_ITT_VENDOR_SPECIFIC: ++ case UVC_ITT_MEDIA_TRANSPORT_INPUT: ++ case UVC_OTT_VENDOR_SPECIFIC: ++ case UVC_OTT_DISPLAY: ++ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: ++ case UVC_EXTERNAL_VENDOR_SPECIFIC: ++ default: ++ function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; ++ break; ++ } ++ ++ entity->subdev.entity.function = function; ++ + ret = media_entity_pads_init(&entity->subdev.entity, + entity->num_pads, entity->pads); + +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c +index 0335e69b70abe..5e6f3153b5ff8 100644 +--- a/drivers/media/usb/uvc/uvc_v4l2.c ++++ b/drivers/media/usb/uvc/uvc_v4l2.c +@@ -247,11 +247,41 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream, + if (ret < 0) + goto done; + ++ /* After the probe, update fmt with the values returned from ++ * negotiation with the device. 
++ */ ++ for (i = 0; i < stream->nformats; ++i) { ++ if (probe->bFormatIndex == stream->format[i].index) { ++ format = &stream->format[i]; ++ break; ++ } ++ } ++ ++ if (i == stream->nformats) { ++ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n", ++ probe->bFormatIndex); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < format->nframes; ++i) { ++ if (probe->bFrameIndex == format->frame[i].bFrameIndex) { ++ frame = &format->frame[i]; ++ break; ++ } ++ } ++ ++ if (i == format->nframes) { ++ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n", ++ probe->bFrameIndex); ++ return -EINVAL; ++ } ++ + fmt->fmt.pix.width = frame->wWidth; + fmt->fmt.pix.height = frame->wHeight; + fmt->fmt.pix.field = V4L2_FIELD_NONE; + fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame); + fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize; ++ fmt->fmt.pix.pixelformat = format->fcc; + fmt->fmt.pix.colorspace = format->colorspace; + + if (uvc_format != NULL) +diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c +index 60e8633b11758..ddff687c79eaa 100644 +--- a/drivers/memory/brcmstb_dpfe.c ++++ b/drivers/memory/brcmstb_dpfe.c +@@ -188,11 +188,6 @@ struct brcmstb_dpfe_priv { + struct mutex lock; + }; + +-static const char * const error_text[] = { +- "Success", "Header code incorrect", "Unknown command or argument", +- "Incorrect checksum", "Malformed command", "Timed out", +-}; +- + /* + * Forward declaration of our sysfs attribute functions, so we can declare the + * attribute data structures early. +@@ -307,6 +302,20 @@ static const struct dpfe_api dpfe_api_v3 = { + }, + }; + ++static const char *get_error_text(unsigned int i) ++{ ++ static const char * const error_text[] = { ++ "Success", "Header code incorrect", ++ "Unknown command or argument", "Incorrect checksum", ++ "Malformed command", "Timed out", "Unknown error", ++ }; ++ ++ if (unlikely(i >= ARRAY_SIZE(error_text))) ++ i = ARRAY_SIZE(error_text) - 1; ++ ++ return error_text[i]; ++} ++ + static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv) + { + u32 val; +@@ -445,7 +454,7 @@ static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd, + } + if (resp != 0) { + mutex_unlock(&priv->lock); +- return -ETIMEDOUT; ++ return -ffs(DCPU_RET_ERR_TIMEDOUT); + } + + /* Compute checksum over the message */ +@@ -691,7 +700,7 @@ static ssize_t generic_show(unsigned int command, u32 response[], + + ret = __send_command(priv, command, response); + if (ret < 0) +- return sprintf(buf, "ERROR: %s\n", error_text[-ret]); ++ return sprintf(buf, "ERROR: %s\n", get_error_text(-ret)); + + return 0; + } +diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c +index 0b0ed72016da8..0309bd5a18008 100644 +--- a/drivers/memory/fsl-corenet-cf.c ++++ b/drivers/memory/fsl-corenet-cf.c +@@ -211,10 +211,8 @@ static int ccf_probe(struct platform_device *pdev) + dev_set_drvdata(&pdev->dev, ccf); + + irq = platform_get_irq(pdev, 0); +- if (!irq) { +- dev_err(&pdev->dev, "%s: no irq\n", __func__); +- return -ENXIO; +- } ++ if (irq < 0) ++ return irq; + + ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf); + if (ret) { +diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c +index ca0097664b125..057666e1b6cda 100644 +--- a/drivers/memory/omap-gpmc.c ++++ b/drivers/memory/omap-gpmc.c +@@ -943,7 +943,7 @@ static int gpmc_cs_remap(int cs, u32 base) + int ret; + u32 old_base, size; + +- if (cs > gpmc_cs_num) { ++ if (cs >= gpmc_cs_num) { + pr_err("%s: requested chip-select is 
disabled\n", __func__); + return -ENODEV; + } +@@ -978,7 +978,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base) + struct resource *res = &gpmc->mem; + int r = -1; + +- if (cs > gpmc_cs_num) { ++ if (cs >= gpmc_cs_num) { + pr_err("%s: requested chip-select is disabled\n", __func__); + return -ENODEV; + } +@@ -2265,6 +2265,10 @@ static void gpmc_probe_dt_children(struct platform_device *pdev) + } + } + #else ++void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p) ++{ ++ memset(p, 0, sizeof(*p)); ++} + static int gpmc_probe_dt(struct platform_device *pdev) + { + return 0; +diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c +index ccd62b9639528..6d2f4a0a901dc 100644 +--- a/drivers/mfd/sm501.c ++++ b/drivers/mfd/sm501.c +@@ -1415,8 +1415,14 @@ static int sm501_plat_probe(struct platform_device *dev) + goto err_claim; + } + +- return sm501_init_dev(sm); ++ ret = sm501_init_dev(sm); ++ if (ret) ++ goto err_unmap; ++ ++ return 0; + ++ err_unmap: ++ iounmap(sm->regs); + err_claim: + release_mem_region(sm->io_res->start, 0x100); + err_res: +diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c +index df5cebb372a59..ca465794ea9c8 100644 +--- a/drivers/mfd/syscon.c ++++ b/drivers/mfd/syscon.c +@@ -108,7 +108,6 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk) + syscon_config.max_register = resource_size(&res) - reg_io_width; + + regmap = regmap_init_mmio(NULL, base, &syscon_config); +- kfree(syscon_config.name); + if (IS_ERR(regmap)) { + pr_err("regmap init failed\n"); + ret = PTR_ERR(regmap); +@@ -145,6 +144,7 @@ err_clk: + regmap_exit(regmap); + err_regmap: + iounmap(base); ++ kfree(syscon_config.name); + err_map: + kfree(syscon); + return ERR_PTR(ret); +diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c +index 37ccc67f4914b..f2b2805942f50 100644 +--- a/drivers/misc/cardreader/rtsx_pcr.c ++++ b/drivers/misc/cardreader/rtsx_pcr.c +@@ -1562,12 +1562,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, + ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells, + ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL); + if (ret < 0) +- goto disable_irq; ++ goto free_slots; + + schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); + + return 0; + ++free_slots: ++ kfree(pcr->slots); + disable_irq: + free_irq(pcr->irq, (void *)pcr); + disable_msi: +diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c +index ed8d38b099251..e26398fd977ec 100644 +--- a/drivers/misc/eeprom/at25.c ++++ b/drivers/misc/eeprom/at25.c +@@ -358,7 +358,7 @@ static int at25_probe(struct spi_device *spi) + at25->nvmem_config.reg_read = at25_ee_read; + at25->nvmem_config.reg_write = at25_ee_write; + at25->nvmem_config.priv = at25; +- at25->nvmem_config.stride = 4; ++ at25->nvmem_config.stride = 1; + at25->nvmem_config.word_size = 1; + at25->nvmem_config.size = chip.byte_len; + +diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c +index 4009b7df4cafe..2e55890ad6a61 100644 +--- a/drivers/misc/habanalabs/gaudi/gaudi.c ++++ b/drivers/misc/habanalabs/gaudi/gaudi.c +@@ -6099,7 +6099,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask, + is_idle &= is_eng_idle; + + if (mask) +- *mask |= !is_eng_idle << ++ *mask |= ((u64) !is_eng_idle) << + (GAUDI_ENGINE_ID_DMA_0 + dma_id); + if (s) + seq_printf(s, fmt, dma_id, +@@ -6122,7 +6122,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask, + is_idle &= is_eng_idle; + + if (mask) +- *mask 
|= !is_eng_idle << (GAUDI_ENGINE_ID_TPC_0 + i); ++ *mask |= ((u64) !is_eng_idle) << ++ (GAUDI_ENGINE_ID_TPC_0 + i); + if (s) + seq_printf(s, fmt, i, + is_eng_idle ? "Y" : "N", +@@ -6150,7 +6151,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask, + is_idle &= is_eng_idle; + + if (mask) +- *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_MME_0 + i); ++ *mask |= ((u64) !is_eng_idle) << ++ (GAUDI_ENGINE_ID_MME_0 + i); + if (s) { + if (!is_slave) + seq_printf(s, fmt, i, +diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c +index 33cd2ae653d23..c09742f440f96 100644 +--- a/drivers/misc/habanalabs/goya/goya.c ++++ b/drivers/misc/habanalabs/goya/goya.c +@@ -5166,7 +5166,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask, + is_idle &= is_eng_idle; + + if (mask) +- *mask |= !is_eng_idle << (GOYA_ENGINE_ID_DMA_0 + i); ++ *mask |= ((u64) !is_eng_idle) << ++ (GOYA_ENGINE_ID_DMA_0 + i); + if (s) + seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N", + qm_glbl_sts0, dma_core_sts0); +@@ -5189,7 +5190,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask, + is_idle &= is_eng_idle; + + if (mask) +- *mask |= !is_eng_idle << (GOYA_ENGINE_ID_TPC_0 + i); ++ *mask |= ((u64) !is_eng_idle) << ++ (GOYA_ENGINE_ID_TPC_0 + i); + if (s) + seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N", + qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts); +@@ -5209,7 +5211,7 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask, + is_idle &= is_eng_idle; + + if (mask) +- *mask |= !is_eng_idle << GOYA_ENGINE_ID_MME_0; ++ *mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0; + if (s) { + seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0, + cmdq_glbl_sts0, mme_arch_sts); +diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c +index 2da3b474f4863..18fb9d8b8a4b5 100644 +--- a/drivers/misc/mic/scif/scif_rma.c ++++ b/drivers/misc/mic/scif/scif_rma.c +@@ -1392,6 +1392,8 @@ retry: + (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0, + pinned_pages->pages); + if (nr_pages != pinned_pages->nr_pages) { ++ if (pinned_pages->nr_pages < 0) ++ pinned_pages->nr_pages = 0; + if (try_upgrade) { + if (ulimit) + __scif_dec_pinned_vm_lock(mm, nr_pages); +@@ -1408,7 +1410,6 @@ retry: + + if (pinned_pages->nr_pages < nr_pages) { + err = -EFAULT; +- pinned_pages->nr_pages = nr_pages; + goto dec_pinned; + } + +@@ -1421,7 +1422,6 @@ dec_pinned: + __scif_dec_pinned_vm_lock(mm, nr_pages); + /* Something went wrong! 
Rollback */ + error_unmap: +- pinned_pages->nr_pages = nr_pages; + scif_destroy_pinned_pages(pinned_pages); + *pages = NULL; + dev_dbg(scif_info.mdev.this_device, +diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c +index 55e7f21e51f44..6722c726b2590 100644 +--- a/drivers/misc/mic/vop/vop_main.c ++++ b/drivers/misc/mic/vop/vop_main.c +@@ -320,7 +320,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, + /* First assign the vring's allocated in host memory */ + vqconfig = _vop_vq_config(vdev->desc) + index; + memcpy_fromio(&config, vqconfig, sizeof(config)); +- _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); ++ _vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4); + vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); + va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size); + if (!va) +diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c +index 30eac172f0170..7014ffe88632e 100644 +--- a/drivers/misc/mic/vop/vop_vringh.c ++++ b/drivers/misc/mic/vop/vop_vringh.c +@@ -296,7 +296,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev, + + num = le16_to_cpu(vqconfig[i].num); + mutex_init(&vvr->vr_mutex); +- vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) + ++ vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) + + sizeof(struct _mic_vring_info)); + vr->va = (void *) + __get_free_pages(GFP_KERNEL | __GFP_ZERO, +@@ -308,7 +308,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev, + goto err; + } + vr->len = vr_size; +- vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); ++ vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4); + vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i); + vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size, + DMA_BIDIRECTIONAL); +@@ -602,6 +602,7 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf, + size_t partlen; + bool dma = VOP_USE_DMA && vi->dma_ch; + int err = 0; ++ size_t offset = 0; + + if (dma) { + dma_alignment = 1 << vi->dma_ch->device->copy_align; +@@ -655,13 +656,20 @@ memcpy: + * We are copying to IO below and should ideally use something + * like copy_from_user_toio(..) if it existed. 
+ */ +- if (copy_from_user((void __force *)dbuf, ubuf, len)) { +- err = -EFAULT; +- dev_err(vop_dev(vdev), "%s %d err %d\n", +- __func__, __LINE__, err); +- goto err; ++ while (len) { ++ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE); ++ ++ if (copy_from_user(vvr->buf, ubuf + offset, partlen)) { ++ err = -EFAULT; ++ dev_err(vop_dev(vdev), "%s %d err %d\n", ++ __func__, __LINE__, err); ++ goto err; ++ } ++ memcpy_toio(dbuf + offset, vvr->buf, partlen); ++ offset += partlen; ++ vdev->out_bytes += partlen; ++ len -= partlen; + } +- vdev->out_bytes += len; + err = 0; + err: + vpdev->hw_ops->unmap(vpdev, dbuf); +diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig +index 6551007a066ce..947294f6d7f44 100644 +--- a/drivers/misc/ocxl/Kconfig ++++ b/drivers/misc/ocxl/Kconfig +@@ -9,9 +9,8 @@ config OCXL_BASE + + config OCXL + tristate "OpenCAPI coherent accelerator support" +- depends on PPC_POWERNV && PCI && EEH ++ depends on PPC_POWERNV && PCI && EEH && HOTPLUG_PCI_POWERNV + select OCXL_BASE +- select HOTPLUG_PCI_POWERNV + default m + help + Select this option to enable the ocxl driver for Open +diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c +index 8531ae7811956..c49065887e8f5 100644 +--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c ++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c +@@ -657,8 +657,9 @@ static int qp_host_get_user_memory(u64 produce_uva, + if (retval < (int)produce_q->kernel_if->num_pages) { + pr_debug("get_user_pages_fast(produce) failed (retval=%d)", + retval); +- qp_release_pages(produce_q->kernel_if->u.h.header_page, +- retval, false); ++ if (retval > 0) ++ qp_release_pages(produce_q->kernel_if->u.h.header_page, ++ retval, false); + err = VMCI_ERROR_NO_MEM; + goto out; + } +@@ -670,8 +671,9 @@ static int qp_host_get_user_memory(u64 produce_uva, + if (retval < (int)consume_q->kernel_if->num_pages) { + pr_debug("get_user_pages_fast(consume) failed (retval=%d)", + retval); +- qp_release_pages(consume_q->kernel_if->u.h.header_page, +- retval, false); ++ if (retval > 0) ++ qp_release_pages(consume_q->kernel_if->u.h.header_page, ++ retval, false); + qp_release_pages(produce_q->kernel_if->u.h.header_page, + produce_q->kernel_if->num_pages, false); + err = VMCI_ERROR_NO_MEM; +diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c +index e0655278c5c32..3efaa9534a777 100644 +--- a/drivers/mmc/core/sdio_cis.c ++++ b/drivers/mmc/core/sdio_cis.c +@@ -26,6 +26,9 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, + unsigned i, nr_strings; + char **buffer, *string; + ++ if (size < 2) ++ return 0; ++ + /* Find all null-terminated (including zero length) strings in + the TPLLV1_INFO field. Trailing garbage is ignored. 
*/ + buf += 2; +diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c +index e0e33f6bf513b..1e70ecfffa39f 100644 +--- a/drivers/mtd/hyperbus/hbmc-am654.c ++++ b/drivers/mtd/hyperbus/hbmc-am654.c +@@ -70,7 +70,8 @@ static int am654_hbmc_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, priv); + +- ret = of_address_to_resource(np, 0, &res); ++ priv->hbdev.np = of_get_next_child(np, NULL); ++ ret = of_address_to_resource(priv->hbdev.np, 0, &res); + if (ret) + return ret; + +@@ -103,7 +104,6 @@ static int am654_hbmc_probe(struct platform_device *pdev) + priv->ctlr.dev = dev; + priv->ctlr.ops = &am654_hbmc_ops; + priv->hbdev.ctlr = &priv->ctlr; +- priv->hbdev.np = of_get_next_child(dev->of_node, NULL); + ret = hyperbus_register_device(&priv->hbdev); + if (ret) { + dev_err(dev, "failed to register controller\n"); +diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c +index 0f1547f09d08b..72f5c7b300790 100644 +--- a/drivers/mtd/lpddr/lpddr2_nvm.c ++++ b/drivers/mtd/lpddr/lpddr2_nvm.c +@@ -393,6 +393,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add, + return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK); + } + ++static const struct mtd_info lpddr2_nvm_mtd_info = { ++ .type = MTD_RAM, ++ .writesize = 1, ++ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK), ++ ._read = lpddr2_nvm_read, ++ ._write = lpddr2_nvm_write, ++ ._erase = lpddr2_nvm_erase, ++ ._unlock = lpddr2_nvm_unlock, ++ ._lock = lpddr2_nvm_lock, ++}; ++ + /* + * lpddr2_nvm driver probe method + */ +@@ -433,6 +444,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) + .pfow_base = OW_BASE_ADDRESS, + .fldrv_priv = pcm_data, + }; ++ + if (IS_ERR(map->virt)) + return PTR_ERR(map->virt); + +@@ -444,22 +456,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) + return PTR_ERR(pcm_data->ctl_regs); + + /* Populate mtd_info data structure */ +- *mtd = (struct mtd_info) { +- .dev = { .parent = &pdev->dev }, +- .name = pdev->dev.init_name, +- .type = MTD_RAM, +- .priv = map, +- .size = resource_size(add_range), +- .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width, +- .writesize = 1, +- .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width, +- .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK), +- ._read = lpddr2_nvm_read, +- ._write = lpddr2_nvm_write, +- ._erase = lpddr2_nvm_erase, +- ._unlock = lpddr2_nvm_unlock, +- ._lock = lpddr2_nvm_lock, +- }; ++ *mtd = lpddr2_nvm_mtd_info; ++ mtd->dev.parent = &pdev->dev; ++ mtd->name = pdev->dev.init_name; ++ mtd->priv = map; ++ mtd->size = resource_size(add_range); ++ mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width; ++ mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width; + + /* Verify the presence of the device looking for PFOW string */ + if (!lpddr2_nvm_pfow_present(map)) { +diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c +index 4ced68be7ed7e..774970bfcf859 100644 +--- a/drivers/mtd/mtdoops.c ++++ b/drivers/mtd/mtdoops.c +@@ -279,12 +279,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper, + kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, + record_size - MTDOOPS_HEADER_SIZE, NULL); + +- /* Panics must be written immediately */ +- if (reason != KMSG_DUMP_OOPS) ++ if (reason != KMSG_DUMP_OOPS) { ++ /* Panics must be written immediately */ + mtdoops_write(cxt, 1); +- +- /* For other cases, schedule work to write it "nicely" */ +- schedule_work(&cxt->work_write); ++ } else { ++ /* For other cases, schedule work to write it "nicely" */ ++ 
schedule_work(&cxt->work_write); ++ } + } + + static void mtdoops_notify_add(struct mtd_info *mtd) +diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c +index fdba155416d25..0bf4cfc251472 100644 +--- a/drivers/mtd/nand/raw/ams-delta.c ++++ b/drivers/mtd/nand/raw/ams-delta.c +@@ -400,12 +400,14 @@ static int gpio_nand_remove(struct platform_device *pdev) + return 0; + } + ++#ifdef CONFIG_OF + static const struct of_device_id gpio_nand_of_id_table[] = { + { + /* sentinel */ + }, + }; + MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table); ++#endif + + static const struct platform_device_id gpio_nand_plat_id_table[] = { + { +diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c +index 7f4546ae91303..5792fb240cb2b 100644 +--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c ++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c +@@ -1762,7 +1762,7 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc, + return ret; + } + +- if (cs > FMC2_MAX_CE) { ++ if (cs >= FMC2_MAX_CE) { + dev_err(nfc->dev, "invalid reg value: %d\n", cs); + return -EINVAL; + } +diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c +index 7248c59011836..fcca45e2abe20 100644 +--- a/drivers/mtd/nand/raw/vf610_nfc.c ++++ b/drivers/mtd/nand/raw/vf610_nfc.c +@@ -852,8 +852,10 @@ static int vf610_nfc_probe(struct platform_device *pdev) + } + + of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev); +- if (!of_id) +- return -ENODEV; ++ if (!of_id) { ++ err = -ENODEV; ++ goto err_disable_clk; ++ } + + nfc->variant = (enum vf610_nfc_variant)of_id->data; + +diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c +index d219c970042a2..0b7667e60780f 100644 +--- a/drivers/mtd/nand/spi/gigadevice.c ++++ b/drivers/mtd/nand/spi/gigadevice.c +@@ -21,7 +21,7 @@ + #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4) + + static SPINAND_OP_VARIANTS(read_cache_variants, +- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), ++ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), +@@ -29,7 +29,7 @@ static SPINAND_OP_VARIANTS(read_cache_variants, + SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); + + static SPINAND_OP_VARIANTS(read_cache_variants_f, +- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), ++ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0), +@@ -202,7 +202,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), +- 0, ++ SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, + gd5fxgq4xa_ecc_get_status)), + SPINAND_INFO("GD5F2GQ4xA", +@@ -212,7 +212,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), +- 0, ++ SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, + gd5fxgq4xa_ecc_get_status)), + SPINAND_INFO("GD5F4GQ4xA", +@@ -222,7 +222,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), +- 0, ++ SPINAND_HAS_QE_BIT, + 
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, + gd5fxgq4xa_ecc_get_status)), + SPINAND_INFO("GD5F1GQ4UExxG", +@@ -232,7 +232,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), +- 0, ++ SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout, + gd5fxgq4uexxg_ecc_get_status)), + SPINAND_INFO("GD5F1GQ4UFxxG", +@@ -242,7 +242,7 @@ static const struct spinand_info gigadevice_spinand_table[] = { + SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f, + &write_cache_variants, + &update_cache_variants), +- 0, ++ SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout, + gd5fxgq4ufxxg_ecc_get_status)), + }; +diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig +index f98363c9b3630..e72354322f628 100644 +--- a/drivers/mtd/parsers/Kconfig ++++ b/drivers/mtd/parsers/Kconfig +@@ -12,7 +12,7 @@ config MTD_BCM47XX_PARTS + boards. + + config MTD_BCM63XX_PARTS +- tristate "BCM63XX CFE partitioning parser" ++ bool "BCM63XX CFE partitioning parser" + depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST + select CRC32 + select MTD_PARSER_IMAGETAG +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c +index 94d10ec954a05..2ac7a667bde35 100644 +--- a/drivers/net/can/flexcan.c ++++ b/drivers/net/can/flexcan.c +@@ -1260,18 +1260,23 @@ static int flexcan_chip_start(struct net_device *dev) + return err; + } + +-/* flexcan_chip_stop ++/* __flexcan_chip_stop + * +- * this functions is entered with clocks enabled ++ * this function is entered with clocks enabled + */ +-static void flexcan_chip_stop(struct net_device *dev) ++static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error) + { + struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; ++ int err; + + /* freeze + disable module */ +- flexcan_chip_freeze(priv); +- flexcan_chip_disable(priv); ++ err = flexcan_chip_freeze(priv); ++ if (err && !disable_on_error) ++ return err; ++ err = flexcan_chip_disable(priv); ++ if (err && !disable_on_error) ++ goto out_chip_unfreeze; + + /* Disable all interrupts */ + priv->write(0, ®s->imask2); +@@ -1281,6 +1286,23 @@ static void flexcan_chip_stop(struct net_device *dev) + + flexcan_transceiver_disable(priv); + priv->can.state = CAN_STATE_STOPPED; ++ ++ return 0; ++ ++ out_chip_unfreeze: ++ flexcan_chip_unfreeze(priv); ++ ++ return err; ++} ++ ++static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev) ++{ ++ return __flexcan_chip_stop(dev, true); ++} ++ ++static inline int flexcan_chip_stop(struct net_device *dev) ++{ ++ return __flexcan_chip_stop(dev, false); + } + + static int flexcan_open(struct net_device *dev) +@@ -1362,7 +1384,7 @@ static int flexcan_close(struct net_device *dev) + + netif_stop_queue(dev); + can_rx_offload_disable(&priv->offload); +- flexcan_chip_stop(dev); ++ flexcan_chip_stop_disable_on_error(dev); + + can_rx_offload_del(&priv->offload); + free_irq(dev->irq, dev); +diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c +index 38ea5e600fb84..e6d0cb9ee02f0 100644 +--- a/drivers/net/can/m_can/m_can_platform.c ++++ b/drivers/net/can/m_can/m_can_platform.c +@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev) + struct net_device *ndev = dev_get_drvdata(dev); + struct m_can_classdev *mcan_class = netdev_priv(ndev); + +- m_can_class_suspend(dev); +- + clk_disable_unprepare(mcan_class->cclk); + 
clk_disable_unprepare(mcan_class->hclk); + +diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c +index c796d42730bae..e5f047129b150 100644 +--- a/drivers/net/dsa/microchip/ksz_common.c ++++ b/drivers/net/dsa/microchip/ksz_common.c +@@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_device *dev) + + INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work); + +- /* Read MIB counters every 30 seconds to avoid overflow. */ +- dev->mib_read_interval = msecs_to_jiffies(30000); +- + for (i = 0; i < dev->mib_port_cnt; i++) + dev->dev_ops->port_init_cnt(dev, i); +- +- /* Start the timer 2 seconds later. */ +- schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000)); + } + EXPORT_SYMBOL_GPL(ksz_init_mib_timer); + +@@ -143,7 +137,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, + + /* Read all MIB counters when the link is going down. */ + p->read = true; +- schedule_delayed_work(&dev->mib_read, 0); ++ /* timer started */ ++ if (dev->mib_read_interval) ++ schedule_delayed_work(&dev->mib_read, 0); + } + EXPORT_SYMBOL_GPL(ksz_mac_link_down); + +@@ -450,6 +446,12 @@ int ksz_switch_register(struct ksz_device *dev, + return ret; + } + ++ /* Read MIB counters every 30 seconds to avoid overflow. */ ++ dev->mib_read_interval = msecs_to_jiffies(30000); ++ ++ /* Start the MIB timer. */ ++ schedule_delayed_work(&dev->mib_read, 0); ++ + return 0; + } + EXPORT_SYMBOL(ksz_switch_register); +diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c +index 9e9fd19e1d00c..e2cd49eec0370 100644 +--- a/drivers/net/dsa/ocelot/seville_vsc9953.c ++++ b/drivers/net/dsa/ocelot/seville_vsc9953.c +@@ -1010,7 +1010,7 @@ static const struct felix_info seville_info_vsc9953 = { + .vcap_is2_keys = vsc9953_vcap_is2_keys, + .vcap_is2_actions = vsc9953_vcap_is2_actions, + .vcap = vsc9953_vcap_props, +- .shared_queue_sz = 2048 * 1024, ++ .shared_queue_sz = 256 * 1024, + .num_mact_rows = 2048, + .num_ports = 10, + .mdio_bus_alloc = vsc9953_mdio_bus_alloc, +diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h +index 9a63b51e1d82f..6f2dab7e33d65 100644 +--- a/drivers/net/dsa/realtek-smi-core.h ++++ b/drivers/net/dsa/realtek-smi-core.h +@@ -25,6 +25,9 @@ struct rtl8366_mib_counter { + const char *name; + }; + ++/** ++ * struct rtl8366_vlan_mc - Virtual LAN member configuration ++ */ + struct rtl8366_vlan_mc { + u16 vid; + u16 untag; +@@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi); + int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used); + int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid); +-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val); + int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid); + int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); +diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c +index a8c5a934c3d30..c58ca324a4b24 100644 +--- a/drivers/net/dsa/rtl8366.c ++++ b/drivers/net/dsa/rtl8366.c +@@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used) + } + EXPORT_SYMBOL_GPL(rtl8366_mc_is_used); + ++/** ++ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration ++ * @smi: the Realtek SMI device instance ++ * @vid: the VLAN ID to look up or allocate ++ * @vlanmc: the pointer will be assigned to a pointer to a valid member config ++ * if successful ++ * @return: index of a 
new member config or negative error number ++ */ ++static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid, ++ struct rtl8366_vlan_mc *vlanmc) ++{ ++ struct rtl8366_vlan_4k vlan4k; ++ int ret; ++ int i; ++ ++ /* Try to find an existing member config entry for this VID */ ++ for (i = 0; i < smi->num_vlan_mc; i++) { ++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc); ++ if (ret) { ++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n", ++ i, vid); ++ return ret; ++ } ++ ++ if (vid == vlanmc->vid) ++ return i; ++ } ++ ++ /* We have no MC entry for this VID, try to find an empty one */ ++ for (i = 0; i < smi->num_vlan_mc; i++) { ++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc); ++ if (ret) { ++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n", ++ i, vid); ++ return ret; ++ } ++ ++ if (vlanmc->vid == 0 && vlanmc->member == 0) { ++ /* Update the entry from the 4K table */ ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); ++ if (ret) { ++ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n", ++ i, vid); ++ return ret; ++ } ++ ++ vlanmc->vid = vid; ++ vlanmc->member = vlan4k.member; ++ vlanmc->untag = vlan4k.untag; ++ vlanmc->fid = vlan4k.fid; ++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc); ++ if (ret) { ++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n", ++ i, vid); ++ return ret; ++ } ++ ++ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n", ++ i, vid); ++ return i; ++ } ++ } ++ ++ /* MC table is full, try to find an unused entry and replace it */ ++ for (i = 0; i < smi->num_vlan_mc; i++) { ++ int used; ++ ++ ret = rtl8366_mc_is_used(smi, i, &used); ++ if (ret) ++ return ret; ++ ++ if (!used) { ++ /* Update the entry from the 4K table */ ++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); ++ if (ret) ++ return ret; ++ ++ vlanmc->vid = vid; ++ vlanmc->member = vlan4k.member; ++ vlanmc->untag = vlan4k.untag; ++ vlanmc->fid = vlan4k.fid; ++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc); ++ if (ret) { ++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n", ++ i, vid); ++ return ret; ++ } ++ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n", ++ i, vid); ++ return i; ++ } ++ } ++ ++ dev_err(smi->dev, "all VLAN member configurations are in use\n"); ++ return -ENOSPC; ++} ++ + int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid) + { ++ struct rtl8366_vlan_mc vlanmc; + struct rtl8366_vlan_4k vlan4k; ++ int mc; + int ret; +- int i; ++ ++ if (!smi->ops->is_vlan_valid(smi, vid)) ++ return -EINVAL; + + dev_dbg(smi->dev, + "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", +@@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, vlan4k.member, vlan4k.untag); + +- /* Try to find an existing MC entry for this VID */ +- for (i = 0; i < smi->num_vlan_mc; i++) { +- struct rtl8366_vlan_mc vlanmc; +- +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; +- +- if (vid == vlanmc.vid) { +- /* update the MC entry */ +- vlanmc.member |= member; +- vlanmc.untag |= untag; +- vlanmc.fid = fid; +- +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); ++ /* Find or allocate a member config for this VID */ ++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc); ++ if (ret < 0) ++ return ret; ++ mc = ret; + +- dev_dbg(smi->dev, +- "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n", +- vid, vlanmc.member, vlanmc.untag); ++ /* Update the MC entry */ ++ vlanmc.member |= member; ++ vlanmc.untag 
|= untag; ++ vlanmc.fid = fid; + +- break; +- } +- } ++ /* Commit updates to the MC entry */ ++ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc); ++ if (ret) ++ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n", ++ mc, vid); ++ else ++ dev_dbg(smi->dev, ++ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n", ++ vid, vlanmc.member, vlanmc.untag); + + return ret; + } + EXPORT_SYMBOL_GPL(rtl8366_set_vlan); + +-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val) +-{ +- struct rtl8366_vlan_mc vlanmc; +- int ret; +- int index; +- +- ret = smi->ops->get_mc_index(smi, port, &index); +- if (ret) +- return ret; +- +- ret = smi->ops->get_vlan_mc(smi, index, &vlanmc); +- if (ret) +- return ret; +- +- *val = vlanmc.vid; +- return 0; +-} +-EXPORT_SYMBOL_GPL(rtl8366_get_pvid); +- + int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid) + { + struct rtl8366_vlan_mc vlanmc; +- struct rtl8366_vlan_4k vlan4k; ++ int mc; + int ret; +- int i; +- +- /* Try to find an existing MC entry for this VID */ +- for (i = 0; i < smi->num_vlan_mc; i++) { +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; +- +- if (vid == vlanmc.vid) { +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; +- +- ret = smi->ops->set_mc_index(smi, port, i); +- return ret; +- } +- } +- +- /* We have no MC entry for this VID, try to find an empty one */ +- for (i = 0; i < smi->num_vlan_mc; i++) { +- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; +- +- if (vlanmc.vid == 0 && vlanmc.member == 0) { +- /* Update the entry from the 4K table */ +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); +- if (ret) +- return ret; + +- vlanmc.vid = vid; +- vlanmc.member = vlan4k.member; +- vlanmc.untag = vlan4k.untag; +- vlanmc.fid = vlan4k.fid; +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; +- +- ret = smi->ops->set_mc_index(smi, port, i); +- return ret; +- } +- } +- +- /* MC table is full, try to find an unused entry and replace it */ +- for (i = 0; i < smi->num_vlan_mc; i++) { +- int used; +- +- ret = rtl8366_mc_is_used(smi, i, &used); +- if (ret) +- return ret; +- +- if (!used) { +- /* Update the entry from the 4K table */ +- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); +- if (ret) +- return ret; ++ if (!smi->ops->is_vlan_valid(smi, vid)) ++ return -EINVAL; + +- vlanmc.vid = vid; +- vlanmc.member = vlan4k.member; +- vlanmc.untag = vlan4k.untag; +- vlanmc.fid = vlan4k.fid; +- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); +- if (ret) +- return ret; ++ /* Find or allocate a member config for this VID */ ++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc); ++ if (ret < 0) ++ return ret; ++ mc = ret; + +- ret = smi->ops->set_mc_index(smi, port, i); +- return ret; +- } ++ ret = smi->ops->set_mc_index(smi, port, mc); ++ if (ret) { ++ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n", ++ mc, port); ++ return ret; + } + +- dev_err(smi->dev, +- "all VLAN member configurations are in use\n"); ++ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n", ++ port, vid, mc); + +- return -ENOSPC; ++ return 0; + } + EXPORT_SYMBOL_GPL(rtl8366_set_pvid); + +@@ -389,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, + if (!smi->ops->is_vlan_valid(smi, vid)) + return; + +- dev_info(smi->dev, "add VLAN on port %d, %s, %s\n", ++ dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n", ++ vlan->vid_begin, + port, + untagged ? 
"untagged" : "tagged", + pvid ? " PVID" : "no PVID"); +@@ -398,34 +425,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, + dev_err(smi->dev, "port is DSA or CPU port\n"); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { +- int pvid_val = 0; +- +- dev_info(smi->dev, "add VLAN %04x\n", vid); + member |= BIT(port); + + if (untagged) + untag |= BIT(port); + +- /* To ensure that we have a valid MC entry for this VLAN, +- * initialize the port VLAN ID here. +- */ +- ret = rtl8366_get_pvid(smi, port, &pvid_val); +- if (ret < 0) { +- dev_err(smi->dev, "could not lookup PVID for port %d\n", +- port); +- return; +- } +- if (pvid_val == 0) { +- ret = rtl8366_set_pvid(smi, port, vid); +- if (ret < 0) +- return; +- } +- + ret = rtl8366_set_vlan(smi, vid, member, untag, 0); + if (ret) + dev_err(smi->dev, + "failed to set up VLAN %04x", + vid); ++ ++ if (!pvid) ++ continue; ++ ++ ret = rtl8366_set_pvid(smi, port, vid); ++ if (ret) ++ dev_err(smi->dev, ++ "failed to set PVID on port %d to VLAN %04x", ++ port, vid); ++ ++ if (!ret) ++ dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n", ++ vid, port); + } + } + EXPORT_SYMBOL_GPL(rtl8366_vlan_add); +diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c +index 48f1ff7467999..5cfffa7559c7c 100644 +--- a/drivers/net/dsa/rtl8366rb.c ++++ b/drivers/net/dsa/rtl8366rb.c +@@ -1255,7 +1255,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan) + if (smi->vlan4k_enabled) + max = RTL8366RB_NUM_VIDS - 1; + +- if (vlan == 0 || vlan >= max) ++ if (vlan == 0 || vlan > max) + return false; + + return true; +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +index f642c1b475c42..1b88bd1c2dbe4 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = { + PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12), + }; + ++static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = { ++ /* Default supported NAT modes */ ++ { ++ .chip = CHELSIO_T5, ++ .flags = CXGB4_ACTION_NATMODE_NONE, ++ .natmode = NAT_MODE_NONE, ++ }, ++ { ++ .chip = CHELSIO_T5, ++ .flags = CXGB4_ACTION_NATMODE_DIP, ++ .natmode = NAT_MODE_DIP, ++ }, ++ { ++ .chip = CHELSIO_T5, ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT, ++ .natmode = NAT_MODE_DIP_DP, ++ }, ++ { ++ .chip = CHELSIO_T5, ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT | ++ CXGB4_ACTION_NATMODE_SIP, ++ .natmode = NAT_MODE_DIP_DP_SIP, ++ }, ++ { ++ .chip = CHELSIO_T5, ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT | ++ CXGB4_ACTION_NATMODE_SPORT, ++ .natmode = NAT_MODE_DIP_DP_SP, ++ }, ++ { ++ .chip = CHELSIO_T5, ++ .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT, ++ .natmode = NAT_MODE_SIP_SP, ++ }, ++ { ++ .chip = CHELSIO_T5, ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP | ++ CXGB4_ACTION_NATMODE_SPORT, ++ .natmode = NAT_MODE_DIP_SIP_SP, ++ }, ++ { ++ .chip = CHELSIO_T5, ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP | ++ CXGB4_ACTION_NATMODE_DPORT | ++ CXGB4_ACTION_NATMODE_SPORT, ++ .natmode = NAT_MODE_ALL, ++ }, ++ /* T6+ can ignore L4 ports when they're disabled. 
*/ ++ { ++ .chip = CHELSIO_T6, ++ .flags = CXGB4_ACTION_NATMODE_SIP, ++ .natmode = NAT_MODE_SIP_SP, ++ }, ++ { ++ .chip = CHELSIO_T6, ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT, ++ .natmode = NAT_MODE_DIP_DP_SP, ++ }, ++ { ++ .chip = CHELSIO_T6, ++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP, ++ .natmode = NAT_MODE_ALL, ++ }, ++}; ++ ++static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs, ++ u8 natmode_flags) ++{ ++ u8 i = 0; ++ ++ /* Translate the enabled NAT 4-tuple fields to one of the ++ * hardware supported NAT mode configurations. This ensures ++ * that we pick a valid combination, where the disabled fields ++ * do not get overwritten to 0. ++ */ ++ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) { ++ if (cxgb4_natmode_config_array[i].flags == natmode_flags) { ++ fs->nat_mode = cxgb4_natmode_config_array[i].natmode; ++ return; ++ } ++ } ++} ++ + static struct ch_tc_flower_entry *allocate_flower_entry(void) + { + struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL); +@@ -289,7 +372,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask, + } + + static void process_pedit_field(struct ch_filter_specification *fs, u32 val, +- u32 mask, u32 offset, u8 htype) ++ u32 mask, u32 offset, u8 htype, ++ u8 *natmode_flags) + { + switch (htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: +@@ -314,60 +398,94 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, + switch (offset) { + case PEDIT_IP4_SRC: + offload_pedit(fs, val, mask, IP4_SRC); ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; + break; + case PEDIT_IP4_DST: + offload_pedit(fs, val, mask, IP4_DST); ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; + } +- fs->nat_mode = NAT_MODE_ALL; + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + switch (offset) { + case PEDIT_IP6_SRC_31_0: + offload_pedit(fs, val, mask, IP6_SRC_31_0); ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; + break; + case PEDIT_IP6_SRC_63_32: + offload_pedit(fs, val, mask, IP6_SRC_63_32); ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; + break; + case PEDIT_IP6_SRC_95_64: + offload_pedit(fs, val, mask, IP6_SRC_95_64); ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; + break; + case PEDIT_IP6_SRC_127_96: + offload_pedit(fs, val, mask, IP6_SRC_127_96); ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; + break; + case PEDIT_IP6_DST_31_0: + offload_pedit(fs, val, mask, IP6_DST_31_0); ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; + break; + case PEDIT_IP6_DST_63_32: + offload_pedit(fs, val, mask, IP6_DST_63_32); ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; + break; + case PEDIT_IP6_DST_95_64: + offload_pedit(fs, val, mask, IP6_DST_95_64); ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; + break; + case PEDIT_IP6_DST_127_96: + offload_pedit(fs, val, mask, IP6_DST_127_96); ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; + } +- fs->nat_mode = NAT_MODE_ALL; + break; + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + switch (offset) { + case PEDIT_TCP_SPORT_DPORT: +- if (~mask & PEDIT_TCP_UDP_SPORT_MASK) ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) { + fs->nat_fport = val; +- else ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT; ++ } else { + fs->nat_lport = val >> 16; ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT; ++ } + } +- fs->nat_mode = NAT_MODE_ALL; + break; + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + switch (offset) { + case PEDIT_UDP_SPORT_DPORT: +- if (~mask & PEDIT_TCP_UDP_SPORT_MASK) ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) { + fs->nat_fport = val; +- else ++ 
*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT; ++ } else { + fs->nat_lport = val >> 16; ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT; ++ } + } +- fs->nat_mode = NAT_MODE_ALL; ++ break; ++ } ++} ++ ++static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags, ++ struct netlink_ext_ack *extack) ++{ ++ u8 i = 0; ++ ++ /* Extract the NAT mode to enable based on what 4-tuple fields ++ * are enabled to be overwritten. This ensures that the ++ * disabled fields don't get overwritten to 0. ++ */ ++ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) { ++ const struct cxgb4_natmode_config *c; ++ ++ c = &cxgb4_natmode_config_array[i]; ++ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip && ++ natmode_flags == c->flags) ++ return 0; + } ++ NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination"); ++ return -EOPNOTSUPP; + } + + void cxgb4_process_flow_actions(struct net_device *in, +@@ -375,6 +493,7 @@ void cxgb4_process_flow_actions(struct net_device *in, + struct ch_filter_specification *fs) + { + struct flow_action_entry *act; ++ u8 natmode_flags = 0; + int i; + + flow_action_for_each(i, act, actions) { +@@ -426,7 +545,8 @@ void cxgb4_process_flow_actions(struct net_device *in, + val = act->mangle.val; + offset = act->mangle.offset; + +- process_pedit_field(fs, val, mask, offset, htype); ++ process_pedit_field(fs, val, mask, offset, htype, ++ &natmode_flags); + } + break; + case FLOW_ACTION_QUEUE: +@@ -438,6 +558,9 @@ void cxgb4_process_flow_actions(struct net_device *in, + break; + } + } ++ if (natmode_flags) ++ cxgb4_action_natmode_tweak(fs, natmode_flags); ++ + } + + static bool valid_l4_mask(u32 mask) +@@ -454,7 +577,8 @@ static bool valid_l4_mask(u32 mask) + } + + static bool valid_pedit_action(struct net_device *dev, +- const struct flow_action_entry *act) ++ const struct flow_action_entry *act, ++ u8 *natmode_flags) + { + u32 mask, offset; + u8 htype; +@@ -479,7 +603,10 @@ static bool valid_pedit_action(struct net_device *dev, + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + switch (offset) { + case PEDIT_IP4_SRC: ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; ++ break; + case PEDIT_IP4_DST: ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", +@@ -493,10 +620,13 @@ static bool valid_pedit_action(struct net_device *dev, + case PEDIT_IP6_SRC_63_32: + case PEDIT_IP6_SRC_95_64: + case PEDIT_IP6_SRC_127_96: ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP; ++ break; + case PEDIT_IP6_DST_31_0: + case PEDIT_IP6_DST_63_32: + case PEDIT_IP6_DST_95_64: + case PEDIT_IP6_DST_127_96: ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", +@@ -512,6 +642,10 @@ static bool valid_pedit_action(struct net_device *dev, + __func__); + return false; + } ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT; ++ else ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT; + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", +@@ -527,6 +661,10 @@ static bool valid_pedit_action(struct net_device *dev, + __func__); + return false; + } ++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) ++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT; ++ else ++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT; + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", +@@ -546,10 +684,12 @@ int cxgb4_validate_flow_actions(struct net_device *dev, + struct netlink_ext_ack *extack, + u8 matchall_filter) + { ++ struct adapter *adap 
= netdev2adap(dev); + struct flow_action_entry *act; + bool act_redir = false; + bool act_pedit = false; + bool act_vlan = false; ++ u8 natmode_flags = 0; + int i; + + if (!flow_action_basic_hw_stats_check(actions, extack)) +@@ -563,7 +703,6 @@ int cxgb4_validate_flow_actions(struct net_device *dev, + break; + case FLOW_ACTION_MIRRED: + case FLOW_ACTION_REDIRECT: { +- struct adapter *adap = netdev2adap(dev); + struct net_device *n_dev, *target_dev; + bool found = false; + unsigned int i; +@@ -620,7 +759,8 @@ int cxgb4_validate_flow_actions(struct net_device *dev, + } + break; + case FLOW_ACTION_MANGLE: { +- bool pedit_valid = valid_pedit_action(dev, act); ++ bool pedit_valid = valid_pedit_action(dev, act, ++ &natmode_flags); + + if (!pedit_valid) + return -EOPNOTSUPP; +@@ -642,6 +782,15 @@ int cxgb4_validate_flow_actions(struct net_device *dev, + return -EINVAL; + } + ++ if (act_pedit) { ++ int ret; ++ ++ ret = cxgb4_action_natmode_validate(adap, natmode_flags, ++ extack); ++ if (ret) ++ return ret; ++ } ++ + return 0; + } + +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +index 6296e1d5a12bb..3a2fa00c8cdee 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields { + #define PEDIT_TCP_SPORT_DPORT 0x0 + #define PEDIT_UDP_SPORT_DPORT 0x0 + ++enum cxgb4_action_natmode_flags { ++ CXGB4_ACTION_NATMODE_NONE = 0, ++ CXGB4_ACTION_NATMODE_DIP = (1 << 0), ++ CXGB4_ACTION_NATMODE_SIP = (1 << 1), ++ CXGB4_ACTION_NATMODE_DPORT = (1 << 2), ++ CXGB4_ACTION_NATMODE_SPORT = (1 << 3), ++}; ++ ++/* TC PEDIT action to NATMODE translation entry */ ++struct cxgb4_natmode_config { ++ enum chip_type chip; ++ u8 flags; ++ u8 natmode; ++}; ++ + void cxgb4_process_flow_actions(struct net_device *in, + struct flow_action *actions, + struct ch_filter_specification *fs); +diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h +index 18f3aeb88f22a..c67a16a48d624 100644 +--- a/drivers/net/ethernet/cisco/enic/enic.h ++++ b/drivers/net/ethernet/cisco/enic/enic.h +@@ -169,6 +169,7 @@ struct enic { + u16 num_vfs; + #endif + spinlock_t enic_api_lock; ++ bool enic_api_busy; + struct enic_port_profile *pp; + + /* work queue cache line section */ +diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c +index b161f24522b87..b028ea2dec2b9 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_api.c ++++ b/drivers/net/ethernet/cisco/enic/enic_api.c +@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf, + struct vnic_dev *vdev = enic->vdev; + + spin_lock(&enic->enic_api_lock); ++ while (enic->enic_api_busy) { ++ spin_unlock(&enic->enic_api_lock); ++ cpu_relax(); ++ spin_lock(&enic->enic_api_lock); ++ } ++ + spin_lock_bh(&enic->devcmd_lock); + + vnic_dev_cmd_proxy_by_index_start(vdev, vf); +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c +index 552d89fdf54a5..988c0a72e6836 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c +@@ -2106,8 +2106,6 @@ static int enic_dev_wait(struct vnic_dev *vdev, + int done; + int err; + +- BUG_ON(in_interrupt()); +- + err = start(vdev, arg); + if (err) + return err; +@@ -2295,6 +2293,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic) + rss_hash_bits, rss_base_cpu, rss_enable); + } + 
++static void enic_set_api_busy(struct enic *enic, bool busy) ++{ ++ spin_lock(&enic->enic_api_lock); ++ enic->enic_api_busy = busy; ++ spin_unlock(&enic->enic_api_lock); ++} ++ + static void enic_reset(struct work_struct *work) + { + struct enic *enic = container_of(work, struct enic, reset); +@@ -2304,7 +2309,9 @@ static void enic_reset(struct work_struct *work) + + rtnl_lock(); + +- spin_lock(&enic->enic_api_lock); ++ /* Stop any activity from infiniband */ ++ enic_set_api_busy(enic, true); ++ + enic_stop(enic->netdev); + enic_dev_soft_reset(enic); + enic_reset_addr_lists(enic); +@@ -2312,7 +2319,10 @@ static void enic_reset(struct work_struct *work) + enic_set_rss_nic_cfg(enic); + enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_open(enic->netdev); +- spin_unlock(&enic->enic_api_lock); ++ ++ /* Allow infiniband to fiddle with the device again */ ++ enic_set_api_busy(enic, false); ++ + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); + + rtnl_unlock(); +@@ -2324,7 +2334,9 @@ static void enic_tx_hang_reset(struct work_struct *work) + + rtnl_lock(); + +- spin_lock(&enic->enic_api_lock); ++ /* Stop any activity from infiniband */ ++ enic_set_api_busy(enic, true); ++ + enic_dev_hang_notify(enic); + enic_stop(enic->netdev); + enic_dev_hang_reset(enic); +@@ -2333,7 +2345,10 @@ static void enic_tx_hang_reset(struct work_struct *work) + enic_set_rss_nic_cfg(enic); + enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_open(enic->netdev); +- spin_unlock(&enic->enic_api_lock); ++ ++ /* Allow infiniband to fiddle with the device again */ ++ enic_set_api_busy(enic, false); ++ + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); + + rtnl_unlock(); +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c +index 87236206366fd..00024dd411471 100644 +--- a/drivers/net/ethernet/faraday/ftgmac100.c ++++ b/drivers/net/ethernet/faraday/ftgmac100.c +@@ -1817,6 +1817,11 @@ static int ftgmac100_probe(struct platform_device *pdev) + priv->rxdes0_edorr_mask = BIT(30); + priv->txdes0_edotr_mask = BIT(30); + priv->is_aspeed = true; ++ /* Disable ast2600 problematic HW arbitration */ ++ if (of_device_is_compatible(np, "aspeed,ast2600-mac")) { ++ iowrite32(FTGMAC100_TM_DEFAULT, ++ priv->base + FTGMAC100_OFFSET_TM); ++ } + } else { + priv->rxdes0_edorr_mask = BIT(15); + priv->txdes0_edotr_mask = BIT(15); +diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h +index e5876a3fda91d..63b3e02fab162 100644 +--- a/drivers/net/ethernet/faraday/ftgmac100.h ++++ b/drivers/net/ethernet/faraday/ftgmac100.h +@@ -169,6 +169,14 @@ + #define FTGMAC100_MACCR_FAST_MODE (1 << 19) + #define FTGMAC100_MACCR_SW_RST (1 << 31) + ++/* ++ * test mode control register ++ */ ++#define FTGMAC100_TM_RQ_TX_VALID_DIS (1 << 28) ++#define FTGMAC100_TM_RQ_RR_IDLE_PREV (1 << 27) ++#define FTGMAC100_TM_DEFAULT \ ++ (FTGMAC100_TM_RQ_TX_VALID_DIS | FTGMAC100_TM_RQ_RR_IDLE_PREV) ++ + /* + * PHY control register + */ +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index fb37816a74db9..31f60b542feb4 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -1912,6 +1912,27 @@ out: + return ret; + } + ++static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) ++{ ++ struct fec_enet_private *fep = netdev_priv(ndev); ++ struct phy_device *phy_dev = ndev->phydev; ++ ++ if (phy_dev) { ++ phy_reset_after_clk_enable(phy_dev); ++ } else if (fep->phy_node) { ++ 
/* ++ * If the PHY still is not bound to the MAC, but there is ++ * OF PHY node and a matching PHY device instance already, ++ * use the OF PHY node to obtain the PHY device instance, ++ * and then use that PHY device instance when triggering ++ * the PHY reset. ++ */ ++ phy_dev = of_phy_find_device(fep->phy_node); ++ phy_reset_after_clk_enable(phy_dev); ++ put_device(&phy_dev->mdio.dev); ++ } ++} ++ + static int fec_enet_clk_enable(struct net_device *ndev, bool enable) + { + struct fec_enet_private *fep = netdev_priv(ndev); +@@ -1938,7 +1959,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) + if (ret) + goto failed_clk_ref; + +- phy_reset_after_clk_enable(ndev->phydev); ++ fec_enet_phy_reset_after_clk_enable(ndev); + } else { + clk_disable_unprepare(fep->clk_enet_out); + if (fep->clk_ptp) { +@@ -2984,16 +3005,16 @@ fec_enet_open(struct net_device *ndev) + /* Init MAC prior to mii bus probe */ + fec_restart(ndev); + +- /* Probe and connect to PHY when open the interface */ +- ret = fec_enet_mii_probe(ndev); +- if (ret) +- goto err_enet_mii_probe; +- + /* Call phy_reset_after_clk_enable() again if it failed during + * phy_reset_after_clk_enable() before because the PHY wasn't probed. + */ + if (reset_again) +- phy_reset_after_clk_enable(ndev->phydev); ++ fec_enet_phy_reset_after_clk_enable(ndev); ++ ++ /* Probe and connect to PHY when open the interface */ ++ ret = fec_enet_mii_probe(ndev); ++ if (ret) ++ goto err_enet_mii_probe; + + if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_used(); +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c +index c5c732601e35e..7ef3369953b6a 100644 +--- a/drivers/net/ethernet/ibm/ibmveth.c ++++ b/drivers/net/ethernet/ibm/ibmveth.c +@@ -1349,6 +1349,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) + int offset = ibmveth_rxq_frame_offset(adapter); + int csum_good = ibmveth_rxq_csum_good(adapter); + int lrg_pkt = ibmveth_rxq_large_packet(adapter); ++ __sum16 iph_check = 0; + + skb = ibmveth_rxq_get_buffer(adapter); + +@@ -1385,16 +1386,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, netdev); + +- if (csum_good) { +- skb->ip_summed = CHECKSUM_UNNECESSARY; +- ibmveth_rx_csum_helper(skb, adapter); ++ /* PHYP without PLSO support places a -1 in the ip ++ * checksum for large send frames. ++ */ ++ if (skb->protocol == cpu_to_be16(ETH_P_IP)) { ++ struct iphdr *iph = (struct iphdr *)skb->data; ++ ++ iph_check = iph->check; + } + +- if (length > netdev->mtu + ETH_HLEN) { ++ if ((length > netdev->mtu + ETH_HLEN) || ++ lrg_pkt || iph_check == 0xffff) { + ibmveth_rx_mss_helper(skb, mss, lrg_pkt); + adapter->rx_large_packets++; + } + ++ if (csum_good) { ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ ibmveth_rx_csum_helper(skb, adapter); ++ } ++ + napi_gro_receive(napi, skb); /* send it up */ + + netdev->stats.rx_packets++; +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 1b702a43a5d01..3e0aab04d86fb 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -4194,8 +4194,13 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq, + dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); + goto out; + } ++ /* crq->change_mac_addr.mac_addr is the requested one ++ * crq->change_mac_addr_rsp.mac_addr is the returned valid one. 
++ */ + ether_addr_copy(netdev->dev_addr, + &crq->change_mac_addr_rsp.mac_addr[0]); ++ ether_addr_copy(adapter->mac_addr, ++ &crq->change_mac_addr_rsp.mac_addr[0]); + out: + complete(&adapter->fw_done); + return rc; +@@ -4605,7 +4610,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, + case IBMVNIC_1GBPS: + adapter->speed = SPEED_1000; + break; +- case IBMVNIC_10GBP: ++ case IBMVNIC_10GBPS: + adapter->speed = SPEED_10000; + break; + case IBMVNIC_25GBPS: +@@ -4620,6 +4625,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, + case IBMVNIC_100GBPS: + adapter->speed = SPEED_100000; + break; ++ case IBMVNIC_200GBPS: ++ adapter->speed = SPEED_200000; ++ break; + default: + if (netif_carrier_ok(netdev)) + netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed); +diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h +index f8416e1d4cf09..43feb96b0a68a 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.h ++++ b/drivers/net/ethernet/ibm/ibmvnic.h +@@ -373,7 +373,7 @@ struct ibmvnic_phys_parms { + #define IBMVNIC_10MBPS 0x40000000 + #define IBMVNIC_100MBPS 0x20000000 + #define IBMVNIC_1GBPS 0x10000000 +-#define IBMVNIC_10GBP 0x08000000 ++#define IBMVNIC_10GBPS 0x08000000 + #define IBMVNIC_40GBPS 0x04000000 + #define IBMVNIC_100GBPS 0x02000000 + #define IBMVNIC_25GBPS 0x01000000 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +index 7980d7265e106..d26f40c0aff01 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +@@ -901,15 +901,13 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw) + **/ + s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) + { ++ s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val); ++ s32 (*read)(struct mii_bus *bus, int addr, int regnum); + struct ixgbe_adapter *adapter = hw->back; + struct pci_dev *pdev = adapter->pdev; + struct device *dev = &adapter->netdev->dev; + struct mii_bus *bus; + +- bus = devm_mdiobus_alloc(dev); +- if (!bus) +- return -ENOMEM; +- + switch (hw->device_id) { + /* C3000 SoCs */ + case IXGBE_DEV_ID_X550EM_A_KR: +@@ -922,16 +920,23 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + if (!ixgbe_x550em_a_has_mii(hw)) +- return -ENODEV; +- bus->read = &ixgbe_x550em_a_mii_bus_read; +- bus->write = &ixgbe_x550em_a_mii_bus_write; ++ return 0; ++ read = &ixgbe_x550em_a_mii_bus_read; ++ write = &ixgbe_x550em_a_mii_bus_write; + break; + default: +- bus->read = &ixgbe_mii_bus_read; +- bus->write = &ixgbe_mii_bus_write; ++ read = &ixgbe_mii_bus_read; ++ write = &ixgbe_mii_bus_write; + break; + } + ++ bus = devm_mdiobus_alloc(dev); ++ if (!bus) ++ return -ENOMEM; ++ ++ bus->read = read; ++ bus->write = write; ++ + /* Use the position of the device in the PCI hierarchy as the id */ + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name, + pci_name(pdev)); +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c +index 03e034918d147..bf48f0ded9c7d 100644 +--- a/drivers/net/ethernet/korina.c ++++ b/drivers/net/ethernet/korina.c +@@ -1113,7 +1113,7 @@ out: + return rc; + + probe_err_register: +- kfree(lp->td_ring); ++ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring)); + probe_err_td_ring: + iounmap(lp->tx_dma_regs); + probe_err_dma_tx: +@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev) + iounmap(lp->eth_regs); + iounmap(lp->rx_dma_regs); + iounmap(lp->tx_dma_regs); 
++ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
+
+ unregister_netdev(bif->dev);
+ free_netdev(bif->dev);
+diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
+index 62a820b1eb163..3362b148de23c 100644
+--- a/drivers/net/ethernet/mediatek/Kconfig
++++ b/drivers/net/ethernet/mediatek/Kconfig
+@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
+ config NET_MEDIATEK_STAR_EMAC
+ tristate "MediaTek STAR Ethernet MAC support"
+ select PHYLIB
++ select REGMAP_MMIO
+ help
+ This driver supports the ethernet MAC IP first used on
+ MediaTek MT85** SoCs.
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index b50c567ef508e..24006440e86e2 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -943,6 +943,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+ bool clean_complete = true;
+ int done;
+
++ if (!budget)
++ return 0;
++
+ if (priv->tx_ring_num[TX_XDP]) {
+ xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
+ if (xdp_tx_cq->xdp_busy) {
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index 9dff7b086c9fb..1f11379ad5b64 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ .dma = tx_info->map0_dma,
+ };
+
+- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
++ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ dma_unmap_page(priv->ddev, tx_info->map0_dma,
+ PAGE_SIZE, priv->dma_dir);
+ put_page(tx_info->page);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+index 3dc200bcfabde..69a05da0e3e3d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+@@ -242,8 +242,8 @@ static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
+
+ {
+ u32 data_size;
++ int err = 0;
+ u32 offset;
+- int err;
+
+ for (offset = 0; offset < value_len; offset += data_size) {
+ data_size = value_len - offset;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+index 429428bbc903c..b974f3cd10058 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+@@ -228,8 +228,8 @@ static int rx_fs_create(struct mlx5e_priv *priv,
+ fs_prot->miss_rule = miss_rule;
+
+ out:
+- kfree(flow_group_in);
+- kfree(spec);
++ kvfree(flow_group_in);
++ kvfree(spec);
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index 2d55b7c22c034..4e7cfa22b3d2f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -550,8 +550,9 @@ static int mlx5_pps_event(struct notifier_block *nb,
+ switch (clock->ptp_info.pin_config[pin].func) {
+ case PTP_PF_EXTTS:
+ ptp_event.index = pin;
+- ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
+- be64_to_cpu(eqe->data.pps.time_stamp));
++ ptp_event.timestamp =
++ mlx5_timecounter_cyc2time(clock,
++ be64_to_cpu(eqe->data.pps.time_stamp));
+ if (clock->pps_info.enabled) {
+ ptp_event.type = PTP_CLOCK_PPSUSR;
+ ptp_event.pps_times.ts_real =
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 11e6962a18e42..88b4b17ea22c9 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4686,7 +4686,7 @@ static int rtl8169_close(struct net_device *dev)
+
+ phy_disconnect(tp->phydev);
+
+- pci_free_irq(pdev, 0, tp);
++ free_irq(pci_irq_vector(pdev, 0), tp);
+
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+@@ -4737,8 +4737,8 @@ static int rtl_open(struct net_device *dev)
+
+ rtl_request_firmware(tp);
+
+- retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
+- dev->name);
++ retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
++ IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
+ if (retval < 0)
+ goto err_release_fw_2;
+
+@@ -4755,7 +4755,7 @@ out:
+ return retval;
+
+ err_free_irq:
+- pci_free_irq(pdev, 0, tp);
++ free_irq(pci_irq_vector(pdev, 0), tp);
+ err_release_fw_2:
+ rtl_release_firmware(tp);
+ rtl8169_rx_clear(tp);
+diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
+index 19fe86b3b3169..9cf5b8f8fab9a 100644
+--- a/drivers/net/ethernet/sfc/ef100_nic.c
++++ b/drivers/net/ethernet/sfc/ef100_nic.c
+@@ -428,24 +428,12 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
+ __clear_bit(reset_type, &efx->reset_pending);
+ rc = dev_open(efx->net_dev, NULL);
+ } else if (reset_type == RESET_TYPE_ALL) {
+- /* A RESET_TYPE_ALL will cause filters to be removed, so we remove filters
+- * and reprobe after reset to avoid removing filters twice
+- */
+- down_write(&efx->filter_sem);
+- ef100_filter_table_down(efx);
+- up_write(&efx->filter_sem);
+ rc = efx_mcdi_reset(efx, reset_type);
+ if (rc)
+ return rc;
+
+ netif_device_attach(efx->net_dev);
+
+- down_write(&efx->filter_sem);
+- rc = ef100_filter_table_up(efx);
+- up_write(&efx->filter_sem);
+- if (rc)
+- return rc;
+-
+ rc = dev_open(efx->net_dev, NULL);
+ } else {
+ rc = 1; /* Leave the device closed */
+diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
+index dfc6032e75f48..ea0f8eb036ae5 100644
+--- a/drivers/net/ethernet/sfc/efx_common.c
++++ b/drivers/net/ethernet/sfc/efx_common.c
+@@ -1030,6 +1030,7 @@ int efx_init_struct(struct efx_nic *efx,
+ efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+ BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
+ mutex_init(&efx->mac_lock);
++ init_rwsem(&efx->filter_sem);
+ #ifdef CONFIG_RFS_ACCEL
+ mutex_init(&efx->rps_mutex);
+ spin_lock_init(&efx->rps_hash_lock);
+diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
+index 5e29284c89c98..19cf7cac1e6e9 100644
+--- a/drivers/net/ethernet/sfc/rx_common.c
++++ b/drivers/net/ethernet/sfc/rx_common.c
+@@ -797,7 +797,6 @@ int efx_probe_filters(struct efx_nic *efx)
+ {
+ int rc;
+
+- init_rwsem(&efx->filter_sem);
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ rc = efx->type->filter_table_probe(efx);
+diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
+index 806eb651cea30..1503cc9ec6e2d 100644
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -6,6 +6,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -1833,6 +1834,14 @@ static const struct net_device_ops netsec_netdev_ops = {
+ static int netsec_of_probe(struct platform_device *pdev,
+ struct netsec_priv *priv, u32 *phy_addr)
+ {
++ int err;
++
++ err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
++ if (err) {
++ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
++ return err;
++ }
++
+ priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!priv->phy_np) {
+ dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+@@ -1859,6 +1868,14 @@ static int netsec_acpi_probe(struct platform_device *pdev,
+ if (!IS_ENABLED(CONFIG_ACPI))
+ return -ENODEV;
+
++ /* ACPI systems are assumed to configure the PHY in firmware, so
++ * there is really no need to discover the PHY mode from the DSDT.
++ * Since firmware is known to exist in the field that configures the
++ * PHY correctly but passes the wrong mode string in the phy-mode
++ * device property, we have no choice but to ignore it.
++ */
++ priv->phy_interface = PHY_INTERFACE_MODE_NA;
++
+ ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
+ if (ret) {
+ dev_err(&pdev->dev,
+@@ -1995,13 +2012,6 @@ static int netsec_probe(struct platform_device *pdev)
+ priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
+ NETIF_MSG_LINK | NETIF_MSG_PROBE;
+
+- priv->phy_interface = device_get_phy_mode(&pdev->dev);
+- if ((int)priv->phy_interface < 0) {
+- dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+- ret = -ENODEV;
+- goto free_ndev;
+- }
+-
+ priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
+ resource_size(mmio_res));
+ if (!priv->ioaddr) {
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index b56b13d64ab48..122a0697229af 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -176,32 +176,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+ }
+ }
+
+-/**
+- * stmmac_stop_all_queues - Stop all queues
+- * @priv: driver private structure
+- */
+-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
+-{
+- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+- u32 queue;
+-
+- for (queue = 0; queue < tx_queues_cnt; queue++)
+- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+-}
+-
+-/**
+- * stmmac_start_all_queues - Start all queues
+- * @priv: driver private structure
+- */
+-static void stmmac_start_all_queues(struct stmmac_priv *priv)
+-{
+- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+- u32 queue;
+-
+- for (queue = 0; queue < tx_queues_cnt; queue++)
+- netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
+-}
+-
+ static void stmmac_service_event_schedule(struct stmmac_priv *priv)
+ {
+ if (!test_bit(STMMAC_DOWN, &priv->state) &&
+@@ -2740,6 +2714,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+ stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
+ }
+
++ /* Configure real RX and TX queues */
++ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
++ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
++
+ /* Start the ball rolling... */
+ stmmac_start_all_dma(priv);
+
+@@ -2868,7 +2846,7 @@ static int stmmac_open(struct net_device *dev)
+ }
+
+ stmmac_enable_all_queues(priv);
+- stmmac_start_all_queues(priv);
++ netif_tx_start_all_queues(priv->dev);
+
+ return 0;
+
+@@ -2911,8 +2889,6 @@ static int stmmac_release(struct net_device *dev)
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
+
+- stmmac_stop_all_queues(priv);
+-
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+@@ -4827,10 +4803,6 @@ int stmmac_dvr_probe(struct device *device,
+
+ stmmac_check_ether_addr(priv);
+
+- /* Configure real RX and TX queues */
+- netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
+- netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+-
+ ndev->netdev_ops = &stmmac_netdev_ops;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+@@ -5086,7 +5058,6 @@ int stmmac_suspend(struct device *dev)
+ mutex_lock(&priv->lock);
+
+ netif_device_detach(ndev);
+- stmmac_stop_all_queues(priv);
+
+ stmmac_disable_all_queues(priv);
+
+@@ -5213,8 +5184,6 @@ int stmmac_resume(struct device *dev)
+
+ stmmac_enable_all_queues(priv);
+
+- stmmac_start_all_queues(priv);
+-
+ mutex_unlock(&priv->lock);
+
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
+index b7efd7c95e9c8..ed60fa5bcdaca 100644
+--- a/drivers/net/ipa/ipa_endpoint.c
++++ b/drivers/net/ipa/ipa_endpoint.c
+@@ -1471,6 +1471,9 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
+
+ void ipa_endpoint_suspend(struct ipa *ipa)
+ {
++ if (!ipa->setup_complete)
++ return;
++
+ if (ipa->modem_netdev)
+ ipa_modem_suspend(ipa->modem_netdev);
+
+@@ -1482,6 +1485,9 @@ void ipa_endpoint_suspend(struct ipa *ipa)
+
+ void ipa_endpoint_resume(struct ipa *ipa)
+ {
++ if (!ipa->setup_complete)
++ return;
++
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+
+diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
+index 9b00708676cf7..1bdd3df0867a5 100644
+--- a/drivers/net/wan/hdlc.c
++++ b/drivers/net/wan/hdlc.c
+@@ -46,7 +46,15 @@ static struct hdlc_proto *first_proto;
+ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *p, struct net_device *orig_dev)
+ {
+- struct hdlc_device *hdlc = dev_to_hdlc(dev);
++ struct hdlc_device *hdlc;
++
++ /* First make sure "dev" is an HDLC device */
++ if (!(dev->priv_flags & IFF_WAN_HDLC)) {
++ kfree_skb(skb);
++ return NET_RX_SUCCESS;
++ }
++
++ hdlc = dev_to_hdlc(dev);
+
+ if (!net_eq(dev_net(dev), &init_net)) {
+ kfree_skb(skb);
+diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
+index 08e0a46501dec..c70a518b8b478 100644
+--- a/drivers/net/wan/hdlc_raw_eth.c
++++ b/drivers/net/wan/hdlc_raw_eth.c
+@@ -99,6 +99,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
+ old_qlen = dev->tx_queue_len;
+ ether_setup(dev);
+ dev->tx_queue_len = old_qlen;
++ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ eth_hw_addr_random(dev);
+ call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
+ netif_dormant_off(dev);
+diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
+index 294fbc1e89ab8..e6e0284e47837 100644
+--- a/drivers/net/wireless/ath/ath10k/ce.c
++++ b/drivers/net/wireless/ath/ath10k/ce.c
+@@ -1555,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+ ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+ if (ret) {
+ dma_free_coherent(ar->dev,
+- (nentries * sizeof(struct ce_desc_64) +
++ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space_unaligned,
+ base_addr);
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index d787cbead56ab..215ade6faf328 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -142,6 +142,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+ BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
+
+ idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
++
++ if (idx < 0 || idx >= htt->rx_ring.size) {
++ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
++ idx &= htt->rx_ring.size_mask;
++ ret = -ENOMEM;
++ goto fail;
++ }
++
+ while (num > 0) {
+ skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+ if (!skb) {
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 3c0c33a9f30cb..2177e9d92bdff 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -7278,7 +7278,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
+ struct ieee80211_channel *channel)
+ {
+ int ret;
+- enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
++ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
+index 30092841ac464..a0314c1c84653 100644
+--- a/drivers/net/wireless/ath/ath11k/ahb.c
++++ b/drivers/net/wireless/ath/ath11k/ahb.c
+@@ -981,12 +981,16 @@ err_core_free:
+ static int ath11k_ahb_remove(struct platform_device *pdev)
+ {
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
++ unsigned long left;
+
+ reinit_completion(&ab->driver_recovery);
+
+- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags))
+- wait_for_completion_timeout(&ab->driver_recovery,
+- ATH11K_AHB_RECOVERY_TIMEOUT);
++ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
++ left = wait_for_completion_timeout(&ab->driver_recovery,
++ ATH11K_AHB_RECOVERY_TIMEOUT);
++ if (!left)
++ ath11k_warn(ab, "failed to receive recovery response completion\n");
++ }
+
+ set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->restart_work);
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 94ae2b9ea6635..4674f0aca8e9b 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -6006,7 +6006,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
+ ret = ath11k_mac_setup_channels_rates(ar,
+ cap->supported_bands);
+ if (ret)
+- goto err_free;
++ goto err;
+
+ ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
+ ath11k_mac_setup_he_cap(ar, cap);
+@@ -6120,7 +6120,9 @@ static int __ath11k_mac_register(struct ath11k *ar)
+ err_free:
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
++ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+
++err:
+ SET_IEEE80211_DEV(ar->hw, NULL);
+ return ret;
+ }
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index c00a99ad8dbc1..497cff7e64cc5 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -2419,6 +2419,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
+ ATH11K_QMI_WLFW_SERVICE_INS_ID_V01);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to add qmi lookup\n");
++ destroy_workqueue(ab->qmi.event_wq);
+ return ret;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
+index 1c5d65bb411f7..6d6a7e34645f2 100644
+--- a/drivers/net/wireless/ath/ath11k/spectral.c
++++ b/drivers/net/wireless/ath/ath11k/spectral.c
+@@ -773,6 +773,8 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
+ i += sizeof(*tlv) + tlv_len;
+ }
+
++ ret = 0;
++
+ err:
+ kfree(fft_sample);
+ unlock:
+diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
+index 5e7ea838a9218..814131a0680a4 100644
+--- a/drivers/net/wireless/ath/ath6kl/main.c
++++ b/drivers/net/wireless/ath/ath6kl/main.c
+@@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
+
++ if (aid < 1 || aid > AP_MAX_NUM_STA)
++ return;
++
+ if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
+ struct ieee80211_mgmt *mgmt =
+ (struct ieee80211_mgmt *) assoc_info;
+diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
+index 6885d2ded53a8..3d5db84d64650 100644
+--- a/drivers/net/wireless/ath/ath6kl/wmi.c
++++ b/drivers/net/wireless/ath/ath6kl/wmi.c
+@@ -2645,6 +2645,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ return -EINVAL;
+ }
+
++ if (tsid >= 16) {
++ ath6kl_err("invalid tsid: %d\n", tsid);
++ return -EINVAL;
++ }
++
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index 3f563e02d17da..2ed98aaed6fb5 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle)
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ /* The pending URBs have to be canceled. */
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
++ usb_get_urb(tx_buf->urb);
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ usb_kill_urb(tx_buf->urb);
++ list_del(&tx_buf->list);
++ usb_free_urb(tx_buf->urb);
++ kfree(tx_buf->buf);
++ kfree(tx_buf);
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ }
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
+ }
+@@ -762,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+ struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
+ unsigned long flags;
+
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_buf, list) {
++ usb_get_urb(tx_buf->urb);
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ }
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
++ usb_get_urb(tx_buf->urb);
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ }
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index d2e062eaf5614..510e61e97dbcb 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -339,6 +339,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+
+ if (skb) {
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
++ if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
++ goto ret;
+ endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
+index 702b689c06df3..f3ea629764fa8 100644
+--- a/drivers/net/wireless/ath/wcn36xx/main.c
++++ b/drivers/net/wireless/ath/wcn36xx/main.c
+@@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+- .rx_highest = cpu_to_le16(72),
++ .rx_highest = cpu_to_le16(150),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index f89010a81ffbe..aa9ced3c86fbd 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -486,7 +486,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
+ ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
+
+ if (ret || !(*ifp) || !(*ifp)->ndev) {
+- if (ret != -ENODATA && *ifp)
++ if (ret != -ENODATA && *ifp && (*ifp)->ndev)
+ (*ifp)->ndev->stats.rx_errors++;
+ brcmu_pkt_buf_free_skb(skb);
+ return -ENODATA;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+index f1a20db8daab9..bfddb851e386e 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+@@ -1620,6 +1620,8 @@ fail:
+ BRCMF_TX_IOCTL_MAX_MSG_SIZE,
+ msgbuf->ioctbuf,
+ msgbuf->ioctbuf_handle);
++ if (msgbuf->txflow_wq)
++ destroy_workqueue(msgbuf->txflow_wq);
+ kfree(msgbuf);
+ }
+ return -ENOMEM;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+index 7ef36234a25dc..66797dc5e90d5 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+@@ -5065,8 +5065,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
+ pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
+ pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
+
+- if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
++ if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
++ kfree(pi->u.pi_lcnphy);
+ return false;
++ }
+
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+ if (pi_lcn->lcnphy_tempsense_option == 3) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index 9ce7207d9ec5b..83caaa3c60a95 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -947,9 +947,8 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
+ struct iwl_rx_packet *pkt = tp_data->fw_pkt;
+ struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
+
+- if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
+- (pkt->hdr.cmd == wanted_hdr->cmd &&
+- pkt->hdr.group_id == wanted_hdr->group_id))) {
++ if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
++ pkt->hdr.group_id == wanted_hdr->group_id)) {
+ struct iwl_rx_packet *fw_pkt =
+ kmemdup(pkt,
+ sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
+@@ -1012,6 +1011,9 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
+ enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
+ int ret, i;
+
++ if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
++ return;
++
+ IWL_DEBUG_FW(fwrt,
+ "WRT: Generating active triggers list, domain 0x%x\n",
+ fwrt->trans->dbg.domains_bitmap);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 9374c85c5caf9..c918c0887ed01 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -3693,9 +3693,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
+ tail->apply_time_max_delay = cpu_to_le32(delay);
+
+ IWL_DEBUG_TE(mvm,
+- "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
+- channel->hw_value, req_dur, duration, delay,
+- dtim_interval);
++ "ROC: Requesting to remain on channel %u for %ums\n",
++ channel->hw_value, req_dur);
++ IWL_DEBUG_TE(mvm,
++ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
++ duration, delay, dtim_interval);
++
+ /* Set the node address */
+ memcpy(tail->node_addr, vif->addr, ETH_ALEN);
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index ff932627a46c1..2fb69a590bd8e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -1889,7 +1889,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
+ chan, CFG80211_BSS_FTYPE_UNKNOWN,
+ bssid, timestamp,
+ cap_info_bitmap, beacon_period,
+- ie_buf, ie_len, rssi, GFP_KERNEL);
++ ie_buf, ie_len, rssi, GFP_ATOMIC);
+ if (bss) {
+ bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+ bss_priv->band = band;
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index a042965962a2d..1b6bee5465288 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -1976,6 +1976,8 @@ error:
+ kfree(card->mpa_rx.buf);
+ card->mpa_tx.buf_size = 0;
+ card->mpa_rx.buf_size = 0;
++ card->mpa_tx.buf = NULL;
++ card->mpa_rx.buf = NULL;
+ }
+
+ return ret;
+diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
+index 6f3cfde4654cc..426e39d4ccf0f 100644
+--- a/drivers/net/wireless/marvell/mwifiex/usb.c
++++ b/drivers/net/wireless/marvell/mwifiex/usb.c
+@@ -1353,7 +1353,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
+ skb_dequeue(&port->tx_aggr.aggr_list)))
+ mwifiex_write_data_complete(adapter, skb_tmp,
+ 0, -1);
+- del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
++ if (port->tx_aggr.timer_cnxt.hold_timer.function)
++ del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
+ port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
+ port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+index 88931658a9fbb..937cb71bed642 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+@@ -165,15 +165,14 @@ mt7615_reset_test_set(void *data, u64 val)
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+- mt7615_mutex_acquire(dev);
+-
+ skb = alloc_skb(1, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, 1);
+- mt76_tx_queue_skb_raw(dev, 0, skb, 0);
+
++ mt7615_mutex_acquire(dev);
++ mt76_tx_queue_skb_raw(dev, 0, skb, 0);
+ mt7615_mutex_release(dev);
+
+ return 0;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index 3dd8dd28690ed..019031d436de8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1845,7 +1845,7 @@ void mt7615_pm_wake_work(struct work_struct *work)
+ pm.wake_work);
+ mphy = dev->phy.mt76;
+
+- if (mt7615_driver_own(dev)) {
++ if (mt7615_mcu_set_drv_ctrl(dev)) {
+ dev_err(mphy->dev->dev, "failed to wake device\n");
+ goto out;
+ }
+@@ -1853,12 +1853,13 @@ void mt7615_pm_wake_work(struct work_struct *work)
+ spin_lock_bh(&dev->pm.txq_lock);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ struct mt7615_sta *msta = dev->pm.tx_q[i].msta;
+- struct mt76_wcid *wcid = msta ? &msta->wcid : NULL;
+ struct ieee80211_sta *sta = NULL;
++ struct mt76_wcid *wcid;
+
+ if (!dev->pm.tx_q[i].skb)
+ continue;
+
++ wcid = msta ? &msta->wcid : &dev->mt76.global_wcid;
+ if (msta && wcid->sta)
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+@@ -1943,7 +1944,7 @@ void mt7615_pm_power_save_work(struct work_struct *work)
+ goto out;
+ }
+
+- if (!mt7615_firmware_own(dev))
++ if (!mt7615_mcu_set_fw_ctrl(dev))
+ return;
+ out:
+ queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 2d0b1f49fdbcf..bafe2bdeb5eb4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -361,7 +361,10 @@ mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
+ wd->key.keylen = key->keylen;
+ wd->key.cmd = cmd;
+
++ spin_lock_bh(&dev->mt76.lock);
+ list_add_tail(&wd->node, &dev->wd_head);
++ spin_unlock_bh(&dev->mt76.lock);
++
+ queue_work(dev->mt76.wq, &dev->wtbl_work);
+
+ return 0;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index bd316dbd9041d..f42a69ee5635a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -324,6 +324,97 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
+ sizeof(req), false);
+ }
+
++static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
++{
++ if (!is_mt7622(&dev->mt76))
++ return;
++
++ regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
++ MT_INFRACFG_MISC_AP2CONN_WAKE,
++ !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
++}
++
++static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
++{
++ struct mt76_phy *mphy = &dev->mt76.phy;
++ struct mt76_dev *mdev = &dev->mt76;
++ u32 addr;
++ int err;
++
++ addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
++ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
++
++ mt7622_trigger_hif_int(dev, true);
++
++ addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
++ err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
++
++ mt7622_trigger_hif_int(dev, false);
++
++ if (err) {
++ dev_err(mdev->dev, "driver own failed\n");
++ return -ETIMEDOUT;
++ }
++
++ clear_bit(MT76_STATE_PM, &mphy->state);
++
++ return 0;
++}
++
++static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
++{
++ struct mt76_phy *mphy = &dev->mt76.phy;
++ int i;
++
++ if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
++ goto out;
++
++ for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
++ mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
++ if (mt76_poll_msec(dev, MT_CONN_HIF_ON_LPCTL,
++ MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
++ break;
++ }
++
++ if (i == MT7615_DRV_OWN_RETRY_COUNT) {
++ dev_err(dev->mt76.dev, "driver own failed\n");
++ set_bit(MT76_STATE_PM, &mphy->state);
++ return -EIO;
++ }
++
++out:
++ dev->pm.last_activity = jiffies;
++
++ return 0;
++}
++
++static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
++{
++ struct mt76_phy *mphy = &dev->mt76.phy;
++ int err = 0;
++ u32 addr;
++
++ if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
++ return 0;
++
++ mt7622_trigger_hif_int(dev, true);
++
++ addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
++ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
++
++ if (is_mt7622(&dev->mt76) &&
++ !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
++ MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
++ dev_err(dev->mt76.dev, "Timeout for firmware own\n");
++ clear_bit(MT76_STATE_PM, &mphy->state);
++ err = -EIO;
++ }
++
++ mt7622_trigger_hif_int(dev, false);
++
++ return err;
++}
++
+ static void
+ mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ {
+@@ -1314,6 +1405,8 @@ static const struct mt7615_mcu_ops wtbl_update_ops = {
+ .add_tx_ba = mt7615_mcu_wtbl_tx_ba,
+ .add_rx_ba = mt7615_mcu_wtbl_rx_ba,
+ .sta_add = mt7615_mcu_wtbl_sta_add,
++ .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
++ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+ };
+
+ static int
+@@ -1410,6 +1503,8 @@ static const struct mt7615_mcu_ops sta_update_ops = {
+ .add_tx_ba = mt7615_mcu_sta_tx_ba,
+ .add_rx_ba = mt7615_mcu_sta_rx_ba,
+ .sta_add = mt7615_mcu_add_sta,
++ .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
++ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+ };
+
+ static int
+@@ -1823,6 +1918,8 @@ static const struct mt7615_mcu_ops uni_update_ops = {
+ .add_tx_ba = mt7615_mcu_uni_tx_ba,
+ .add_rx_ba = mt7615_mcu_uni_rx_ba,
+ .sta_add = mt7615_mcu_uni_add_sta,
++ .set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
++ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+ };
+
+ static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
+@@ -1895,81 +1992,6 @@ static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
+ &req, sizeof(req), true);
+ }
+
+-static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
+-{
+- if (!is_mt7622(&dev->mt76))
+- return;
+-
+- regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
+- MT_INFRACFG_MISC_AP2CONN_WAKE,
+- !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
+-}
+-
+-int mt7615_driver_own(struct mt7615_dev *dev)
+-{
+- struct mt76_phy *mphy = &dev->mt76.phy;
+- struct mt76_dev *mdev = &dev->mt76;
+- int i;
+-
+- if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+- goto out;
+-
+- mt7622_trigger_hif_int(dev, true);
+-
+- for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
+- u32 addr;
+-
+- addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
+- mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+-
+- addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+- if (mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
+- break;
+- }
+-
+- mt7622_trigger_hif_int(dev, false);
+-
+- if (i == MT7615_DRV_OWN_RETRY_COUNT) {
+- dev_err(mdev->dev, "driver own failed\n");
+- set_bit(MT76_STATE_PM, &mphy->state);
+- return -EIO;
+- }
+-
+-out:
+- dev->pm.last_activity = jiffies;
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(mt7615_driver_own);
+-
+-int mt7615_firmware_own(struct mt7615_dev *dev)
+-{
+- struct mt76_phy *mphy = &dev->mt76.phy;
+- int err = 0;
+- u32 addr;
+-
+- if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
+- return 0;
+-
+- mt7622_trigger_hif_int(dev, true);
+-
+- addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+- mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
+-
+- if (is_mt7622(&dev->mt76) &&
+- !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
+- MT_CFG_LPCR_HOST_FW_OWN, 300)) {
+- dev_err(dev->mt76.dev, "Timeout for firmware own\n");
+- clear_bit(MT76_STATE_PM, &mphy->state);
+- err = -EIO;
+- }
+-
+- mt7622_trigger_hif_int(dev, false);
+-
+- return err;
+-}
+-EXPORT_SYMBOL_GPL(mt7615_firmware_own);
+-
+ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
+ {
+ const struct mt7615_patch_hdr *hdr;
+@@ -2452,7 +2474,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
+
+ dev->mt76.mcu_ops = &mt7615_mcu_ops,
+
+- ret = mt7615_driver_own(dev);
++ ret = mt7615_mcu_drv_pmctrl(dev);
+ if (ret)
+ return ret;
+
+@@ -2482,7 +2504,7 @@ EXPORT_SYMBOL_GPL(mt7615_mcu_init);
+ void mt7615_mcu_exit(struct mt7615_dev *dev)
+ {
+ __mt76_mcu_restart(&dev->mt76);
+- mt7615_firmware_own(dev);
++ mt7615_mcu_set_fw_ctrl(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+ }
+ EXPORT_SYMBOL_GPL(mt7615_mcu_exit);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+index 571eadc033a3b..c2e1cfb071a82 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+@@ -220,6 +220,8 @@ struct mt7615_phy {
+ #define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__)
+ #define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__)
+ #define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
++#define mt7615_mcu_set_drv_ctrl(dev) (dev)->mcu_ops->set_drv_ctrl((dev))
++#define mt7615_mcu_set_fw_ctrl(dev) (dev)->mcu_ops->set_fw_ctrl((dev))
+ struct mt7615_mcu_ops {
+ int (*add_tx_ba)(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+@@ -238,6 +240,8 @@ struct mt7615_mcu_ops {
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, bool enable);
+ int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
++ int (*set_drv_ctrl)(struct mt7615_dev *dev);
++ int (*set_fw_ctrl)(struct mt7615_dev *dev);
+ };
+
+ struct mt7615_dev {
+@@ -638,8 +642,6 @@ int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration);
+-int mt7615_firmware_own(struct mt7615_dev *dev);
+-int mt7615_driver_own(struct mt7615_dev *dev);
+
+ int mt7615_init_debugfs(struct mt7615_dev *dev);
+ int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+index 2328d78e06a10..b9794f8a8df41 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+@@ -118,7 +118,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+ if (err)
+ goto restore;
+
+- err = mt7615_firmware_own(dev);
++ err = mt7615_mcu_set_fw_ctrl(dev);
+ if (err)
+ goto restore;
+
+@@ -142,7 +142,7 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
+ bool pdma_reset;
+ int i, err;
+
+- err = mt7615_driver_own(dev);
++ err = mt7615_mcu_set_drv_ctrl(dev);
+ if (err < 0)
+ return err;
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+index dabce51117b0a..57d60876db544 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+@@ -426,6 +426,8 @@ static int mt7663s_suspend(struct device *dev)
+ return err;
+ }
+
++ sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
++
+ mt76s_stop_txrx(&mdev->mt76);
+
+ return mt7663s_firmware_own(mdev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+index 1730751133aa2..2cfa58d49832f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+@@ -70,7 +70,7 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
+ if (dev->mt76.test.state != MT76_TM_STATE_OFF)
+ tx_power = dev->mt76.test.tx_power;
+
+- len = sizeof(req_hdr) + MT7615_EE_MAX - MT_EE_NIC_CONF_0;
++ len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len);
+ if (!skb)
+ return -ENOMEM;
+@@ -83,8 +83,10 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
+ int index;
+
+ ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i);
+- if (ret < 0)
++ if (ret < 0) {
++ dev_kfree_skb(skb);
+ return -EINVAL;
++ }
+
+ index = ret - MT_EE_NIC_CONF_0;
+ if (tx_power && tx_power[i])
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+index 0b33df3e3bfec..adbed373798e8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+@@ -19,6 +19,7 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ {
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ int ret, seq, ep;
++ u32 len;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+@@ -28,7 +29,8 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ else
+ ep = MT_EP_OUT_AC_BE;
+
+- put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
++ len = skb->len;
++ put_unaligned_le32(len, skb_push(skb, sizeof(len)));
+ ret = mt76_skb_adjust_pad(skb);
+ if (ret < 0)
+ goto out;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+index 6dffdaaa9ad53..294276e2280d2 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+@@ -259,8 +259,11 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ }
+
+ mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
+- if (mt76_is_usb(mdev))
+- put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
++ if (mt76_is_usb(mdev)) {
++ u32 len = skb->len;
++
++ put_unaligned_le32(len, skb_push(skb, sizeof(len)));
++ }
+
+ return mt76_skb_adjust_pad(skb);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+index a8832c5e60041..8a1ae08d9572e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+@@ -95,16 +95,13 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
+ dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);
+
+ mt7915_tx_cleanup(dev);
+-
+- if (napi_complete_done(napi, 0))
+- mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
+-
+- mt7915_tx_cleanup(dev);
+-
+ mt7915_mac_sta_poll(dev);
+
+ tasklet_schedule(&dev->mt76.tx_tasklet);
+
++ if (napi_complete_done(napi, 0))
++ mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
++
+ return 0;
+ }
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index eaed5ef054016..bfd87974a5796 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -2335,14 +2335,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
+ struct bss_info_bcn *bcn;
+ int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+
+- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+- if (IS_ERR(rskb))
+- return PTR_ERR(rskb);
+-
+- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+- bcn = (struct bss_info_bcn *)tlv;
+- bcn->enable = en;
+-
+ skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!skb)
+ return -EINVAL;
+@@ -2353,6 +2345,16 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
+ return -EINVAL;
+ }
+
++ rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
++ if (IS_ERR(rskb)) {
++ dev_kfree_skb(skb);
++ return PTR_ERR(rskb);
++ }
++
++ tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
++ bcn = (struct bss_info_bcn *)tlv;
++ bcn->enable = en;
++
+ if (mvif->band_idx) {
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
+index 75bb02cdfdae4..5bd6ac1ba3b5b 100644
+--- a/drivers/net/wireless/mediatek/mt76/testmode.c
++++ b/drivers/net/wireless/mediatek/mt76/testmode.c
+@@ -442,9 +442,13 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+ mutex_lock(&dev->mutex);
+
+ if (tb[MT76_TM_ATTR_STATS]) {
++ err = -EINVAL;
++
+ a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
+- err = mt76_testmode_dump_stats(dev, msg);
+- nla_nest_end(msg, a);
++ if (a) {
++ err = mt76_testmode_dump_stats(dev, msg);
++ nla_nest_end(msg, a);
++ }
+
+ goto out;
+ }
+diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
+index 358ac86013338..b5a1b65c087ca 100644
+--- a/drivers/net/wireless/microchip/wilc1000/mon.c
++++ b/drivers/net/wireless/microchip/wilc1000/mon.c
+@@ -235,11 +235,10 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
+
+ if (register_netdevice(wl->monitor_dev)) {
+ netdev_err(real_dev, "register_netdevice failed\n");
++ free_netdev(wl->monitor_dev);
+ return NULL;
+ }
+ priv = netdev_priv(wl->monitor_dev);
+- if (!priv)
+- return NULL;
+
+ priv->real_ndev = real_dev;
+
+diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
+index 3ece7b0b03929..351ff909ab1c7 100644
+--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
+@@ -149,9 +149,10 @@ static int wilc_sdio_probe(struct sdio_func *func,
+ wilc->dev = &func->dev;
+
+ wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc");
+- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
++ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
++ kfree(sdio_priv);
+ return -EPROBE_DEFER;
+- else if (!IS_ERR(wilc->rtc_clk))
++ } else if (!IS_ERR(wilc->rtc_clk))
+ clk_prepare_enable(wilc->rtc_clk);
+
+ dev_info(&func->dev, "Driver Initializing success\n");
+diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
+index 3f19e3f38a397..a18dac0aa6b67 100644
+--- a/drivers/net/wireless/microchip/wilc1000/spi.c
++++ b/drivers/net/wireless/microchip/wilc1000/spi.c
+@@ -112,9 +112,10 @@ static int wilc_bus_probe(struct spi_device *spi)
+ wilc->dev_irq_num = spi->irq;
+
+ wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
+- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
++ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
++ kfree(spi_priv);
+ return -EPROBE_DEFER;
+- else if (!IS_ERR(wilc->rtc_clk))
++ } else if (!IS_ERR(wilc->rtc_clk))
+ clk_prepare_enable(wilc->rtc_clk);
+
+ return 0;
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
+index f40d8c3c3d9e5..f3ccbd2b10847 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
+@@ -869,6 +869,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
+ default:
+ pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
+ vif->vifid, vif->wdev.iftype);
++ dev_kfree_skb(cmd_skb);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1924,6 +1925,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
+ break;
+ default:
+ pr_err("unsupported iftype %d\n", vif->wdev.iftype);
++ dev_kfree_skb(cmd_skb);
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index 19efae462a242..5cd7ef3625c5e 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -5795,7 +5795,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+- usb_free_urb(urb);
+ goto error;
+ }
+
+@@ -5804,6 +5803,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
+ rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
+
+ error:
++ usb_free_urb(urb);
+ return ret;
+ }
+
+@@ -6318,6 +6318,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct rtl8xxxu_rx_urb *rx_urb;
+ struct rtl8xxxu_tx_urb *tx_urb;
++ struct sk_buff *skb;
+ unsigned long flags;
+ int ret, i;
+
+@@ -6368,6 +6369,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
+ rx_urb->hw = hw;
+
+ ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
++ if (ret) {
++ if (ret != -ENOMEM) {
++ skb = (struct sk_buff *)rx_urb->urb.context;
++ dev_kfree_skb(skb);
++ }
++ rtl8xxxu_queue_rx_urb(priv, rx_urb);
++ }
+ }
+
+ schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index 54044abf30d7c..d69e4c6fc680a 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -1473,6 +1473,9 @@ int rtw_core_init(struct rtw_dev *rtwdev)
+ ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
+ if (ret) {
+ rtw_warn(rtwdev, "no wow firmware loaded\n");
++ wait_for_completion(&rtwdev->fw.completion);
++ if (rtwdev->fw.firmware)
++ release_firmware(rtwdev->fw.firmware);
+ return ret;
+ }
+ }
+@@ -1487,6 +1490,8 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
+ struct rtw_rsvd_page *rsvd_pkt, *tmp;
+ unsigned long flags;
+
++ rtw_wait_firmware_completion(rtwdev);
++
+ if (fw->firmware)
+ release_firmware(fw->firmware);
+
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
+index 3413973bc4750..7f1f5073b9f4d 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.c
++++ b/drivers/net/wireless/realtek/rtw88/pci.c
+@@ -1599,6 +1599,8 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
+
+ if (chip->ops->shutdown)
+ chip->ops->shutdown(rtwdev);
++
++ pci_set_power_state(pdev, PCI_D3hot);
+ }
+ EXPORT_SYMBOL(rtw_pci_shutdown);
+
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
+index 024c2bc275cbe..ca17aa9cf7dc7 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.h
++++ b/drivers/net/wireless/realtek/rtw88/pci.h
+@@ -9,8 +9,8 @@
+ #define RTK_BEQ_TX_DESC_NUM 256
+
+ #define RTK_MAX_RX_DESC_NUM 512
+-/* 8K + rx desc size */
+-#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
++/* 11K + rx desc size */
++#define RTK_PCI_RX_BUF_SIZE (11454 + 24)
+
+ #define RTK_PCI_CTRL 0x300
+ #define BIT_RST_TRXDMA_INTF BIT(20)
+diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
+index 8d93f31597469..9687b376d221b 100644
+--- a/drivers/net/wireless/realtek/rtw88/phy.c
++++ b/drivers/net/wireless/realtek/rtw88/phy.c
+@@ -147,12 +147,13 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
+ {
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_hal *hal = &rtwdev->hal;
+- const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
+ u32 addr, mask;
+ u8 path;
+
+- if (dig_cck)
++ if (chip->dig_cck) {
++ const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
+ rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
++ }
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ addr = chip->dig[path].addr;
+diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
+index 88e1db65be02c..71428d8cbcfc5 100644
+--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
+@@ -1203,6 +1203,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
+
+ err_dma_mask:
+ pci_clear_master(pdev);
++ pci_release_regions(pdev);
+ err_pci_regions:
+ pci_disable_device(pdev);
+ err_pci_enable:
+diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
+index 3185efeab487b..093dd20057b92 100644
+--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
++++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
+@@ -1893,7 +1893,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
+ goto err_init_dev;
+ } else {
+ rc = -EINVAL;
+- goto err_ndev;
++ goto err_init_pci;
+ }
+
+ ndev_reset_unsafe_flags(ndev);
+diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
+index 57cfd78731fbb..53efecb678983 100644
+--- a/drivers/nvme/host/zns.c
++++ b/drivers/nvme/host/zns.c
+@@ -133,28 +133,6 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
+ return NULL;
+ }
+
+-static int __nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+- struct nvme_zone_report *report,
+- size_t buflen)
+-{
+- struct nvme_command c = { };
+- int ret;
+-
+- c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
+- c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
+- c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
+- c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
+- c.zmr.zra = NVME_ZRA_ZONE_REPORT;
+- c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
+- c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
+-
+- ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
+- if (ret)
+- return ret;
+-
+- return le64_to_cpu(report->nr_zones);
+-}
+-
+ static int nvme_zone_parse_entry(struct nvme_ns *ns,
+ struct nvme_zone_descriptor *entry,
+ unsigned int idx, report_zones_cb cb,
+@@ -182,6 +160,7 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+ {
+ struct nvme_zone_report *report;
++ struct nvme_command c = { };
+ int ret, zone_idx = 0;
+ unsigned int nz, i;
+ size_t buflen;
+@@ -190,14 +169,26 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ if (!report)
+ return -ENOMEM;
+
++ c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
++ c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
++ c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
++ c.zmr.zra = NVME_ZRA_ZONE_REPORT;
++ c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
++ c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
++
+ sector &= ~(ns->zsze - 1);
+ while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
+ memset(report, 0, buflen);
+- ret = __nvme_ns_report_zones(ns, sector, report, buflen);
+- if (ret < 0)
++
++ c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
++ ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
++ if (ret) {
++ if (ret > 0)
++ ret = -EIO;
+ goto out_free;
++ }
+
+- nz = min_t(unsigned int, ret, nr_zones);
++ nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
+ if (!nz)
+ break;
+
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index b7b63330b5efd..90e0c84df2af9 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1126,7 +1126,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
+ * in case a host died before it enabled the controller. Hence, simply
+ * reset the keep alive timer when the controller is enabled.
+ */
+- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
++ if (ctrl->kato)
++ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ }
+
+ static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index dacfa7435d0b2..1ab88df3310f6 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -26,7 +26,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
+ struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
+ u16 status = NVME_SC_SUCCESS;
+ struct nvme_id_ctrl *id;
+- u32 max_hw_sectors;
++ int max_hw_sectors;
+ int page_shift;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+@@ -48,6 +48,13 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
+ max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
+ pctrl->max_hw_sectors);
+
++ /*
++ * nvmet_passthru_map_sg is limitted to using a single bio so limit
++ * the mdts based on BIO_MAX_PAGES as well
++ */
++ max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9),
++ max_hw_sectors);
++
+ page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
+
+ id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 6cd3edb2eaf65..29a51cd795609 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -361,16 +361,14 @@ static void nvmem_cell_add(struct nvmem_cell *cell)
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
+ }
+
+-static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
+- const struct nvmem_cell_info *info,
+- struct nvmem_cell *cell)
++static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
++ const struct nvmem_cell_info *info,
++ struct nvmem_cell *cell)
+ {
+ cell->nvmem = nvmem;
+ cell->offset = info->offset;
+ cell->bytes = info->bytes;
+- cell->name = kstrdup_const(info->name, GFP_KERNEL);
+- if (!cell->name)
+- return -ENOMEM;
++ cell->name = info->name;
+
+ cell->bit_offset = info->bit_offset;
+ cell->nbits = info->nbits;
+@@ -382,13 +380,30 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
+ if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+ dev_err(&nvmem->dev,
+ "cell %s unaligned to nvmem stride %d\n",
+- cell->name, nvmem->stride);
++ cell->name ?: "<unknown>", nvmem->stride);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
++static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
++ const struct nvmem_cell_info *info,
++ struct nvmem_cell *cell)
++{
++ int err;
++
++ err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
++ if (err)
++ return err;
++
++ cell->name = kstrdup_const(info->name, GFP_KERNEL);
++ if (!cell->name)
++ return -ENOMEM;
++
++ return 0;
++}
++
+ /**
+ * nvmem_add_cells() - Add cell information to an nvmem device
+ *
+@@ -835,6 +850,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
+ {
+
+ struct device_node *nvmem_np;
++ struct nvmem_device *nvmem;
+ int index = 0;
+
+ if (id)
+@@ -844,7 +860,9 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
+ if (!nvmem_np)
+ return ERR_PTR(-ENOENT);
+
+- return __nvmem_device_get(nvmem_np, device_match_of_node);
++ nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
++ of_node_put(nvmem_np);
++ return nvmem;
+ }
+ EXPORT_SYMBOL_GPL(of_nvmem_device_get);
+ #endif
+@@ -1460,7 +1478,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ if (!nvmem)
+ return -EINVAL;
+
+- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
++ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
+ if (rc)
+ return rc;
+
+@@ -1490,7 +1508,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ if (!nvmem)
+ return -EINVAL;
+
+- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
++ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 3ca7543142bf3..1a95ad40795be 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -1949,6 +1949,9 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
+ {
+ int index;
+
++ if (!opp_table->genpd_virt_devs)
++ return;
++
+ for (index = 0; index < opp_table->required_opp_count; index++) {
+ if (!opp_table->genpd_virt_devs[index])
+ continue;
+@@ -1995,6 +1998,9 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
+ if (!opp_table)
+ return ERR_PTR(-ENOMEM);
+
++ if (opp_table->genpd_virt_devs)
++ return opp_table;
++
+ /*
+ * If the genpd's OPP table isn't already initialized, parsing of the
+ * required-opps fail for dev. We should retry this after genpd's OPP
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 305bfec2424d8..29f5c616c3bc6 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -505,7 +505,8 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+ u32 reg;
+ int i;
+
+- hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
++ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
++ PCI_HEADER_TYPE_MASK;
+ if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+ dev_err(pci->dev,
+ "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index 1559f79e63b6f..2e2e2a2ff51d3 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -9,7 +9,7 @@
+ */
+
+ #include
+-#include
++#include
+ #include
+ #include
+ #include
+@@ -607,7 +607,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+-static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
++static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+ {
+ struct pci_bridge_emul *bridge = &pcie->bridge;
+
+@@ -633,8 +633,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+ bridge->data = pcie;
+ bridge->ops = &advk_pci_bridge_emul_ops;
+
+- pci_bridge_emul_init(bridge, 0);
+-
++ return pci_bridge_emul_init(bridge, 0);
+ }
+
+ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+@@ -1167,7 +1166,11 @@ static int advk_pcie_probe(struct platform_device *pdev)
+
+ advk_pcie_setup_hw(pcie);
+
+- advk_sw_pci_bridge_init(pcie);
++ ret = advk_sw_pci_bridge_init(pcie);
++ if (ret) {
++ dev_err(dev, "Failed to register emulated root PCI bridge\n");
++ return ret;
++ }
+
+ ret = advk_pcie_init_irq_domain(pcie);
+ if (ret) {
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index fc4c3a15e5707..a9df492fbffa2 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -1276,11 +1276,25 @@ static void hv_irq_unmask(struct irq_data *data)
+ exit_unlock:
+ spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
+
+- if (res) {
++ /*
++ * During hibernation, when a CPU is offlined, the kernel tries
++ * to move the interrupt to the remaining CPUs that haven't
++ * been offlined yet. In this case, the below hv_do_hypercall()
++ * always fails since the vmbus channel has been closed:
++ * refer to cpu_disable_common() -> fixup_irqs() ->
++ * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
++ *
++ * Suppress the error message for hibernation because the failure
++ * during hibernation does not matter (at this time all the devices
++ * have been frozen). Note: the correct affinity info is still updated
++ * into the irqdata data structure in migrate_one_irq() ->
++ * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
++ * resumes, hv_pci_restore_msi_state() is able to correctly restore
++ * the interrupt with the correct affinity.
++ */
++ if (res && hbus->state != hv_pcibus_removing)
+ dev_err(&hbus->hdev->device,
+ "%s() failed: %#llx", __func__, res);
+- return;
+- }
+
+ pci_msi_unmask_irq(data);
+ }
+@@ -3372,6 +3386,34 @@ static int hv_pci_suspend(struct hv_device *hdev)
+ return 0;
+ }
+
++static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
++{
++ struct msi_desc *entry;
++ struct irq_data *irq_data;
++
++ for_each_pci_msi_entry(entry, pdev) {
++ irq_data = irq_get_irq_data(entry->irq);
++ if (WARN_ON_ONCE(!irq_data))
++ return -EINVAL;
++
++ hv_compose_msi_msg(irq_data, &entry->msg);
++ }
++
++ return 0;
++}
++
++/*
++ * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
++ * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
++ * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
++ * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
++ * Table entries.
++ */
++static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
++{
++ pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
++}
++
+ static int hv_pci_resume(struct hv_device *hdev)
+ {
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+@@ -3405,6 +3447,8 @@ static int hv_pci_resume(struct hv_device *hdev)
+
+ prepopulate_bars(hbus);
+
++ hv_pci_restore_msi_state(hbus);
++
+ hbus->state = hv_pcibus_installed;
+ return 0;
+ out:
+diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
+index 3176ad3ab0e52..908475d27e0e7 100644
+--- a/drivers/pci/controller/pcie-iproc-msi.c
++++ b/drivers/pci/controller/pcie-iproc-msi.c
+@@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,
+ struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+ int target_cpu = cpumask_first(mask);
+ int curr_cpu;
++ int ret;
+
+ curr_cpu = hwirq_to_cpu(msi, data->hwirq);
+ if (curr_cpu == target_cpu)
+- return IRQ_SET_MASK_OK_DONE;
++ ret = IRQ_SET_MASK_OK_DONE;
++ else {
++ /* steer MSI to the target CPU */
++ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
++ ret = IRQ_SET_MASK_OK;
++ }
+
+- /* steer MSI to the target CPU */
+- data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
++ irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
+
+- return IRQ_SET_MASK_OK;
++ return ret;
+ }
+
+ static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index b37e08c4f9d1a..4afd4ee4f7f04 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -180,6 +180,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+ virtfn->device = iov->vf_device;
+ virtfn->is_virtfn = 1;
+ virtfn->physfn = pci_dev_get(dev);
++ virtfn->no_command_memory = 1;
+
+ if (id == 0)
+ pci_read_vf_config_common(virtfn);
+diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
+index aac9823b0c6bb..e116815fa8092 100644
+--- a/drivers/perf/thunderx2_pmu.c
++++ b/drivers/perf/thunderx2_pmu.c
+@@ -805,14 +805,17 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
+ list_for_each_entry(rentry, &list, node) {
+ if (resource_type(rentry->res) == IORESOURCE_MEM) {
+ res = *rentry->res;
++ rentry = NULL;
+ break;
+ }
+ }
++ acpi_dev_free_resource_list(&list);
+
+- if (!rentry->res)
++ if (rentry) {
++ dev_err(dev, "PMU type %d: Fail to find resource\n", type);
+ return NULL;
++ }
+
+- acpi_dev_free_resource_list(&list);
+ base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
+index edac28cd25ddc..633cf07ba6723 100644
+--- a/drivers/perf/xgene_pmu.c
++++ b/drivers/perf/xgene_pmu.c
+@@ -1453,17 +1453,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
+ }
+
+ #if defined(CONFIG_ACPI)
+-static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
+-{
+- struct resource *res = data;
+-
+- if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
+- acpi_dev_resource_memory(ares, res);
+-
+- /* Always tell the ACPI core to skip this resource */
+- return 1;
+-}
+-
+ static struct
+ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ struct acpi_device *adev, u32 type)
+@@ -1475,6 +1464,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ struct hw_pmu_info *inf;
+ void __iomem *dev_csr;
+ struct resource res;
++ struct resource_entry *rentry;
+ int enable_bit;
+ int rc;
+
+@@ -1483,11 +1473,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ return NULL;
+
+ INIT_LIST_HEAD(&resource_list);
+- rc = acpi_dev_get_resources(adev, &resource_list,
+- acpi_pmu_dev_add_resource, &res);
++ rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
++ if (rc <= 0) {
++ dev_err(dev, "PMU type %d: No resources found\n", type);
++ return NULL;
++ }
++
++ list_for_each_entry(rentry, &resource_list, node) {
++ if (resource_type(rentry->res) == IORESOURCE_MEM) {
++ res = *rentry->res;
++ rentry = NULL;
++ break;
++ }
++ }
+ acpi_dev_free_resource_list(&resource_list);
+- if (rc < 0) {
+- dev_err(dev, "PMU type %d: No resource address found\n", type);
++
++ if (rentry) {
++ dev_err(dev, "PMU type %d: No memory resource found\n", type);
+ return NULL;
+ }
+
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+index 53f3f8aec6956..3e6567355d97d 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+@@ -534,7 +534,7 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
+ val = pmap->val << __ffs(pconf->mask);
+
+ rc = regmap_update_bits(pdata->scu, pconf->reg,
+- pmap->mask, val);
++ pconf->mask, val);
+
+ if (rc < 0)
+ return rc;
+diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
+index dcf7df797af75..0ed14de0134cf 100644
+--- a/drivers/pinctrl/bcm/Kconfig
++++ b/drivers/pinctrl/bcm/Kconfig
+@@ -23,6 +23,7 @@ config PINCTRL_BCM2835
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
++ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ default ARCH_BCM2835 || ARCH_BRCMSTB
+ help
+diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
+index 5eff8c2965528..3fb2387147189 100644
+--- a/drivers/pinctrl/devicetree.c
++++ b/drivers/pinctrl/devicetree.c
+@@ -130,9 +130,8 @@ static int dt_to_map_one_config(struct pinctrl *p,
+ if (!np_pctldev || of_node_is_root(np_pctldev)) {
+ of_node_put(np_pctldev);
+ ret = driver_deferred_probe_check_state(p->dev);
+- /* keep deferring if modules are enabled unless we've timed out */
+- if (IS_ENABLED(CONFIG_MODULES) && !allow_default &&
+- (ret == -ENODEV))
++ /* keep deferring if modules are enabled */
++ if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret < 0)
+ ret = -EPROBE_DEFER;
+ return ret;
+ }
+diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
+index 8c162dd5f5a10..3e354e02f4084 100644
+--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
++++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
+@@ -15,11 +15,13 @@
+
+ #include "pinctrl-intel.h"
+
+-#define TGL_PAD_OWN 0x020
+-#define TGL_PADCFGLOCK 0x080
+-#define TGL_HOSTSW_OWN 0x0b0
+-#define TGL_GPI_IS 0x100
+-#define TGL_GPI_IE 0x120
++#define TGL_PAD_OWN 0x020
++#define TGL_LP_PADCFGLOCK 0x080
++#define TGL_H_PADCFGLOCK 0x090
++#define TGL_LP_HOSTSW_OWN 0x0b0
++#define TGL_H_HOSTSW_OWN 0x0c0
++#define TGL_GPI_IS 0x100
++#define TGL_GPI_IE 0x120
+
+ #define TGL_GPP(r, s, e, g) \
+ { \
+@@ -29,12 +31,12 @@
+ .gpio_base = (g), \
+ }
+
+-#define TGL_COMMUNITY(b, s, e, g) \
++#define TGL_COMMUNITY(b, s, e, pl, ho, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = TGL_PAD_OWN, \
+- .padcfglock_offset = TGL_PADCFGLOCK, \
+- .hostown_offset = TGL_HOSTSW_OWN, \
++ .padcfglock_offset = (pl), \
++ .hostown_offset = (ho), \
+ .is_offset = TGL_GPI_IS, \
+ .ie_offset = TGL_GPI_IE, \
+ .pin_base = (s), \
+@@ -43,6 +45,12 @@
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
++#define TGL_LP_COMMUNITY(b, s, e, g) \
++ TGL_COMMUNITY(b, s, e, TGL_LP_PADCFGLOCK, TGL_LP_HOSTSW_OWN, g)
++
++#define TGL_H_COMMUNITY(b, s, e, g) \
++ TGL_COMMUNITY(b, s, e, TGL_H_PADCFGLOCK, TGL_H_HOSTSW_OWN, g)
++
+ /* Tiger Lake-LP */
+ static const struct pinctrl_pin_desc tgllp_pins[] = {
+ /* GPP_B */
+@@ -367,10 +375,10 @@ static const struct intel_padgroup tgllp_community5_gpps[] = {
+ };
+
+ static const struct intel_community tgllp_communities[] = {
+- TGL_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
+- TGL_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
+- TGL_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
+- TGL_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
++ TGL_LP_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
++ TGL_LP_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
++ TGL_LP_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
++ TGL_LP_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
+ };
+
+ static const struct intel_pinctrl_soc_data tgllp_soc_data = {
+@@ -723,11 +731,11 @@ static const struct intel_padgroup tglh_community5_gpps[] = {
+ };
+
+ static const struct intel_community tglh_communities[] = {
+- TGL_COMMUNITY(0, 0, 78, tglh_community0_gpps),
+- TGL_COMMUNITY(1, 79, 180, tglh_community1_gpps),
+- TGL_COMMUNITY(2, 181, 217, tglh_community3_gpps),
+- TGL_COMMUNITY(3, 218, 266, tglh_community4_gpps),
+- TGL_COMMUNITY(4, 267, 290, tglh_community5_gpps),
++ TGL_H_COMMUNITY(0, 0, 78, tglh_community0_gpps),
++ TGL_H_COMMUNITY(1, 79, 180, tglh_community1_gpps),
++ TGL_H_COMMUNITY(2, 181, 217, tglh_community3_gpps),
++ TGL_H_COMMUNITY(3, 218, 266, tglh_community4_gpps),
++ TGL_H_COMMUNITY(4, 267, 290, tglh_community5_gpps),
+ };
+
+ static const struct intel_pinctrl_soc_data tglh_soc_data = {
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index 42b12ea14d6be..7edb067f5e76a 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -87,7 +87,7 @@ const struct regmap_config mcp23x08_regmap = {
+ };
+ EXPORT_SYMBOL_GPL(mcp23x08_regmap);
+
+-static const struct reg_default mcp23x16_defaults[] = {
++static const struct reg_default mcp23x17_defaults[] = {
+ {.reg = MCP_IODIR << 1, .def = 0xffff},
+ {.reg = MCP_IPOL << 1, .def = 0x0000},
+ {.reg = MCP_GPINTEN << 1, .def = 0x0000},
+@@ -98,23 +98,23 @@ static const struct reg_default mcp23x16_defaults[] = {
+ {.reg = MCP_OLAT << 1, .def = 0x0000},
+ };
+
+-static const struct regmap_range mcp23x16_volatile_range = {
++static const struct regmap_range mcp23x17_volatile_range
= { + .range_min = MCP_INTF << 1, + .range_max = MCP_GPIO << 1, + }; + +-static const struct regmap_access_table mcp23x16_volatile_table = { +- .yes_ranges = &mcp23x16_volatile_range, ++static const struct regmap_access_table mcp23x17_volatile_table = { ++ .yes_ranges = &mcp23x17_volatile_range, + .n_yes_ranges = 1, + }; + +-static const struct regmap_range mcp23x16_precious_range = { +- .range_min = MCP_GPIO << 1, ++static const struct regmap_range mcp23x17_precious_range = { ++ .range_min = MCP_INTCAP << 1, + .range_max = MCP_GPIO << 1, + }; + +-static const struct regmap_access_table mcp23x16_precious_table = { +- .yes_ranges = &mcp23x16_precious_range, ++static const struct regmap_access_table mcp23x17_precious_table = { ++ .yes_ranges = &mcp23x17_precious_range, + .n_yes_ranges = 1, + }; + +@@ -124,10 +124,10 @@ const struct regmap_config mcp23x17_regmap = { + + .reg_stride = 2, + .max_register = MCP_OLAT << 1, +- .volatile_table = &mcp23x16_volatile_table, +- .precious_table = &mcp23x16_precious_table, +- .reg_defaults = mcp23x16_defaults, +- .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults), ++ .volatile_table = &mcp23x17_volatile_table, ++ .precious_table = &mcp23x17_precious_table, ++ .reg_defaults = mcp23x17_defaults, ++ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults), + .cache_type = REGCACHE_FLAT, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + }; +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c +index efe41abc5d472..f3cd7e2967126 100644 +--- a/drivers/pinctrl/pinctrl-single.c ++++ b/drivers/pinctrl/pinctrl-single.c +@@ -1014,7 +1014,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, + if (res) + return res; + +- if (pinctrl_spec.args_count < 2) { ++ if (pinctrl_spec.args_count < 2 || pinctrl_spec.args_count > 3) { + dev_err(pcs->dev, "invalid args_count for spec: %i\n", + pinctrl_spec.args_count); + break; +@@ -1033,7 +1033,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, + } + + dev_dbg(pcs->dev, "%pOFn index: 0x%x value: 0x%x\n", +- pinctrl_spec.np, offset, pinctrl_spec.args[1]); ++ pinctrl_spec.np, offset, vals[found].val); + + pin = pcs_get_pin_by_offset(pcs, offset); + if (pin < 0) { +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c +index a2567e772cd57..1df232266f63a 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -1077,12 +1077,10 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) + * when TLMM is powered on. To allow that, enable the GPIO + * summary line to be wakeup capable at GIC. 
+ */ +- if (d->parent_data) +- irq_chip_set_wake_parent(d, on); +- +- irq_set_irq_wake(pctrl->irq, on); ++ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs)) ++ return irq_chip_set_wake_parent(d, on); + +- return 0; ++ return irq_set_irq_wake(pctrl->irq, on); + } + + static int msm_gpio_irq_reqres(struct irq_data *d) +@@ -1243,6 +1241,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) + pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres; + pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity; + pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity; ++ pctrl->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND | ++ IRQCHIP_SET_TYPE_MASKED; + + np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0); + if (np) { +diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c +index b59180bff5a3e..ef61298c30bdd 100644 +--- a/drivers/platform/chrome/cros_ec_lightbar.c ++++ b/drivers/platform/chrome/cros_ec_lightbar.c +@@ -116,6 +116,8 @@ static int get_lightbar_version(struct cros_ec_dev *ec, + + param = (struct ec_params_lightbar *)msg->data; + param->cmd = LIGHTBAR_CMD_VERSION; ++ msg->outsize = sizeof(param->cmd); ++ msg->result = sizeof(resp->version); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); + if (ret < 0) { + ret = 0; +diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c +index 3fcd27ec9ad8f..10ef1fc75c0e1 100644 +--- a/drivers/platform/chrome/cros_ec_typec.c ++++ b/drivers/platform/chrome/cros_ec_typec.c +@@ -591,7 +591,8 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num) + dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret); + + return usb_role_switch_set_role(typec->ports[port_num]->role_sw, +- !!(resp.role & PD_CTRL_RESP_ROLE_DATA)); ++ resp.role & PD_CTRL_RESP_ROLE_DATA ++ ? 
USB_ROLE_HOST : USB_ROLE_DEVICE); + } + + static int cros_typec_get_cmd_version(struct cros_typec_data *typec) +diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c +index 1506ec0a47771..04a745095c379 100644 +--- a/drivers/platform/x86/mlx-platform.c ++++ b/drivers/platform/x86/mlx-platform.c +@@ -328,15 +328,6 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = { + }, + }; + +-static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = { +- { +- I2C_BOARD_INFO("24c32", 0x51), +- }, +- { +- I2C_BOARD_INFO("24c32", 0x50), +- }, +-}; +- + static struct i2c_board_info mlxplat_mlxcpld_pwr[] = { + { + I2C_BOARD_INFO("dps460", 0x59), +@@ -770,15 +761,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), +- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0], +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), +- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1], +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + }; + +diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c +index 599a0f66a3845..a34d95ed70b20 100644 +--- a/drivers/pwm/pwm-img.c ++++ b/drivers/pwm/pwm-img.c +@@ -277,6 +277,8 @@ static int img_pwm_probe(struct platform_device *pdev) + return PTR_ERR(pwm->pwm_clk); + } + ++ platform_set_drvdata(pdev, pwm); ++ + pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); +@@ -313,7 +315,6 @@ static int img_pwm_probe(struct platform_device *pdev) + goto err_suspend; + } + +- platform_set_drvdata(pdev, pwm); + return 0; + + err_suspend: +diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c +index 9d965ffe66d1e..da9bc3d10104a 100644 +--- a/drivers/pwm/pwm-lpss.c ++++ b/drivers/pwm/pwm-lpss.c +@@ -93,10 +93,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, + * The equation is: + * base_unit = round(base_unit_range * freq / c) + */ +- base_unit_range = BIT(lpwm->info->base_unit_bits) - 1; ++ base_unit_range = BIT(lpwm->info->base_unit_bits); + freq *= base_unit_range; + + base_unit = DIV_ROUND_CLOSEST_ULL(freq, c); ++ /* base_unit must not be 0 and we also want to avoid overflowing it */ ++ base_unit = clamp_val(base_unit, 1, base_unit_range - 1); + + on_time_div = 255ULL * duty_ns; + do_div(on_time_div, period_ns); +@@ -104,8 +106,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, + + orig_ctrl = ctrl = pwm_lpss_read(pwm); + ctrl &= ~PWM_ON_TIME_DIV_MASK; +- ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); +- base_unit &= base_unit_range; ++ ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT); + ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; + ctrl |= on_time_div; + +diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c +index eb8c9cb645a6c..098e94335cb5b 100644 +--- a/drivers/pwm/pwm-rockchip.c ++++ b/drivers/pwm/pwm-rockchip.c +@@ -288,6 +288,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev) + const struct of_device_id *id; + struct rockchip_pwm_chip *pc; + struct resource *r; ++ u32 enable_conf, ctrl; + int ret, count; + + id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev); +@@ -362,7 +363,9 @@ static int rockchip_pwm_probe(struct platform_device *pdev) + } + + /* Keep the PWM clk enabled if the PWM 
appears to be up and running. */ +- if (!pwm_is_enabled(pc->chip.pwms)) ++ enable_conf = pc->data->enable_conf; ++ ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl); ++ if ((ctrl & enable_conf) != enable_conf) + clk_disable(pc->clk); + + return 0; +diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c +index a30342942e26f..94331d999d273 100644 +--- a/drivers/rapidio/devices/rio_mport_cdev.c ++++ b/drivers/rapidio/devices/rio_mport_cdev.c +@@ -871,15 +871,16 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, + rmcd_error("pin_user_pages_fast err=%ld", + pinned); + nr_pages = 0; +- } else ++ } else { + rmcd_error("pinned %ld out of %ld pages", + pinned, nr_pages); ++ /* ++ * Set nr_pages up to mean "how many pages to unpin, in ++ * the error handler: ++ */ ++ nr_pages = pinned; ++ } + ret = -EFAULT; +- /* +- * Set nr_pages up to mean "how many pages to unpin, in +- * the error handler: +- */ +- nr_pages = pinned; + goto err_pg; + } + +@@ -1679,6 +1680,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, + struct rio_dev *rdev; + struct rio_switch *rswitch = NULL; + struct rio_mport *mport; ++ struct device *dev; + size_t size; + u32 rval; + u32 swpinfo = 0; +@@ -1693,8 +1695,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, + rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, + dev_info.comptag, dev_info.destid, dev_info.hopcount); + +- if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) { ++ dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name); ++ if (dev) { + rmcd_debug(RDEV, "device %s already exists", dev_info.name); ++ put_device(dev); + return -EEXIST; + } + +diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c +index 569d9ad2c5942..6939aa5b3dc7f 100644 +--- a/drivers/ras/cec.c ++++ b/drivers/ras/cec.c +@@ -553,20 +553,20 @@ static struct notifier_block cec_nb = { + .priority = MCE_PRIO_CEC, + }; + +-static void __init cec_init(void) ++static int __init cec_init(void) + { + if (ce_arr.disabled) +- return; ++ return -ENODEV; + + ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL); + if (!ce_arr.array) { + pr_err("Error allocating CE array page!\n"); +- return; ++ return -ENOMEM; + } + + if (create_debugfs_nodes()) { + free_page((unsigned long)ce_arr.array); +- return; ++ return -ENOMEM; + } + + INIT_DELAYED_WORK(&cec_work, cec_work_fn); +@@ -575,6 +575,7 @@ static void __init cec_init(void) + mce_register_decode_chain(&cec_nb); + + pr_info("Correctable Errors collector initialized.\n"); ++ return 0; + } + late_initcall(cec_init); + +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index 7ff507ec875a8..4859cf84c0b2f 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -5256,15 +5256,20 @@ regulator_register(const struct regulator_desc *regulator_desc, + else if (regulator_desc->supply_name) + rdev->supply_name = regulator_desc->supply_name; + +- /* +- * Attempt to resolve the regulator supply, if specified, +- * but don't return an error if we fail because we will try +- * to resolve it again later as more regulators are added. 
+- */ +- if (regulator_resolve_supply(rdev)) +- rdev_dbg(rdev, "unable to resolve supply\n"); +- + ret = set_machine_constraints(rdev, constraints); ++ if (ret == -EPROBE_DEFER) { ++ /* Regulator might be in bypass mode and so needs its supply ++ * to set the constraints */ ++ /* FIXME: this currently triggers a chicken-and-egg problem ++ * when creating -SUPPLY symlink in sysfs to a regulator ++ * that is just being created */ ++ ret = regulator_resolve_supply(rdev); ++ if (!ret) ++ ret = set_machine_constraints(rdev, constraints); ++ else ++ rdev_dbg(rdev, "unable to resolve supply early: %pe\n", ++ ERR_PTR(ret)); ++ } + if (ret < 0) + goto wash; + +diff --git a/drivers/regulator/qcom_usb_vbus-regulator.c b/drivers/regulator/qcom_usb_vbus-regulator.c +index 8ba947f3585f5..457788b505720 100644 +--- a/drivers/regulator/qcom_usb_vbus-regulator.c ++++ b/drivers/regulator/qcom_usb_vbus-regulator.c +@@ -63,6 +63,7 @@ static int qcom_usb_vbus_regulator_probe(struct platform_device *pdev) + qcom_usb_vbus_rdesc.enable_mask = OTG_EN; + config.dev = dev; + config.init_data = init_data; ++ config.of_node = dev->of_node; + config.regmap = regmap; + + rdev = devm_regulator_register(dev, &qcom_usb_vbus_rdesc, &config); +diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c +index 3d3d87210ef2c..58d1d7e571d66 100644 +--- a/drivers/remoteproc/mtk_scp_ipi.c ++++ b/drivers/remoteproc/mtk_scp_ipi.c +@@ -30,10 +30,8 @@ int scp_ipi_register(struct mtk_scp *scp, + scp_ipi_handler_t handler, + void *priv) + { +- if (!scp) { +- dev_err(scp->dev, "scp device is not ready\n"); ++ if (!scp) + return -EPROBE_DEFER; +- } + + if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL)) + return -EINVAL; +diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c +index f4da42fc0eeb1..d2414cc1d90d6 100644 +--- a/drivers/remoteproc/stm32_rproc.c ++++ b/drivers/remoteproc/stm32_rproc.c +@@ -685,7 +685,7 @@ static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata, + * We couldn't get the coprocessor's state, assume + * it is not running. 
+ */ +- state = M4_STATE_OFF; ++ *state = M4_STATE_OFF; + return 0; + } + +diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c +index 83f2b8804ee98..96a17ec291401 100644 +--- a/drivers/rpmsg/mtk_rpmsg.c ++++ b/drivers/rpmsg/mtk_rpmsg.c +@@ -200,7 +200,6 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev, + struct rpmsg_device *rpdev; + struct mtk_rpmsg_device *mdev; + struct platform_device *pdev = mtk_subdev->pdev; +- int ret; + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) +@@ -219,13 +218,7 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev, + rpdev->dev.parent = &pdev->dev; + rpdev->dev.release = mtk_rpmsg_release_device; + +- ret = rpmsg_register_device(rpdev); +- if (ret) { +- kfree(mdev); +- return ret; +- } +- +- return 0; ++ return rpmsg_register_device(rpdev); + } + + static void mtk_register_device_work_function(struct work_struct *register_work) +diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c +index 4abbeea782fa4..19903de6268db 100644 +--- a/drivers/rpmsg/qcom_smd.c ++++ b/drivers/rpmsg/qcom_smd.c +@@ -1338,7 +1338,7 @@ static int qcom_smd_parse_edge(struct device *dev, + ret = of_property_read_u32(node, key, &edge->edge_id); + if (ret) { + dev_err(dev, "edge missing %s property\n", key); +- return -EINVAL; ++ goto put_node; + } + + edge->remote_pid = QCOM_SMEM_HOST_ANY; +@@ -1349,32 +1349,37 @@ static int qcom_smd_parse_edge(struct device *dev, + edge->mbox_client.knows_txdone = true; + edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0); + if (IS_ERR(edge->mbox_chan)) { +- if (PTR_ERR(edge->mbox_chan) != -ENODEV) +- return PTR_ERR(edge->mbox_chan); ++ if (PTR_ERR(edge->mbox_chan) != -ENODEV) { ++ ret = PTR_ERR(edge->mbox_chan); ++ goto put_node; ++ } + + edge->mbox_chan = NULL; + + syscon_np = of_parse_phandle(node, "qcom,ipc", 0); + if (!syscon_np) { + dev_err(dev, "no qcom,ipc node\n"); +- return -ENODEV; ++ ret = -ENODEV; ++ goto put_node; + } + + edge->ipc_regmap = syscon_node_to_regmap(syscon_np); +- if (IS_ERR(edge->ipc_regmap)) +- return PTR_ERR(edge->ipc_regmap); ++ if (IS_ERR(edge->ipc_regmap)) { ++ ret = PTR_ERR(edge->ipc_regmap); ++ goto put_node; ++ } + + key = "qcom,ipc"; + ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset); + if (ret < 0) { + dev_err(dev, "no offset in %s\n", key); +- return -EINVAL; ++ goto put_node; + } + + ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit); + if (ret < 0) { + dev_err(dev, "no bit in %s\n", key); +- return -EINVAL; ++ goto put_node; + } + } + +@@ -1385,7 +1390,8 @@ static int qcom_smd_parse_edge(struct device *dev, + irq = irq_of_parse_and_map(node, 0); + if (irq < 0) { + dev_err(dev, "required smd interrupt missing\n"); +- return -EINVAL; ++ ret = irq; ++ goto put_node; + } + + ret = devm_request_irq(dev, irq, +@@ -1393,12 +1399,18 @@ static int qcom_smd_parse_edge(struct device *dev, + node->name, edge); + if (ret) { + dev_err(dev, "failed to request smd irq\n"); +- return ret; ++ goto put_node; + } + + edge->irq = irq; + + return 0; ++ ++put_node: ++ of_node_put(node); ++ edge->of_node = NULL; ++ ++ return ret; + } + + /* +diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c +index 54c85cdd019dd..c9c3de14bc62f 100644 +--- a/drivers/rtc/rtc-ds1307.c ++++ b/drivers/rtc/rtc-ds1307.c +@@ -352,6 +352,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) + regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG, + DS1340_BIT_OSF, 0); + break; ++ case ds_1388: ++ 
regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG, ++ DS1388_BIT_OSF, 0); ++ break; + case mcp794xx: + /* + * these bits were cleared when preparing the date/time +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h +index ecfd6d152e862..6b5cf9ba03e5b 100644 +--- a/drivers/s390/net/qeth_core.h ++++ b/drivers/s390/net/qeth_core.h +@@ -680,6 +680,11 @@ struct qeth_card_blkt { + int inter_packet_jumbo; + }; + ++enum qeth_pnso_mode { ++ QETH_PNSO_NONE, ++ QETH_PNSO_BRIDGEPORT, ++}; ++ + #define QETH_BROADCAST_WITH_ECHO 0x01 + #define QETH_BROADCAST_WITHOUT_ECHO 0x02 + struct qeth_card_info { +@@ -696,6 +701,7 @@ struct qeth_card_info { + /* no bitfield, we take a pointer on these two: */ + u8 has_lp2lp_cso_v6; + u8 has_lp2lp_cso_v4; ++ enum qeth_pnso_mode pnso_mode; + enum qeth_card_types type; + enum qeth_link_types link_type; + int broadcast_capable; +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c +index 6384f7adba660..4af7b5d57b4e4 100644 +--- a/drivers/s390/net/qeth_l2_main.c ++++ b/drivers/s390/net/qeth_l2_main.c +@@ -273,6 +273,17 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, + return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); + } + ++static void qeth_l2_set_pnso_mode(struct qeth_card *card, ++ enum qeth_pnso_mode mode) ++{ ++ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); ++ WRITE_ONCE(card->info.pnso_mode, mode); ++ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); ++ ++ if (mode == QETH_PNSO_NONE) ++ drain_workqueue(card->event_wq); ++} ++ + static void qeth_l2_stop_card(struct qeth_card *card) + { + QETH_CARD_TEXT(card, 2, "stopcard"); +@@ -290,7 +301,7 @@ static void qeth_l2_stop_card(struct qeth_card *card) + qeth_qdio_clear_card(card, 0); + qeth_drain_output_queues(card); + qeth_clear_working_pool_list(card); +- flush_workqueue(card->event_wq); ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + qeth_flush_local_addrs(card); + card->info.promisc_mode = 0; + } +@@ -1109,12 +1120,6 @@ static void qeth_bridge_state_change_worker(struct work_struct *work) + NULL + }; + +- /* Role should not change by itself, but if it did, */ +- /* information from the hardware is authoritative. */ +- mutex_lock(&data->card->sbp_lock); +- data->card->options.sbp.role = entry->role; +- mutex_unlock(&data->card->sbp_lock); +- + snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange"); + snprintf(env_role, sizeof(env_role), "ROLE=%s", + (entry->role == QETH_SBP_ROLE_NONE) ? 
"none" : +@@ -1163,19 +1168,34 @@ static void qeth_bridge_state_change(struct qeth_card *card, + } + + struct qeth_addr_change_data { +- struct work_struct worker; ++ struct delayed_work dwork; + struct qeth_card *card; + struct qeth_ipacmd_addr_change ac_event; + }; + + static void qeth_addr_change_event_worker(struct work_struct *work) + { +- struct qeth_addr_change_data *data = +- container_of(work, struct qeth_addr_change_data, worker); ++ struct delayed_work *dwork = to_delayed_work(work); ++ struct qeth_addr_change_data *data; ++ struct qeth_card *card; + int i; + ++ data = container_of(dwork, struct qeth_addr_change_data, dwork); ++ card = data->card; ++ + QETH_CARD_TEXT(data->card, 4, "adrchgew"); ++ ++ if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE) ++ goto free; ++ + if (data->ac_event.lost_event_mask) { ++ /* Potential re-config in progress, try again later: */ ++ if (!mutex_trylock(&card->sbp_lock)) { ++ queue_delayed_work(card->event_wq, dwork, ++ msecs_to_jiffies(100)); ++ return; ++ } ++ + dev_info(&data->card->gdev->dev, + "Address change notification stopped on %s (%s)\n", + data->card->dev->name, +@@ -1184,8 +1204,9 @@ static void qeth_addr_change_event_worker(struct work_struct *work) + : (data->ac_event.lost_event_mask == 0x02) + ? "Bridge port state change" + : "Unknown reason"); +- mutex_lock(&data->card->sbp_lock); ++ + data->card->options.sbp.hostnotification = 0; ++ card->info.pnso_mode = QETH_PNSO_NONE; + mutex_unlock(&data->card->sbp_lock); + qeth_bridge_emit_host_event(data->card, anev_abort, + 0, NULL, NULL); +@@ -1199,6 +1220,8 @@ static void qeth_addr_change_event_worker(struct work_struct *work) + &entry->token, + &entry->addr_lnid); + } ++ ++free: + kfree(data); + } + +@@ -1210,6 +1233,9 @@ static void qeth_addr_change_event(struct qeth_card *card, + struct qeth_addr_change_data *data; + int extrasize; + ++ if (card->info.pnso_mode == QETH_PNSO_NONE) ++ return; ++ + QETH_CARD_TEXT(card, 4, "adrchgev"); + if (cmd->hdr.return_code != 0x0000) { + if (cmd->hdr.return_code == 0x0010) { +@@ -1229,11 +1255,11 @@ static void qeth_addr_change_event(struct qeth_card *card, + QETH_CARD_TEXT(card, 2, "ACNalloc"); + return; + } +- INIT_WORK(&data->worker, qeth_addr_change_event_worker); ++ INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker); + data->card = card; + memcpy(&data->ac_event, hostevs, + sizeof(struct qeth_ipacmd_addr_change) + extrasize); +- queue_work(card->event_wq, &data->worker); ++ queue_delayed_work(card->event_wq, &data->dwork, 0); + } + + /* SETBRIDGEPORT support; sending commands */ +@@ -1554,9 +1580,14 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable) + + if (enable) { + qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL); ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT); + rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card); +- } else ++ if (rc) ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); ++ } else { + rc = qeth_l2_pnso(card, 0, NULL, NULL); ++ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); ++ } + return rc; + } + +diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c +index 86bcae992f725..4695d25e54f24 100644 +--- a/drivers/s390/net/qeth_l2_sys.c ++++ b/drivers/s390/net/qeth_l2_sys.c +@@ -157,6 +157,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev, + rc = -EBUSY; + else if (qeth_card_hw_is_reachable(card)) { + rc = qeth_bridgeport_an_set(card, enable); ++ /* sbp_lock ensures ordering vs notifications-stopped events */ + if (!rc) + 
card->options.sbp.hostnotification = enable; + } else +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c +index 5c3513a4b450e..202ba925c4940 100644 +--- a/drivers/scsi/be2iscsi/be_main.c ++++ b/drivers/scsi/be2iscsi/be_main.c +@@ -3020,6 +3020,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba, + goto create_eq_error; + } + ++ mem->dma = paddr; + mem->va = eq_vaddress; + ret = be_fill_queue(eq, phba->params.num_eq_entries, + sizeof(struct be_eq_entry), eq_vaddress); +@@ -3029,7 +3030,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba, + goto create_eq_error; + } + +- mem->dma = paddr; + ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, + BEISCSI_EQ_DELAY_DEF); + if (ret) { +@@ -3086,6 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba, + goto create_cq_error; + } + ++ mem->dma = paddr; + ret = be_fill_queue(cq, phba->params.num_cq_entries, + sizeof(struct sol_cqe), cq_vaddress); + if (ret) { +@@ -3095,7 +3096,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba, + goto create_cq_error; + } + +- mem->dma = paddr; + ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, + false, 0); + if (ret) { +diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c +index bc5d84f87d8fc..440ef32be048f 100644 +--- a/drivers/scsi/bfa/bfad.c ++++ b/drivers/scsi/bfa/bfad.c +@@ -749,6 +749,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) + + if (bfad->pci_bar0_kva == NULL) { + printk(KERN_ERR "Fail to map bar0\n"); ++ rc = -ENODEV; + goto out_release_region; + } + +diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c +index 7fa20609d5e7f..e43c5413ce29b 100644 +--- a/drivers/scsi/csiostor/csio_hw.c ++++ b/drivers/scsi/csiostor/csio_hw.c +@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info, + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); +- ret = EINVAL; ++ ret = -EINVAL; + goto bye; + } + +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c +index ea7c8930592dc..70daa0605082d 100644 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c +@@ -4928,6 +4928,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) + if (IS_ERR(vhost->work_thread)) { + dev_err(dev, "Couldn't create kernel thread: %ld\n", + PTR_ERR(vhost->work_thread)); ++ rc = PTR_ERR(vhost->work_thread); + goto free_host_mem; + } + +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 8062bd99add85..e86682dc34eca 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -1809,18 +1809,22 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) + /* TMs are on msix_index == 0 */ + if (reply_q->msix_index == 0) + continue; ++ synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); + if (reply_q->irq_poll_scheduled) { + /* Calling irq_poll_disable will wait for any pending + * callbacks to have completed. 
+ */ + irq_poll_disable(&reply_q->irqpoll); + irq_poll_enable(&reply_q->irqpoll); +- reply_q->irq_poll_scheduled = false; +- reply_q->irq_line_enable = true; +- enable_irq(reply_q->os_irq); +- continue; ++ /* check how the scheduled poll has ended, ++ * clean up only if necessary ++ */ ++ if (reply_q->irq_poll_scheduled) { ++ reply_q->irq_poll_scheduled = false; ++ reply_q->irq_line_enable = true; ++ enable_irq(reply_q->os_irq); ++ } + } +- synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); + } + } + +diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c +index 8906aceda4c43..0354898d7cac1 100644 +--- a/drivers/scsi/mvumi.c ++++ b/drivers/scsi/mvumi.c +@@ -2425,6 +2425,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba) + if (IS_ERR(mhba->dm_thread)) { + dev_err(&mhba->pdev->dev, + "failed to create device scan thread\n"); ++ ret = PTR_ERR(mhba->dm_thread); + mutex_unlock(&mhba->sas_discovery_mutex); + goto fail_create_thread; + } +diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c +index 5ca424df355c1..bc30e3e039dd2 100644 +--- a/drivers/scsi/qedf/qedf_main.c ++++ b/drivers/scsi/qedf/qedf_main.c +@@ -726,7 +726,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd) + rdata = fcport->rdata; + if (!rdata || !kref_get_unless_zero(&rdata->kref)) { + QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd); +- rc = 1; ++ rc = SUCCESS; + goto out; + } + +diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c +index 6ed74583b1b9b..f158fde0a43c1 100644 +--- a/drivers/scsi/qedi/qedi_fw.c ++++ b/drivers/scsi/qedi/qedi_fw.c +@@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + ++ spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); +@@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi, + cmd->task_id, qedi_conn->iscsi_conn_id, + &cmd->io_cmd); + } ++ spin_unlock(&qedi_conn->list_lock); + + cmd->state = RESPONSE_RECEIVED; + qedi_clear_task_idx(qedi, cmd->task_id); +@@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi, + "Freeing tid=0x%x for cid=0x%x\n", + cmd->task_id, qedi_conn->iscsi_conn_id); + ++ spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); +@@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi, + cmd->task_id, qedi_conn->iscsi_conn_id, + &cmd->io_cmd); + } ++ spin_unlock(&qedi_conn->list_lock); + + cmd->state = RESPONSE_RECEIVED; + qedi_clear_task_idx(qedi, cmd->task_id); +@@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi, + + tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr; + ++ spin_lock(&qedi_conn->list_lock); + if (likely(qedi_cmd->io_cmd_in_list)) { + qedi_cmd->io_cmd_in_list = false; + list_del_init(&qedi_cmd->io_cmd); + qedi_conn->active_cmd_count--; + } ++ spin_unlock(&qedi_conn->list_lock); + + if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == + ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) || +@@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi, + ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK; + qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len; + ++ spin_lock(&qedi_conn->list_lock); + if (likely(cmd->io_cmd_in_list)) { + cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } 
++ spin_unlock(&qedi_conn->list_lock); + + memset(task_ctx, '\0', sizeof(*task_ctx)); + +@@ -816,8 +824,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, + qedi_clear_task_idx(qedi_conn->qedi, rtid); + + spin_lock(&qedi_conn->list_lock); +- list_del_init(&dbg_cmd->io_cmd); +- qedi_conn->active_cmd_count--; ++ if (likely(dbg_cmd->io_cmd_in_list)) { ++ dbg_cmd->io_cmd_in_list = false; ++ list_del_init(&dbg_cmd->io_cmd); ++ qedi_conn->active_cmd_count--; ++ } + spin_unlock(&qedi_conn->list_lock); + qedi_cmd->state = CLEANUP_RECV; + wake_up_interruptible(&qedi_conn->wait_queue); +@@ -1235,6 +1246,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, + qedi_conn->cmd_cleanup_req++; + qedi_iscsi_cleanup_task(ctask, true); + ++ cmd->io_cmd_in_list = false; + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + QEDI_WARN(&qedi->dbg_ctx, +@@ -1446,8 +1458,11 @@ ldel_exit: + spin_unlock_bh(&qedi_conn->tmf_work_lock); + + spin_lock(&qedi_conn->list_lock); +- list_del_init(&cmd->io_cmd); +- qedi_conn->active_cmd_count--; ++ if (likely(cmd->io_cmd_in_list)) { ++ cmd->io_cmd_in_list = false; ++ list_del_init(&cmd->io_cmd); ++ qedi_conn->active_cmd_count--; ++ } + spin_unlock(&qedi_conn->list_lock); + + clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c +index c14ac7882afac..10b9a986a41dc 100644 +--- a/drivers/scsi/qedi/qedi_iscsi.c ++++ b/drivers/scsi/qedi/qedi_iscsi.c +@@ -975,11 +975,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn) + { + struct qedi_cmd *cmd, *cmd_tmp; + ++ spin_lock(&qedi_conn->list_lock); + list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list, + io_cmd) { + list_del_init(&cmd->io_cmd); + qedi_conn->active_cmd_count--; + } ++ spin_unlock(&qedi_conn->list_lock); + } + + static void qedi_ep_disconnect(struct iscsi_endpoint *ep) +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c +index 6f038ae5efcaf..dfe24b505b402 100644 +--- a/drivers/scsi/qedi/qedi_main.c ++++ b/drivers/scsi/qedi/qedi_main.c +@@ -1127,6 +1127,15 @@ static void qedi_schedule_recovery_handler(void *dev) + schedule_delayed_work(&qedi->recovery_work, 0); + } + ++static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session) ++{ ++ struct iscsi_session *session = cls_session->dd_data; ++ struct iscsi_conn *conn = session->leadconn; ++ struct qedi_conn *qedi_conn = conn->dd_data; ++ ++ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn); ++} ++ + static void qedi_link_update(void *dev, struct qed_link_output *link) + { + struct qedi_ctx *qedi = (struct qedi_ctx *)dev; +@@ -1138,6 +1147,7 @@ static void qedi_link_update(void *dev, struct qed_link_output *link) + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Link Down event.\n"); + atomic_set(&qedi->link_state, QEDI_LINK_DOWN); ++ iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery); + } + } + +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c +index 0bd04a62af836..8d4b651e14422 100644 +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -63,6 +63,16 @@ void qla2x00_sp_free(srb_t *sp) + qla2x00_rel_sp(sp); + } + ++void qla2xxx_rel_done_warning(srb_t *sp, int res) ++{ ++ WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp); ++} ++ ++void qla2xxx_rel_free_warning(srb_t *sp) ++{ ++ WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp); ++} ++ + /* Asynchronous 
Login/Logout Routines -------------------------------------- */ + + unsigned long +diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h +index 861dc522723ce..2aa6f81f87c43 100644 +--- a/drivers/scsi/qla2xxx/qla_inline.h ++++ b/drivers/scsi/qla2xxx/qla_inline.h +@@ -207,10 +207,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair, + return sp; + } + ++void qla2xxx_rel_done_warning(srb_t *sp, int res); ++void qla2xxx_rel_free_warning(srb_t *sp); ++ + static inline void + qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp) + { + sp->qpair = NULL; ++ sp->done = qla2xxx_rel_done_warning; ++ sp->free = qla2xxx_rel_free_warning; + mempool_free(sp, qpair->srb_mempool); + QLA_QPAIR_MARK_NOT_BUSY(qpair); + } +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c +index 226f1428d3e52..78ad9827bbb98 100644 +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -4958,7 +4958,7 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) + "Done %s.\n", __func__); + } + +- dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE, ++ dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, + els_cmd_map, els_cmd_map_dma); + + return rval; +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c +index 90bbc61f361b9..0ded9a778bb0d 100644 +--- a/drivers/scsi/qla2xxx/qla_nvme.c ++++ b/drivers/scsi/qla2xxx/qla_nvme.c +@@ -683,7 +683,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha) + struct nvme_fc_port_template *tmpl; + struct qla_hw_data *ha; + struct nvme_fc_port_info pinfo; +- int ret = EINVAL; ++ int ret = -EINVAL; + + if (!IS_ENABLED(CONFIG_NVME_FC)) + return ret; +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c +index 2d445bdb21290..2a88e7e79bd50 100644 +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -5668,7 +5668,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha, + /* found existing exchange */ + qpair->retry_term_cnt++; + if (qpair->retry_term_cnt >= 5) { +- rc = EIO; ++ rc = -EIO; + qpair->retry_term_cnt = 0; + ql_log(ql_log_warn, vha, 0xffff, + "Unable to send ABTS Respond. 
Dumping firmware.\n"); +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index 676778cbc5509..4775baac43c29 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -1254,7 +1254,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) + le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); + exit_host_stats: + if (ql_iscsi_stats) +- dma_free_coherent(&ha->pdev->dev, host_stats_size, ++ dma_free_coherent(&ha->pdev->dev, stats_size, + ql_iscsi_stats, iscsi_stats_dma); + + ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", +diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h +index 1129fe7a27edd..ee069a8b442a7 100644 +--- a/drivers/scsi/smartpqi/smartpqi.h ++++ b/drivers/scsi/smartpqi/smartpqi.h +@@ -359,7 +359,7 @@ struct pqi_event_response { + struct pqi_iu_header header; + u8 event_type; + u8 reserved2 : 7; +- u8 request_acknowlege : 1; ++ u8 request_acknowledge : 1; + __le16 event_id; + __le32 additional_event_id; + union { +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c +index ca1e6cf6a38ef..714a3d38fc431 100644 +--- a/drivers/scsi/smartpqi/smartpqi_init.c ++++ b/drivers/scsi/smartpqi/smartpqi_init.c +@@ -542,8 +542,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, + put_unaligned_be16(cdb_length, &cdb[7]); + break; + default: +- dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", +- cmd); ++ dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); + break; + } + +@@ -2462,7 +2461,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, + offload_to_mirror = + (offload_to_mirror >= layout_map_count - 1) ? + 0 : offload_to_mirror + 1; +- WARN_ON(offload_to_mirror >= layout_map_count); + device->offload_to_mirror = offload_to_mirror; + /* + * Avoid direct use of device->offload_to_mirror within this +@@ -2915,10 +2913,14 @@ static int pqi_interpret_task_management_response( + return rc; + } + +-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, +- struct pqi_queue_group *queue_group) ++static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info) ++{ ++ pqi_take_ctrl_offline(ctrl_info); ++} ++ ++static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) + { +- unsigned int num_responses; ++ int num_responses; + pqi_index_t oq_pi; + pqi_index_t oq_ci; + struct pqi_io_request *io_request; +@@ -2930,6 +2932,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, + + while (1) { + oq_pi = readl(queue_group->oq_pi); ++ if (oq_pi >= ctrl_info->num_elements_per_oq) { ++ pqi_invalid_response(ctrl_info); ++ dev_err(&ctrl_info->pci_dev->dev, ++ "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", ++ oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); ++ return -1; ++ } + if (oq_pi == oq_ci) + break; + +@@ -2938,10 +2947,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, + (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); + + request_id = get_unaligned_le16(&response->request_id); +- WARN_ON(request_id >= ctrl_info->max_io_slots); ++ if (request_id >= ctrl_info->max_io_slots) { ++ pqi_invalid_response(ctrl_info); ++ dev_err(&ctrl_info->pci_dev->dev, ++ "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", ++ request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); ++ return -1; ++ } + + io_request = 
&ctrl_info->io_request_pool[request_id]; +- WARN_ON(atomic_read(&io_request->refcount) == 0); ++ if (atomic_read(&io_request->refcount) == 0) { ++ pqi_invalid_response(ctrl_info); ++ dev_err(&ctrl_info->pci_dev->dev, ++ "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", ++ request_id, oq_pi, oq_ci); ++ return -1; ++ } + + switch (response->header.iu_type) { + case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: +@@ -2971,24 +2992,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, + io_request->error_info = ctrl_info->error_buffer + + (get_unaligned_le16(&response->error_index) * + PQI_ERROR_BUFFER_ELEMENT_LENGTH); +- pqi_process_io_error(response->header.iu_type, +- io_request); ++ pqi_process_io_error(response->header.iu_type, io_request); + break; + default: ++ pqi_invalid_response(ctrl_info); + dev_err(&ctrl_info->pci_dev->dev, +- "unexpected IU type: 0x%x\n", +- response->header.iu_type); +- break; ++ "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", ++ response->header.iu_type, oq_pi, oq_ci); ++ return -1; + } + +- io_request->io_complete_callback(io_request, +- io_request->context); ++ io_request->io_complete_callback(io_request, io_request->context); + + /* + * Note that the I/O request structure CANNOT BE TOUCHED after + * returning from the I/O completion callback! + */ +- + oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; + } + +@@ -3300,9 +3319,9 @@ static void pqi_ofa_capture_event_payload(struct pqi_event *event, + } + } + +-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) ++static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) + { +- unsigned int num_events; ++ int num_events; + pqi_index_t oq_pi; + pqi_index_t oq_ci; + struct pqi_event_queue *event_queue; +@@ -3316,26 +3335,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) + + while (1) { + oq_pi = readl(event_queue->oq_pi); ++ if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { ++ pqi_invalid_response(ctrl_info); ++ dev_err(&ctrl_info->pci_dev->dev, ++ "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", ++ oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); ++ return -1; ++ } ++ + if (oq_pi == oq_ci) + break; + + num_events++; +- response = event_queue->oq_element_array + +- (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); ++ response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); + + event_index = + pqi_event_type_to_event_index(response->event_type); + +- if (event_index >= 0) { +- if (response->request_acknowlege) { +- event = &ctrl_info->events[event_index]; +- event->pending = true; +- event->event_type = response->event_type; +- event->event_id = response->event_id; +- event->additional_event_id = +- response->additional_event_id; ++ if (event_index >= 0 && response->request_acknowledge) { ++ event = &ctrl_info->events[event_index]; ++ event->pending = true; ++ event->event_type = response->event_type; ++ event->event_id = response->event_id; ++ event->additional_event_id = response->additional_event_id; ++ if (event->event_type == PQI_EVENT_TYPE_OFA) + pqi_ofa_capture_event_payload(event, response); +- } + } + + oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; +@@ -3450,7 +3474,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data) + { + struct pqi_ctrl_info *ctrl_info; + struct pqi_queue_group *queue_group; +- unsigned int num_responses_handled; ++ int num_io_responses_handled; ++ int 
num_events_handled; + + queue_group = data; + ctrl_info = queue_group->ctrl_info; +@@ -3458,17 +3483,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data) + if (!pqi_is_valid_irq(ctrl_info)) + return IRQ_NONE; + +- num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); ++ num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); ++ if (num_io_responses_handled < 0) ++ goto out; + +- if (irq == ctrl_info->event_irq) +- num_responses_handled += pqi_process_event_intr(ctrl_info); ++ if (irq == ctrl_info->event_irq) { ++ num_events_handled = pqi_process_event_intr(ctrl_info); ++ if (num_events_handled < 0) ++ goto out; ++ } else { ++ num_events_handled = 0; ++ } + +- if (num_responses_handled) ++ if (num_io_responses_handled + num_events_handled > 0) + atomic_inc(&ctrl_info->num_interrupts); + + pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); + pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); + ++out: + return IRQ_HANDLED; + } + +diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c +index 1755dd6b04aec..6b661135c03b5 100644 +--- a/drivers/scsi/ufs/ufs-mediatek.c ++++ b/drivers/scsi/ufs/ufs-mediatek.c +@@ -129,7 +129,10 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba) + __func__, err); + } else if (IS_ERR(host->mphy)) { + err = PTR_ERR(host->mphy); +- dev_info(dev, "%s: PHY get failed %d\n", __func__, err); ++ if (err != -ENODEV) { ++ dev_info(dev, "%s: PHY get failed %d\n", __func__, ++ err); ++ } + } + + if (err) +@@ -669,13 +672,7 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) + + static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) + { +- struct ufs_dev_info *dev_info = &hba->dev_info; +- u16 mid = dev_info->wmanufacturerid; +- + ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); +- +- if (mid == UFS_VENDOR_SAMSUNG) +- hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; + } + + /** +diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c +index d0d75527830e9..823eccfdd00af 100644 +--- a/drivers/scsi/ufs/ufs-qcom.c ++++ b/drivers/scsi/ufs/ufs-qcom.c +@@ -1614,9 +1614,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) + */ + } + mask <<= offset; +- +- pm_runtime_get_sync(host->hba->dev); +- ufshcd_hold(host->hba, false); + ufshcd_rmwl(host->hba, TEST_BUS_SEL, + (u32)host->testbus.select_major << 19, + REG_UFS_CFG1); +@@ -1629,8 +1626,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) + * committed before returning. 
+ */ + mb(); +- ufshcd_release(host->hba); +- pm_runtime_put_sync(host->hba->dev); + + return 0; + } +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 1d157ff58d817..316b861305eae 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -474,6 +474,9 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) + + prdt_length = le16_to_cpu( + lrbp->utr_descriptor_ptr->prd_table_length); ++ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) ++ prdt_length /= sizeof(struct ufshcd_sg_entry); ++ + dev_err(hba->dev, + "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", + tag, prdt_length, +diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c +index ae1e248a8fb8a..1d2bc181da050 100644 +--- a/drivers/slimbus/core.c ++++ b/drivers/slimbus/core.c +@@ -301,8 +301,6 @@ int slim_unregister_controller(struct slim_controller *ctrl) + { + /* Remove all clients */ + device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device); +- /* Enter Clock Pause */ +- slim_ctrl_clk_pause(ctrl, false, 0); + ida_simple_remove(&ctrl_ida, ctrl->id); + + return 0; +@@ -326,8 +324,8 @@ void slim_report_absent(struct slim_device *sbdev) + mutex_lock(&ctrl->lock); + sbdev->is_laddr_valid = false; + mutex_unlock(&ctrl->lock); +- +- ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr); ++ if (!ctrl->get_laddr) ++ ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr); + slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN); + } + EXPORT_SYMBOL_GPL(slim_report_absent); +diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c +index 743ee7b4e63f2..218aefc3531cd 100644 +--- a/drivers/slimbus/qcom-ngd-ctrl.c ++++ b/drivers/slimbus/qcom-ngd-ctrl.c +@@ -1277,9 +1277,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl, + { + struct qcom_slim_ngd_qmi *qmi = + container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl); ++ struct qcom_slim_ngd_ctrl *ctrl = ++ container_of(qmi, struct qcom_slim_ngd_ctrl, qmi); + + qmi->svc_info.sq_node = 0; + qmi->svc_info.sq_port = 0; ++ ++ qcom_slim_ngd_enable(ctrl, false); + } + + static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = { +diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c +index f4fb527d83018..c5dd026fe889f 100644 +--- a/drivers/soc/fsl/qbman/bman.c ++++ b/drivers/soc/fsl/qbman/bman.c +@@ -660,7 +660,7 @@ int bm_shutdown_pool(u32 bpid) + } + done: + put_affine_portal(); +- return 0; ++ return err; + } + + struct gen_pool *bm_bpalloc; +diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c +index dc644cfb6419e..c4609cd562ac4 100644 +--- a/drivers/soc/mediatek/mtk-cmdq-helper.c ++++ b/drivers/soc/mediatek/mtk-cmdq-helper.c +@@ -223,15 +223,16 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, + } + EXPORT_SYMBOL(cmdq_pkt_write_mask); + +-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event) ++int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear) + { + struct cmdq_instruction inst = { {0} }; ++ u32 clear_option = clear ? 
CMDQ_WFE_UPDATE : 0; + + if (event >= CMDQ_MAX_EVENT) + return -EINVAL; + + inst.op = CMDQ_CODE_WFE; +- inst.value = CMDQ_WFE_OPTION; ++ inst.value = CMDQ_WFE_OPTION | clear_option; + inst.event = event; + + return cmdq_pkt_append_command(pkt, inst); +diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c +index 1f35b097c6356..7abfc8c4fdc72 100644 +--- a/drivers/soc/qcom/apr.c ++++ b/drivers/soc/qcom/apr.c +@@ -328,7 +328,7 @@ static int of_apr_add_pd_lookups(struct device *dev) + + pds = pdr_add_lookup(apr->pdr, service_name, service_path); + if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) { +- dev_err(dev, "pdr add lookup failed: %d\n", ret); ++ dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds)); + return PTR_ERR(pds); + } + } +diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h +index 15b5002e4127b..ab9ae8cdfa54c 100644 +--- a/drivers/soc/qcom/pdr_internal.h ++++ b/drivers/soc/qcom/pdr_internal.h +@@ -185,7 +185,7 @@ struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { + .data_type = QMI_STRUCT, + .elem_len = SERVREG_DOMAIN_LIST_LENGTH, + .elem_size = sizeof(struct servreg_location_entry), +- .array_type = NO_ARRAY, ++ .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list), +diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c +index 31ff49fcd078b..c556623dae024 100644 +--- a/drivers/soc/xilinx/zynqmp_power.c ++++ b/drivers/soc/xilinx/zynqmp_power.c +@@ -205,7 +205,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev) + rx_chan = mbox_request_channel_byname(client, "rx"); + if (IS_ERR(rx_chan)) { + dev_err(&pdev->dev, "Failed to request rx channel\n"); +- return IS_ERR(rx_chan); ++ return PTR_ERR(rx_chan); + } + } else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) { + irq = platform_get_irq(pdev, 0); +diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c +index 24eafe0aa1c3e..1330ffc475960 100644 +--- a/drivers/soundwire/cadence_master.c ++++ b/drivers/soundwire/cadence_master.c +@@ -791,7 +791,16 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id) + CDNS_MCP_INT_SLAVE_MASK, 0); + + int_status &= ~CDNS_MCP_INT_SLAVE_MASK; +- schedule_work(&cdns->work); ++ ++ /* ++ * Deal with possible race condition between interrupt ++ * handling and disabling interrupts on suspend. ++ * ++ * If the master is in the process of disabling ++ * interrupts, don't schedule a workqueue ++ */ ++ if (cdns->interrupt_enabled) ++ schedule_work(&cdns->work); + } + + cdns_writel(cdns, CDNS_MCP_INTSTAT, int_status); +@@ -924,6 +933,19 @@ update_masks: + slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1); + cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave_state); + } ++ cdns->interrupt_enabled = state; ++ ++ /* ++ * Complete any on-going status updates before updating masks, ++ * and cancel queued status updates. ++ * ++ * There could be a race with a new interrupt thrown before ++ * the 3 mask updates below are complete, so in the interrupt ++ * we use the 'interrupt_enabled' status to prevent new work ++ * from being queued. 
++ */ ++ if (!state) ++ cancel_work_sync(&cdns->work); + + cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0); + cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1); +diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h +index 7638858397df9..4d1aab5b5ec2d 100644 +--- a/drivers/soundwire/cadence_master.h ++++ b/drivers/soundwire/cadence_master.h +@@ -84,6 +84,8 @@ struct sdw_cdns_stream_config { + * @bus: Bus handle + * @stream_type: Stream type + * @link_id: Master link id ++ * @hw_params: hw_params to be applied in .prepare step ++ * @suspended: status set when suspended, to be used in .prepare + */ + struct sdw_cdns_dma_data { + char *name; +@@ -92,6 +94,8 @@ struct sdw_cdns_dma_data { + struct sdw_bus *bus; + enum sdw_stream_type stream_type; + int link_id; ++ struct snd_pcm_hw_params *hw_params; ++ bool suspended; + }; + + /** +@@ -129,6 +133,7 @@ struct sdw_cdns { + + bool link_up; + unsigned int msg_count; ++ bool interrupt_enabled; + + struct work_struct work; + +diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c +index a283670659a92..50b9bad8fba7f 100644 +--- a/drivers/soundwire/intel.c ++++ b/drivers/soundwire/intel.c +@@ -856,6 +856,10 @@ static int intel_hw_params(struct snd_pcm_substream *substream, + intel_pdi_alh_configure(sdw, pdi); + sdw_cdns_config_stream(cdns, ch, dir, pdi); + ++ /* store pdi and hw_params, may be needed in prepare step */ ++ dma->suspended = false; ++ dma->pdi = pdi; ++ dma->hw_params = params; + + /* Inform DSP about PDI stream number */ + ret = intel_params_stream(sdw, substream, dai, params, +@@ -899,7 +903,11 @@ error: + static int intel_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) + { ++ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); ++ struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_cdns_dma_data *dma; ++ int ch, dir; ++ int ret; + + dma = snd_soc_dai_get_dma_data(dai, substream); + if (!dma) { +@@ -908,7 +916,41 @@ static int intel_prepare(struct snd_pcm_substream *substream, + return -EIO; + } + +- return sdw_prepare_stream(dma->stream); ++ if (dma->suspended) { ++ dma->suspended = false; ++ ++ /* ++ * .prepare() is called after system resume, where we ++ * need to reinitialize the SHIM/ALH/Cadence IP. 
++ * .prepare() is also called to deal with underflows, ++ * but in those cases we cannot touch ALH/SHIM ++ * registers ++ */ ++ ++ /* configure stream */ ++ ch = params_channels(dma->hw_params); ++ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ++ dir = SDW_DATA_DIR_RX; ++ else ++ dir = SDW_DATA_DIR_TX; ++ ++ intel_pdi_shim_configure(sdw, dma->pdi); ++ intel_pdi_alh_configure(sdw, dma->pdi); ++ sdw_cdns_config_stream(cdns, ch, dir, dma->pdi); ++ ++ /* Inform DSP about PDI stream number */ ++ ret = intel_params_stream(sdw, substream, dai, ++ dma->hw_params, ++ sdw->instance, ++ dma->pdi->intel_alh_id); ++ if (ret) ++ goto err; ++ } ++ ++ ret = sdw_prepare_stream(dma->stream); ++ ++err: ++ return ret; + } + + static int intel_trigger(struct snd_pcm_substream *substream, int cmd, +@@ -979,6 +1021,9 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) + return ret; + } + ++ dma->hw_params = NULL; ++ dma->pdi = NULL; ++ + return 0; + } + +@@ -988,6 +1033,29 @@ static void intel_shutdown(struct snd_pcm_substream *substream, + + } + ++static int intel_component_dais_suspend(struct snd_soc_component *component) ++{ ++ struct sdw_cdns_dma_data *dma; ++ struct snd_soc_dai *dai; ++ ++ for_each_component_dais(component, dai) { ++ /* ++ * we don't have a .suspend dai_ops, and we don't have access ++ * to the substream, so let's mark both capture and playback ++ * DMA contexts as suspended ++ */ ++ dma = dai->playback_dma_data; ++ if (dma) ++ dma->suspended = true; ++ ++ dma = dai->capture_dma_data; ++ if (dma) ++ dma->suspended = true; ++ } ++ ++ return 0; ++} ++ + static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai, + void *stream, int direction) + { +@@ -1011,7 +1079,7 @@ static void *intel_get_sdw_stream(struct snd_soc_dai *dai, + dma = dai->capture_dma_data; + + if (!dma) +- return NULL; ++ return ERR_PTR(-EINVAL); + + return dma->stream; + } +@@ -1040,6 +1108,7 @@ static const struct snd_soc_dai_ops intel_pdm_dai_ops = { + + static const struct snd_soc_component_driver dai_component = { + .name = "soundwire", ++ .suspend = intel_component_dais_suspend + }; + + static int intel_create_dai(struct sdw_cdns *cdns, +diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c +index 6e36deb505b1e..610957f82b39c 100644 +--- a/drivers/soundwire/stream.c ++++ b/drivers/soundwire/stream.c +@@ -1913,7 +1913,7 @@ void sdw_shutdown_stream(void *sdw_substream) + + sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream); + +- if (!sdw_stream) { ++ if (IS_ERR(sdw_stream)) { + dev_err(rtd->dev, "no stream found for DAI %s", dai->name); + return; + } +diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c +index 2ea73809ca345..271839a8add0e 100644 +--- a/drivers/spi/spi-dw-pci.c ++++ b/drivers/spi/spi-dw-pci.c +@@ -127,18 +127,16 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (desc->setup) { + ret = desc->setup(dws); + if (ret) +- return ret; ++ goto err_free_irq_vectors; + } + } else { +- pci_free_irq_vectors(pdev); +- return -ENODEV; ++ ret = -ENODEV; ++ goto err_free_irq_vectors; + } + + ret = dw_spi_add_host(&pdev->dev, dws); +- if (ret) { +- pci_free_irq_vectors(pdev); +- return ret; +- } ++ if (ret) ++ goto err_free_irq_vectors; + + /* PCI hook and SPI hook use the same drv data */ + pci_set_drvdata(pdev, dws); +@@ -152,6 +150,10 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + pm_runtime_allow(&pdev->dev); + + return 0; ++ ++err_free_irq_vectors: ++ 
pci_free_irq_vectors(pdev); ++ return ret; + } + + static void spi_pci_remove(struct pci_dev *pdev) +diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c +index 37a3e0f8e7526..a702e9d7d68c0 100644 +--- a/drivers/spi/spi-fsi.c ++++ b/drivers/spi/spi-fsi.c +@@ -24,11 +24,16 @@ + + #define SPI_FSI_BASE 0x70000 + #define SPI_FSI_INIT_TIMEOUT_MS 1000 +-#define SPI_FSI_MAX_TRANSFER_SIZE 2048 ++#define SPI_FSI_MAX_XFR_SIZE 2048 ++#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED 32 + + #define SPI_FSI_ERROR 0x0 + #define SPI_FSI_COUNTER_CFG 0x1 + #define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32) ++#define SPI_FSI_COUNTER_CFG_N2_RX BIT_ULL(8) ++#define SPI_FSI_COUNTER_CFG_N2_TX BIT_ULL(9) ++#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10) ++#define SPI_FSI_COUNTER_CFG_N2_RELOAD BIT_ULL(11) + #define SPI_FSI_CFG1 0x2 + #define SPI_FSI_CLOCK_CFG 0x3 + #define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32) +@@ -61,7 +66,7 @@ + #define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62) + #define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63) + #define SPI_FSI_STATUS_ANY_ERROR \ +- (SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \ ++ (SPI_FSI_STATUS_ERROR | \ + SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \ + SPI_FSI_STATUS_RDR_OVERRUN) + #define SPI_FSI_PORT_CTRL 0x9 +@@ -70,6 +75,8 @@ struct fsi_spi { + struct device *dev; /* SPI controller device */ + struct fsi_device *fsi; /* FSI2SPI CFAM engine device */ + u32 base; ++ size_t max_xfr_size; ++ bool restricted; + }; + + struct fsi_spi_sequence { +@@ -205,8 +212,12 @@ static int fsi_spi_reset(struct fsi_spi *ctx) + if (rc) + return rc; + +- return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG, +- SPI_FSI_CLOCK_CFG_RESET2); ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG, ++ SPI_FSI_CLOCK_CFG_RESET2); ++ if (rc) ++ return rc; ++ ++ return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL); + } + + static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val) +@@ -214,8 +225,8 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val) + /* + * Add the next byte of instruction to the 8-byte sequence register. + * Then decrement the counter so that the next instruction will go in +- * the right place. Return the number of "slots" left in the sequence +- * register. ++ * the right place. Return the index of the slot we just filled in the ++ * sequence register. + */ + seq->data |= (u64)val << seq->bit; + seq->bit -= 8; +@@ -233,40 +244,71 @@ static int fsi_spi_sequence_transfer(struct fsi_spi *ctx, + struct fsi_spi_sequence *seq, + struct spi_transfer *transfer) + { ++ bool docfg = false; + int loops; + int idx; + int rc; ++ u8 val = 0; + u8 len = min(transfer->len, 8U); + u8 rem = transfer->len % len; ++ u64 cfg = 0ULL; + + loops = transfer->len / len; + + if (transfer->tx_buf) { +- idx = fsi_spi_sequence_add(seq, +- SPI_FSI_SEQUENCE_SHIFT_OUT(len)); ++ val = SPI_FSI_SEQUENCE_SHIFT_OUT(len); ++ idx = fsi_spi_sequence_add(seq, val); ++ + if (rem) + rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem); + } else if (transfer->rx_buf) { +- idx = fsi_spi_sequence_add(seq, +- SPI_FSI_SEQUENCE_SHIFT_IN(len)); ++ val = SPI_FSI_SEQUENCE_SHIFT_IN(len); ++ idx = fsi_spi_sequence_add(seq, val); ++ + if (rem) + rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem); + } else { + return -EINVAL; + } + ++ if (ctx->restricted) { ++ const int eidx = rem ? 
5 : 6; ++ ++ while (loops > 1 && idx <= eidx) { ++ idx = fsi_spi_sequence_add(seq, val); ++ loops--; ++ docfg = true; ++ } ++ ++ if (loops > 1) { ++ dev_warn(ctx->dev, "No sequencer slots; aborting.\n"); ++ return -EINVAL; ++ } ++ } ++ + if (loops > 1) { + fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx)); ++ docfg = true; ++ } + +- if (rem) +- fsi_spi_sequence_add(seq, rem); ++ if (docfg) { ++ cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1); ++ if (transfer->rx_buf) ++ cfg |= SPI_FSI_COUNTER_CFG_N2_RX | ++ SPI_FSI_COUNTER_CFG_N2_TX | ++ SPI_FSI_COUNTER_CFG_N2_IMPLICIT | ++ SPI_FSI_COUNTER_CFG_N2_RELOAD; + +- rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, +- SPI_FSI_COUNTER_CFG_LOOPS(loops - 1)); ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg); + if (rc) + return rc; ++ } else { ++ fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL); + } + ++ if (rem) ++ fsi_spi_sequence_add(seq, rem); ++ + return 0; + } + +@@ -275,6 +317,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, + { + int rc = 0; + u64 status = 0ULL; ++ u64 cfg = 0ULL; + + if (transfer->tx_buf) { + int nb; +@@ -312,6 +355,16 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, + u64 in = 0ULL; + u8 *rx = transfer->rx_buf; + ++ rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg); ++ if (rc) ++ return rc; ++ ++ if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) { ++ rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0); ++ if (rc) ++ return rc; ++ } ++ + while (transfer->len > recv) { + do { + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, +@@ -350,7 +403,7 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx) + u64 status = 0ULL; + u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE | + SPI_FSI_CLOCK_CFG_SCK_NO_DEL | +- FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4); ++ FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19); + + end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS); + do { +@@ -407,7 +460,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr, + + /* Sequencer must do shift out (tx) first. */ + if (!transfer->tx_buf || +- transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) { ++ transfer->len > (ctx->max_xfr_size + 8)) { + rc = -EINVAL; + goto error; + } +@@ -431,7 +484,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr, + + /* Sequencer can only do shift in (rx) after tx. 
*/ + if (next->rx_buf) { +- if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) { ++ if (next->len > ctx->max_xfr_size) { + rc = -EINVAL; + goto error; + } +@@ -476,7 +529,9 @@ error: + + static size_t fsi_spi_max_transfer_size(struct spi_device *spi) + { +- return SPI_FSI_MAX_TRANSFER_SIZE; ++ struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller); ++ ++ return ctx->max_xfr_size; + } + + static int fsi_spi_probe(struct device *dev) +@@ -524,6 +579,14 @@ static int fsi_spi_probe(struct device *dev) + ctx->fsi = fsi; + ctx->base = base + SPI_FSI_BASE; + ++ if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) { ++ ctx->restricted = true; ++ ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED; ++ } else { ++ ctx->restricted = false; ++ ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE; ++ } ++ + rc = devm_spi_register_controller(dev, ctlr); + if (rc) + spi_controller_put(ctlr); +diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c +index 38a5f1304cec4..e38e5ad3c7068 100644 +--- a/drivers/spi/spi-imx.c ++++ b/drivers/spi/spi-imx.c +@@ -1707,7 +1707,7 @@ static int spi_imx_probe(struct platform_device *pdev) + ret = spi_bitbang_start(&spi_imx->bitbang); + if (ret) { + dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); +- goto out_runtime_pm_put; ++ goto out_bitbang_start; + } + + dev_info(&pdev->dev, "probed\n"); +@@ -1717,6 +1717,9 @@ static int spi_imx_probe(struct platform_device *pdev) + + return ret; + ++out_bitbang_start: ++ if (spi_imx->devtype_data->has_dmamode) ++ spi_imx_sdma_exit(spi_imx); + out_runtime_pm_put: + pm_runtime_dont_use_autosuspend(spi_imx->dev); + pm_runtime_put_sync(spi_imx->dev); +diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c +index 1c9478e6e5d99..d4c9510af3931 100644 +--- a/drivers/spi/spi-omap2-mcspi.c ++++ b/drivers/spi/spi-omap2-mcspi.c +@@ -24,7 +24,6 @@ + #include + #include + #include +-#include + + #include + +@@ -348,9 +347,19 @@ disable_fifo: + + static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) + { +- u32 val; +- +- return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC); ++ unsigned long timeout; ++ ++ timeout = jiffies + msecs_to_jiffies(1000); ++ while (!(readl_relaxed(reg) & bit)) { ++ if (time_after(jiffies, timeout)) { ++ if (!(readl_relaxed(reg) & bit)) ++ return -ETIMEDOUT; ++ else ++ return 0; ++ } ++ cpu_relax(); ++ } ++ return 0; + } + + static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi, +diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c +index 924b24441789a..1f08e32a10fe2 100644 +--- a/drivers/spi/spi-s3c64xx.c ++++ b/drivers/spi/spi-s3c64xx.c +@@ -122,6 +122,7 @@ + + struct s3c64xx_spi_dma_data { + struct dma_chan *ch; ++ dma_cookie_t cookie; + enum dma_transfer_direction direction; + }; + +@@ -271,12 +272,13 @@ static void s3c64xx_spi_dmacb(void *data) + spin_unlock_irqrestore(&sdd->lock, flags); + } + +-static void prepare_dma(struct s3c64xx_spi_dma_data *dma, ++static int prepare_dma(struct s3c64xx_spi_dma_data *dma, + struct sg_table *sgt) + { + struct s3c64xx_spi_driver_data *sdd; + struct dma_slave_config config; + struct dma_async_tx_descriptor *desc; ++ int ret; + + memset(&config, 0, sizeof(config)); + +@@ -300,12 +302,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma, + + desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents, + dma->direction, DMA_PREP_INTERRUPT); ++ if (!desc) { ++ dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist", ++ dma->direction == DMA_DEV_TO_MEM ? 
"rx" : "tx"); ++ return -ENOMEM; ++ } + + desc->callback = s3c64xx_spi_dmacb; + desc->callback_param = dma; + +- dmaengine_submit(desc); ++ dma->cookie = dmaengine_submit(desc); ++ ret = dma_submit_error(dma->cookie); ++ if (ret) { ++ dev_err(&sdd->pdev->dev, "DMA submission failed"); ++ return -EIO; ++ } ++ + dma_async_issue_pending(dma->ch); ++ return 0; + } + + static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable) +@@ -355,11 +369,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master, + return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; + } + +-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, ++static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, + struct spi_transfer *xfer, int dma_mode) + { + void __iomem *regs = sdd->regs; + u32 modecfg, chcfg; ++ int ret = 0; + + modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); + modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); +@@ -385,7 +400,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, + chcfg |= S3C64XX_SPI_CH_TXCH_ON; + if (dma_mode) { + modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; +- prepare_dma(&sdd->tx_dma, &xfer->tx_sg); ++ ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg); + } else { + switch (sdd->cur_bpw) { + case 32: +@@ -417,12 +432,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, + writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) + | S3C64XX_SPI_PACKET_CNT_EN, + regs + S3C64XX_SPI_PACKET_CNT); +- prepare_dma(&sdd->rx_dma, &xfer->rx_sg); ++ ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg); + } + } + ++ if (ret) ++ return ret; ++ + writel(modecfg, regs + S3C64XX_SPI_MODE_CFG); + writel(chcfg, regs + S3C64XX_SPI_CH_CFG); ++ ++ return 0; + } + + static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd, +@@ -555,9 +575,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd, + return 0; + } + +-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) ++static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) + { + void __iomem *regs = sdd->regs; ++ int ret; + u32 val; + + /* Disable Clock */ +@@ -605,7 +626,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) + + if (sdd->port_conf->clk_from_cmu) { + /* The src_clk clock is divided internally by 2 */ +- clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); ++ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); ++ if (ret) ++ return ret; + } else { + /* Configure Clock */ + val = readl(regs + S3C64XX_SPI_CLK_CFG); +@@ -619,6 +642,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) + val |= S3C64XX_SPI_ENCLK_ENABLE; + writel(val, regs + S3C64XX_SPI_CLK_CFG); + } ++ ++ return 0; + } + + #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) +@@ -661,7 +686,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, + sdd->cur_bpw = bpw; + sdd->cur_speed = speed; + sdd->cur_mode = spi->mode; +- s3c64xx_spi_config(sdd); ++ status = s3c64xx_spi_config(sdd); ++ if (status) ++ return status; + } + + if (!is_polling(sdd) && (xfer->len > fifo_len) && +@@ -685,13 +712,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master, + sdd->state &= ~RXBUSY; + sdd->state &= ~TXBUSY; + +- s3c64xx_enable_datapath(sdd, xfer, use_dma); +- + /* Start the signals */ + s3c64xx_spi_set_cs(spi, true); + ++ status = s3c64xx_enable_datapath(sdd, xfer, use_dma); ++ + spin_unlock_irqrestore(&sdd->lock, flags); + ++ if (status) { ++ dev_err(&spi->dev, "failed to enable data path for 
transfer: %d\n", status); ++ break; ++ } ++ + if (use_dma) + status = s3c64xx_wait_for_dma(sdd, xfer); + else +diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c +index 03929b9d3a8bc..d0725bc8b48a4 100644 +--- a/drivers/staging/emxx_udc/emxx_udc.c ++++ b/drivers/staging/emxx_udc/emxx_udc.c +@@ -2593,7 +2593,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep, + + if (req->unaligned) { + if (!ep->virt_buf) +- ep->virt_buf = dma_alloc_coherent(NULL, PAGE_SIZE, ++ ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE, + &ep->phys_buf, + GFP_ATOMIC | GFP_DMA); + if (ep->epnum > 0) { +@@ -3148,7 +3148,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev) + for (i = 0; i < NUM_ENDPOINTS; i++) { + ep = &udc->ep[i]; + if (ep->virt_buf) +- dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf, ++ dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf, + ep->phys_buf); + } + +diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c +index a68cbb4995f0f..33a0f8ff82aa8 100644 +--- a/drivers/staging/media/atomisp/pci/sh_css.c ++++ b/drivers/staging/media/atomisp/pci/sh_css.c +@@ -9521,7 +9521,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config, + if (err) + { + IA_CSS_LEAVE_ERR(err); +- return err; ++ goto ERR; + } + #endif + for (i = 0; i < num_pipes; i++) +diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c +index 194d058480777..6dcd47bd9ed3f 100644 +--- a/drivers/staging/media/hantro/hantro_h264.c ++++ b/drivers/staging/media/hantro/hantro_h264.c +@@ -325,7 +325,7 @@ dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx, + */ + dst_buf = hantro_get_dst_buf(ctx); + buf = &dst_buf->vb2_buf; +- dma_addr = vb2_dma_contig_plane_dma_addr(buf, 0); ++ dma_addr = hantro_get_dec_buf_addr(ctx, buf); + } + + return dma_addr; +diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c +index 44062ffceaea7..6d2a8f2a8f0bb 100644 +--- a/drivers/staging/media/hantro/hantro_postproc.c ++++ b/drivers/staging/media/hantro/hantro_postproc.c +@@ -118,7 +118,9 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx) + unsigned int num_buffers = cap_queue->num_buffers; + unsigned int i, buf_size; + +- buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage; ++ buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage + ++ hantro_h264_mv_size(ctx->dst_fmt.width, ++ ctx->dst_fmt.height); + + for (i = 0; i < num_buffers; ++i) { + struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i]; +diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c +index fbd53d7c097cd..e9d6bd9e9332a 100644 +--- a/drivers/staging/media/ipu3/ipu3-css-params.c ++++ b/drivers/staging/media/ipu3/ipu3-css-params.c +@@ -159,7 +159,7 @@ imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width, + + memset(&cfg->scaler_coeffs_chroma, 0, + sizeof(cfg->scaler_coeffs_chroma)); +- memset(&cfg->scaler_coeffs_luma, 0, sizeof(*cfg->scaler_coeffs_luma)); ++ memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma)); + do { + phase_step_correction++; + +diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c +index 7c4df6d48c43d..4df9476ef2a9b 100644 +--- a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c ++++ b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c +@@ -16,6 
+16,7 @@ + */ + + #include ++#include + #include + #include + #include +diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h +index 483ce04789ed0..7f6798b223ef8 100644 +--- a/drivers/staging/qlge/qlge.h ++++ b/drivers/staging/qlge/qlge.h +@@ -2338,21 +2338,21 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id); + #endif + + #ifdef QL_OB_DUMP +-void ql_dump_tx_desc(struct tx_buf_desc *tbd); +-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb); +-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp); +-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb) +-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp) ++void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd); ++void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb); ++void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp); ++#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) ql_dump_ob_mac_iocb(qdev, ob_mac_iocb) ++#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) ql_dump_ob_mac_rsp(qdev, ob_mac_rsp) + #else +-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) +-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ++#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) ++#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) + #endif + + #ifdef QL_IB_DUMP +-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp); +-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp) ++void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp); ++#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) ql_dump_ib_mac_rsp(qdev, ib_mac_rsp) + #else +-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ++#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) + #endif + + #ifdef QL_ALL_DUMP +diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c +index a55bf0b3e9dcc..42fd13990f3a8 100644 +--- a/drivers/staging/qlge/qlge_dbg.c ++++ b/drivers/staging/qlge/qlge_dbg.c +@@ -1431,7 +1431,7 @@ void ql_dump_routing_entries(struct ql_adapter *qdev) + } + if (value) + netdev_err(qdev->ndev, +- "%s: Routing Mask %d = 0x%.08x\n", ++ "Routing Mask %d = 0x%.08x\n", + i, value); + } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); +@@ -1617,6 +1617,9 @@ void ql_dump_qdev(struct ql_adapter *qdev) + #ifdef QL_CB_DUMP + void ql_dump_wqicb(struct wqicb *wqicb) + { ++ struct tx_ring *tx_ring = container_of(wqicb, struct tx_ring, wqicb); ++ struct ql_adapter *qdev = tx_ring->qdev; ++ + netdev_err(qdev->ndev, "Dumping wqicb stuff...\n"); + netdev_err(qdev->ndev, "wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len)); + netdev_err(qdev->ndev, "wqicb->flags = %x\n", +@@ -1632,8 +1635,8 @@ void ql_dump_wqicb(struct wqicb *wqicb) + + void ql_dump_tx_ring(struct tx_ring *tx_ring) + { +- if (!tx_ring) +- return; ++ struct ql_adapter *qdev = tx_ring->qdev; ++ + netdev_err(qdev->ndev, "===================== Dumping tx_ring %d ===============\n", + tx_ring->wq_id); + netdev_err(qdev->ndev, "tx_ring->base = %p\n", tx_ring->wq_base); +@@ -1657,6 +1660,8 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring) + void ql_dump_ricb(struct ricb *ricb) + { + int i; ++ struct ql_adapter *qdev = ++ container_of(ricb, struct ql_adapter, ricb); + + netdev_err(qdev->ndev, "===================== Dumping ricb ===============\n"); + netdev_err(qdev->ndev, "Dumping ricb stuff...\n"); +@@ -1686,6 +1691,9 @@ void ql_dump_ricb(struct ricb *ricb) + + void ql_dump_cqicb(struct cqicb *cqicb) + { ++ struct rx_ring *rx_ring = container_of(cqicb, struct rx_ring, cqicb); ++ 
struct ql_adapter *qdev = rx_ring->qdev; ++ + netdev_err(qdev->ndev, "Dumping cqicb stuff...\n"); + + netdev_err(qdev->ndev, "cqicb->msix_vect = %d\n", cqicb->msix_vect); +@@ -1725,8 +1733,8 @@ static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring) + + void ql_dump_rx_ring(struct rx_ring *rx_ring) + { +- if (!rx_ring) +- return; ++ struct ql_adapter *qdev = rx_ring->qdev; ++ + netdev_err(qdev->ndev, + "===================== Dumping rx_ring %d ===============\n", + rx_ring->cq_id); +@@ -1816,7 +1824,7 @@ fail_it: + #endif + + #ifdef QL_OB_DUMP +-void ql_dump_tx_desc(struct tx_buf_desc *tbd) ++void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd) + { + netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n", + le64_to_cpu((u64)tbd->addr)); +@@ -1843,7 +1851,7 @@ void ql_dump_tx_desc(struct tx_buf_desc *tbd) + tbd->len & TX_DESC_E ? "E" : "."); + } + +-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) ++void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb) + { + struct ob_mac_tso_iocb_req *ob_mac_tso_iocb = + (struct ob_mac_tso_iocb_req *)ob_mac_iocb; +@@ -1886,10 +1894,10 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) + frame_len = le16_to_cpu(ob_mac_iocb->frame_len); + } + tbd = &ob_mac_iocb->tbd[0]; +- ql_dump_tx_desc(tbd); ++ ql_dump_tx_desc(qdev, tbd); + } + +-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) ++void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp) + { + netdev_err(qdev->ndev, "%s\n", __func__); + netdev_err(qdev->ndev, "opcode = %d\n", ob_mac_rsp->opcode); +@@ -1906,7 +1914,7 @@ void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) + #endif + + #ifdef QL_IB_DUMP +-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) ++void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp) + { + netdev_err(qdev->ndev, "%s\n", __func__); + netdev_err(qdev->ndev, "opcode = 0x%x\n", ib_mac_rsp->opcode); +diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c +index 2028458bea6f0..b351a7eb7a897 100644 +--- a/drivers/staging/qlge/qlge_main.c ++++ b/drivers/staging/qlge/qlge_main.c +@@ -1856,7 +1856,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, + struct net_device *ndev = qdev->ndev; + struct sk_buff *skb = NULL; + +- QL_DUMP_IB_MAC_RSP(ib_mac_rsp); ++ QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp); + + skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); + if (unlikely(!skb)) { +@@ -1954,7 +1954,7 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, + ((le16_to_cpu(ib_mac_rsp->vlan_id) & + IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff; + +- QL_DUMP_IB_MAC_RSP(ib_mac_rsp); ++ QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp); + + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { + /* The data and headers are split into +@@ -2001,7 +2001,7 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev, + struct tx_ring *tx_ring; + struct tx_ring_desc *tx_ring_desc; + +- QL_DUMP_OB_MAC_RSP(mac_rsp); ++ QL_DUMP_OB_MAC_RSP(qdev, mac_rsp); + tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; + tx_ring_desc = &tx_ring->q[mac_rsp->tid]; + ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); +@@ -2593,7 +2593,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) + tx_ring->tx_errors++; + return NETDEV_TX_BUSY; + } +- QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); ++ QL_DUMP_OB_MAC_IOCB(qdev, mac_iocb_ptr); + tx_ring->prod_idx++; + if (tx_ring->prod_idx == tx_ring->wq_len) + tx_ring->prod_idx = 0; 
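The qlge debug helpers above previously referenced a qdev variable that is not in scope (visible in the removed lines, e.g. ql_dump_tx_desc() calling netdev_err(qdev->ndev, ...) with no qdev parameter or local), so builds with QL_OB_DUMP/QL_IB_DUMP/QL_CB_DUMP defined could not compile. The fix threads the adapter through explicitly, or recovers it with container_of() where the helper is handed only an embedded control block. A minimal, self-contained C sketch of that recovery idiom follows; the struct and field names here are illustrative only, not the driver's real layout:

	#include <stddef.h>
	#include <stdio.h>

	/* Same idea as the kernel's container_of(): step back from a pointer
	 * to an embedded member to the structure that contains it. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct adapter {
		int id;
	};

	struct ctrl_block {
		int len;
	};

	struct ring {
		struct ctrl_block cb;	/* embedded, as wqicb is in tx_ring */
		struct adapter *adapter;
	};

	/* Dump helper handed only the embedded member, like ql_dump_wqicb(). */
	static void dump_cb(struct ctrl_block *cb)
	{
		struct ring *ring = container_of(cb, struct ring, cb);

		printf("adapter %d: cb len %d\n", ring->adapter->id, ring->cb.len);
	}

	int main(void)
	{
		struct adapter a = { .id = 7 };
		struct ring r = { .cb = { .len = 64 }, .adapter = &a };

		dump_cb(&r.cb);
		return 0;
	}

The pointer arithmetic is only valid when the argument really is embedded in the containing type, which is presumably why ql_dump_ricb() and ql_dump_cqicb() can derive the adapter this way while the tbd/iocb dump helpers had to grow an explicit qdev argument instead.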
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +index 195d963c4fbb4..b6fee7230ce05 100644 +--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c ++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +@@ -597,7 +597,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee, + + prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE, + sizeof(struct ieee80211_rxb *), +- GFP_KERNEL); ++ GFP_ATOMIC); + if (!prxbIndicateArray) + return; + +diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c +index d83f421acfc1e..a397dc6231f13 100644 +--- a/drivers/staging/rtl8712/rtl8712_recv.c ++++ b/drivers/staging/rtl8712/rtl8712_recv.c +@@ -477,11 +477,14 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, + while (!end_of_queue_search(phead, plist)) { + pnextrframe = container_of(plist, union recv_frame, u.list); + pnextattrib = &pnextrframe->u.hdr.attrib; ++ ++ if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num)) ++ return false; ++ + if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num)) + plist = plist->next; +- else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num)) +- return false; +- break; ++ else ++ break; + } + list_del_init(&(prframe->u.hdr.list)); + list_add_tail(&(prframe->u.hdr.list), plist); +diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c +index 6fb0788807426..ef0cc1e474ae6 100644 +--- a/drivers/staging/wfx/data_rx.c ++++ b/drivers/staging/wfx/data_rx.c +@@ -17,6 +17,9 @@ static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt) + { + int params, tid; + ++ if (wfx_api_older_than(wvif->wdev, 3, 6)) ++ return; ++ + switch (mgmt->u.action.u.addba_req.action_code) { + case WLAN_ACTION_ADDBA_REQ: + params = le16_to_cpu(mgmt->u.action.u.addba_req.capab); +@@ -41,7 +44,7 @@ void wfx_rx_cb(struct wfx_vif *wvif, + memset(hdr, 0, sizeof(*hdr)); + + if (arg->status == HIF_STATUS_RX_FAIL_MIC) +- hdr->flag |= RX_FLAG_MMIC_ERROR; ++ hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED; + else if (arg->status) + goto drop; + +diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c +index 4e30ab17a93d4..7dace7c17bf5c 100644 +--- a/drivers/staging/wfx/sta.c ++++ b/drivers/staging/wfx/sta.c +@@ -682,15 +682,16 @@ int wfx_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) + { +- /* Aggregation is implemented fully in firmware, +- * including block ack negotiation. Do not allow +- * mac80211 stack to do anything: it interferes with +- * the firmware. +- */ +- +- /* Note that we still need this function stubbed. 
*/ +- +- return -ENOTSUPP; ++ // Aggregation is implemented fully in firmware ++ switch (params->action) { ++ case IEEE80211_AMPDU_RX_START: ++ case IEEE80211_AMPDU_RX_STOP: ++ // Just acknowledge it to enable frame re-ordering ++ return 0; ++ default: ++ // Leave the firmware doing its business for tx aggregation ++ return -ENOTSUPP; ++ } + } + + int wfx_add_chanctx(struct ieee80211_hw *hw, +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c +index 9b75923505020..86b28117787ec 100644 +--- a/drivers/target/target_core_user.c ++++ b/drivers/target/target_core_user.c +@@ -681,7 +681,7 @@ static void scatter_data_area(struct tcmu_dev *udev, + void *from, *to = NULL; + size_t copy_bytes, to_offset, offset; + struct scatterlist *sg; +- struct page *page; ++ struct page *page = NULL; + + for_each_sg(data_sg, sg, data_nents, i) { + int sg_remaining = sg->length; +diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c +index af7b2383e8f6b..019f4812def6c 100644 +--- a/drivers/thermal/thermal_netlink.c ++++ b/drivers/thermal/thermal_netlink.c +@@ -78,7 +78,7 @@ int thermal_genl_sampling_temp(int id, int temp) + hdr = genlmsg_put(skb, 0, 0, &thermal_gnl_family, 0, + THERMAL_GENL_SAMPLING_TEMP); + if (!hdr) +- return -EMSGSIZE; ++ goto out_free; + + if (nla_put_u32(skb, THERMAL_GENL_ATTR_TZ_ID, id)) + goto out_cancel; +@@ -93,6 +93,7 @@ int thermal_genl_sampling_temp(int id, int temp) + return 0; + out_cancel: + genlmsg_cancel(skb, hdr); ++out_free: + nlmsg_free(skb); + + return -EMSGSIZE; +diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig +index d1b27b0522a3c..8d60e0ff67b4d 100644 +--- a/drivers/tty/hvc/Kconfig ++++ b/drivers/tty/hvc/Kconfig +@@ -81,6 +81,7 @@ config HVC_DCC + bool "ARM JTAG DCC console" + depends on ARM || ARM64 + select HVC_DRIVER ++ select SERIAL_CORE_CONSOLE + help + This console uses the JTAG DCC on ARM to create a console under the HVC + driver. This console is used through a JTAG only on ARM. If you don't have +diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c +index 55105ac38f89b..509d1042825a1 100644 +--- a/drivers/tty/hvc/hvcs.c ++++ b/drivers/tty/hvc/hvcs.c +@@ -1216,13 +1216,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) + + tty_wait_until_sent(tty, HVCS_CLOSE_WAIT); + +- /* +- * This line is important because it tells hvcs_open that this +- * device needs to be re-configured the next time hvcs_open is +- * called. +- */ +- tty->driver_data = NULL; +- + free_irq(irq, hvcsd); + return; + } else if (hvcsd->port.count < 0) { +@@ -1237,6 +1230,13 @@ static void hvcs_cleanup(struct tty_struct * tty) + { + struct hvcs_struct *hvcsd = tty->driver_data; + ++ /* ++ * This line is important because it tells hvcs_open that this ++ * device needs to be re-configured the next time hvcs_open is ++ * called. 
++ */ ++ tty->driver_data = NULL; ++ + tty_port_put(&hvcsd->port); + } + +diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c +index cf20616340a1a..fe569f6294a24 100644 +--- a/drivers/tty/ipwireless/network.c ++++ b/drivers/tty/ipwireless/network.c +@@ -117,7 +117,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel, + skb->len, + notify_packet_sent, + network); +- if (ret == -1) { ++ if (ret < 0) { + skb_pull(skb, 2); + return 0; + } +@@ -134,7 +134,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel, + notify_packet_sent, + network); + kfree(buf); +- if (ret == -1) ++ if (ret < 0) + return 0; + } + kfree_skb(skb); +diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c +index fad3401e604d9..23584769fc292 100644 +--- a/drivers/tty/ipwireless/tty.c ++++ b/drivers/tty/ipwireless/tty.c +@@ -218,7 +218,7 @@ static int ipw_write(struct tty_struct *linux_tty, + ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS, + buf, count, + ipw_write_packet_sent_callback, tty); +- if (ret == -1) { ++ if (ret < 0) { + mutex_unlock(&tty->ipw_tty_mutex); + return 0; + } +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 00099a8439d21..c6a1d8c4e6894 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) + spin_lock_irqsave(&to->port->lock, flags); + /* Stuff the data into the input queue of the other end */ + c = tty_insert_flip_string(to->port, buf, c); ++ spin_unlock_irqrestore(&to->port->lock, flags); + /* And shovel */ + if (c) + tty_flip_buffer_push(to->port); +- spin_unlock_irqrestore(&to->port->lock, flags); + } + return c; + } +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index 87f450b7c1779..9e204f9b799a1 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -373,39 +373,6 @@ static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios) + serial8250_do_set_ldisc(p, termios); + } + +-static int dw8250_startup(struct uart_port *p) +-{ +- struct dw8250_data *d = to_dw8250_data(p->private_data); +- int ret; +- +- /* +- * Some platforms may provide a reference clock shared between several +- * devices. In this case before using the serial port first we have to +- * make sure that any clock state change is known to the UART port at +- * least post factum. 
+- */ +- if (d->clk) { +- ret = clk_notifier_register(d->clk, &d->clk_notifier); +- if (ret) +- dev_warn(p->dev, "Failed to set the clock notifier\n"); +- } +- +- return serial8250_do_startup(p); +-} +- +-static void dw8250_shutdown(struct uart_port *p) +-{ +- struct dw8250_data *d = to_dw8250_data(p->private_data); +- +- serial8250_do_shutdown(p); +- +- if (d->clk) { +- clk_notifier_unregister(d->clk, &d->clk_notifier); +- +- flush_work(&d->clk_work); +- } +-} +- + /* + * dw8250_fallback_dma_filter will prevent the UART from getting just any free + * channel on platforms that have DMA engines, but don't have any channels +@@ -501,8 +468,6 @@ static int dw8250_probe(struct platform_device *pdev) + p->serial_out = dw8250_serial_out; + p->set_ldisc = dw8250_set_ldisc; + p->set_termios = dw8250_set_termios; +- p->startup = dw8250_startup; +- p->shutdown = dw8250_shutdown; + + p->membase = devm_ioremap(dev, regs->start, resource_size(regs)); + if (!p->membase) +@@ -622,6 +587,19 @@ static int dw8250_probe(struct platform_device *pdev) + goto err_reset; + } + ++ /* ++ * Some platforms may provide a reference clock shared between several ++ * devices. In this case any clock state change must be known to the ++ * UART port at least post factum. ++ */ ++ if (data->clk) { ++ err = clk_notifier_register(data->clk, &data->clk_notifier); ++ if (err) ++ dev_warn(p->dev, "Failed to set the clock notifier\n"); ++ else ++ queue_work(system_unbound_wq, &data->clk_work); ++ } ++ + platform_set_drvdata(pdev, data); + + pm_runtime_set_active(dev); +@@ -648,6 +626,12 @@ static int dw8250_remove(struct platform_device *pdev) + + pm_runtime_get_sync(dev); + ++ if (data->clk) { ++ clk_notifier_unregister(data->clk, &data->clk_notifier); ++ ++ flush_work(&data->clk_work); ++ } ++ + serial8250_unregister_port(data->data.line); + + reset_control_assert(data->rst); +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index c71d647eb87a0..b0af13074cd36 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -2653,6 +2653,10 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk) + goto out_lock; + + port->uartclk = uartclk; ++ ++ if (!tty_port_initialized(&port->state->port)) ++ goto out_lock; ++ + termios = &port->state->port.tty->termios; + + baud = serial8250_get_baud_rate(port, termios, NULL); +@@ -2665,7 +2669,6 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk) + + serial8250_set_divisor(port, baud, quot, frac); + serial_port_out(port, UART_LCR, up->lcr); +- serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS); + + spin_unlock_irqrestore(&port->lock, flags); + serial8250_rpm_put(up); +diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig +index 9409be982aa64..20b98a3ba0466 100644 +--- a/drivers/tty/serial/Kconfig ++++ b/drivers/tty/serial/Kconfig +@@ -8,6 +8,7 @@ menu "Serial drivers" + + config SERIAL_EARLYCON + bool ++ depends on SERIAL_CORE + help + Support for early consoles with the earlycon parameter. This enables + the console before standard serial driver is probed. The console is +@@ -520,6 +521,7 @@ config SERIAL_IMX_EARLYCON + depends on ARCH_MXC || COMPILE_TEST + depends on OF + select SERIAL_EARLYCON ++ select SERIAL_CORE_CONSOLE + help + If you have enabled the earlycon on the Freescale IMX + CPU you can make it the earlycon by answering Y to this option. 
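The 8250_dw change above moves clock-notifier handling out of the port's startup()/shutdown() path and into probe()/remove(), so a rate change on a reference clock shared with other devices is observed even while the port is closed; the matching serial8250_update_uartclk() hunk then bails out before touching the divisor when the tty port is not initialized. A hedged sketch of the underlying clk notifier pattern follows; the context structure and function names are invented for illustration and do not match dw8250_data exactly:

	#include <linux/clk.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>

	struct uart_clk_ctx {
		struct clk *clk;
		struct notifier_block clk_notifier;
		unsigned long rate;
	};

	static int uart_clk_notify(struct notifier_block *nb, unsigned long event,
				   void *data)
	{
		struct uart_clk_ctx *ctx =
			container_of(nb, struct uart_clk_ctx, clk_notifier);
		struct clk_notifier_data *ndata = data;

		/* Act only once the new rate is committed. */
		if (event != POST_RATE_CHANGE)
			return NOTIFY_DONE;

		ctx->rate = ndata->new_rate;
		/* ...defer divisor reprogramming to process context here... */
		return NOTIFY_OK;
	}

	/* Registered once at probe time, so no rate change is missed between
	 * open/close cycles of the port. */
	static int uart_clk_ctx_init(struct uart_clk_ctx *ctx)
	{
		ctx->clk_notifier.notifier_call = uart_clk_notify;
		return clk_notifier_register(ctx->clk, &ctx->clk_notifier);
	}

Registering for the lifetime of the device is what makes the tty_port_initialized() guard above necessary: the callback can now fire while the port is closed, when no termios exists from which to recompute the baud rate.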
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c +index 7ca6422492241..e17465a8a773c 100644 +--- a/drivers/tty/serial/fsl_lpuart.c ++++ b/drivers/tty/serial/fsl_lpuart.c +@@ -649,26 +649,24 @@ static int lpuart32_poll_init(struct uart_port *port) + spin_lock_irqsave(&sport->port.lock, flags); + + /* Disable Rx & Tx */ +- lpuart32_write(&sport->port, UARTCTRL, 0); ++ lpuart32_write(&sport->port, 0, UARTCTRL); + + temp = lpuart32_read(&sport->port, UARTFIFO); + + /* Enable Rx and Tx FIFO */ +- lpuart32_write(&sport->port, UARTFIFO, +- temp | UARTFIFO_RXFE | UARTFIFO_TXFE); ++ lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO); + + /* flush Tx and Rx FIFO */ +- lpuart32_write(&sport->port, UARTFIFO, +- UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH); ++ lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO); + + /* explicitly clear RDRF */ + if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) { + lpuart32_read(&sport->port, UARTDATA); +- lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF); ++ lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO); + } + + /* Enable Rx and Tx */ +- lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE); ++ lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL); + spin_unlock_irqrestore(&sport->port.lock, flags); + + return 0; +@@ -677,12 +675,12 @@ static int lpuart32_poll_init(struct uart_port *port) + static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c) + { + lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE); +- lpuart32_write(port, UARTDATA, c); ++ lpuart32_write(port, c, UARTDATA); + } + + static int lpuart32_poll_get_char(struct uart_port *port) + { +- if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF)) ++ if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF)) + return NO_POLL_CHAR; + + return lpuart32_read(port, UARTDATA); +diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c +index dea649ee173ba..02a69e20014b1 100644 +--- a/drivers/usb/cdns3/gadget.c ++++ b/drivers/usb/cdns3/gadget.c +@@ -2990,12 +2990,12 @@ void cdns3_gadget_exit(struct cdns3 *cdns) + + priv_dev = cdns->gadget_dev; + +- devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); + + pm_runtime_mark_last_busy(cdns->dev); + pm_runtime_put_autosuspend(cdns->dev); + + usb_del_gadget_udc(&priv_dev->gadget); ++ devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); + + cdns3_free_all_eps(priv_dev); + +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 7f6f3ab5b8a67..24d79eec6654e 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1243,9 +1243,21 @@ static int acm_probe(struct usb_interface *intf, + } + } + } else { ++ int class = -1; ++ + data_intf_num = union_header->bSlaveInterface0; + control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0); + data_interface = usb_ifnum_to_if(usb_dev, data_intf_num); ++ ++ if (control_interface) ++ class = control_interface->cur_altsetting->desc.bInterfaceClass; ++ ++ if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) { ++ dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n"); ++ combined_interfaces = 1; ++ control_interface = data_interface = intf; ++ goto look_for_collapsed_interface; ++ } + } + + if (!control_interface || !data_interface) { +@@ -1906,6 +1918,17 @@ static const struct usb_device_id acm_ids[] = { + .driver_info = IGNORE_DEVICE, + }, + ++ /* Exclude ETAS ES58x */ ++ { USB_DEVICE(0x108c, 0x0159), /* 
ES581.4 */ ++ .driver_info = IGNORE_DEVICE, ++ }, ++ { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */ ++ .driver_info = IGNORE_DEVICE, ++ }, ++ { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */ ++ .driver_info = IGNORE_DEVICE, ++ }, ++ + { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */ + .driver_info = SEND_ZERO_PACKET, + }, +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c +index 7f5de956a2fc8..02d0cfd23bb29 100644 +--- a/drivers/usb/class/cdc-wdm.c ++++ b/drivers/usb/class/cdc-wdm.c +@@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); + + #define WDM_MAX 16 + ++/* we cannot wait forever at flush() */ ++#define WDM_FLUSH_TIMEOUT (30 * HZ) ++ + /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */ + #define WDM_DEFAULT_BUFSIZE 256 + +@@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb) + kfree(desc->outbuf); + desc->outbuf = NULL; + clear_bit(WDM_IN_USE, &desc->flags); +- wake_up(&desc->wait); ++ wake_up_all(&desc->wait); + } + + static void wdm_in_callback(struct urb *urb) +@@ -393,6 +396,9 @@ static ssize_t wdm_write + if (test_bit(WDM_RESETTING, &desc->flags)) + r = -EIO; + ++ if (test_bit(WDM_DISCONNECTING, &desc->flags)) ++ r = -ENODEV; ++ + if (r < 0) { + rv = r; + goto out_free_mem_pm; +@@ -424,6 +430,7 @@ static ssize_t wdm_write + if (rv < 0) { + desc->outbuf = NULL; + clear_bit(WDM_IN_USE, &desc->flags); ++ wake_up_all(&desc->wait); /* for wdm_wait_for_response() */ + dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); + rv = usb_translate_errors(rv); + goto out_free_mem_pm; +@@ -583,28 +590,58 @@ err: + return rv; + } + +-static int wdm_flush(struct file *file, fl_owner_t id) ++static int wdm_wait_for_response(struct file *file, long timeout) + { + struct wdm_device *desc = file->private_data; ++ long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */ ++ ++ /* ++ * Needs both flags. We cannot do with one because resetting it would ++ * cause a race with write() yet we need to signal a disconnect. ++ */ ++ rv = wait_event_interruptible_timeout(desc->wait, ++ !test_bit(WDM_IN_USE, &desc->flags) || ++ test_bit(WDM_DISCONNECTING, &desc->flags), ++ timeout); + +- wait_event(desc->wait, +- /* +- * needs both flags. We cannot do with one +- * because resetting it would cause a race +- * with write() yet we need to signal +- * a disconnect +- */ +- !test_bit(WDM_IN_USE, &desc->flags) || +- test_bit(WDM_DISCONNECTING, &desc->flags)); +- +- /* cannot dereference desc->intf if WDM_DISCONNECTING */ ++ /* ++ * To report the correct error. This is best effort. ++ * We are inevitably racing with the hardware. ++ */ + if (test_bit(WDM_DISCONNECTING, &desc->flags)) + return -ENODEV; +- if (desc->werr < 0) +- dev_err(&desc->intf->dev, "Error in flush path: %d\n", +- desc->werr); ++ if (!rv) ++ return -EIO; ++ if (rv < 0) ++ return -EINTR; ++ ++ spin_lock_irq(&desc->iuspin); ++ rv = desc->werr; ++ desc->werr = 0; ++ spin_unlock_irq(&desc->iuspin); ++ ++ return usb_translate_errors(rv); ++ ++} ++ ++/* ++ * You need to send a signal when you react to malicious or defective hardware. ++ * Also, don't abort when fsync() returned -EINVAL, for older kernels which do ++ * not implement wdm_flush() will return -EINVAL. 
++ */ ++static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync) ++{ ++ return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT); ++} + +- return usb_translate_errors(desc->werr); ++/* ++ * Same with wdm_fsync(), except it uses finite timeout in order to react to ++ * malicious or defective hardware which ceased communication after close() was ++ * implicitly called due to process termination. ++ */ ++static int wdm_flush(struct file *file, fl_owner_t id) ++{ ++ return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT); + } + + static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait) +@@ -729,6 +766,7 @@ static const struct file_operations wdm_fops = { + .owner = THIS_MODULE, + .read = wdm_read, + .write = wdm_write, ++ .fsync = wdm_fsync, + .open = wdm_open, + .flush = wdm_flush, + .release = wdm_release, +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c +index 7bc23469f4e4e..27e83e55a5901 100644 +--- a/drivers/usb/core/urb.c ++++ b/drivers/usb/core/urb.c +@@ -772,11 +772,12 @@ void usb_block_urb(struct urb *urb) + EXPORT_SYMBOL_GPL(usb_block_urb); + + /** +- * usb_kill_anchored_urbs - cancel transfer requests en masse ++ * usb_kill_anchored_urbs - kill all URBs associated with an anchor + * @anchor: anchor the requests are bound to + * +- * this allows all outstanding URBs to be killed starting +- * from the back of the queue ++ * This kills all outstanding URBs starting from the back of the queue, ++ * with guarantee that no completer callbacks will take place from the ++ * anchor after this function returns. + * + * This routine should not be called by a driver after its disconnect + * method has returned. +@@ -784,20 +785,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb); + void usb_kill_anchored_urbs(struct usb_anchor *anchor) + { + struct urb *victim; ++ int surely_empty; + +- spin_lock_irq(&anchor->lock); +- while (!list_empty(&anchor->urb_list)) { +- victim = list_entry(anchor->urb_list.prev, struct urb, +- anchor_list); +- /* we must make sure the URB isn't freed before we kill it*/ +- usb_get_urb(victim); +- spin_unlock_irq(&anchor->lock); +- /* this will unanchor the URB */ +- usb_kill_urb(victim); +- usb_put_urb(victim); ++ do { + spin_lock_irq(&anchor->lock); +- } +- spin_unlock_irq(&anchor->lock); ++ while (!list_empty(&anchor->urb_list)) { ++ victim = list_entry(anchor->urb_list.prev, ++ struct urb, anchor_list); ++ /* make sure the URB isn't freed before we kill it */ ++ usb_get_urb(victim); ++ spin_unlock_irq(&anchor->lock); ++ /* this will unanchor the URB */ ++ usb_kill_urb(victim); ++ usb_put_urb(victim); ++ spin_lock_irq(&anchor->lock); ++ } ++ surely_empty = usb_anchor_check_wakeup(anchor); ++ ++ spin_unlock_irq(&anchor->lock); ++ cpu_relax(); ++ } while (!surely_empty); + } + EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs); + +@@ -816,21 +823,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs); + void usb_poison_anchored_urbs(struct usb_anchor *anchor) + { + struct urb *victim; ++ int surely_empty; + +- spin_lock_irq(&anchor->lock); +- anchor->poisoned = 1; +- while (!list_empty(&anchor->urb_list)) { +- victim = list_entry(anchor->urb_list.prev, struct urb, +- anchor_list); +- /* we must make sure the URB isn't freed before we kill it*/ +- usb_get_urb(victim); +- spin_unlock_irq(&anchor->lock); +- /* this will unanchor the URB */ +- usb_poison_urb(victim); +- usb_put_urb(victim); ++ do { + spin_lock_irq(&anchor->lock); +- } +- spin_unlock_irq(&anchor->lock); ++ anchor->poisoned = 1; ++ while (!list_empty(&anchor->urb_list)) { ++ victim = 
list_entry(anchor->urb_list.prev, ++ struct urb, anchor_list); ++ /* make sure the URB isn't freed before we kill it */ ++ usb_get_urb(victim); ++ spin_unlock_irq(&anchor->lock); ++ /* this will unanchor the URB */ ++ usb_poison_urb(victim); ++ usb_put_urb(victim); ++ spin_lock_irq(&anchor->lock); ++ } ++ surely_empty = usb_anchor_check_wakeup(anchor); ++ ++ spin_unlock_irq(&anchor->lock); ++ cpu_relax(); ++ } while (!surely_empty); + } + EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs); + +@@ -970,14 +983,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor) + { + struct urb *victim; + unsigned long flags; ++ int surely_empty; ++ ++ do { ++ spin_lock_irqsave(&anchor->lock, flags); ++ while (!list_empty(&anchor->urb_list)) { ++ victim = list_entry(anchor->urb_list.prev, ++ struct urb, anchor_list); ++ __usb_unanchor_urb(victim, anchor); ++ } ++ surely_empty = usb_anchor_check_wakeup(anchor); + +- spin_lock_irqsave(&anchor->lock, flags); +- while (!list_empty(&anchor->urb_list)) { +- victim = list_entry(anchor->urb_list.prev, struct urb, +- anchor_list); +- __usb_unanchor_urb(victim, anchor); +- } +- spin_unlock_irqrestore(&anchor->lock, flags); ++ spin_unlock_irqrestore(&anchor->lock, flags); ++ cpu_relax(); ++ } while (!surely_empty); + } + + EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs); +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c +index 5b9d23991c99d..d367da4c6f850 100644 +--- a/drivers/usb/dwc2/gadget.c ++++ b/drivers/usb/dwc2/gadget.c +@@ -713,8 +713,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg) + */ + static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) + { ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; + int is_isoc = hs_ep->isochronous; + unsigned int maxsize; ++ u32 mps = hs_ep->ep.maxpacket; ++ int dir_in = hs_ep->dir_in; + + if (is_isoc) + maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT : +@@ -723,6 +726,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) + else + maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC; + ++ /* Interrupt OUT EP with mps not multiple of 4 */ ++ if (hs_ep->index) ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) ++ maxsize = mps * MAX_DMA_DESC_NUM_GENERIC; ++ + return maxsize; + } + +@@ -738,11 +746,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) + * Isochronous - descriptor rx/tx bytes bitfield limit, + * Control In/Bulk/Interrupt - multiple of mps. This will allow to not + * have concatenations from various descriptors within one packet. ++ * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds ++ * to a single descriptor. + * + * Selects corresponding mask for RX/TX bytes as well. 
+ */ + static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) + { ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; + u32 mps = hs_ep->ep.maxpacket; + int dir_in = hs_ep->dir_in; + u32 desc_size = 0; +@@ -766,6 +777,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) + desc_size -= desc_size % mps; + } + ++ /* Interrupt OUT EP with mps not multiple of 4 */ ++ if (hs_ep->index) ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) { ++ desc_size = mps; ++ *mask = DEV_DMA_NBYTES_MASK; ++ } ++ + return desc_size; + } + +@@ -1123,13 +1141,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, + length += (mps - (length % mps)); + } + +- /* +- * If more data to send, adjust DMA for EP0 out data stage. +- * ureq->dma stays unchanged, hence increment it by already +- * passed passed data count before starting new transaction. +- */ +- if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT && +- continuing) ++ if (continuing) + offset = ureq->actual; + + /* Fill DDMA chain entries */ +@@ -2320,22 +2332,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg, + */ + static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep) + { ++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; + struct dwc2_hsotg *hsotg = hs_ep->parent; + unsigned int bytes_rem = 0; ++ unsigned int bytes_rem_correction = 0; + struct dwc2_dma_desc *desc = hs_ep->desc_list; + int i; + u32 status; ++ u32 mps = hs_ep->ep.maxpacket; ++ int dir_in = hs_ep->dir_in; + + if (!desc) + return -EINVAL; + ++ /* Interrupt OUT EP with mps not multiple of 4 */ ++ if (hs_ep->index) ++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) ++ bytes_rem_correction = 4 - (mps % 4); ++ + for (i = 0; i < hs_ep->desc_count; ++i) { + status = desc->status; + bytes_rem += status & DEV_DMA_NBYTES_MASK; ++ bytes_rem -= bytes_rem_correction; + + if (status & DEV_DMA_STS_MASK) + dev_err(hsotg->dev, "descriptor %d closed with %x\n", + i, status & DEV_DMA_STS_MASK); ++ ++ if (status & DEV_DMA_L) ++ break; ++ + desc++; + } + +diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c +index 8f9d061c4d5fa..a3611cdd1deaa 100644 +--- a/drivers/usb/dwc2/params.c ++++ b/drivers/usb/dwc2/params.c +@@ -860,7 +860,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) + int dwc2_init_params(struct dwc2_hsotg *hsotg) + { + const struct of_device_id *match; +- void (*set_params)(void *data); ++ void (*set_params)(struct dwc2_hsotg *data); + + dwc2_set_default_params(hsotg); + dwc2_get_device_properties(hsotg); +diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c +index db9fd4bd1a38c..b28e90e0b685d 100644 +--- a/drivers/usb/dwc2/platform.c ++++ b/drivers/usb/dwc2/platform.c +@@ -584,12 +584,16 @@ static int dwc2_driver_probe(struct platform_device *dev) + if (retval) { + hsotg->gadget.udc = NULL; + dwc2_hsotg_remove(hsotg); +- goto error_init; ++ goto error_debugfs; + } + } + #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ + return 0; + ++error_debugfs: ++ dwc2_debugfs_exit(hsotg); ++ if (hsotg->hcd_enabled) ++ dwc2_hcd_remove(hsotg); + error_init: + if (hsotg->params.activate_stm_id_vb_detection) + regulator_disable(hsotg->usb33d); +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 2eb34c8b4065f..2f9f4ad562d4e 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -119,6 +119,7 @@ static void __dwc3_set_mode(struct work_struct *work) + struct 
dwc3 *dwc = work_to_dwc(work); + unsigned long flags; + int ret; ++ u32 reg; + + if (dwc->dr_mode != USB_DR_MODE_OTG) + return; +@@ -172,6 +173,11 @@ static void __dwc3_set_mode(struct work_struct *work) + otg_set_vbus(dwc->usb2_phy->otg, true); + phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST); + phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST); ++ if (dwc->dis_split_quirk) { ++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); ++ reg |= DWC3_GUCTL3_SPLITDISABLE; ++ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); ++ } + } + break; + case DWC3_GCTL_PRTCAP_DEVICE: +@@ -929,13 +935,6 @@ static int dwc3_core_init(struct dwc3 *dwc) + */ + dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); + +- /* Handle USB2.0-only core configuration */ +- if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == +- DWC3_GHWPARAMS3_SSPHY_IFC_DIS) { +- if (dwc->maximum_speed == USB_SPEED_SUPER) +- dwc->maximum_speed = USB_SPEED_HIGH; +- } +- + ret = dwc3_phy_setup(dwc); + if (ret) + goto err0; +@@ -1356,6 +1355,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) + dwc->dis_metastability_quirk = device_property_read_bool(dev, + "snps,dis_metastability_quirk"); + ++ dwc->dis_split_quirk = device_property_read_bool(dev, ++ "snps,dis-split-quirk"); ++ + dwc->lpm_nyet_threshold = lpm_nyet_threshold; + dwc->tx_de_emphasis = tx_de_emphasis; + +@@ -1381,6 +1383,8 @@ bool dwc3_has_imod(struct dwc3 *dwc) + static void dwc3_check_params(struct dwc3 *dwc) + { + struct device *dev = dwc->dev; ++ unsigned int hwparam_gen = ++ DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); + + /* Check for proper value of imod_interval */ + if (dwc->imod_interval && !dwc3_has_imod(dwc)) { +@@ -1412,17 +1416,23 @@ static void dwc3_check_params(struct dwc3 *dwc) + dwc->maximum_speed); + fallthrough; + case USB_SPEED_UNKNOWN: +- /* default to superspeed */ +- dwc->maximum_speed = USB_SPEED_SUPER; +- +- /* +- * default to superspeed plus if we are capable. 
+- */ +- if ((DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) && +- (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == +- DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) ++ switch (hwparam_gen) { ++ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2: + dwc->maximum_speed = USB_SPEED_SUPER_PLUS; +- ++ break; ++ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1: ++ if (DWC3_IP_IS(DWC32)) ++ dwc->maximum_speed = USB_SPEED_SUPER_PLUS; ++ else ++ dwc->maximum_speed = USB_SPEED_SUPER; ++ break; ++ case DWC3_GHWPARAMS3_SSPHY_IFC_DIS: ++ dwc->maximum_speed = USB_SPEED_HIGH; ++ break; ++ default: ++ dwc->maximum_speed = USB_SPEED_SUPER; ++ break; ++ } + break; + } + } +@@ -1865,10 +1875,26 @@ static int dwc3_resume(struct device *dev) + + return 0; + } ++ ++static void dwc3_complete(struct device *dev) ++{ ++ struct dwc3 *dwc = dev_get_drvdata(dev); ++ u32 reg; ++ ++ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST && ++ dwc->dis_split_quirk) { ++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); ++ reg |= DWC3_GUCTL3_SPLITDISABLE; ++ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); ++ } ++} ++#else ++#define dwc3_complete NULL + #endif /* CONFIG_PM_SLEEP */ + + static const struct dev_pm_ops dwc3_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume) ++ .complete = dwc3_complete, + SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume, + dwc3_runtime_idle) + }; +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h +index 2f04b3e42bf1c..ba0f743f35528 100644 +--- a/drivers/usb/dwc3/core.h ++++ b/drivers/usb/dwc3/core.h +@@ -138,6 +138,7 @@ + #define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10)) + + #define DWC3_GHWPARAMS8 0xc600 ++#define DWC3_GUCTL3 0xc60c + #define DWC3_GFLADJ 0xc630 + + /* Device Registers */ +@@ -380,6 +381,9 @@ + /* Global User Control Register 2 */ + #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14) + ++/* Global User Control Register 3 */ ++#define DWC3_GUCTL3_SPLITDISABLE BIT(14) ++ + /* Device Configuration Register */ + #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3) + #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f) +@@ -1052,6 +1056,7 @@ struct dwc3_scratchpad_array { + * 2 - No de-emphasis + * 3 - Reserved + * @dis_metastability_quirk: set to disable metastability quirk. ++ * @dis_split_quirk: set to disable split boundary. + * @imod_interval: set the interrupt moderation interval in 250ns + * increments or 0 to disable. 
+ */ +@@ -1245,6 +1250,8 @@ struct dwc3 { + + unsigned dis_metastability_quirk:1; + ++ unsigned dis_split_quirk:1; ++ + u16 imod_interval; + }; + +diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c +index 7df1150129354..2816e4a9813ad 100644 +--- a/drivers/usb/dwc3/dwc3-of-simple.c ++++ b/drivers/usb/dwc3/dwc3-of-simple.c +@@ -176,6 +176,7 @@ static const struct of_device_id of_dwc3_simple_match[] = { + { .compatible = "cavium,octeon-7130-usb-uctl" }, + { .compatible = "sprd,sc9860-dwc3" }, + { .compatible = "allwinner,sun50i-h6-dwc3" }, ++ { .compatible = "hisilicon,hi3670-dwc3" }, + { /* Sentinel */ } + }; + MODULE_DEVICE_TABLE(of, of_dwc3_simple_match); +diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c +index 1f638759a9533..92a7c3a839454 100644 +--- a/drivers/usb/gadget/function/f_ncm.c ++++ b/drivers/usb/gadget/function/f_ncm.c +@@ -85,8 +85,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f) + /* peak (theoretical) bulk transfer rate in bits-per-second */ + static inline unsigned ncm_bitrate(struct usb_gadget *g) + { +- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) +- return 13 * 1024 * 8 * 1000 * 8; ++ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) ++ return 4250000000U; ++ else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) ++ return 3750000000U; + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) + return 13 * 512 * 8 * 1000 * 8; + else +@@ -1534,7 +1536,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) + fs_ncm_notify_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function, +- ncm_ss_function, NULL); ++ ncm_ss_function, ncm_ss_function); + if (status) + goto fail; + +diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c +index 68697f596066c..64a4112068fc8 100644 +--- a/drivers/usb/gadget/function/f_printer.c ++++ b/drivers/usb/gadget/function/f_printer.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -64,7 +65,7 @@ struct printer_dev { + struct usb_gadget *gadget; + s8 interface; + struct usb_ep *in_ep, *out_ep; +- ++ struct kref kref; + struct list_head rx_reqs; /* List of free RX structs */ + struct list_head rx_reqs_active; /* List of Active RX xfers */ + struct list_head rx_buffers; /* List of completed xfers */ +@@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget, + + /*-------------------------------------------------------------------------*/ + ++static void printer_dev_free(struct kref *kref) ++{ ++ struct printer_dev *dev = container_of(kref, struct printer_dev, kref); ++ ++ kfree(dev); ++} ++ + static struct usb_request * + printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags) + { +@@ -353,6 +361,7 @@ printer_open(struct inode *inode, struct file *fd) + + spin_unlock_irqrestore(&dev->lock, flags); + ++ kref_get(&dev->kref); + DBG(dev, "printer_open returned %x\n", ret); + return ret; + } +@@ -370,6 +379,7 @@ printer_close(struct inode *inode, struct file *fd) + dev->printer_status &= ~PRINTER_SELECTED; + spin_unlock_irqrestore(&dev->lock, flags); + ++ kref_put(&dev->kref, printer_dev_free); + DBG(dev, "printer_close\n"); + + return 0; +@@ -1386,7 +1396,8 @@ static void gprinter_free(struct usb_function *f) + struct f_printer_opts *opts; + + opts = container_of(f->fi, struct f_printer_opts, func_inst); +- kfree(dev); 
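The printer-gadget hunks here replace a bare kfree() with a kref so the device
structure survives until both the function's bind/free path and any still-open
/dev node have dropped it. A minimal standalone sketch of that lifecycle, using
illustrative names rather than the driver's own:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct pr_dev {
		struct kref kref;
		/* ... device state ... */
	};

	static void pr_dev_release(struct kref *kref)
	{
		struct pr_dev *dev = container_of(kref, struct pr_dev, kref);

		kfree(dev);
	}

	static struct pr_dev *pr_dev_alloc(void)
	{
		struct pr_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

		if (dev)
			kref_init(&dev->kref);	/* refcount starts at 1 */
		return dev;
	}

	/*
	 * open()  -> kref_get(&dev->kref);
	 * close() -> kref_put(&dev->kref, pr_dev_release);
	 * free()  -> kref_put(&dev->kref, pr_dev_release);
	 * The structure is freed only on the final put, which closes the
	 * use-after-free window between gprinter_free() and printer_close().
	 */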
++ ++ kref_put(&dev->kref, printer_dev_free); + mutex_lock(&opts->lock); + --opts->refcnt; + mutex_unlock(&opts->lock); +@@ -1455,6 +1466,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi) + return ERR_PTR(-ENOMEM); + } + ++ kref_init(&dev->kref); + ++opts->refcnt; + dev->minor = opts->minor; + dev->pnp_string = opts->pnp_string; +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c +index c3cc6bd14e615..31ea76adcc0db 100644 +--- a/drivers/usb/gadget/function/u_ether.c ++++ b/drivers/usb/gadget/function/u_ether.c +@@ -93,7 +93,7 @@ struct eth_dev { + static inline int qlen(struct usb_gadget *gadget, unsigned qmult) + { + if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH || +- gadget->speed == USB_SPEED_SUPER)) ++ gadget->speed >= USB_SPEED_SUPER)) + return qmult * DEFAULT_QLEN; + else + return DEFAULT_QLEN; +diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c +index 127ecc2b43176..2caccbb6e0140 100644 +--- a/drivers/usb/gadget/function/u_serial.c ++++ b/drivers/usb/gadget/function/u_serial.c +@@ -1391,6 +1391,7 @@ void gserial_disconnect(struct gserial *gser) + if (port->port.tty) + tty_hangup(port->port.tty); + } ++ port->suspended = false; + spin_unlock_irqrestore(&port->port_lock, flags); + + /* disable endpoints, aborting down any active I/O */ +diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c +index feaec00a3c169..9cd4a70ccdd6d 100644 +--- a/drivers/usb/gadget/udc/bcm63xx_udc.c ++++ b/drivers/usb/gadget/udc/bcm63xx_udc.c +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + #include + #include + #include +diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c +index dd37e77dae001..2845ea328a064 100644 +--- a/drivers/usb/host/ohci-hcd.c ++++ b/drivers/usb/host/ohci-hcd.c +@@ -673,20 +673,24 @@ retry: + + /* handle root hub init quirks ... */ + val = roothub_a (ohci); +- val &= ~(RH_A_PSM | RH_A_OCPM); ++ /* Configure for per-port over-current protection by default */ ++ val &= ~RH_A_NOCP; ++ val |= RH_A_OCPM; + if (ohci->flags & OHCI_QUIRK_SUPERIO) { +- /* NSC 87560 and maybe others */ ++ /* NSC 87560 and maybe others. ++ * Ganged power switching, no over-current protection. ++ */ + val |= RH_A_NOCP; +- val &= ~(RH_A_POTPGT | RH_A_NPS); +- ohci_writel (ohci, val, &ohci->regs->roothub.a); ++ val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM); + } else if ((ohci->flags & OHCI_QUIRK_AMD756) || + (ohci->flags & OHCI_QUIRK_HUB_POWER)) { + /* hub power always on; required for AMD-756 and some +- * Mac platforms. ganged overcurrent reporting, if any. ++ * Mac platforms. + */ + val |= RH_A_NPS; +- ohci_writel (ohci, val, &ohci->regs->roothub.a); + } ++ ohci_writel(ohci, val, &ohci->regs->roothub.a); ++ + ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status); + ohci_writel (ohci, (val & RH_A_NPS) ? 
0 : RH_B_PPCM, + &ohci->regs->roothub.b); +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index f4cedcaee14b3..e534f524b7f87 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1915,8 +1915,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + trace_xhci_add_endpoint(ep_ctx); + +- xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index); +- + xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", + (unsigned int) ep->desc.bEndpointAddress, + udev->slot_id, +@@ -2949,6 +2947,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) + xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); + virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; + virt_dev->eps[i].new_ring = NULL; ++ xhci_debugfs_create_endpoint(xhci, virt_dev, i); + } + command_cleanup: + kfree(command->completion); +diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c +index 74264e5906951..1fa6fcac82992 100644 +--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c ++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c +@@ -1522,6 +1522,11 @@ static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) + (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1)); + } + ++static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val) ++{ ++ return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val); ++} ++ + static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features) + { + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); +@@ -1535,8 +1540,8 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features) + return err; + + ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; +- ndev->config.mtu = __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), +- ndev->mtu); ++ ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu); ++ ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); + return err; + } + +@@ -1653,6 +1658,9 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb * + if (err) + goto err_mr; + ++ if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) ++ return 0; ++ + restore_channels_info(ndev); + err = setup_driver(ndev); + if (err) +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c +index d98843feddce0..5076d0155bc3f 100644 +--- a/drivers/vfio/pci/vfio_pci_config.c ++++ b/drivers/vfio/pci/vfio_pci_config.c +@@ -406,7 +406,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. 
+ */ +- return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); ++ return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY); + } + + /* +@@ -520,8 +520,8 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos, + + count = vfio_default_config_read(vdev, pos, count, perm, offset, val); + +- /* Mask in virtual memory enable for SR-IOV devices */ +- if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) { ++ /* Mask in virtual memory enable */ ++ if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) { + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + u32 tmp_val = le32_to_cpu(*val); + +@@ -589,9 +589,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, + * shows it disabled (phys_mem/io, then the device has + * undergone some kind of backdoor reset and needs to be + * restored before we allow it to enable the bars. +- * SR-IOV devices will trigger this, but we catch them later ++ * SR-IOV devices will trigger this - for mem enable let's ++ * catch this now and for io enable it will be caught later + */ +- if ((new_mem && virt_mem && !phys_mem) || ++ if ((new_mem && virt_mem && !phys_mem && ++ !pdev->no_command_memory) || + (new_io && virt_io && !phys_io) || + vfio_need_bar_restore(vdev)) + vfio_bar_restore(vdev); +@@ -1734,12 +1736,14 @@ int vfio_config_init(struct vfio_pci_device *vdev) + vconfig[PCI_INTERRUPT_PIN]); + + vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ +- ++ } ++ if (pdev->no_command_memory) { + /* +- * VFs do no implement the memory enable bit of the COMMAND +- * register therefore we'll not have it set in our initial +- * copy of config space after pci_enable_device(). For +- * consistency with PFs, set the virtual enable bit here. ++ * VFs and devices that set pdev->no_command_memory do not ++ * implement the memory enable bit of the COMMAND register ++ * therefore we'll not have it set in our initial copy of ++ * config space after pci_enable_device(). For consistency ++ * with PFs, set the virtual enable bit here. 
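The comment above covers seeding the virtual memory-enable bit into the config
space copy; on the read path, vfio then folds that virtual bit into whatever
the hardware reports. A sketch of that merge under a hypothetical helper name
(this is not vfio's API):

	#include <linux/pci_regs.h>
	#include <linux/types.h>

	static u16 merge_virt_mem_enable(u16 phys_cmd, u16 virt_cmd)
	{
		/* take everything from hardware except memory-enable,
		 * which comes from the emulated copy because the device
		 * itself never implements that bit */
		return (phys_cmd & ~PCI_COMMAND_MEMORY) |
		       (virt_cmd & PCI_COMMAND_MEMORY);
	}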
+ */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c +index 1d9fb25929459..869dce5f134dd 100644 +--- a/drivers/vfio/pci/vfio_pci_intrs.c ++++ b/drivers/vfio/pci/vfio_pci_intrs.c +@@ -352,11 +352,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, + vdev->ctx[vector].producer.token = trigger; + vdev->ctx[vector].producer.irq = irq; + ret = irq_bypass_register_producer(&vdev->ctx[vector].producer); +- if (unlikely(ret)) ++ if (unlikely(ret)) { + dev_info(&pdev->dev, + "irq bypass producer (token %p) registration fails: %d\n", + vdev->ctx[vector].producer.token, ret); + ++ vdev->ctx[vector].producer.token = NULL; ++ } + vdev->ctx[vector].trigger = trigger; + + return 0; +diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c +index 262ab0efd06c6..2151bc7f87ab1 100644 +--- a/drivers/vfio/vfio.c ++++ b/drivers/vfio/vfio.c +@@ -1949,8 +1949,10 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage, + if (!group) + return -ENODEV; + +- if (group->dev_counter > 1) +- return -EINVAL; ++ if (group->dev_counter > 1) { ++ ret = -EINVAL; ++ goto err_pin_pages; ++ } + + ret = vfio_group_add_container_user(group); + if (ret) +@@ -2051,6 +2053,9 @@ int vfio_group_pin_pages(struct vfio_group *group, + if (!group || !user_iova_pfn || !phys_pfn || !npage) + return -EINVAL; + ++ if (group->dev_counter > 1) ++ return -EINVAL; ++ + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c +index 5fbf0c1f74338..9dde5ed852fd0 100644 +--- a/drivers/vfio/vfio_iommu_type1.c ++++ b/drivers/vfio/vfio_iommu_type1.c +@@ -693,7 +693,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data, + + ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]); + if (ret) { +- vfio_unpin_page_external(dma, iova, do_accounting); ++ if (put_pfn(phys_pfn[i], dma->prot) && do_accounting) ++ vfio_lock_acct(dma, -1, true); + goto pin_unwind; + } + +@@ -2933,7 +2934,8 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu, + * size + */ + bitmap_set(dma->bitmap, offset >> pgshift, +- *copied >> pgshift); ++ ((offset + *copied - 1) >> pgshift) - ++ (offset >> pgshift) + 1); + } + } else + *copied = copy_from_user(data, (void __user *)vaddr, +diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c +index 0ce1815850080..8268ac43d54f7 100644 +--- a/drivers/video/backlight/sky81452-backlight.c ++++ b/drivers/video/backlight/sky81452-backlight.c +@@ -217,6 +217,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt( + num_entry); + if (ret < 0) { + dev_err(dev, "led-sources node is invalid.\n"); ++ of_node_put(np); + return ERR_PTR(-EINVAL); + } + +diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c +index 3fe509cb9b874..13bd2bd5c043a 100644 +--- a/drivers/video/fbdev/aty/radeon_base.c ++++ b/drivers/video/fbdev/aty/radeon_base.c +@@ -2307,7 +2307,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev, + + ret = radeon_kick_out_firmware_fb(pdev); + if (ret) +- return ret; ++ goto err_release_fb; + + /* request the mem regions */ + ret = pci_request_region(pdev, 0, "radeonfb framebuffer"); +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c +index 6815bfb7f5724..e33bf1c386926 100644 +--- a/drivers/video/fbdev/core/fbmem.c ++++ b/drivers/video/fbdev/core/fbmem.c +@@ -1006,6 
+1006,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) + return 0; + } + ++ /* bitfill_aligned() assumes that it's at least 8x8 */ ++ if (var->xres < 8 || var->yres < 8) ++ return -EINVAL; ++ + ret = info->fbops->fb_check_var(var, info); + + if (ret) +diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c +index dfe3eb769638b..fde27feae5d0c 100644 +--- a/drivers/video/fbdev/sis/init.c ++++ b/drivers/video/fbdev/sis/init.c +@@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, + + i = 0; + ++ if (SiS_Pr->ChipType == SIS_730) ++ queuedata = &FQBQData730[0]; ++ else ++ queuedata = &FQBQData[0]; ++ + if(ModeNo > 0x13) { + + /* Get VCLK */ +@@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, + /* Get half colordepth */ + colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)]; + +- if(SiS_Pr->ChipType == SIS_730) { +- queuedata = &FQBQData730[0]; +- } else { +- queuedata = &FQBQData[0]; +- } +- + do { + templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth; + +diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c +index 578d3541e3d6f..1e8a38a7967d8 100644 +--- a/drivers/video/fbdev/vga16fb.c ++++ b/drivers/video/fbdev/vga16fb.c +@@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info) + } + + static void vga16fb_clock_chip(struct vga16fb_par *par, +- unsigned int pixclock, ++ unsigned int *pixclock, + const struct fb_info *info, + int mul, int div) + { +@@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par, + { 0 /* bad */, 0x00, 0x00}}; + int err; + +- pixclock = (pixclock * mul) / div; ++ *pixclock = (*pixclock * mul) / div; + best = vgaclocks; +- err = pixclock - best->pixclock; ++ err = *pixclock - best->pixclock; + if (err < 0) err = -err; + for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) { + int tmp; + +- tmp = pixclock - ptr->pixclock; ++ tmp = *pixclock - ptr->pixclock; + if (tmp < 0) tmp = -tmp; + if (tmp < err) { + err = tmp; +@@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par, + } + par->misc |= best->misc; + par->clkdiv = best->seq_clock_mode; +- pixclock = (best->pixclock * div) / mul; ++ *pixclock = (best->pixclock * div) / mul; + } + + #define FAIL(X) return -EINVAL +@@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var, + + if (mode & MODE_8BPP) + /* pixel clock == vga clock / 2 */ +- vga16fb_clock_chip(par, var->pixclock, info, 1, 2); ++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 2); + else + /* pixel clock == vga clock */ +- vga16fb_clock_chip(par, var->pixclock, info, 1, 1); ++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 1); + + var->red.offset = var->green.offset = var->blue.offset = + var->transp.offset = 0; +diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c +index 1b0b11b55d2a0..46ee0a0998b6f 100644 +--- a/drivers/virt/fsl_hypervisor.c ++++ b/drivers/virt/fsl_hypervisor.c +@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + + unsigned int i; + long ret = 0; +- int num_pinned; /* return value from get_user_pages() */ ++ int num_pinned = 0; /* return value from get_user_pages_fast() */ + phys_addr_t remote_paddr; /* The next address in the remote buffer */ + uint32_t count; /* The number of bytes left to copy */ + +@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + return -EINVAL; + + /* +- * The array of pages returned by 
get_user_pages() covers only ++ * The array of pages returned by get_user_pages_fast() covers only + * page-aligned memory. Since the user buffer is probably not + * page-aligned, we need to handle the discrepancy. + * +@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + + /* + * 'pages' is an array of struct page pointers that's initialized by +- * get_user_pages(). ++ * get_user_pages_fast(). + */ + pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); + if (!pages) { +@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + if (!sg_list_unaligned) { + pr_debug("fsl-hv: could not allocate S/G list\n"); + ret = -ENOMEM; +- goto exit; ++ goto free_pages; + } + sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list)); + +@@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + num_pages, param.source != -1 ? FOLL_WRITE : 0, pages); + + if (num_pinned != num_pages) { +- /* get_user_pages() failed */ + pr_debug("fsl-hv: could not lock source buffer\n"); + ret = (num_pinned < 0) ? num_pinned : -EFAULT; + goto exit; +@@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + virt_to_phys(sg_list), num_pages); + + exit: +- if (pages) { +- for (i = 0; i < num_pages; i++) +- if (pages[i]) +- put_page(pages[i]); ++ if (pages && (num_pinned > 0)) { ++ for (i = 0; i < num_pinned; i++) ++ put_page(pages[i]); + } + + kfree(sg_list_unaligned); ++free_pages: + kfree(pages); + + if (!ret) +diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h +index 87eaf357ae01f..adf015aa4126f 100644 +--- a/drivers/watchdog/sp5100_tco.h ++++ b/drivers/watchdog/sp5100_tco.h +@@ -70,7 +70,7 @@ + #define EFCH_PM_DECODEEN_WDT_TMREN BIT(7) + + +-#define EFCH_PM_DECODEEN3 0x00 ++#define EFCH_PM_DECODEEN3 0x03 + #define EFCH_PM_DECODEEN_SECOND_RES GENMASK(1, 0) + #define EFCH_PM_WATCHDOG_DISABLE ((u8)GENMASK(3, 2)) + +diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c +index 6798addabd5a0..bcf01af3fa6a8 100644 +--- a/drivers/watchdog/watchdog_dev.c ++++ b/drivers/watchdog/watchdog_dev.c +@@ -994,8 +994,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) + wd_data->wdd = wdd; + wdd->wd_data = wd_data; + +- if (IS_ERR_OR_NULL(watchdog_kworker)) ++ if (IS_ERR_OR_NULL(watchdog_kworker)) { ++ kfree(wd_data); + return -ENODEV; ++ } + + device_initialize(&wd_data->dev); + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); +@@ -1021,7 +1023,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) + pr_err("%s: a legacy watchdog module is probably present.\n", + wdd->info->identity); + old_wd_data = NULL; +- kfree(wd_data); ++ put_device(&wd_data->dev); + return err; + } + } +diff --git a/fs/afs/cell.c b/fs/afs/cell.c +index 5b79cdceefa0f..bc7ed46aaca9f 100644 +--- a/fs/afs/cell.c ++++ b/fs/afs/cell.c +@@ -19,7 +19,8 @@ static unsigned __read_mostly afs_cell_gc_delay = 10; + static unsigned __read_mostly afs_cell_min_ttl = 10 * 60; + static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60; + +-static void afs_manage_cell(struct work_struct *); ++static void afs_queue_cell_manager(struct afs_net *); ++static void afs_manage_cell_work(struct work_struct *); + + static void afs_dec_cells_outstanding(struct afs_net *net) + { +@@ -37,19 +38,21 @@ static void afs_set_cell_timer(struct afs_net *net, time64_t delay) + atomic_inc(&net->cells_outstanding); + if (timer_reduce(&net->cells_timer, jiffies + delay * HZ)) + 
afs_dec_cells_outstanding(net); ++ } else { ++ afs_queue_cell_manager(net); + } + } + + /* +- * Look up and get an activation reference on a cell record under RCU +- * conditions. The caller must hold the RCU read lock. ++ * Look up and get an activation reference on a cell record. The caller must ++ * hold net->cells_lock at least read-locked. + */ +-struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net, +- const char *name, unsigned int namesz) ++static struct afs_cell *afs_find_cell_locked(struct afs_net *net, ++ const char *name, unsigned int namesz) + { + struct afs_cell *cell = NULL; + struct rb_node *p; +- int n, seq = 0, ret = 0; ++ int n; + + _enter("%*.*s", namesz, namesz, name); + +@@ -58,61 +61,47 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net, + if (namesz > AFS_MAXCELLNAME) + return ERR_PTR(-ENAMETOOLONG); + +- do { +- /* Unfortunately, rbtree walking doesn't give reliable results +- * under just the RCU read lock, so we have to check for +- * changes. +- */ +- if (cell) +- afs_put_cell(net, cell); +- cell = NULL; +- ret = -ENOENT; +- +- read_seqbegin_or_lock(&net->cells_lock, &seq); +- +- if (!name) { +- cell = rcu_dereference_raw(net->ws_cell); +- if (cell) { +- afs_get_cell(cell); +- ret = 0; +- break; +- } +- ret = -EDESTADDRREQ; +- continue; +- } ++ if (!name) { ++ cell = net->ws_cell; ++ if (!cell) ++ return ERR_PTR(-EDESTADDRREQ); ++ goto found; ++ } + +- p = rcu_dereference_raw(net->cells.rb_node); +- while (p) { +- cell = rb_entry(p, struct afs_cell, net_node); +- +- n = strncasecmp(cell->name, name, +- min_t(size_t, cell->name_len, namesz)); +- if (n == 0) +- n = cell->name_len - namesz; +- if (n < 0) { +- p = rcu_dereference_raw(p->rb_left); +- } else if (n > 0) { +- p = rcu_dereference_raw(p->rb_right); +- } else { +- if (atomic_inc_not_zero(&cell->usage)) { +- ret = 0; +- break; +- } +- /* We want to repeat the search, this time with +- * the lock properly locked. +- */ +- } +- cell = NULL; +- } ++ p = net->cells.rb_node; ++ while (p) { ++ cell = rb_entry(p, struct afs_cell, net_node); ++ ++ n = strncasecmp(cell->name, name, ++ min_t(size_t, cell->name_len, namesz)); ++ if (n == 0) ++ n = cell->name_len - namesz; ++ if (n < 0) ++ p = p->rb_left; ++ else if (n > 0) ++ p = p->rb_right; ++ else ++ goto found; ++ } + +- } while (need_seqretry(&net->cells_lock, seq)); ++ return ERR_PTR(-ENOENT); + +- done_seqretry(&net->cells_lock, seq); ++found: ++ return afs_use_cell(cell); ++} + +- if (ret != 0 && cell) +- afs_put_cell(net, cell); ++/* ++ * Look up and get an activation reference on a cell record. ++ */ ++struct afs_cell *afs_find_cell(struct afs_net *net, ++ const char *name, unsigned int namesz) ++{ ++ struct afs_cell *cell; + +- return ret == 0 ? 
cell : ERR_PTR(ret); ++ down_read(&net->cells_lock); ++ cell = afs_find_cell_locked(net, name, namesz); ++ up_read(&net->cells_lock); ++ return cell; + } + + /* +@@ -166,8 +155,9 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, + cell->name[i] = tolower(name[i]); + cell->name[i] = 0; + +- atomic_set(&cell->usage, 2); +- INIT_WORK(&cell->manager, afs_manage_cell); ++ atomic_set(&cell->ref, 1); ++ atomic_set(&cell->active, 0); ++ INIT_WORK(&cell->manager, afs_manage_cell_work); + cell->volumes = RB_ROOT; + INIT_HLIST_HEAD(&cell->proc_volumes); + seqlock_init(&cell->volume_lock); +@@ -206,6 +196,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, + cell->dns_source = vllist->source; + cell->dns_status = vllist->status; + smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */ ++ atomic_inc(&net->cells_outstanding); + + _leave(" = %p", cell); + return cell; +@@ -245,9 +236,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net, + _enter("%s,%s", name, vllist); + + if (!excl) { +- rcu_read_lock(); +- cell = afs_lookup_cell_rcu(net, name, namesz); +- rcu_read_unlock(); ++ cell = afs_find_cell(net, name, namesz); + if (!IS_ERR(cell)) + goto wait_for_cell; + } +@@ -268,7 +257,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net, + /* Find the insertion point and check to see if someone else added a + * cell whilst we were allocating. + */ +- write_seqlock(&net->cells_lock); ++ down_write(&net->cells_lock); + + pp = &net->cells.rb_node; + parent = NULL; +@@ -290,23 +279,23 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net, + + cell = candidate; + candidate = NULL; ++ atomic_set(&cell->active, 2); + rb_link_node_rcu(&cell->net_node, parent, pp); + rb_insert_color(&cell->net_node, &net->cells); +- atomic_inc(&net->cells_outstanding); +- write_sequnlock(&net->cells_lock); ++ up_write(&net->cells_lock); + +- queue_work(afs_wq, &cell->manager); ++ afs_queue_cell(cell); + + wait_for_cell: + _debug("wait_for_cell"); + wait_var_event(&cell->state, + ({ + state = smp_load_acquire(&cell->state); /* vs error */ +- state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED; ++ state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED; + })); + + /* Check the state obtained from the wait check. 
*/ +- if (state == AFS_CELL_FAILED) { ++ if (state == AFS_CELL_REMOVED) { + ret = cell->error; + goto error; + } +@@ -320,16 +309,17 @@ cell_already_exists: + if (excl) { + ret = -EEXIST; + } else { +- afs_get_cell(cursor); ++ afs_use_cell(cursor); + ret = 0; + } +- write_sequnlock(&net->cells_lock); +- kfree(candidate); ++ up_write(&net->cells_lock); ++ if (candidate) ++ afs_put_cell(candidate); + if (ret == 0) + goto wait_for_cell; + goto error_noput; + error: +- afs_put_cell(net, cell); ++ afs_unuse_cell(net, cell); + error_noput: + _leave(" = %d [error]", ret); + return ERR_PTR(ret); +@@ -374,15 +364,15 @@ int afs_cell_init(struct afs_net *net, const char *rootcell) + } + + if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags)) +- afs_get_cell(new_root); ++ afs_use_cell(new_root); + + /* install the new cell */ +- write_seqlock(&net->cells_lock); +- old_root = rcu_access_pointer(net->ws_cell); +- rcu_assign_pointer(net->ws_cell, new_root); +- write_sequnlock(&net->cells_lock); ++ down_write(&net->cells_lock); ++ old_root = net->ws_cell; ++ net->ws_cell = new_root; ++ up_write(&net->cells_lock); + +- afs_put_cell(net, old_root); ++ afs_unuse_cell(net, old_root); + _leave(" = 0"); + return 0; + } +@@ -488,18 +478,21 @@ out_wake: + static void afs_cell_destroy(struct rcu_head *rcu) + { + struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu); ++ struct afs_net *net = cell->net; ++ int u; + + _enter("%p{%s}", cell, cell->name); + +- ASSERTCMP(atomic_read(&cell->usage), ==, 0); ++ u = atomic_read(&cell->ref); ++ ASSERTCMP(u, ==, 0); + +- afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root); +- afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers)); +- afs_put_cell(cell->net, cell->alias_of); ++ afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers)); ++ afs_unuse_cell(net, cell->alias_of); + key_put(cell->anonymous_key); + kfree(cell->name); + kfree(cell); + ++ afs_dec_cells_outstanding(net); + _leave(" [destroyed]"); + } + +@@ -534,16 +527,50 @@ void afs_cells_timer(struct timer_list *timer) + */ + struct afs_cell *afs_get_cell(struct afs_cell *cell) + { +- atomic_inc(&cell->usage); ++ if (atomic_read(&cell->ref) <= 0) ++ BUG(); ++ ++ atomic_inc(&cell->ref); + return cell; + } + + /* + * Drop a reference on a cell record. + */ +-void afs_put_cell(struct afs_net *net, struct afs_cell *cell) ++void afs_put_cell(struct afs_cell *cell) ++{ ++ if (cell) { ++ unsigned int u, a; ++ ++ u = atomic_dec_return(&cell->ref); ++ if (u == 0) { ++ a = atomic_read(&cell->active); ++ WARN(a != 0, "Cell active count %u > 0\n", a); ++ call_rcu(&cell->rcu, afs_cell_destroy); ++ } ++ } ++} ++ ++/* ++ * Note a cell becoming more active. ++ */ ++struct afs_cell *afs_use_cell(struct afs_cell *cell) ++{ ++ if (atomic_read(&cell->ref) <= 0) ++ BUG(); ++ ++ atomic_inc(&cell->active); ++ return cell; ++} ++ ++/* ++ * Record a cell becoming less active. When the active counter reaches 1, it ++ * is scheduled for destruction, but may get reactivated. ++ */ ++void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell) + { + time64_t now, expire_delay; ++ int a; + + if (!cell) + return; +@@ -556,11 +583,21 @@ void afs_put_cell(struct afs_net *net, struct afs_cell *cell) + if (cell->vl_servers->nr_servers) + expire_delay = afs_cell_gc_delay; + +- if (atomic_dec_return(&cell->usage) > 1) +- return; ++ a = atomic_dec_return(&cell->active); ++ WARN_ON(a == 0); ++ if (a == 1) ++ /* 'cell' may now be garbage collected. 
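The afs changes in these hunks split the old single usage count into a
two-counter shape: 'ref' pins the memory (freed via RCU at zero) while
'active' tracks users, and an active count falling to 1 only schedules
management/GC rather than freeing anything. A condensed sketch of the shape,
with afs specifics elided and schedule_manager() standing in as a hypothetical
callback:

	#include <linux/atomic.h>

	struct two_count_obj {
		atomic_t ref;		/* memory lifetime */
		atomic_t active;	/* usage; 1 == idle, eligible for GC */
	};

	/* stand-in: afs arms the cell timer / queues the manager work here */
	static void schedule_manager(struct two_count_obj *obj)
	{
	}

	static void obj_unuse(struct two_count_obj *obj)
	{
		/* dropping 'active' can never race with the final free:
		 * the manager still holds 'ref' while it decides */
		if (atomic_dec_return(&obj->active) == 1)
			schedule_manager(obj);
	}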
*/ ++ afs_set_cell_timer(net, expire_delay); ++} + +- /* 'cell' may now be garbage collected. */ +- afs_set_cell_timer(net, expire_delay); ++/* ++ * Queue a cell for management, giving the workqueue a ref to hold. ++ */ ++void afs_queue_cell(struct afs_cell *cell) ++{ ++ afs_get_cell(cell); ++ if (!queue_work(afs_wq, &cell->manager)) ++ afs_put_cell(cell); + } + + /* +@@ -660,12 +697,10 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell) + * Manage a cell record, initialising and destroying it, maintaining its DNS + * records. + */ +-static void afs_manage_cell(struct work_struct *work) ++static void afs_manage_cell(struct afs_cell *cell) + { +- struct afs_cell *cell = container_of(work, struct afs_cell, manager); + struct afs_net *net = cell->net; +- bool deleted; +- int ret, usage; ++ int ret, active; + + _enter("%s", cell->name); + +@@ -674,14 +709,17 @@ again: + switch (cell->state) { + case AFS_CELL_INACTIVE: + case AFS_CELL_FAILED: +- write_seqlock(&net->cells_lock); +- usage = 1; +- deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0); +- if (deleted) ++ down_write(&net->cells_lock); ++ active = 1; ++ if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) { + rb_erase(&cell->net_node, &net->cells); +- write_sequnlock(&net->cells_lock); +- if (deleted) ++ smp_store_release(&cell->state, AFS_CELL_REMOVED); ++ } ++ up_write(&net->cells_lock); ++ if (cell->state == AFS_CELL_REMOVED) { ++ wake_up_var(&cell->state); + goto final_destruction; ++ } + if (cell->state == AFS_CELL_FAILED) + goto done; + smp_store_release(&cell->state, AFS_CELL_UNSET); +@@ -703,7 +741,7 @@ again: + goto again; + + case AFS_CELL_ACTIVE: +- if (atomic_read(&cell->usage) > 1) { ++ if (atomic_read(&cell->active) > 1) { + if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) { + ret = afs_update_cell(cell); + if (ret < 0) +@@ -716,13 +754,16 @@ again: + goto again; + + case AFS_CELL_DEACTIVATING: +- if (atomic_read(&cell->usage) > 1) ++ if (atomic_read(&cell->active) > 1) + goto reverse_deactivation; + afs_deactivate_cell(net, cell); + smp_store_release(&cell->state, AFS_CELL_INACTIVE); + wake_up_var(&cell->state); + goto again; + ++ case AFS_CELL_REMOVED: ++ goto done; ++ + default: + break; + } +@@ -748,9 +789,18 @@ done: + return; + + final_destruction: +- call_rcu(&cell->rcu, afs_cell_destroy); +- afs_dec_cells_outstanding(net); +- _leave(" [destruct %d]", atomic_read(&net->cells_outstanding)); ++ /* The root volume is pinning the cell */ ++ afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root); ++ cell->root_volume = NULL; ++ afs_put_cell(cell); ++} ++ ++static void afs_manage_cell_work(struct work_struct *work) ++{ ++ struct afs_cell *cell = container_of(work, struct afs_cell, manager); ++ ++ afs_manage_cell(cell); ++ afs_put_cell(cell); + } + + /* +@@ -779,26 +829,25 @@ void afs_manage_cells(struct work_struct *work) + * lack of use and cells whose DNS results have expired and dispatch + * their managers. 
+ */ +- read_seqlock_excl(&net->cells_lock); ++ down_read(&net->cells_lock); + + for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) { + struct afs_cell *cell = + rb_entry(cursor, struct afs_cell, net_node); +- unsigned usage; ++ unsigned active; + bool sched_cell = false; + +- usage = atomic_read(&cell->usage); +- _debug("manage %s %u", cell->name, usage); ++ active = atomic_read(&cell->active); ++ _debug("manage %s %u %u", cell->name, atomic_read(&cell->ref), active); + +- ASSERTCMP(usage, >=, 1); ++ ASSERTCMP(active, >=, 1); + + if (purging) { + if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) +- usage = atomic_dec_return(&cell->usage); +- ASSERTCMP(usage, ==, 1); ++ atomic_dec(&cell->active); + } + +- if (usage == 1) { ++ if (active == 1) { + struct afs_vlserver_list *vllist; + time64_t expire_at = cell->last_inactive; + +@@ -821,10 +870,10 @@ void afs_manage_cells(struct work_struct *work) + } + + if (sched_cell) +- queue_work(afs_wq, &cell->manager); ++ afs_queue_cell(cell); + } + +- read_sequnlock_excl(&net->cells_lock); ++ up_read(&net->cells_lock); + + /* Update the timer on the way out. We have to pass an increment on + * cells_outstanding in the namespace that we are in to the timer or +@@ -854,11 +903,11 @@ void afs_cell_purge(struct afs_net *net) + + _enter(""); + +- write_seqlock(&net->cells_lock); +- ws = rcu_access_pointer(net->ws_cell); +- RCU_INIT_POINTER(net->ws_cell, NULL); +- write_sequnlock(&net->cells_lock); +- afs_put_cell(net, ws); ++ down_write(&net->cells_lock); ++ ws = net->ws_cell; ++ net->ws_cell = NULL; ++ up_write(&net->cells_lock); ++ afs_unuse_cell(net, ws); + + _debug("del timer"); + if (del_timer_sync(&net->cells_timer)) +diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c +index 7b784af604fd9..da32797dd4257 100644 +--- a/fs/afs/dynroot.c ++++ b/fs/afs/dynroot.c +@@ -123,9 +123,9 @@ static int afs_probe_cell_name(struct dentry *dentry) + len--; + } + +- cell = afs_lookup_cell_rcu(net, name, len); ++ cell = afs_find_cell(net, name, len); + if (!IS_ERR(cell)) { +- afs_put_cell(net, cell); ++ afs_unuse_cell(net, cell); + return 0; + } + +@@ -179,7 +179,6 @@ static struct dentry *afs_lookup_atcell(struct dentry *dentry) + struct afs_cell *cell; + struct afs_net *net = afs_d2net(dentry); + struct dentry *ret; +- unsigned int seq = 0; + char *name; + int len; + +@@ -191,17 +190,13 @@ static struct dentry *afs_lookup_atcell(struct dentry *dentry) + if (!name) + goto out_p; + +- rcu_read_lock(); +- do { +- read_seqbegin_or_lock(&net->cells_lock, &seq); +- cell = rcu_dereference_raw(net->ws_cell); +- if (cell) { +- len = cell->name_len; +- memcpy(name, cell->name, len + 1); +- } +- } while (need_seqretry(&net->cells_lock, seq)); +- done_seqretry(&net->cells_lock, seq); +- rcu_read_unlock(); ++ down_read(&net->cells_lock); ++ cell = net->ws_cell; ++ if (cell) { ++ len = cell->name_len; ++ memcpy(name, cell->name, len + 1); ++ } ++ up_read(&net->cells_lock); + + ret = ERR_PTR(-ENOENT); + if (!cell) +diff --git a/fs/afs/internal.h b/fs/afs/internal.h +index e5f0446f27e5f..06e617ee4cd1e 100644 +--- a/fs/afs/internal.h ++++ b/fs/afs/internal.h +@@ -263,11 +263,11 @@ struct afs_net { + + /* Cell database */ + struct rb_root cells; +- struct afs_cell __rcu *ws_cell; ++ struct afs_cell *ws_cell; + struct work_struct cells_manager; + struct timer_list cells_timer; + atomic_t cells_outstanding; +- seqlock_t cells_lock; ++ struct rw_semaphore cells_lock; + struct mutex cells_alias_lock; + + struct mutex proc_cells_lock; +@@ -326,6 +326,7 @@ enum 
afs_cell_state { + AFS_CELL_DEACTIVATING, + AFS_CELL_INACTIVE, + AFS_CELL_FAILED, ++ AFS_CELL_REMOVED, + }; + + /* +@@ -363,7 +364,8 @@ struct afs_cell { + #endif + time64_t dns_expiry; /* Time AFSDB/SRV record expires */ + time64_t last_inactive; /* Time of last drop of usage count */ +- atomic_t usage; ++ atomic_t ref; /* Struct refcount */ ++ atomic_t active; /* Active usage counter */ + unsigned long flags; + #define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */ + #define AFS_CELL_FL_DO_LOOKUP 1 /* DNS lookup requested */ +@@ -917,11 +919,14 @@ static inline bool afs_cb_is_broken(unsigned int cb_break, + * cell.c + */ + extern int afs_cell_init(struct afs_net *, const char *); +-extern struct afs_cell *afs_lookup_cell_rcu(struct afs_net *, const char *, unsigned); ++extern struct afs_cell *afs_find_cell(struct afs_net *, const char *, unsigned); + extern struct afs_cell *afs_lookup_cell(struct afs_net *, const char *, unsigned, + const char *, bool); ++extern struct afs_cell *afs_use_cell(struct afs_cell *); ++extern void afs_unuse_cell(struct afs_net *, struct afs_cell *); + extern struct afs_cell *afs_get_cell(struct afs_cell *); +-extern void afs_put_cell(struct afs_net *, struct afs_cell *); ++extern void afs_put_cell(struct afs_cell *); ++extern void afs_queue_cell(struct afs_cell *); + extern void afs_manage_cells(struct work_struct *); + extern void afs_cells_timer(struct timer_list *); + extern void __net_exit afs_cell_purge(struct afs_net *); +diff --git a/fs/afs/main.c b/fs/afs/main.c +index 31b472f7c734c..accdd8970e7c0 100644 +--- a/fs/afs/main.c ++++ b/fs/afs/main.c +@@ -78,7 +78,7 @@ static int __net_init afs_net_init(struct net *net_ns) + mutex_init(&net->socket_mutex); + + net->cells = RB_ROOT; +- seqlock_init(&net->cells_lock); ++ init_rwsem(&net->cells_lock); + INIT_WORK(&net->cells_manager, afs_manage_cells); + timer_setup(&net->cells_timer, afs_cells_timer, 0); + +diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c +index 79bc5f1338edf..c69a0282960cc 100644 +--- a/fs/afs/mntpt.c ++++ b/fs/afs/mntpt.c +@@ -88,7 +88,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt) + ctx->force = true; + } + if (ctx->cell) { +- afs_put_cell(ctx->net, ctx->cell); ++ afs_unuse_cell(ctx->net, ctx->cell); + ctx->cell = NULL; + } + if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) { +@@ -124,7 +124,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt) + char *buf; + + if (src_as->cell) +- ctx->cell = afs_get_cell(src_as->cell); ++ ctx->cell = afs_use_cell(src_as->cell); + + if (size < 2 || size > PAGE_SIZE - 1) + return -EINVAL; +diff --git a/fs/afs/proc.c b/fs/afs/proc.c +index e8babb62ed442..76fbe0560cfb7 100644 +--- a/fs/afs/proc.c ++++ b/fs/afs/proc.c +@@ -38,7 +38,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v) + + if (v == SEQ_START_TOKEN) { + /* display header on line 1 */ +- seq_puts(m, "USE TTL SV ST NAME\n"); ++ seq_puts(m, "USE ACT TTL SV ST NAME\n"); + return 0; + } + +@@ -46,10 +46,11 @@ static int afs_proc_cells_show(struct seq_file *m, void *v) + vllist = rcu_dereference(cell->vl_servers); + + /* display one cell per line on subsequent lines */ +- seq_printf(m, "%3u %6lld %2u %2u %s\n", +- atomic_read(&cell->usage), ++ seq_printf(m, "%3u %3u %6lld %2u %2u %s\n", ++ atomic_read(&cell->ref), ++ atomic_read(&cell->active), + cell->dns_expiry - ktime_get_real_seconds(), +- vllist->nr_servers, ++ vllist ? 
vllist->nr_servers : 0, + cell->state, + cell->name); + return 0; +@@ -128,7 +129,7 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size) + } + + if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags)) +- afs_put_cell(net, cell); ++ afs_unuse_cell(net, cell); + } else { + goto inval; + } +@@ -154,13 +155,11 @@ static int afs_proc_rootcell_show(struct seq_file *m, void *v) + struct afs_net *net; + + net = afs_seq2net_single(m); +- if (rcu_access_pointer(net->ws_cell)) { +- rcu_read_lock(); +- cell = rcu_dereference(net->ws_cell); +- if (cell) +- seq_printf(m, "%s\n", cell->name); +- rcu_read_unlock(); +- } ++ down_read(&net->cells_lock); ++ cell = net->ws_cell; ++ if (cell) ++ seq_printf(m, "%s\n", cell->name); ++ up_read(&net->cells_lock); + return 0; + } + +diff --git a/fs/afs/super.c b/fs/afs/super.c +index b552357b1d137..e72c223f831d2 100644 +--- a/fs/afs/super.c ++++ b/fs/afs/super.c +@@ -294,7 +294,7 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param) + cellnamesz, cellnamesz, cellname ?: ""); + return PTR_ERR(cell); + } +- afs_put_cell(ctx->net, ctx->cell); ++ afs_unuse_cell(ctx->net, ctx->cell); + ctx->cell = cell; + } + +@@ -389,8 +389,8 @@ static int afs_validate_fc(struct fs_context *fc) + _debug("switch to alias"); + key_put(ctx->key); + ctx->key = NULL; +- cell = afs_get_cell(ctx->cell->alias_of); +- afs_put_cell(ctx->net, ctx->cell); ++ cell = afs_use_cell(ctx->cell->alias_of); ++ afs_unuse_cell(ctx->net, ctx->cell); + ctx->cell = cell; + goto reget_key; + } +@@ -508,7 +508,7 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc) + if (ctx->dyn_root) { + as->dyn_root = true; + } else { +- as->cell = afs_get_cell(ctx->cell); ++ as->cell = afs_use_cell(ctx->cell); + as->volume = afs_get_volume(ctx->volume, + afs_volume_trace_get_alloc_sbi); + } +@@ -521,7 +521,7 @@ static void afs_destroy_sbi(struct afs_super_info *as) + if (as) { + struct afs_net *net = afs_net(as->net_ns); + afs_put_volume(net, as->volume, afs_volume_trace_put_destroy_sbi); +- afs_put_cell(net, as->cell); ++ afs_unuse_cell(net, as->cell); + put_net(as->net_ns); + kfree(as); + } +@@ -607,7 +607,7 @@ static void afs_free_fc(struct fs_context *fc) + + afs_destroy_sbi(fc->s_fs_info); + afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_free_fc); +- afs_put_cell(ctx->net, ctx->cell); ++ afs_unuse_cell(ctx->net, ctx->cell); + key_put(ctx->key); + kfree(ctx); + } +@@ -634,9 +634,7 @@ static int afs_init_fs_context(struct fs_context *fc) + ctx->net = afs_net(fc->net_ns); + + /* Default to the workstation cell. */ +- rcu_read_lock(); +- cell = afs_lookup_cell_rcu(ctx->net, NULL, 0); +- rcu_read_unlock(); ++ cell = afs_find_cell(ctx->net, NULL, 0); + if (IS_ERR(cell)) + cell = NULL; + ctx->cell = cell; +diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c +index 5082ef04e99c5..ddb4cb67d0fd9 100644 +--- a/fs/afs/vl_alias.c ++++ b/fs/afs/vl_alias.c +@@ -177,7 +177,7 @@ static int afs_compare_cell_roots(struct afs_cell *cell) + + is_alias: + rcu_read_unlock(); +- cell->alias_of = afs_get_cell(p); ++ cell->alias_of = afs_use_cell(p); + return 1; + } + +@@ -247,18 +247,18 @@ static int afs_query_for_alias(struct afs_cell *cell, struct key *key) + continue; + if (p->root_volume) + continue; /* Ignore cells that have a root.cell volume. 
*/ +- afs_get_cell(p); ++ afs_use_cell(p); + mutex_unlock(&cell->net->proc_cells_lock); + + if (afs_query_for_alias_one(cell, key, p) != 0) + goto is_alias; + + if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) { +- afs_put_cell(cell->net, p); ++ afs_unuse_cell(cell->net, p); + return -ERESTARTSYS; + } + +- afs_put_cell(cell->net, p); ++ afs_unuse_cell(cell->net, p); + } + + mutex_unlock(&cell->net->proc_cells_lock); +diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c +index c0458c903b310..da3b072d4d638 100644 +--- a/fs/afs/vl_rotate.c ++++ b/fs/afs/vl_rotate.c +@@ -45,7 +45,7 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc) + cell->dns_expiry <= ktime_get_real_seconds()) { + dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count); + set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags); +- queue_work(afs_wq, &cell->manager); ++ afs_queue_cell(cell); + + if (cell->dns_source == DNS_RECORD_UNAVAILABLE) { + if (wait_var_event_interruptible( +diff --git a/fs/afs/volume.c b/fs/afs/volume.c +index 9bc0509e3634c..a838030e95634 100644 +--- a/fs/afs/volume.c ++++ b/fs/afs/volume.c +@@ -106,7 +106,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params, + return volume; + + error_1: +- afs_put_cell(params->net, volume->cell); ++ afs_put_cell(volume->cell); + kfree(volume); + error_0: + return ERR_PTR(ret); +@@ -228,7 +228,7 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume) + + afs_remove_volume_from_cell(volume); + afs_put_serverlist(net, rcu_access_pointer(volume->servers)); +- afs_put_cell(net, volume->cell); ++ afs_put_cell(volume->cell); + trace_afs_volume(volume->vid, atomic_read(&volume->usage), + afs_volume_trace_free); + kfree_rcu(volume, rcu); +diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h +index 219a09a2b7340..250b8cbaaf97a 100644 +--- a/fs/btrfs/extent-io-tree.h ++++ b/fs/btrfs/extent-io-tree.h +@@ -48,6 +48,7 @@ enum { + IO_TREE_INODE_FILE_EXTENT, + IO_TREE_LOG_CSUM_RANGE, + IO_TREE_SELFTEST, ++ IO_TREE_DEVICE_ALLOC_STATE, + }; + + struct extent_io_tree { +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 1997a7d67f22f..e61c298ce2b42 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -406,7 +406,7 @@ void __exit btrfs_cleanup_fs_uuids(void) + * Returned struct is not linked onto any lists and must be destroyed using + * btrfs_free_device. 
+ */ +-static struct btrfs_device *__alloc_device(void) ++static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info) + { + struct btrfs_device *dev; + +@@ -433,7 +433,8 @@ static struct btrfs_device *__alloc_device(void) + btrfs_device_data_ordered_init(dev); + INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); + INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); +- extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL); ++ extent_io_tree_init(fs_info, &dev->alloc_state, ++ IO_TREE_DEVICE_ALLOC_STATE, NULL); + + return dev; + } +@@ -6529,7 +6530,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, + if (WARN_ON(!devid && !fs_info)) + return ERR_PTR(-EINVAL); + +- dev = __alloc_device(); ++ dev = __alloc_device(fs_info); + if (IS_ERR(dev)) + return dev; + +diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c +index 689162e2e1755..3150c19cdc2fb 100644 +--- a/fs/cifs/asn1.c ++++ b/fs/cifs/asn1.c +@@ -530,8 +530,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_CTX) || (con != ASN1_CON) + || (tag != ASN1_EOC)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n", ++ cls, con, tag, end); + return 0; + } + +@@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_UNI) || (con != ASN1_CON) + || (tag != ASN1_SEQ)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n", ++ cls, con, tag, end); + return 0; + } + +@@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_CTX) || (con != ASN1_CON) + || (tag != ASN1_EOC)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n", ++ cls, con, tag, end); + return 0; + } + +@@ -564,8 +564,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_UNI) || (con != ASN1_CON) + || (tag != ASN1_SEQ)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n", ++ cls, con, tag, sequence_end); + return 0; + } + +diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c +index fcff14ef1c701..23b21e9436528 100644 +--- a/fs/cifs/cifsacl.c ++++ b/fs/cifs/cifsacl.c +@@ -338,7 +338,7 @@ invalidate_key: + goto out_key_put; + } + +-static int ++int + sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid, + struct cifs_fattr *fattr, uint sidtype) + { +@@ -359,7 +359,8 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid, + return -EIO; + } + +- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) { ++ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) || ++ (cifs_sb_master_tcon(cifs_sb)->posix_extensions)) { + uint32_t unix_id; + bool is_group; + +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h +index bb68cbf810740..24c6f36177bac 100644 +--- a/fs/cifs/cifsproto.h ++++ b/fs/cifs/cifsproto.h +@@ -209,6 +209,8 @@ extern int cifs_set_file_info(struct inode *inode, struct iattr *attrs, + extern int cifs_rename_pending_delete(const char *full_path, + struct dentry *dentry, + const unsigned int xid); ++extern int 
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid, ++ struct cifs_fattr *fattr, uint sidtype); + extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, + struct cifs_fattr *fattr, struct inode *inode, + bool get_mode_from_special_sid, +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index a5731dd6e6566..9817a31a39db6 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -3595,7 +3595,10 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) + */ + tcon->retry = volume_info->retry; + tcon->nocase = volume_info->nocase; +- tcon->nohandlecache = volume_info->nohandlecache; ++ if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) ++ tcon->nohandlecache = volume_info->nohandlecache; ++ else ++ tcon->nohandlecache = 1; + tcon->nodelete = volume_info->nodelete; + tcon->local_lease = volume_info->local_lease; + INIT_LIST_HEAD(&tcon->pending_opens); +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c +index 6df0922e7e304..709fb53e9fee1 100644 +--- a/fs/cifs/readdir.c ++++ b/fs/cifs/readdir.c +@@ -267,9 +267,8 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info, + if (reparse_file_needs_reval(fattr)) + fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; + +- /* TODO map SIDs */ +- fattr->cf_uid = cifs_sb->mnt_uid; +- fattr->cf_gid = cifs_sb->mnt_gid; ++ sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER); ++ sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP); + } + + static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info) +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index d44df8f95bcd4..09e1cd320ee56 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -3072,7 +3072,12 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb, + oparms.tcon = tcon; + oparms.desired_access = READ_CONTROL; + oparms.disposition = FILE_OPEN; +- oparms.create_options = cifs_create_options(cifs_sb, 0); ++ /* ++ * When querying an ACL, even if the file is a symlink we want to open ++ * the source not the target, and so the protocol requires that the ++ * client specify this flag when opening a reparse point ++ */ ++ oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT; + oparms.fid = &fid; + oparms.reconnect = false; + +@@ -3924,7 +3929,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, + if (rc) { + cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__, + enc ? 
"en" : "de"); +- return 0; ++ return rc; + } + + rc = smb3_crypto_aead_allocate(server); +@@ -4103,7 +4108,8 @@ smb3_is_transform_hdr(void *buf) + static int + decrypt_raw_data(struct TCP_Server_Info *server, char *buf, + unsigned int buf_data_size, struct page **pages, +- unsigned int npages, unsigned int page_data_size) ++ unsigned int npages, unsigned int page_data_size, ++ bool is_offloaded) + { + struct kvec iov[2]; + struct smb_rqst rqst = {NULL}; +@@ -4129,7 +4135,8 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf, + + memmove(buf, iov[1].iov_base, buf_data_size); + +- server->total_read = buf_data_size + page_data_size; ++ if (!is_offloaded) ++ server->total_read = buf_data_size + page_data_size; + + return rc; + } +@@ -4342,7 +4349,7 @@ static void smb2_decrypt_offload(struct work_struct *work) + struct mid_q_entry *mid; + + rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size, +- dw->ppages, dw->npages, dw->len); ++ dw->ppages, dw->npages, dw->len, true); + if (rc) { + cifs_dbg(VFS, "error decrypting rc=%d\n", rc); + goto free_pages; +@@ -4448,7 +4455,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid, + + non_offloaded_decrypt: + rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size, +- pages, npages, len); ++ pages, npages, len, false); + if (rc) + goto free_pages; + +@@ -4504,7 +4511,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server, + server->total_read += length; + + buf_size = pdu_length - sizeof(struct smb2_transform_hdr); +- length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0); ++ length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false); + if (length) + return length; + +diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c +index 2d73fd39ad96f..b92f345231780 100644 +--- a/fs/crypto/policy.c ++++ b/fs/crypto/policy.c +@@ -192,10 +192,15 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, + 32, 32)) + return false; + ++ /* ++ * IV_INO_LBLK_32 hashes the inode number, so in principle it can ++ * support any ino_bits. However, currently the inode number is gotten ++ * from inode::i_ino which is 'unsigned long'. So for now the ++ * implementation limit is 32 bits. ++ */ + if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) && +- /* This uses hashed inode numbers, so ino_bits doesn't matter. */ + !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32", +- INT_MAX, 32)) ++ 32, 32)) + return false; + + if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) { +diff --git a/fs/d_path.c b/fs/d_path.c +index 0f1fc1743302f..a69e2cd36e6e3 100644 +--- a/fs/d_path.c ++++ b/fs/d_path.c +@@ -102,6 +102,8 @@ restart: + + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { + struct mount *parent = READ_ONCE(mnt->mnt_parent); ++ struct mnt_namespace *mnt_ns; ++ + /* Escaped? 
*/ + if (dentry != vfsmnt->mnt_root) { + bptr = *buffer; +@@ -116,7 +118,9 @@ restart: + vfsmnt = &mnt->mnt; + continue; + } +- if (is_mounted(vfsmnt) && !is_anon_ns(mnt->mnt_ns)) ++ mnt_ns = READ_ONCE(mnt->mnt_ns); ++ /* open-coded is_mounted() to use local mnt_ns */ ++ if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns)) + error = 1; // absolute root + else + error = 2; // detached or not attached yet +diff --git a/fs/dlm/config.c b/fs/dlm/config.c +index 47f0b98b707f8..f33a7e4ae917b 100644 +--- a/fs/dlm/config.c ++++ b/fs/dlm/config.c +@@ -221,6 +221,7 @@ struct dlm_space { + struct list_head members; + struct mutex members_lock; + int members_count; ++ struct dlm_nodes *nds; + }; + + struct dlm_comms { +@@ -430,6 +431,7 @@ static struct config_group *make_space(struct config_group *g, const char *name) + INIT_LIST_HEAD(&sp->members); + mutex_init(&sp->members_lock); + sp->members_count = 0; ++ sp->nds = nds; + return &sp->group; + + fail: +@@ -451,6 +453,7 @@ static void drop_space(struct config_group *g, struct config_item *i) + static void release_space(struct config_item *i) + { + struct dlm_space *sp = config_item_to_space(i); ++ kfree(sp->nds); + kfree(sp); + } + +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 523e00d7b3924..69187b6205b2b 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -492,7 +492,7 @@ struct flex_groups { + + /* Flags which are mutually exclusive to DAX */ + #define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\ +- EXT4_JOURNAL_DATA_FL) ++ EXT4_JOURNAL_DATA_FL | EXT4_INLINE_DATA_FL) + + /* Mask out flags that are inappropriate for the given type of inode. */ + static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags) +diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c +index dbccf46f17709..37347ba868b70 100644 +--- a/fs/ext4/fsmap.c ++++ b/fs/ext4/fsmap.c +@@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb, + + /* Are we just counting mappings? */ + if (info->gfi_head->fmh_count == 0) { ++ if (info->gfi_head->fmh_entries == UINT_MAX) ++ return EXT4_QUERY_RANGE_ABORT; ++ + if (rec_fsblk > info->gfi_next_fsblk) + info->gfi_head->fmh_entries++; + +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 132c118d12e15..a8d99f676fb1f 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -4160,7 +4160,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, + struct ext4_buddy e4b; + int err; + int busy = 0; +- int free = 0; ++ int free, free_total = 0; + + mb_debug(sb, "discard preallocation for group %u\n", group); + if (list_empty(&grp->bb_prealloc_list)) +@@ -4188,8 +4188,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, + + INIT_LIST_HEAD(&list); + repeat: ++ free = 0; + ext4_lock_group(sb, group); +- this_cpu_inc(discard_pa_seq); + list_for_each_entry_safe(pa, tmp, + &grp->bb_prealloc_list, pa_group_list) { + spin_lock(&pa->pa_lock); +@@ -4206,6 +4206,9 @@ repeat: + /* seems this one can be freed ... */ + ext4_mb_mark_pa_deleted(sb, pa); + ++ if (!free) ++ this_cpu_inc(discard_pa_seq); ++ + /* we can trust pa_free ... */ + free += pa->pa_free; + +@@ -4215,22 +4218,6 @@ repeat: + list_add(&pa->u.pa_tmp_list, &list); + } + +- /* if we still need more blocks and some PAs were used, try again */ +- if (free < needed && busy) { +- busy = 0; +- ext4_unlock_group(sb, group); +- cond_resched(); +- goto repeat; +- } +- +- /* found anything to free? 
*/ +- if (list_empty(&list)) { +- BUG_ON(free != 0); +- mb_debug(sb, "Someone else may have freed PA for this group %u\n", +- group); +- goto out; +- } +- + /* now free all selected PAs */ + list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { + +@@ -4248,14 +4235,22 @@ repeat: + call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); + } + +-out: ++ free_total += free; ++ ++ /* if we still need more blocks and some PAs were used, try again */ ++ if (free_total < needed && busy) { ++ ext4_unlock_group(sb, group); ++ cond_resched(); ++ busy = 0; ++ goto repeat; ++ } + ext4_unlock_group(sb, group); + ext4_mb_unload_buddy(&e4b); + put_bh(bitmap_bh); + out_dbg: + mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", +- free, group, grp->bb_free); +- return free; ++ free_total, group, grp->bb_free); ++ return free_total; + } + + /* +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c +index 66969ae852b97..5195e083fc1e6 100644 +--- a/fs/f2fs/inode.c ++++ b/fs/f2fs/inode.c +@@ -287,6 +287,13 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page) + return false; + } + ++ if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) { ++ set_sbi_flag(sbi, SBI_NEED_FSCK); ++ f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off", ++ __func__, inode->i_ino); ++ return false; ++ } ++ + if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) && + fi->i_flags & F2FS_COMPR_FL && + F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c +index 88ed9969cc862..5fe7d8fa93801 100644 +--- a/fs/f2fs/sysfs.c ++++ b/fs/f2fs/sysfs.c +@@ -968,4 +968,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi) + } + kobject_del(&sbi->s_kobj); + kobject_put(&sbi->s_kobj); ++ wait_for_completion(&sbi->s_kobj_unregister); + } +diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c +index bcfc288dba3fb..b115e7d47fcec 100644 +--- a/fs/iomap/buffered-io.c ++++ b/fs/iomap/buffered-io.c +@@ -49,16 +49,8 @@ iomap_page_create(struct inode *inode, struct page *page) + if (iop || i_blocksize(inode) == PAGE_SIZE) + return iop; + +- iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL); +- atomic_set(&iop->read_count, 0); +- atomic_set(&iop->write_count, 0); ++ iop = kzalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL); + spin_lock_init(&iop->uptodate_lock); +- bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); +- +- /* +- * migrate_page_move_mapping() assumes that pages with private data have +- * their count elevated by 1. 
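A side note on the iomap_page_create() change above: the hunk collapses a kmalloc() followed by piecewise zeroing (atomic_set(), bitmap_zero()) into a single kzalloc(), the kernel's zero-initializing allocator. A minimal standalone C sketch of the same simplification, using hypothetical names and calloc() as the userspace analogue:

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative only; field names are hypothetical. */
    struct page_state {
            int read_count;
            int write_count;
            unsigned long uptodate[4];
    };

    /* Before: allocate, then zero each field by hand. */
    static struct page_state *page_state_create_old(void)
    {
            struct page_state *ps = malloc(sizeof(*ps));

            if (!ps)
                    return NULL;
            ps->read_count = 0;
            ps->write_count = 0;
            memset(ps->uptodate, 0, sizeof(ps->uptodate));
            return ps;
    }

    /* After: one zeroing allocation covers every field at once. */
    static struct page_state *page_state_create_new(void)
    {
            return calloc(1, sizeof(struct page_state));
    }

Besides being shorter, the zeroing allocator cannot drift out of sync when a new field is later added to the structure.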
+- */ + attach_page_private(page, iop); + return iop; + } +@@ -574,10 +566,10 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags, + loff_t block_start = pos & ~(block_size - 1); + loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1); + unsigned from = offset_in_page(pos), to = from + len, poff, plen; +- int status; + + if (PageUptodate(page)) + return 0; ++ ClearPageError(page); + + do { + iomap_adjust_read_range(inode, iop, &block_start, +@@ -594,14 +586,13 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags, + if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE)) + return -EIO; + zero_user_segments(page, poff, from, to, poff + plen); +- iomap_set_range_uptodate(page, poff, plen); +- continue; ++ } else { ++ int status = iomap_read_page_sync(block_start, page, ++ poff, plen, srcmap); ++ if (status) ++ return status; + } +- +- status = iomap_read_page_sync(block_start, page, poff, plen, +- srcmap); +- if (status) +- return status; ++ iomap_set_range_uptodate(page, poff, plen); + } while ((block_start += plen) < block_end); + + return 0; +diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c +index c1aafb2ab9907..9519113ebc352 100644 +--- a/fs/iomap/direct-io.c ++++ b/fs/iomap/direct-io.c +@@ -388,6 +388,16 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, + return iomap_dio_bio_actor(inode, pos, length, dio, iomap); + case IOMAP_INLINE: + return iomap_dio_inline_actor(inode, pos, length, dio, iomap); ++ case IOMAP_DELALLOC: ++ /* ++ * DIO is not serialised against mmap() access at all, and so ++ * if the page_mkwrite occurs between the writeback and the ++ * iomap_apply() call in the DIO path, then it will see the ++ * DELALLOC block that the page-mkwrite allocated. ++ */ ++ pr_warn_ratelimited("Direct I/O collision with buffered writes! 
File: %pD4 Comm: %.20s\n", ++ dio->iocb->ki_filp, current->comm); ++ return -EIO; + default: + WARN_ON_ONCE(1); + return -EIO; +diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c +index 524812984e2d4..009987e690207 100644 +--- a/fs/nfs/fs_context.c ++++ b/fs/nfs/fs_context.c +@@ -94,6 +94,7 @@ enum { + static const struct constant_table nfs_param_enums_local_lock[] = { + { "all", Opt_local_lock_all }, + { "flock", Opt_local_lock_flock }, ++ { "posix", Opt_local_lock_posix }, + { "none", Opt_local_lock_none }, + {} + }; +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c +index fdfc77486acee..984938024011b 100644 +--- a/fs/nfs/nfs4file.c ++++ b/fs/nfs/nfs4file.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include "delegation.h" + #include "internal.h" + #include "iostat.h" +@@ -314,9 +315,8 @@ out: + static int read_name_gen = 1; + #define SSC_READ_NAME_BODY "ssc_read_%d" + +-struct file * +-nfs42_ssc_open(struct vfsmount *ss_mnt, struct nfs_fh *src_fh, +- nfs4_stateid *stateid) ++static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt, ++ struct nfs_fh *src_fh, nfs4_stateid *stateid) + { + struct nfs_fattr fattr; + struct file *filep, *res; +@@ -398,14 +398,40 @@ out_filep: + fput(filep); + goto out_free_name; + } +-EXPORT_SYMBOL_GPL(nfs42_ssc_open); +-void nfs42_ssc_close(struct file *filep) ++ ++static void __nfs42_ssc_close(struct file *filep) + { + struct nfs_open_context *ctx = nfs_file_open_context(filep); + + ctx->state->flags = 0; + } +-EXPORT_SYMBOL_GPL(nfs42_ssc_close); ++ ++static const struct nfs4_ssc_client_ops nfs4_ssc_clnt_ops_tbl = { ++ .sco_open = __nfs42_ssc_open, ++ .sco_close = __nfs42_ssc_close, ++}; ++ ++/** ++ * nfs42_ssc_register_ops - Wrapper to register NFS_V4 ops in nfs_common ++ * ++ * Return values: ++ * None ++ */ ++void nfs42_ssc_register_ops(void) ++{ ++ nfs42_ssc_register(&nfs4_ssc_clnt_ops_tbl); ++} ++ ++/** ++ * nfs42_ssc_unregister_ops - wrapper to un-register NFS_V4 ops in nfs_common ++ * ++ * Return values: ++ * None. 
++ */ ++void nfs42_ssc_unregister_ops(void) ++{ ++ nfs42_ssc_unregister(&nfs4_ssc_clnt_ops_tbl); ++} + #endif /* CONFIG_NFS_V4_2 */ + + const struct file_operations nfs4_file_operations = { +diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c +index 0c1ab846b83dd..93f5c1678ec29 100644 +--- a/fs/nfs/nfs4super.c ++++ b/fs/nfs/nfs4super.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include "delegation.h" + #include "internal.h" + #include "nfs4_fs.h" +@@ -279,6 +280,9 @@ static int __init init_nfs_v4(void) + if (err) + goto out2; + ++#ifdef CONFIG_NFS_V4_2 ++ nfs42_ssc_register_ops(); ++#endif + register_nfs_version(&nfs_v4); + return 0; + out2: +@@ -297,6 +301,7 @@ static void __exit exit_nfs_v4(void) + unregister_nfs_version(&nfs_v4); + #ifdef CONFIG_NFS_V4_2 + nfs4_xattr_cache_exit(); ++ nfs42_ssc_unregister_ops(); + #endif + nfs4_unregister_sysctl(); + nfs_idmap_quit(); +diff --git a/fs/nfs/super.c b/fs/nfs/super.c +index 7a70287f21a2c..f7dad8227a5f4 100644 +--- a/fs/nfs/super.c ++++ b/fs/nfs/super.c +@@ -57,6 +57,7 @@ + #include + + #include ++#include + + #include "nfs4_fs.h" + #include "callback.h" +@@ -85,6 +86,10 @@ const struct super_operations nfs_sops = { + }; + EXPORT_SYMBOL_GPL(nfs_sops); + ++static const struct nfs_ssc_client_ops nfs_ssc_clnt_ops_tbl = { ++ .sco_sb_deactive = nfs_sb_deactive, ++}; ++ + #if IS_ENABLED(CONFIG_NFS_V4) + static int __init register_nfs4_fs(void) + { +@@ -106,6 +111,16 @@ static void unregister_nfs4_fs(void) + } + #endif + ++static void nfs_ssc_register_ops(void) ++{ ++ nfs_ssc_register(&nfs_ssc_clnt_ops_tbl); ++} ++ ++static void nfs_ssc_unregister_ops(void) ++{ ++ nfs_ssc_unregister(&nfs_ssc_clnt_ops_tbl); ++} ++ + static struct shrinker acl_shrinker = { + .count_objects = nfs_access_cache_count, + .scan_objects = nfs_access_cache_scan, +@@ -133,6 +148,7 @@ int __init register_nfs_fs(void) + ret = register_shrinker(&acl_shrinker); + if (ret < 0) + goto error_3; ++ nfs_ssc_register_ops(); + return 0; + error_3: + nfs_unregister_sysctl(); +@@ -152,6 +168,7 @@ void __exit unregister_nfs_fs(void) + unregister_shrinker(&acl_shrinker); + nfs_unregister_sysctl(); + unregister_nfs4_fs(); ++ nfs_ssc_unregister_ops(); + unregister_filesystem(&nfs_fs_type); + } + +diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile +index 4bebe834c0091..fa82f5aaa6d95 100644 +--- a/fs/nfs_common/Makefile ++++ b/fs/nfs_common/Makefile +@@ -7,3 +7,4 @@ obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o + nfs_acl-objs := nfsacl.o + + obj-$(CONFIG_GRACE_PERIOD) += grace.o ++obj-$(CONFIG_GRACE_PERIOD) += nfs_ssc.o +diff --git a/fs/nfs_common/nfs_ssc.c b/fs/nfs_common/nfs_ssc.c +new file mode 100644 +index 0000000000000..f43bbb3739134 +--- /dev/null ++++ b/fs/nfs_common/nfs_ssc.c +@@ -0,0 +1,94 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * fs/nfs_common/nfs_ssc_comm.c ++ * ++ * Helper for knfsd's SSC to access ops in NFS client modules ++ * ++ * Author: Dai Ngo ++ * ++ * Copyright (c) 2020, Oracle and/or its affiliates. 
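The new fs/nfs_common/nfs_ssc.c whose body follows implements a small registration table: the NFS client installs function pointers at module init, and knfsd calls through the table, so the inter-server-copy code no longer needs a hard link-time dependency on the NFS client (which is what lets the Kconfig hunk further down drop the NFS_FS=y requirement). A minimal sketch of the pattern, with hypothetical names:

    #include <stddef.h>

    struct provider_ops {
            void (*deactivate)(void *obj);
    };

    /* The shared table lives in a module both sides can reach. */
    static const struct provider_ops *registered_ops;

    void provider_register(const struct provider_ops *ops)
    {
            registered_ops = ops;
    }

    /* Only the current owner may unregister, as in nfs42_ssc_unregister(). */
    void provider_unregister(const struct provider_ops *ops)
    {
            if (registered_ops != ops)
                    return;
            registered_ops = NULL;
    }

    /* Callers must tolerate an absent provider. */
    void consumer_deactivate(void *obj)
    {
            if (registered_ops)
                    registered_ops->deactivate(obj);
    }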
++ */ ++ ++#include ++#include ++#include ++#include "../nfs/nfs4_fs.h" ++ ++MODULE_LICENSE("GPL"); ++ ++struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl; ++EXPORT_SYMBOL_GPL(nfs_ssc_client_tbl); ++ ++#ifdef CONFIG_NFS_V4_2 ++/** ++ * nfs42_ssc_register - install the NFS_V4 client ops in the nfs_ssc_client_tbl ++ * @ops: NFS_V4 ops to be installed ++ * ++ * Return values: ++ * None ++ */ ++void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops) ++{ ++ nfs_ssc_client_tbl.ssc_nfs4_ops = ops; ++} ++EXPORT_SYMBOL_GPL(nfs42_ssc_register); ++ ++/** ++ * nfs42_ssc_unregister - uninstall the NFS_V4 client ops from ++ * the nfs_ssc_client_tbl ++ * @ops: ops to be uninstalled ++ * ++ * Return values: ++ * None ++ */ ++void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops) ++{ ++ if (nfs_ssc_client_tbl.ssc_nfs4_ops != ops) ++ return; ++ ++ nfs_ssc_client_tbl.ssc_nfs4_ops = NULL; ++} ++EXPORT_SYMBOL_GPL(nfs42_ssc_unregister); ++#endif /* CONFIG_NFS_V4_2 */ ++ ++#ifdef CONFIG_NFS_V4_2 ++/** ++ * nfs_ssc_register - install the NFS_FS client ops in the nfs_ssc_client_tbl ++ * @ops: NFS_FS ops to be installed ++ * ++ * Return values: ++ * None ++ */ ++void nfs_ssc_register(const struct nfs_ssc_client_ops *ops) ++{ ++ nfs_ssc_client_tbl.ssc_nfs_ops = ops; ++} ++EXPORT_SYMBOL_GPL(nfs_ssc_register); ++ ++/** ++ * nfs_ssc_unregister - uninstall the NFS_FS client ops from ++ * the nfs_ssc_client_tbl ++ * @ops: ops to be uninstalled ++ * ++ * Return values: ++ * None ++ */ ++void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops) ++{ ++ if (nfs_ssc_client_tbl.ssc_nfs_ops != ops) ++ return; ++ nfs_ssc_client_tbl.ssc_nfs_ops = NULL; ++} ++EXPORT_SYMBOL_GPL(nfs_ssc_unregister); ++ ++#else ++void nfs_ssc_register(const struct nfs_ssc_client_ops *ops) ++{ ++} ++EXPORT_SYMBOL_GPL(nfs_ssc_register); ++ ++void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops) ++{ ++} ++EXPORT_SYMBOL_GPL(nfs_ssc_unregister); ++#endif /* CONFIG_NFS_V4_2 */ +diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig +index 99d2cae91bd68..f368f3215f88f 100644 +--- a/fs/nfsd/Kconfig ++++ b/fs/nfsd/Kconfig +@@ -136,7 +136,7 @@ config NFSD_FLEXFILELAYOUT + + config NFSD_V4_2_INTER_SSC + bool "NFSv4.2 inter server to server COPY" +- depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2 && NFS_FS=y ++ depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2 + help + This option enables support for NFSv4.2 inter server to + server copy where the destination server calls the NFSv4.2 +diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c +index c8b9d2667ee6f..3c6c2f7d1688b 100644 +--- a/fs/nfsd/filecache.c ++++ b/fs/nfsd/filecache.c +@@ -889,7 +889,7 @@ nfsd_file_find_locked(struct inode *inode, unsigned int may_flags, + + hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head, + nf_node, lockdep_is_held(&nfsd_file_hashtbl[hashval].nfb_lock)) { +- if ((need & nf->nf_may) != need) ++ if (nf->nf_may != need) + continue; + if (nf->nf_inode != inode) + continue; +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c +index eaf50eafa9359..84e10aef14175 100644 +--- a/fs/nfsd/nfs4proc.c ++++ b/fs/nfsd/nfs4proc.c +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + + #include "idmap.h" + #include "cache.h" +@@ -1247,7 +1248,7 @@ out_err: + static void + nfsd4_interssc_disconnect(struct vfsmount *ss_mnt) + { +- nfs_sb_deactive(ss_mnt->mnt_sb); ++ nfs_do_sb_deactive(ss_mnt->mnt_sb); + mntput(ss_mnt); + } + +diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c +index 9bb9f0952b186..caf563981532b 100644 +--- a/fs/ntfs/inode.c ++++ b/fs/ntfs/inode.c 
+@@ -1810,6 +1810,12 @@ int ntfs_read_inode_mount(struct inode *vi) + brelse(bh); + } + ++ if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) { ++ ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.", ++ le32_to_cpu(m->bytes_allocated), vol->mft_record_size); ++ goto err_out; ++ } ++ + /* Apply the mst fixups. */ + if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) { + /* FIXME: Try to use the $MFTMirr now. */ +diff --git a/fs/proc/base.c b/fs/proc/base.c +index 617db4e0faa09..aa69c35d904ca 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -1055,7 +1055,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count, + + static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) + { +- static DEFINE_MUTEX(oom_adj_mutex); + struct mm_struct *mm = NULL; + struct task_struct *task; + int err = 0; +@@ -1095,7 +1094,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) + struct task_struct *p = find_lock_task_mm(task); + + if (p) { +- if (atomic_read(&p->mm->mm_users) > 1) { ++ if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) { + mm = p->mm; + mmgrab(mm); + } +diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c +index 58fc2a7c7fd19..e69a2bfdd81c0 100644 +--- a/fs/quota/quota_v2.c ++++ b/fs/quota/quota_v2.c +@@ -282,6 +282,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot) + d->dqb_curspace = cpu_to_le64(m->dqb_curspace); + d->dqb_btime = cpu_to_le64(m->dqb_btime); + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id)); ++ d->dqb_pad = 0; + if (qtree_entry_unused(info, dp)) + d->dqb_itime = cpu_to_le64(1); + } +diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c +index 4146954549560..355523f4a4bf3 100644 +--- a/fs/ramfs/file-nommu.c ++++ b/fs/ramfs/file-nommu.c +@@ -224,7 +224,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, + if (!pages) + goto out_free; + +- nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages); ++ nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages); + if (nr != lpages) + goto out_free_pages; /* leave if some pages were missing */ + +diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c +index e43fed96704d8..c76d563dec0e1 100644 +--- a/fs/reiserfs/inode.c ++++ b/fs/reiserfs/inode.c +@@ -2159,7 +2159,8 @@ out_end_trans: + out_inserted_sd: + clear_nlink(inode); + th->t_trans_id = 0; /* so the caller can't use this handle later */ +- unlock_new_inode(inode); /* OK to do even if we hadn't locked it */ ++ if (inode->i_state & I_NEW) ++ unlock_new_inode(inode); + iput(inode); + return err; + } +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c +index a6bce5b1fb1dc..1b9c7a387dc71 100644 +--- a/fs/reiserfs/super.c ++++ b/fs/reiserfs/super.c +@@ -1258,6 +1258,10 @@ static int reiserfs_parse_options(struct super_block *s, + "turned on."); + return 0; + } ++ if (qf_names[qtype] != ++ REISERFS_SB(s)->s_qf_names[qtype]) ++ kfree(qf_names[qtype]); ++ qf_names[qtype] = NULL; + if (*arg) { /* Some filename specified? 
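The quota_v2 hunk above (d->dqb_pad = 0) is the classic fix for leaking stale memory through a reserved field of an on-disk structure. A hedged sketch of the general pattern with a hypothetical record layout; zeroing the whole record before filling it has the same effect as zeroing each reserved field individually:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical on-disk record; "pad" must never carry stale bytes. */
    struct disk_rec {
            uint64_t used;
            uint32_t id;
            uint32_t pad;
    };

    static void fill_disk_rec(struct disk_rec *d, uint64_t used, uint32_t id)
    {
            memset(d, 0, sizeof(*d)); /* also clears compiler padding */
            d->used = used;
            d->id = id;
    }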
*/ + if (REISERFS_SB(s)->s_qf_names[qtype] + && strcmp(REISERFS_SB(s)->s_qf_names[qtype], +@@ -1287,10 +1291,6 @@ static int reiserfs_parse_options(struct super_block *s, + else + *mount_options |= 1 << REISERFS_GRPQUOTA; + } else { +- if (qf_names[qtype] != +- REISERFS_SB(s)->s_qf_names[qtype]) +- kfree(qf_names[qtype]); +- qf_names[qtype] = NULL; + if (qtype == USRQUOTA) + *mount_options &= ~(1 << REISERFS_USRQUOTA); + else +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index adaba8e8b326e..566118417e562 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -139,21 +139,24 @@ void udf_evict_inode(struct inode *inode) + struct udf_inode_info *iinfo = UDF_I(inode); + int want_delete = 0; + +- if (!inode->i_nlink && !is_bad_inode(inode)) { +- want_delete = 1; +- udf_setsize(inode, 0); +- udf_update_inode(inode, IS_SYNC(inode)); ++ if (!is_bad_inode(inode)) { ++ if (!inode->i_nlink) { ++ want_delete = 1; ++ udf_setsize(inode, 0); ++ udf_update_inode(inode, IS_SYNC(inode)); ++ } ++ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && ++ inode->i_size != iinfo->i_lenExtents) { ++ udf_warn(inode->i_sb, ++ "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n", ++ inode->i_ino, inode->i_mode, ++ (unsigned long long)inode->i_size, ++ (unsigned long long)iinfo->i_lenExtents); ++ } + } + truncate_inode_pages_final(&inode->i_data); + invalidate_inode_buffers(inode); + clear_inode(inode); +- if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && +- inode->i_size != iinfo->i_lenExtents) { +- udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n", +- inode->i_ino, inode->i_mode, +- (unsigned long long)inode->i_size, +- (unsigned long long)iinfo->i_lenExtents); +- } + kfree(iinfo->i_ext.i_data); + iinfo->i_ext.i_data = NULL; + udf_clear_extent_cache(inode); +diff --git a/fs/udf/super.c b/fs/udf/super.c +index 1c42f544096d8..a03b8ce5ef0fd 100644 +--- a/fs/udf/super.c ++++ b/fs/udf/super.c +@@ -1353,6 +1353,12 @@ static int udf_load_sparable_map(struct super_block *sb, + (int)spm->numSparingTables); + return -EIO; + } ++ if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) { ++ udf_err(sb, "error loading logical volume descriptor: " ++ "Too big sparing table size (%u)\n", ++ le32_to_cpu(spm->sizeSparingTable)); ++ return -EIO; ++ } + + for (i = 0; i < spm->numSparingTables; i++) { + loc = le32_to_cpu(spm->locSparingTable[i]); +diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c +index 1d9fa8a300f15..6c1aba16113c5 100644 +--- a/fs/xfs/libxfs/xfs_rtbitmap.c ++++ b/fs/xfs/libxfs/xfs_rtbitmap.c +@@ -1018,7 +1018,6 @@ xfs_rtalloc_query_range( + struct xfs_mount *mp = tp->t_mountp; + xfs_rtblock_t rtstart; + xfs_rtblock_t rtend; +- xfs_rtblock_t rem; + int is_free; + int error = 0; + +@@ -1027,13 +1026,12 @@ xfs_rtalloc_query_range( + if (low_rec->ar_startext >= mp->m_sb.sb_rextents || + low_rec->ar_startext == high_rec->ar_startext) + return 0; +- if (high_rec->ar_startext > mp->m_sb.sb_rextents) +- high_rec->ar_startext = mp->m_sb.sb_rextents; ++ high_rec->ar_startext = min(high_rec->ar_startext, ++ mp->m_sb.sb_rextents - 1); + + /* Iterate the bitmap, looking for discrepancies. */ + rtstart = low_rec->ar_startext; +- rem = high_rec->ar_startext - rtstart; +- while (rem) { ++ while (rtstart <= high_rec->ar_startext) { + /* Is the first block free? 
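The xfs_rtalloc_query_range() hunk above replaces a separately maintained remainder counter with a clamped, inclusive upper bound: the caller-supplied high key is first limited to the last valid extent, then the loop simply runs while rtstart <= high. A small sketch of the same shape, all names hypothetical:

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Visit every index in [low, high], clamped to nr_valid entries. */
    static int query_range(unsigned long low, unsigned long high,
                           unsigned long nr_valid,
                           int (*visit)(unsigned long))
    {
            unsigned long pos;
            int ret = 0;

            if (low >= nr_valid)
                    return 0;
            high = MIN(high, nr_valid - 1); /* never scan past the end */

            for (pos = low; pos <= high && ret == 0; pos++)
                    ret = visit(pos);
            return ret;
    }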
*/ + error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend, + &is_free); +@@ -1042,7 +1040,7 @@ xfs_rtalloc_query_range( + + /* How long does the extent go for? */ + error = xfs_rtfind_forw(mp, tp, rtstart, +- high_rec->ar_startext - 1, &rtend); ++ high_rec->ar_startext, &rtend); + if (error) + break; + +@@ -1055,7 +1053,6 @@ xfs_rtalloc_query_range( + break; + } + +- rem -= rtend - rtstart + 1; + rtstart = rtend + 1; + } + +diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c +index 8f0457d67d779..de2772394de21 100644 +--- a/fs/xfs/xfs_buf_item_recover.c ++++ b/fs/xfs/xfs_buf_item_recover.c +@@ -719,6 +719,8 @@ xlog_recover_get_buf_lsn( + case XFS_ABTC_MAGIC: + case XFS_RMAP_CRC_MAGIC: + case XFS_REFC_CRC_MAGIC: ++ case XFS_FIBT_CRC_MAGIC: ++ case XFS_FIBT_MAGIC: + case XFS_IBT_CRC_MAGIC: + case XFS_IBT_MAGIC: { + struct xfs_btree_block *btb = blk; +diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c +index a29f78a663ca5..3d1b951247440 100644 +--- a/fs/xfs/xfs_file.c ++++ b/fs/xfs/xfs_file.c +@@ -1008,6 +1008,21 @@ xfs_file_fadvise( + return ret; + } + ++/* Does this file, inode, or mount want synchronous writes? */ ++static inline bool xfs_file_sync_writes(struct file *filp) ++{ ++ struct xfs_inode *ip = XFS_I(file_inode(filp)); ++ ++ if (ip->i_mount->m_flags & XFS_MOUNT_WSYNC) ++ return true; ++ if (filp->f_flags & (__O_SYNC | O_DSYNC)) ++ return true; ++ if (IS_SYNC(file_inode(filp))) ++ return true; ++ ++ return false; ++} ++ + STATIC loff_t + xfs_file_remap_range( + struct file *file_in, +@@ -1065,7 +1080,7 @@ xfs_file_remap_range( + if (ret) + goto out_unlock; + +- if (mp->m_flags & XFS_MOUNT_WSYNC) ++ if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out)) + xfs_log_force_inode(dest); + out_unlock: + xfs_iunlock2_io_mmap(src, dest); +diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c +index 4eebcec4aae6c..9ce5e7d5bf8f2 100644 +--- a/fs/xfs/xfs_fsmap.c ++++ b/fs/xfs/xfs_fsmap.c +@@ -26,7 +26,7 @@ + #include "xfs_rtalloc.h" + + /* Convert an xfs_fsmap to an fsmap. */ +-void ++static void + xfs_fsmap_from_internal( + struct fsmap *dest, + struct xfs_fsmap *src) +@@ -155,8 +155,7 @@ xfs_fsmap_owner_from_rmap( + /* getfsmap query state */ + struct xfs_getfsmap_info { + struct xfs_fsmap_head *head; +- xfs_fsmap_format_t formatter; /* formatting fn */ +- void *format_arg; /* format buffer */ ++ struct fsmap *fsmap_recs; /* mapping records */ + struct xfs_buf *agf_bp; /* AGF, for refcount queries */ + xfs_daddr_t next_daddr; /* next daddr we expect */ + u64 missing_owner; /* owner of holes */ +@@ -224,6 +223,20 @@ xfs_getfsmap_is_shared( + return 0; + } + ++static inline void ++xfs_getfsmap_format( ++ struct xfs_mount *mp, ++ struct xfs_fsmap *xfm, ++ struct xfs_getfsmap_info *info) ++{ ++ struct fsmap *rec; ++ ++ trace_xfs_getfsmap_mapping(mp, xfm); ++ ++ rec = &info->fsmap_recs[info->head->fmh_entries++]; ++ xfs_fsmap_from_internal(rec, xfm); ++} ++ + /* + * Format a reverse mapping for getfsmap, having translated rm_startblock + * into the appropriate daddr units. +@@ -256,6 +269,9 @@ xfs_getfsmap_helper( + + /* Are we just counting mappings? 
*/ + if (info->head->fmh_count == 0) { ++ if (info->head->fmh_entries == UINT_MAX) ++ return -ECANCELED; ++ + if (rec_daddr > info->next_daddr) + info->head->fmh_entries++; + +@@ -285,10 +301,7 @@ xfs_getfsmap_helper( + fmr.fmr_offset = 0; + fmr.fmr_length = rec_daddr - info->next_daddr; + fmr.fmr_flags = FMR_OF_SPECIAL_OWNER; +- error = info->formatter(&fmr, info->format_arg); +- if (error) +- return error; +- info->head->fmh_entries++; ++ xfs_getfsmap_format(mp, &fmr, info); + } + + if (info->last) +@@ -320,11 +333,8 @@ xfs_getfsmap_helper( + if (shared) + fmr.fmr_flags |= FMR_OF_SHARED; + } +- error = info->formatter(&fmr, info->format_arg); +- if (error) +- return error; +- info->head->fmh_entries++; + ++ xfs_getfsmap_format(mp, &fmr, info); + out: + rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount); + if (info->next_daddr < rec_daddr) +@@ -792,11 +802,11 @@ xfs_getfsmap_check_keys( + #endif /* CONFIG_XFS_RT */ + + /* +- * Get filesystem's extents as described in head, and format for +- * output. Calls formatter to fill the user's buffer until all +- * extents are mapped, until the passed-in head->fmh_count slots have +- * been filled, or until the formatter short-circuits the loop, if it +- * is tracking filled-in extents on its own. ++ * Get filesystem's extents as described in head, and format for output. Fills ++ * in the supplied records array until there are no more reverse mappings to ++ * return or head.fmh_entries == head.fmh_count. In the second case, this ++ * function returns -ECANCELED to indicate that more records would have been ++ * returned. + * + * Key to Confusion + * ---------------- +@@ -816,8 +826,7 @@ int + xfs_getfsmap( + struct xfs_mount *mp, + struct xfs_fsmap_head *head, +- xfs_fsmap_format_t formatter, +- void *arg) ++ struct fsmap *fsmap_recs) + { + struct xfs_trans *tp = NULL; + struct xfs_fsmap dkeys[2]; /* per-dev keys */ +@@ -892,8 +901,7 @@ xfs_getfsmap( + + info.next_daddr = head->fmh_keys[0].fmr_physical + + head->fmh_keys[0].fmr_length; +- info.formatter = formatter; +- info.format_arg = arg; ++ info.fsmap_recs = fsmap_recs; + info.head = head; + + /* +diff --git a/fs/xfs/xfs_fsmap.h b/fs/xfs/xfs_fsmap.h +index c6c57739b8626..a0775788e7b13 100644 +--- a/fs/xfs/xfs_fsmap.h ++++ b/fs/xfs/xfs_fsmap.h +@@ -27,13 +27,9 @@ struct xfs_fsmap_head { + struct xfs_fsmap fmh_keys[2]; /* low and high keys */ + }; + +-void xfs_fsmap_from_internal(struct fsmap *dest, struct xfs_fsmap *src); + void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src); + +-/* fsmap to userspace formatter - copy to user & advance pointer */ +-typedef int (*xfs_fsmap_format_t)(struct xfs_fsmap *, void *); +- + int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head, +- xfs_fsmap_format_t formatter, void *arg); ++ struct fsmap *out_recs); + + #endif /* __XFS_FSMAP_H__ */ +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c +index 6f22a66777cd0..b0882f8a787f1 100644 +--- a/fs/xfs/xfs_ioctl.c ++++ b/fs/xfs/xfs_ioctl.c +@@ -1715,39 +1715,17 @@ out_free_buf: + return error; + } + +-struct getfsmap_info { +- struct xfs_mount *mp; +- struct fsmap_head __user *data; +- unsigned int idx; +- __u32 last_flags; +-}; +- +-STATIC int +-xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv) +-{ +- struct getfsmap_info *info = priv; +- struct fsmap fm; +- +- trace_xfs_getfsmap_mapping(info->mp, xfm); +- +- info->last_flags = xfm->fmr_flags; +- xfs_fsmap_from_internal(&fm, xfm); +- if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm, +- sizeof(struct fsmap))) +- return 
-EFAULT; +- +- return 0; +-} +- + STATIC int + xfs_ioc_getfsmap( + struct xfs_inode *ip, + struct fsmap_head __user *arg) + { +- struct getfsmap_info info = { NULL }; + struct xfs_fsmap_head xhead = {0}; + struct fsmap_head head; +- bool aborted = false; ++ struct fsmap *recs; ++ unsigned int count; ++ __u32 last_flags = 0; ++ bool done = false; + int error; + + if (copy_from_user(&head, arg, sizeof(struct fsmap_head))) +@@ -1759,38 +1737,112 @@ xfs_ioc_getfsmap( + sizeof(head.fmh_keys[1].fmr_reserved))) + return -EINVAL; + ++ /* ++ * Use an internal memory buffer so that we don't have to copy fsmap ++ * data to userspace while holding locks. Start by trying to allocate ++ * up to 128k for the buffer, but fall back to a single page if needed. ++ */ ++ count = min_t(unsigned int, head.fmh_count, ++ 131072 / sizeof(struct fsmap)); ++ recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL); ++ if (!recs) { ++ count = min_t(unsigned int, head.fmh_count, ++ PAGE_SIZE / sizeof(struct fsmap)); ++ recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL); ++ if (!recs) ++ return -ENOMEM; ++ } ++ + xhead.fmh_iflags = head.fmh_iflags; +- xhead.fmh_count = head.fmh_count; + xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]); + xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]); + + trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]); + trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]); + +- info.mp = ip->i_mount; +- info.data = arg; +- error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info); +- if (error == -ECANCELED) { +- error = 0; +- aborted = true; +- } else if (error) +- return error; ++ head.fmh_entries = 0; ++ do { ++ struct fsmap __user *user_recs; ++ struct fsmap *last_rec; ++ ++ user_recs = &arg->fmh_recs[head.fmh_entries]; ++ xhead.fmh_entries = 0; ++ xhead.fmh_count = min_t(unsigned int, count, ++ head.fmh_count - head.fmh_entries); ++ ++ /* Run query, record how many entries we got. */ ++ error = xfs_getfsmap(ip->i_mount, &xhead, recs); ++ switch (error) { ++ case 0: ++ /* ++ * There are no more records in the result set. Copy ++ * whatever we got to userspace and break out. ++ */ ++ done = true; ++ break; ++ case -ECANCELED: ++ /* ++ * The internal memory buffer is full. Copy whatever ++ * records we got to userspace and go again if we have ++ * not yet filled the userspace buffer. ++ */ ++ error = 0; ++ break; ++ default: ++ goto out_free; ++ } ++ head.fmh_entries += xhead.fmh_entries; ++ head.fmh_oflags = xhead.fmh_oflags; + +- /* If we didn't abort, set the "last" flag in the last fmx */ +- if (!aborted && info.idx) { +- info.last_flags |= FMR_OF_LAST; +- if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags, +- &info.last_flags, sizeof(info.last_flags))) +- return -EFAULT; ++ /* ++ * If the caller wanted a record count or there aren't any ++ * new records to return, we're done. ++ */ ++ if (head.fmh_count == 0 || xhead.fmh_entries == 0) ++ break; ++ ++ /* Copy all the records we got out to userspace. */ ++ if (copy_to_user(user_recs, recs, ++ xhead.fmh_entries * sizeof(struct fsmap))) { ++ error = -EFAULT; ++ goto out_free; ++ } ++ ++ /* Remember the last record flags we copied to userspace. */ ++ last_rec = &recs[xhead.fmh_entries - 1]; ++ last_flags = last_rec->fmr_flags; ++ ++ /* Set up the low key for the next iteration. 
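The xfs_ioc_getfsmap() rework in this hunk is a batching pattern: fill a bounded in-kernel buffer so copy_to_user() never runs under filesystem locks, flush it, then restart the query from the last returned key until either the result set or the user buffer is exhausted. A toy standalone sketch of that loop, all names and sizes hypothetical:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct rec {
            unsigned long key;
    };

    #define BATCH 64
    #define LAST_KEY 199UL /* toy data set: keys 0..199 */

    /* Toy query: fill buf starting at low_key; report end of set. */
    static size_t run_query(unsigned long low_key, struct rec *buf,
                            size_t cap, bool *done)
    {
            size_t n = 0;

            while (n < cap && low_key + n <= LAST_KEY) {
                    buf[n].key = low_key + n;
                    n++;
            }
            *done = (low_key + n > LAST_KEY);
            return n;
    }

    static void batched_query(unsigned long low_key, size_t want)
    {
            struct rec buf[BATCH];
            size_t total = 0;
            bool done = false;

            while (!done && total < want) {
                    size_t cap = want - total < BATCH ? want - total : BATCH;
                    size_t n = run_query(low_key, buf, cap, &done);
                    size_t i;

                    if (n == 0)
                            break;
                    /* flush the batch (the copy_to_user() analogue) */
                    for (i = 0; i < n; i++)
                            printf("%lu\n", buf[i].key);
                    total += n;
                    low_key = buf[n - 1].key + 1; /* resume after last */
            }
    }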
*/ ++ xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec); ++ trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]); ++ } while (!done && head.fmh_entries < head.fmh_count); ++ ++ /* ++ * If there are no more records in the query result set and we're not ++ * in counting mode, mark the last record returned with the LAST flag. ++ */ ++ if (done && head.fmh_count > 0 && head.fmh_entries > 0) { ++ struct fsmap __user *user_rec; ++ ++ last_flags |= FMR_OF_LAST; ++ user_rec = &arg->fmh_recs[head.fmh_entries - 1]; ++ ++ if (copy_to_user(&user_rec->fmr_flags, &last_flags, ++ sizeof(last_flags))) { ++ error = -EFAULT; ++ goto out_free; ++ } + } + + /* copy back header */ +- head.fmh_entries = xhead.fmh_entries; +- head.fmh_oflags = xhead.fmh_oflags; +- if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) +- return -EFAULT; ++ if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) { ++ error = -EFAULT; ++ goto out_free; ++ } + +- return 0; ++out_free: ++ kmem_free(recs); ++ return error; + } + + STATIC int +diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c +index 6209e7b6b895b..86994d7f7cba3 100644 +--- a/fs/xfs/xfs_rtalloc.c ++++ b/fs/xfs/xfs_rtalloc.c +@@ -247,6 +247,9 @@ xfs_rtallocate_extent_block( + end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1; + i <= end; + i++) { ++ /* Make sure we don't scan off the end of the rt volume. */ ++ maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i; ++ + /* + * See if there's a free extent of maxlen starting at i. + * If it's not so then next will contain the first non-free. +@@ -442,6 +445,14 @@ xfs_rtallocate_extent_near( + */ + if (bno >= mp->m_sb.sb_rextents) + bno = mp->m_sb.sb_rextents - 1; ++ ++ /* Make sure we don't run off the end of the rt volume. */ ++ maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno; ++ if (maxlen < minlen) { ++ *rtblock = NULLRTBLOCK; ++ return 0; ++ } ++ + /* + * Try the exact allocation first. + */ +diff --git a/include/dt-bindings/mux/mux-j721e-wiz.h b/include/dt-bindings/mux/mux-j721e-wiz.h +deleted file mode 100644 +index fd1c4ea9fc7f0..0000000000000 +--- a/include/dt-bindings/mux/mux-j721e-wiz.h ++++ /dev/null +@@ -1,53 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * This header provides constants for J721E WIZ. 
+- */ +- +-#ifndef _DT_BINDINGS_J721E_WIZ +-#define _DT_BINDINGS_J721E_WIZ +- +-#define SERDES0_LANE0_QSGMII_LANE1 0x0 +-#define SERDES0_LANE0_PCIE0_LANE0 0x1 +-#define SERDES0_LANE0_USB3_0_SWAP 0x2 +- +-#define SERDES0_LANE1_QSGMII_LANE2 0x0 +-#define SERDES0_LANE1_PCIE0_LANE1 0x1 +-#define SERDES0_LANE1_USB3_0 0x2 +- +-#define SERDES1_LANE0_QSGMII_LANE3 0x0 +-#define SERDES1_LANE0_PCIE1_LANE0 0x1 +-#define SERDES1_LANE0_USB3_1_SWAP 0x2 +-#define SERDES1_LANE0_SGMII_LANE0 0x3 +- +-#define SERDES1_LANE1_QSGMII_LANE4 0x0 +-#define SERDES1_LANE1_PCIE1_LANE1 0x1 +-#define SERDES1_LANE1_USB3_1 0x2 +-#define SERDES1_LANE1_SGMII_LANE1 0x3 +- +-#define SERDES2_LANE0_PCIE2_LANE0 0x1 +-#define SERDES2_LANE0_SGMII_LANE0 0x3 +-#define SERDES2_LANE0_USB3_1_SWAP 0x2 +- +-#define SERDES2_LANE1_PCIE2_LANE1 0x1 +-#define SERDES2_LANE1_USB3_1 0x2 +-#define SERDES2_LANE1_SGMII_LANE1 0x3 +- +-#define SERDES3_LANE0_PCIE3_LANE0 0x1 +-#define SERDES3_LANE0_USB3_0_SWAP 0x2 +- +-#define SERDES3_LANE1_PCIE3_LANE1 0x1 +-#define SERDES3_LANE1_USB3_0 0x2 +- +-#define SERDES4_LANE0_EDP_LANE0 0x0 +-#define SERDES4_LANE0_QSGMII_LANE5 0x2 +- +-#define SERDES4_LANE1_EDP_LANE1 0x0 +-#define SERDES4_LANE1_QSGMII_LANE6 0x2 +- +-#define SERDES4_LANE2_EDP_LANE2 0x0 +-#define SERDES4_LANE2_QSGMII_LANE7 0x2 +- +-#define SERDES4_LANE3_EDP_LANE3 0x0 +-#define SERDES4_LANE3_QSGMII_LANE8 0x2 +- +-#endif /* _DT_BINDINGS_J721E_WIZ */ +diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h +new file mode 100644 +index 0000000000000..146d0685a9251 +--- /dev/null ++++ b/include/dt-bindings/mux/ti-serdes.h +@@ -0,0 +1,71 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * This header provides constants for SERDES MUX for TI SoCs ++ */ ++ ++#ifndef _DT_BINDINGS_MUX_TI_SERDES ++#define _DT_BINDINGS_MUX_TI_SERDES ++ ++/* J721E */ ++ ++#define J721E_SERDES0_LANE0_QSGMII_LANE1 0x0 ++#define J721E_SERDES0_LANE0_PCIE0_LANE0 0x1 ++#define J721E_SERDES0_LANE0_USB3_0_SWAP 0x2 ++#define J721E_SERDES0_LANE0_IP4_UNUSED 0x3 ++ ++#define J721E_SERDES0_LANE1_QSGMII_LANE2 0x0 ++#define J721E_SERDES0_LANE1_PCIE0_LANE1 0x1 ++#define J721E_SERDES0_LANE1_USB3_0 0x2 ++#define J721E_SERDES0_LANE1_IP4_UNUSED 0x3 ++ ++#define J721E_SERDES1_LANE0_QSGMII_LANE3 0x0 ++#define J721E_SERDES1_LANE0_PCIE1_LANE0 0x1 ++#define J721E_SERDES1_LANE0_USB3_1_SWAP 0x2 ++#define J721E_SERDES1_LANE0_SGMII_LANE0 0x3 ++ ++#define J721E_SERDES1_LANE1_QSGMII_LANE4 0x0 ++#define J721E_SERDES1_LANE1_PCIE1_LANE1 0x1 ++#define J721E_SERDES1_LANE1_USB3_1 0x2 ++#define J721E_SERDES1_LANE1_SGMII_LANE1 0x3 ++ ++#define J721E_SERDES2_LANE0_IP1_UNUSED 0x0 ++#define J721E_SERDES2_LANE0_PCIE2_LANE0 0x1 ++#define J721E_SERDES2_LANE0_USB3_1_SWAP 0x2 ++#define J721E_SERDES2_LANE0_SGMII_LANE0 0x3 ++ ++#define J721E_SERDES2_LANE1_IP1_UNUSED 0x0 ++#define J721E_SERDES2_LANE1_PCIE2_LANE1 0x1 ++#define J721E_SERDES2_LANE1_USB3_1 0x2 ++#define J721E_SERDES2_LANE1_SGMII_LANE1 0x3 ++ ++#define J721E_SERDES3_LANE0_IP1_UNUSED 0x0 ++#define J721E_SERDES3_LANE0_PCIE3_LANE0 0x1 ++#define J721E_SERDES3_LANE0_USB3_0_SWAP 0x2 ++#define J721E_SERDES3_LANE0_IP4_UNUSED 0x3 ++ ++#define J721E_SERDES3_LANE1_IP1_UNUSED 0x0 ++#define J721E_SERDES3_LANE1_PCIE3_LANE1 0x1 ++#define J721E_SERDES3_LANE1_USB3_0 0x2 ++#define J721E_SERDES3_LANE1_IP4_UNUSED 0x3 ++ ++#define J721E_SERDES4_LANE0_EDP_LANE0 0x0 ++#define J721E_SERDES4_LANE0_IP2_UNUSED 0x1 ++#define J721E_SERDES4_LANE0_QSGMII_LANE5 0x2 ++#define J721E_SERDES4_LANE0_IP4_UNUSED 0x3 ++ ++#define J721E_SERDES4_LANE1_EDP_LANE1 0x0 
++#define J721E_SERDES4_LANE1_IP2_UNUSED 0x1 ++#define J721E_SERDES4_LANE1_QSGMII_LANE6 0x2 ++#define J721E_SERDES4_LANE1_IP4_UNUSED 0x3 ++ ++#define J721E_SERDES4_LANE2_EDP_LANE2 0x0 ++#define J721E_SERDES4_LANE2_IP2_UNUSED 0x1 ++#define J721E_SERDES4_LANE2_QSGMII_LANE7 0x2 ++#define J721E_SERDES4_LANE2_IP4_UNUSED 0x3 ++ ++#define J721E_SERDES4_LANE3_EDP_LANE3 0x0 ++#define J721E_SERDES4_LANE3_IP2_UNUSED 0x1 ++#define J721E_SERDES4_LANE3_QSGMII_LANE8 0x2 ++#define J721E_SERDES4_LANE3_IP4_UNUSED 0x3 ++ ++#endif /* _DT_BINDINGS_MUX_TI_SERDES */ +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h +index 53c7bd568c5d4..5026b75db9725 100644 +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -358,6 +358,7 @@ struct bpf_subprog_info { + u32 start; /* insn idx of function entry point */ + u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ + u16 stack_depth; /* max. stack depth used by this function */ ++ bool has_tail_call; + }; + + /* single container for all structs +diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h +index 6e87225600ae3..064870844f06c 100644 +--- a/include/linux/dma-direct.h ++++ b/include/linux/dma-direct.h +@@ -62,9 +62,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size, + { + dma_addr_t end = addr + size - 1; + +- if (!dev->dma_mask) +- return false; +- + if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && + min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn))) + return false; +diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h +index 6a584b3e5c74f..1130f271de669 100644 +--- a/include/linux/lockdep.h ++++ b/include/linux/lockdep.h +@@ -512,19 +512,19 @@ static inline void print_irqtrace_events(struct task_struct *curr) + #define lock_map_release(l) lock_release(l, _THIS_IP_) + + #ifdef CONFIG_PROVE_LOCKING +-# define might_lock(lock) \ ++# define might_lock(lock) \ + do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \ + lock_release(&(lock)->dep_map, _THIS_IP_); \ + } while (0) +-# define might_lock_read(lock) \ ++# define might_lock_read(lock) \ + do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ + lock_release(&(lock)->dep_map, _THIS_IP_); \ + } while (0) +-# define might_lock_nested(lock, subclass) \ ++# define might_lock_nested(lock, subclass) \ + do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \ +@@ -534,44 +534,39 @@ do { \ + + DECLARE_PER_CPU(int, hardirqs_enabled); + DECLARE_PER_CPU(int, hardirq_context); ++DECLARE_PER_CPU(unsigned int, lockdep_recursion); + +-/* +- * The below lockdep_assert_*() macros use raw_cpu_read() to access the above +- * per-cpu variables. This is required because this_cpu_read() will potentially +- * call into preempt/irq-disable and that obviously isn't right. This is also +- * correct because when IRQs are enabled, it doesn't matter if we accidentally +- * read the value from our previous CPU. 
+- */ ++#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion)) + + #define lockdep_assert_irqs_enabled() \ + do { \ +- WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \ ++ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \ + } while (0) + + #define lockdep_assert_irqs_disabled() \ + do { \ +- WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \ ++ WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \ + } while (0) + + #define lockdep_assert_in_irq() \ + do { \ +- WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \ ++ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \ + } while (0) + + #define lockdep_assert_preemption_enabled() \ + do { \ + WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ +- debug_locks && \ ++ __lockdep_enabled && \ + (preempt_count() != 0 || \ +- !raw_cpu_read(hardirqs_enabled))); \ ++ !this_cpu_read(hardirqs_enabled))); \ + } while (0) + + #define lockdep_assert_preemption_disabled() \ + do { \ + WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ +- debug_locks && \ ++ __lockdep_enabled && \ + (preempt_count() == 0 && \ +- raw_cpu_read(hardirqs_enabled))); \ ++ this_cpu_read(hardirqs_enabled))); \ + } while (0) + + #else +diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h +index bb35b449f5330..9a1fd49df17f6 100644 +--- a/include/linux/lockdep_types.h ++++ b/include/linux/lockdep_types.h +@@ -35,8 +35,12 @@ enum lockdep_wait_type { + /* + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need + * the total number of states... :-( ++ * ++ * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h, for each ++ * of those we generates 4 states, Additionally we report on USED and USED_READ. + */ +-#define XXX_LOCK_USAGE_STATES (1+2*4) ++#define XXX_LOCK_USAGE_STATES 2 ++#define LOCK_TRACE_STATES (XXX_LOCK_USAGE_STATES*4 + 2) + + /* + * NR_LOCKDEP_CACHING_CLASSES ... Number of classes +@@ -106,7 +110,7 @@ struct lock_class { + * IRQ/softirq usage tracking bits: + */ + unsigned long usage_mask; +- const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES]; ++ const struct lock_trace *usage_traces[LOCK_TRACE_STATES]; + + /* + * Generation counter, when doing certain classes of graph walking, +diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h +index 05eea1aef5aa0..ea35157974187 100644 +--- a/include/linux/mailbox/mtk-cmdq-mailbox.h ++++ b/include/linux/mailbox/mtk-cmdq-mailbox.h +@@ -28,8 +28,7 @@ + * bit 16-27: update value + * bit 31: 1 - update, 0 - no update + */ +-#define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \ +- CMDQ_WFE_WAIT_VALUE) ++#define CMDQ_WFE_OPTION (CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE) + + /** cmdq event maximum */ + #define CMDQ_MAX_EVENT 0x3ff +diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h +new file mode 100644 +index 0000000000000..f5ba0fbff72fe +--- /dev/null ++++ b/include/linux/nfs_ssc.h +@@ -0,0 +1,67 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * include/linux/nfs_ssc.h ++ * ++ * Author: Dai Ngo ++ * ++ * Copyright (c) 2020, Oracle and/or its affiliates. 
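The lockdep hunks above gate every assertion on __lockdep_enabled, i.e. debug_locks plus a per-CPU lockdep_recursion count of zero, so the checker stays quiet while lockdep itself is running; with recursion excluded, this_cpu_read() becomes safe where raw_cpu_read() was used before. The general shape, sketched with a thread-local counter in plain C11 and hypothetical names:

    #include <assert.h>
    #include <stdbool.h>

    static _Thread_local unsigned int checker_recursion;
    static bool checks_enabled = true;

    #define checker_active() (checks_enabled && checker_recursion == 0)

    static void assert_invariant(bool cond)
    {
            if (!checker_active())
                    return; /* already inside the checker; stay quiet */
            checker_recursion++;
            assert(cond); /* may re-enter instrumented code safely */
            checker_recursion--;
    }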
++ */ ++ ++#include ++ ++extern struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl; ++ ++/* ++ * NFS_V4 ++ */ ++struct nfs4_ssc_client_ops { ++ struct file *(*sco_open)(struct vfsmount *ss_mnt, ++ struct nfs_fh *src_fh, nfs4_stateid *stateid); ++ void (*sco_close)(struct file *filep); ++}; ++ ++/* ++ * NFS_FS ++ */ ++struct nfs_ssc_client_ops { ++ void (*sco_sb_deactive)(struct super_block *sb); ++}; ++ ++struct nfs_ssc_client_ops_tbl { ++ const struct nfs4_ssc_client_ops *ssc_nfs4_ops; ++ const struct nfs_ssc_client_ops *ssc_nfs_ops; ++}; ++ ++extern void nfs42_ssc_register_ops(void); ++extern void nfs42_ssc_unregister_ops(void); ++ ++extern void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops); ++extern void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops); ++ ++#ifdef CONFIG_NFSD_V4_2_INTER_SSC ++static inline struct file *nfs42_ssc_open(struct vfsmount *ss_mnt, ++ struct nfs_fh *src_fh, nfs4_stateid *stateid) ++{ ++ if (nfs_ssc_client_tbl.ssc_nfs4_ops) ++ return (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_open)(ss_mnt, src_fh, stateid); ++ return ERR_PTR(-EIO); ++} ++ ++static inline void nfs42_ssc_close(struct file *filep) ++{ ++ if (nfs_ssc_client_tbl.ssc_nfs4_ops) ++ (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep); ++} ++#endif ++ ++/* ++ * NFS_FS ++ */ ++extern void nfs_ssc_register(const struct nfs_ssc_client_ops *ops); ++extern void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops); ++ ++static inline void nfs_do_sb_deactive(struct super_block *sb) ++{ ++ if (nfs_ssc_client_tbl.ssc_nfs_ops) ++ (*nfs_ssc_client_tbl.ssc_nfs_ops->sco_sb_deactive)(sb); ++} +diff --git a/include/linux/notifier.h b/include/linux/notifier.h +index 018947611483e..2fb373a5c1ede 100644 +--- a/include/linux/notifier.h ++++ b/include/linux/notifier.h +@@ -161,20 +161,19 @@ extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, + + extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v); +-extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, +- unsigned long val, void *v, int nr_to_call, int *nr_calls); + extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v); +-extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, +- unsigned long val, void *v, int nr_to_call, int *nr_calls); + extern int raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v); +-extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, +- unsigned long val, void *v, int nr_to_call, int *nr_calls); + extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v); +-extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, +- unsigned long val, void *v, int nr_to_call, int *nr_calls); ++ ++extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh, ++ unsigned long val_up, unsigned long val_down, void *v); ++extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh, ++ unsigned long val_up, unsigned long val_down, void *v); ++extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh, ++ unsigned long val_up, unsigned long val_down, void *v); + + #define NOTIFY_DONE 0x0000 /* Don't care */ + #define NOTIFY_OK 0x0001 /* Suits me */ +diff --git a/include/linux/oom.h b/include/linux/oom.h +index f022f581ac29d..2db9a14325112 100644 +--- a/include/linux/oom.h ++++ b/include/linux/oom.h +@@ -55,6 +55,7 @@ struct oom_control { + }; + + 
extern struct mutex oom_lock; ++extern struct mutex oom_adj_mutex; + + static inline void set_current_oom_origin(void) + { +diff --git a/include/linux/overflow.h b/include/linux/overflow.h +index 93fcef105061b..ff3c48f0abc5b 100644 +--- a/include/linux/overflow.h ++++ b/include/linux/overflow.h +@@ -3,6 +3,7 @@ + #define __LINUX_OVERFLOW_H + + #include ++#include + + /* + * In the fallback code below, we need to compute the minimum and +diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h +index 8679ccd722e89..3468794f83d23 100644 +--- a/include/linux/page_owner.h ++++ b/include/linux/page_owner.h +@@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops; + extern void __reset_page_owner(struct page *page, unsigned int order); + extern void __set_page_owner(struct page *page, + unsigned int order, gfp_t gfp_mask); +-extern void __split_page_owner(struct page *page, unsigned int order); ++extern void __split_page_owner(struct page *page, unsigned int nr); + extern void __copy_page_owner(struct page *oldpage, struct page *newpage); + extern void __set_page_owner_migrate_reason(struct page *page, int reason); + extern void __dump_page_owner(struct page *page); +@@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page, + __set_page_owner(page, order, gfp_mask); + } + +-static inline void split_page_owner(struct page *page, unsigned int order) ++static inline void split_page_owner(struct page *page, unsigned int nr) + { + if (static_branch_unlikely(&page_owner_inited)) +- __split_page_owner(page, order); ++ __split_page_owner(page, nr); + } + static inline void copy_page_owner(struct page *oldpage, struct page *newpage) + { +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 835530605c0d7..3ff723124ca7f 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -445,6 +445,7 @@ struct pci_dev { + unsigned int is_probed:1; /* Device probing in progress */ + unsigned int link_active_reporting:1;/* Device capable of reporting link active */ + unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */ ++ unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; /* pci_enable_device has been called */ + +diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h +index fbbeb2f6189b8..b34a094b2258d 100644 +--- a/include/linux/platform_data/dma-dw.h ++++ b/include/linux/platform_data/dma-dw.h +@@ -26,6 +26,7 @@ struct device; + * @dst_id: dst request line + * @m_master: memory master for transfers on allocated channel + * @p_master: peripheral master for transfers on allocated channel ++ * @channels: mask of the channels permitted for allocation (zero value means any) + * @hs_polarity:set active low polarity of handshake interface + */ + struct dw_dma_slave { +@@ -34,6 +35,7 @@ struct dw_dma_slave { + u8 dst_id; + u8 m_master; + u8 p_master; ++ u8 channels; + bool hs_polarity; + }; + +diff --git a/include/linux/prandom.h b/include/linux/prandom.h +index aa16e6468f91e..cc1e71334e53c 100644 +--- a/include/linux/prandom.h ++++ b/include/linux/prandom.h +@@ -16,12 +16,44 @@ void prandom_bytes(void *buf, size_t nbytes); + void prandom_seed(u32 seed); + void prandom_reseed_late(void); + ++#if BITS_PER_LONG == 64 ++/* ++ * The core SipHash round function. Each line can be executed in ++ * parallel given enough CPU resources. 
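The PRND_SIPROUND() macro defined just below is the SipHash round function written as one comma expression: add-rotate-xor mixing over four 64-bit lanes. The same round as a plain function, for readability (rol64() is spelled out so this sketch stands alone):

    #include <stdint.h>

    static inline uint64_t rol64(uint64_t v, unsigned int s)
    {
            return (v << s) | (v >> (64 - s));
    }

    /* One SipHash round over the four lanes, as in PRND_SIPROUND(). */
    static void sipround(uint64_t *v0, uint64_t *v1, uint64_t *v2,
                         uint64_t *v3)
    {
            *v0 += *v1; *v1 = rol64(*v1, 13); *v1 ^= *v0;
            *v0 = rol64(*v0, 32);
            *v2 += *v3; *v3 = rol64(*v3, 16); *v3 ^= *v2;
            *v0 += *v3; *v3 = rol64(*v3, 21); *v3 ^= *v0;
            *v2 += *v1; *v1 = rol64(*v1, 17); *v1 ^= *v2;
            *v2 = rol64(*v2, 32);
    }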
++ */ ++#define PRND_SIPROUND(v0, v1, v2, v3) ( \ ++ v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \ ++ v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \ ++ v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \ ++ v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \ ++) ++ ++#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261) ++#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573) ++ ++#elif BITS_PER_LONG == 32 ++/* ++ * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash. ++ * This is weaker, but 32-bit machines are not used for high-traffic ++ * applications, so there is less output for an attacker to analyze. ++ */ ++#define PRND_SIPROUND(v0, v1, v2, v3) ( \ ++ v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \ ++ v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \ ++ v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \ ++ v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \ ++) ++#define PRND_K0 0x6c796765 ++#define PRND_K1 0x74656462 ++ ++#else ++#error Unsupported BITS_PER_LONG ++#endif ++ + struct rnd_state { + __u32 s1, s2, s3, s4; + }; + +-DECLARE_PER_CPU(struct rnd_state, net_rand_state); +- + u32 prandom_u32_state(struct rnd_state *state); + void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); + void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); +diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h +index ecdc6542070f1..dfd82eab29025 100644 +--- a/include/linux/sched/coredump.h ++++ b/include/linux/sched/coredump.h +@@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm) + #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ + #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ + #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ ++#define MMF_MULTIPROCESS 27 /* mm is shared between processes */ + #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) + + #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ +diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h +index 962d9768945f0..7b99e3dba2065 100644 +--- a/include/linux/seqlock.h ++++ b/include/linux/seqlock.h +@@ -154,6 +154,19 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) + * @lock: Pointer to the associated LOCKTYPE + */ + ++#define seqcount_LOCKNAME_init(s, _lock, lockname) \ ++ do { \ ++ seqcount_##lockname##_t *____s = (s); \ ++ seqcount_init(&____s->seqcount); \ ++ __SEQ_LOCK(____s->lock = (_lock)); \ ++ } while (0) ++ ++#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock) ++#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock) ++#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock); ++#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex); ++#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex); ++ + /* + * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers + * @locktype: actual typename +@@ -167,13 +180,6 @@ typedef struct seqcount_##lockname { \ + __SEQ_LOCK(locktype *lock); \ + } seqcount_##lockname##_t; \ + \ +-static __always_inline void \ +-seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \ +-{ \ +- seqcount_init(&s->seqcount); \ +- __SEQ_LOCK(s->lock = lock); \ +-} \ +- \ + static __always_inline seqcount_t * \ + __seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \ + { \ +@@ -228,13 +234,12 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) 
+ __SEQ_LOCK(.lock = (assoc_lock)) \ + } + +-#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) + #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) ++#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) + #define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) + #define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) + #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) + +- + #define __seqprop_case(s, lockname, prop) \ + seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s)) + +diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h +index 2249ecaf77e42..76a3075077533 100644 +--- a/include/linux/soc/mediatek/mtk-cmdq.h ++++ b/include/linux/soc/mediatek/mtk-cmdq.h +@@ -105,11 +105,12 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, + /** + * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet + * @pkt: the CMDQ packet +- * @event: the desired event type to "wait and CLEAR" ++ * @event: the desired event type to wait ++ * @clear: clear event or not after event arrive + * + * Return: 0 for success; else the error code is returned + */ +-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); ++int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear); + + /** + * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet +diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h +index 0d3920896d502..716db4a0fed89 100644 +--- a/include/net/netfilter/nf_log.h ++++ b/include/net/netfilter/nf_log.h +@@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, + unsigned int logflags); + void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m, + struct sock *sk); ++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb); + void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, + unsigned int hooknum, const struct sk_buff *skb, + const struct net_device *in, +diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h +index e1057b255f69a..879fe8cff5819 100644 +--- a/include/net/tc_act/tc_tunnel_key.h ++++ b/include/net/tc_act/tc_tunnel_key.h +@@ -56,7 +56,10 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a) + { + #ifdef CONFIG_NET_CLS_ACT + struct tcf_tunnel_key *t = to_tunnel_key(a); +- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params); ++ struct tcf_tunnel_key_params *params; ++ ++ params = rcu_dereference_protected(t->params, ++ lockdep_is_held(&a->tcfa_lock)); + + return ¶ms->tcft_enc_metadata->u.tun_info; + #else +diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h +index 71f573a418bf0..07a764eb692ee 100644 +--- a/include/rdma/ib_umem.h ++++ b/include/rdma/ib_umem.h +@@ -68,10 +68,11 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs + size_t length) { + return -EINVAL; + } +-static inline int ib_umem_find_best_pgsz(struct ib_umem *umem, +- unsigned long pgsz_bitmap, +- unsigned long virt) { +- return -EINVAL; ++static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, ++ unsigned long pgsz_bitmap, ++ unsigned long virt) ++{ ++ return 0; + } + + #endif /* CONFIG_INFINIBAND_USER_MEM */ +diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h +index c0b2fa7e9b959..5b4f0efc4241f 100644 +--- a/include/rdma/ib_verbs.h ++++ b/include/rdma/ib_verbs.h +@@ 
-2439,7 +2439,7 @@ struct ib_device_ops { + int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata); + int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); +- void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); ++ int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); + int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); + struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); + struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, +@@ -2468,7 +2468,7 @@ struct ib_device_ops { + int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); + int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); + int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); +- void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); ++ int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); + struct ib_flow *(*create_flow)(struct ib_qp *qp, + struct ib_flow_attr *flow_attr, + int domain, struct ib_udata *udata); +@@ -2496,7 +2496,7 @@ struct ib_device_ops { + struct ib_wq *(*create_wq)(struct ib_pd *pd, + struct ib_wq_init_attr *init_attr, + struct ib_udata *udata); +- void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); ++ int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); + int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr, + u32 wq_attr_mask, struct ib_udata *udata); + struct ib_rwq_ind_table *(*create_rwq_ind_table)( +@@ -3817,46 +3817,15 @@ static inline int ib_post_recv(struct ib_qp *qp, + return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy); + } + +-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, +- int nr_cqe, int comp_vector, +- enum ib_poll_context poll_ctx, +- const char *caller, struct ib_udata *udata); +- +-/** +- * ib_alloc_cq_user: Allocate kernel/user CQ +- * @dev: The IB device +- * @private: Private data attached to the CQE +- * @nr_cqe: Number of CQEs in the CQ +- * @comp_vector: Completion vector used for the IRQs +- * @poll_ctx: Context used for polling the CQ +- * @udata: Valid user data or NULL for kernel objects +- */ +-static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev, +- void *private, int nr_cqe, +- int comp_vector, +- enum ib_poll_context poll_ctx, +- struct ib_udata *udata) +-{ +- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, +- KBUILD_MODNAME, udata); +-} +- +-/** +- * ib_alloc_cq: Allocate kernel CQ +- * @dev: The IB device +- * @private: Private data attached to the CQE +- * @nr_cqe: Number of CQEs in the CQ +- * @comp_vector: Completion vector used for the IRQs +- * @poll_ctx: Context used for polling the CQ +- * +- * NOTE: for user cq use ib_alloc_cq_user with valid udata! 
+- */ ++struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe, ++ int comp_vector, enum ib_poll_context poll_ctx, ++ const char *caller); + static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, + int nr_cqe, int comp_vector, + enum ib_poll_context poll_ctx) + { +- return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, +- NULL); ++ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx, ++ KBUILD_MODNAME); + } + + struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, +@@ -3878,26 +3847,7 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, + KBUILD_MODNAME); + } + +-/** +- * ib_free_cq_user - Free kernel/user CQ +- * @cq: The CQ to free +- * @udata: Valid user data or NULL for kernel objects +- * +- * NOTE: This function shouldn't be called on shared CQs. +- */ +-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata); +- +-/** +- * ib_free_cq - Free kernel CQ +- * @cq: The CQ to free +- * +- * NOTE: for user cq use ib_free_cq_user with valid udata! +- */ +-static inline void ib_free_cq(struct ib_cq *cq) +-{ +- ib_free_cq_user(cq, NULL); +-} +- ++void ib_free_cq(struct ib_cq *cq); + int ib_process_cq_direct(struct ib_cq *cq, int budget); + + /** +@@ -3955,7 +3905,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata); + */ + static inline void ib_destroy_cq(struct ib_cq *cq) + { +- ib_destroy_cq_user(cq, NULL); ++ int ret = ib_destroy_cq_user(cq, NULL); ++ ++ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail"); + } + + /** +@@ -4379,7 +4331,7 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u8 port); + + struct ib_wq *ib_create_wq(struct ib_pd *pd, + struct ib_wq_init_attr *init_attr); +-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); ++int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata); + int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, + u32 wq_attr_mask); + int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); +diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h +index 731ac09ed2313..5b567b43e1b16 100644 +--- a/include/scsi/scsi_common.h ++++ b/include/scsi/scsi_common.h +@@ -25,6 +25,13 @@ scsi_command_size(const unsigned char *cmnd) + scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]); + } + ++static inline unsigned char ++scsi_command_control(const unsigned char *cmnd) ++{ ++ return (cmnd[0] == VARIABLE_LENGTH_CMD) ? 
++ cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1]; ++} ++ + /* Returns a human-readable name for the device */ + extern const char *scsi_device_type(unsigned type); + +diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h +index 0fea49bfc5e86..73827b7d17e00 100644 +--- a/include/sound/hda_codec.h ++++ b/include/sound/hda_codec.h +@@ -253,6 +253,7 @@ struct hda_codec { + unsigned int force_pin_prefix:1; /* Add location prefix */ + unsigned int link_down_at_suspend:1; /* link down at runtime suspend */ + unsigned int relaxed_resume:1; /* don't resume forcibly for jack */ ++ unsigned int forced_resume:1; /* forced resume for jack */ + unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */ + + #ifdef CONFIG_PM +diff --git a/include/trace/events/target.h b/include/trace/events/target.h +index 77408edd29d2a..67fad2677ed55 100644 +--- a/include/trace/events/target.h ++++ b/include/trace/events/target.h +@@ -141,6 +141,7 @@ TRACE_EVENT(target_sequencer_start, + __field( unsigned int, opcode ) + __field( unsigned int, data_length ) + __field( unsigned int, task_attribute ) ++ __field( unsigned char, control ) + __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE ) + __string( initiator, cmd->se_sess->se_node_acl->initiatorname ) + ), +@@ -151,6 +152,7 @@ TRACE_EVENT(target_sequencer_start, + __entry->opcode = cmd->t_task_cdb[0]; + __entry->data_length = cmd->data_length; + __entry->task_attribute = cmd->sam_task_attr; ++ __entry->control = scsi_command_control(cmd->t_task_cdb); + memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE); + __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); + ), +@@ -160,9 +162,7 @@ TRACE_EVENT(target_sequencer_start, + __entry->tag, show_opcode_name(__entry->opcode), + __entry->data_length, __print_hex(__entry->cdb, 16), + show_task_attribute_name(__entry->task_attribute), +- scsi_command_size(__entry->cdb) <= 16 ? +- __entry->cdb[scsi_command_size(__entry->cdb) - 1] : +- __entry->cdb[1] ++ __entry->control + ) + ); + +@@ -178,6 +178,7 @@ TRACE_EVENT(target_cmd_complete, + __field( unsigned int, opcode ) + __field( unsigned int, data_length ) + __field( unsigned int, task_attribute ) ++ __field( unsigned char, control ) + __field( unsigned char, scsi_status ) + __field( unsigned char, sense_length ) + __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE ) +@@ -191,6 +192,7 @@ TRACE_EVENT(target_cmd_complete, + __entry->opcode = cmd->t_task_cdb[0]; + __entry->data_length = cmd->data_length; + __entry->task_attribute = cmd->sam_task_attr; ++ __entry->control = scsi_command_control(cmd->t_task_cdb); + __entry->scsi_status = cmd->scsi_status; + __entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ? + min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0; +@@ -208,9 +210,7 @@ TRACE_EVENT(target_cmd_complete, + show_opcode_name(__entry->opcode), + __entry->data_length, __print_hex(__entry->cdb, 16), + show_task_attribute_name(__entry->task_attribute), +- scsi_command_size(__entry->cdb) <= 16 ? 
+- __entry->cdb[scsi_command_size(__entry->cdb) - 1] : +- __entry->cdb[1] ++ __entry->control + ) + ); + +diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h +index f9701410d3b52..57a222014cd20 100644 +--- a/include/uapi/linux/pci_regs.h ++++ b/include/uapi/linux/pci_regs.h +@@ -76,6 +76,7 @@ + #define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */ + #define PCI_LATENCY_TIMER 0x0d /* 8 bits */ + #define PCI_HEADER_TYPE 0x0e /* 8 bits */ ++#define PCI_HEADER_TYPE_MASK 0x7f + #define PCI_HEADER_TYPE_NORMAL 0 + #define PCI_HEADER_TYPE_BRIDGE 1 + #define PCI_HEADER_TYPE_CARDBUS 2 +diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h +index 077e7ee69e3d8..b95d3c485d27e 100644 +--- a/include/uapi/linux/perf_event.h ++++ b/include/uapi/linux/perf_event.h +@@ -1196,7 +1196,7 @@ union perf_mem_data_src { + + #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ + /* 1 free */ +-#define PERF_MEM_SNOOPX_SHIFT 37 ++#define PERF_MEM_SNOOPX_SHIFT 38 + + /* locked instruction */ + #define PERF_MEM_LOCK_NA 0x01 /* not available */ +diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c +index b367430e611c7..3d897de890612 100644 +--- a/kernel/bpf/percpu_freelist.c ++++ b/kernel/bpf/percpu_freelist.c +@@ -17,6 +17,8 @@ int pcpu_freelist_init(struct pcpu_freelist *s) + raw_spin_lock_init(&head->lock); + head->first = NULL; + } ++ raw_spin_lock_init(&s->extralist.lock); ++ s->extralist.first = NULL; + return 0; + } + +@@ -40,12 +42,50 @@ static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head, + raw_spin_unlock(&head->lock); + } + ++static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s, ++ struct pcpu_freelist_node *node) ++{ ++ if (!raw_spin_trylock(&s->extralist.lock)) ++ return false; ++ ++ pcpu_freelist_push_node(&s->extralist, node); ++ raw_spin_unlock(&s->extralist.lock); ++ return true; ++} ++ ++static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s, ++ struct pcpu_freelist_node *node) ++{ ++ int cpu, orig_cpu; ++ ++ orig_cpu = cpu = raw_smp_processor_id(); ++ while (1) { ++ struct pcpu_freelist_head *head; ++ ++ head = per_cpu_ptr(s->freelist, cpu); ++ if (raw_spin_trylock(&head->lock)) { ++ pcpu_freelist_push_node(head, node); ++ raw_spin_unlock(&head->lock); ++ return; ++ } ++ cpu = cpumask_next(cpu, cpu_possible_mask); ++ if (cpu >= nr_cpu_ids) ++ cpu = 0; ++ ++ /* cannot lock any per cpu lock, try extralist */ ++ if (cpu == orig_cpu && ++ pcpu_freelist_try_push_extra(s, node)) ++ return; ++ } ++} ++ + void __pcpu_freelist_push(struct pcpu_freelist *s, + struct pcpu_freelist_node *node) + { +- struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); +- +- ___pcpu_freelist_push(head, node); ++ if (in_nmi()) ++ ___pcpu_freelist_push_nmi(s, node); ++ else ++ ___pcpu_freelist_push(this_cpu_ptr(s->freelist), node); + } + + void pcpu_freelist_push(struct pcpu_freelist *s, +@@ -81,7 +121,7 @@ again: + } + } + +-struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s) ++static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s) + { + struct pcpu_freelist_head *head; + struct pcpu_freelist_node *node; +@@ -102,8 +142,59 @@ struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s) + if (cpu >= nr_cpu_ids) + cpu = 0; + if (cpu == orig_cpu) +- return NULL; ++ break; ++ } ++ ++ /* per cpu lists are all empty, try extralist */ ++ raw_spin_lock(&s->extralist.lock); ++ node = s->extralist.first; ++ if (node) ++ s->extralist.first = node->next; ++ 
raw_spin_unlock(&s->extralist.lock); ++ return node; ++} ++ ++static struct pcpu_freelist_node * ++___pcpu_freelist_pop_nmi(struct pcpu_freelist *s) ++{ ++ struct pcpu_freelist_head *head; ++ struct pcpu_freelist_node *node; ++ int orig_cpu, cpu; ++ ++ orig_cpu = cpu = raw_smp_processor_id(); ++ while (1) { ++ head = per_cpu_ptr(s->freelist, cpu); ++ if (raw_spin_trylock(&head->lock)) { ++ node = head->first; ++ if (node) { ++ head->first = node->next; ++ raw_spin_unlock(&head->lock); ++ return node; ++ } ++ raw_spin_unlock(&head->lock); ++ } ++ cpu = cpumask_next(cpu, cpu_possible_mask); ++ if (cpu >= nr_cpu_ids) ++ cpu = 0; ++ if (cpu == orig_cpu) ++ break; + } ++ ++ /* cannot pop from per cpu lists, try extralist */ ++ if (!raw_spin_trylock(&s->extralist.lock)) ++ return NULL; ++ node = s->extralist.first; ++ if (node) ++ s->extralist.first = node->next; ++ raw_spin_unlock(&s->extralist.lock); ++ return node; ++} ++ ++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s) ++{ ++ if (in_nmi()) ++ return ___pcpu_freelist_pop_nmi(s); ++ return ___pcpu_freelist_pop(s); + } + + struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) +diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h +index fbf8a8a289791..3c76553cfe571 100644 +--- a/kernel/bpf/percpu_freelist.h ++++ b/kernel/bpf/percpu_freelist.h +@@ -13,6 +13,7 @@ struct pcpu_freelist_head { + + struct pcpu_freelist { + struct pcpu_freelist_head __percpu *freelist; ++ struct pcpu_freelist_head extralist; + }; + + struct pcpu_freelist_node { +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index fba52d9ec8fc4..43cd175c66a55 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -1489,6 +1489,10 @@ static int check_subprogs(struct bpf_verifier_env *env) + for (i = 0; i < insn_cnt; i++) { + u8 code = insn[i].code; + ++ if (code == (BPF_JMP | BPF_CALL) && ++ insn[i].imm == BPF_FUNC_tail_call && ++ insn[i].src_reg != BPF_PSEUDO_CALL) ++ subprog[cur_subprog].has_tail_call = true; + if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) + goto next; + if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) +@@ -2974,6 +2978,31 @@ static int check_max_stack_depth(struct bpf_verifier_env *env) + int ret_prog[MAX_CALL_FRAMES]; + + process_func: ++ /* protect against potential stack overflow that might happen when ++ * bpf2bpf calls get combined with tailcalls. Limit the caller's stack ++ * depth for such case down to 256 so that the worst case scenario ++ * would result in 8k stack size (32 which is tailcall limit * 256 = ++ * 8k). ++ * ++ * To get the idea what might happen, see an example: ++ * func1 -> sub rsp, 128 ++ * subfunc1 -> sub rsp, 256 ++ * tailcall1 -> add rsp, 256 ++ * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) ++ * subfunc2 -> sub rsp, 64 ++ * subfunc22 -> sub rsp, 128 ++ * tailcall2 -> add rsp, 128 ++ * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) ++ * ++ * tailcall will unwind the current stack frame but it will not get rid ++ * of caller's stack as shown on the example above. ++ */ ++ if (idx && subprog[idx].has_tail_call && depth >= 256) { ++ verbose(env, ++ "tail_calls are not allowed when call stack of previous frames is %d bytes. 
Too large\n", ++ depth); ++ return -EACCES; ++ } + /* round up to 32-bytes, since this is granularity + * of interpreter stack size + */ +@@ -4885,24 +4914,19 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn + regs[BPF_REG_0].id = ++env->id_gen; + } else { + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; +- regs[BPF_REG_0].id = ++env->id_gen; + } + } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; +- regs[BPF_REG_0].id = ++env->id_gen; + } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; +- regs[BPF_REG_0].id = ++env->id_gen; + } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; +- regs[BPF_REG_0].id = ++env->id_gen; + } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; +- regs[BPF_REG_0].id = ++env->id_gen; + regs[BPF_REG_0].mem_size = meta.mem_size; + } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) { + int ret_btf_id; +@@ -4922,6 +4946,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn + return -EINVAL; + } + ++ if (reg_type_may_be_null(regs[BPF_REG_0].type)) ++ regs[BPF_REG_0].id = ++env->id_gen; ++ + if (is_ptr_cast_function(func_id)) { + /* For release_reference() */ + regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; +@@ -6847,7 +6874,8 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, + struct bpf_reg_state *reg, u32 id, + bool is_null) + { +- if (reg_type_may_be_null(reg->type) && reg->id == id) { ++ if (reg_type_may_be_null(reg->type) && reg->id == id && ++ !WARN_ON_ONCE(!reg->id)) { + /* Old offset (both fixed and variable parts) should + * have been known-zero, because we don't allow pointer + * arithmetic on pointers that might be NULL. +@@ -11046,6 +11074,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) + } + + if (prog->expected_attach_type == BPF_MODIFY_RETURN) { ++ if (tgt_prog) { ++ verbose(env, "can't modify return codes of BPF programs\n"); ++ ret = -EINVAL; ++ goto out; ++ } + ret = check_attach_modify_return(prog, addr); + if (ret) + verbose(env, "%s() is not modifiable\n", +diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c +index 44a259338e33d..f7e1d0eccdbc6 100644 +--- a/kernel/cpu_pm.c ++++ b/kernel/cpu_pm.c +@@ -15,18 +15,28 @@ + + static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain); + +-static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls) ++static int cpu_pm_notify(enum cpu_pm_event event) + { + int ret; + + /* +- * __atomic_notifier_call_chain has a RCU read critical section, which ++ * atomic_notifier_call_chain has a RCU read critical section, which + * could be disfunctional in cpu idle. Copy RCU_NONIDLE code to let + * RCU know this. 
+ */ + rcu_irq_enter_irqson(); +- ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL, +- nr_to_call, nr_calls); ++ ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL); ++ rcu_irq_exit_irqson(); ++ ++ return notifier_to_errno(ret); ++} ++ ++static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down) ++{ ++ int ret; ++ ++ rcu_irq_enter_irqson(); ++ ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL); + rcu_irq_exit_irqson(); + + return notifier_to_errno(ret); +@@ -80,18 +90,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); + */ + int cpu_pm_enter(void) + { +- int nr_calls = 0; +- int ret = 0; +- +- ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls); +- if (ret) +- /* +- * Inform listeners (nr_calls - 1) about failure of CPU PM +- * PM entry who are notified earlier to prepare for it. +- */ +- cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL); +- +- return ret; ++ return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED); + } + EXPORT_SYMBOL_GPL(cpu_pm_enter); + +@@ -109,7 +108,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter); + */ + int cpu_pm_exit(void) + { +- return cpu_pm_notify(CPU_PM_EXIT, -1, NULL); ++ return cpu_pm_notify(CPU_PM_EXIT); + } + EXPORT_SYMBOL_GPL(cpu_pm_exit); + +@@ -131,18 +130,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit); + */ + int cpu_cluster_pm_enter(void) + { +- int nr_calls = 0; +- int ret = 0; +- +- ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls); +- if (ret) +- /* +- * Inform listeners (nr_calls - 1) about failure of CPU cluster +- * PM entry who are notified earlier to prepare for it. +- */ +- cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL); +- +- return ret; ++ return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED); + } + EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter); + +@@ -163,7 +151,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter); + */ + int cpu_cluster_pm_exit(void) + { +- return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL); ++ return cpu_pm_notify(CPU_CLUSTER_PM_EXIT); + } + EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit); + +diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c +index 9d847ab851dbe..e240c97086e20 100644 +--- a/kernel/debug/kdb/kdb_io.c ++++ b/kernel/debug/kdb/kdb_io.c +@@ -706,12 +706,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) + size_avail = sizeof(kdb_buffer) - len; + goto kdb_print_out; + } +- if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) ++ if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) { + /* + * This was a interactive search (using '/' at more +- * prompt) and it has completed. Clear the flag. ++ * prompt) and it has completed. Replace the \0 with ++ * its original value to ensure multi-line strings ++ * are handled properly, and return to normal mode. + */ ++ *cphold = replaced_byte; + kdb_grepping_flag = 0; ++ } + /* + * at this point the string is a full line and + * should be printed, up to the null. 
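The percpu_freelist.c hunks above show the backport's NMI-hardening pattern in miniature: in NMI context, push and pop only ever take a lock with raw_spin_trylock(), walk the other CPUs' lists on contention, and fall back to a shared extralist, so an NMI that interrupted the holder of its own CPU's lock can still make progress instead of deadlocking. A minimal userspace sketch of the push side, with pthread mutexes standing in for raw spinlocks and a fixed array standing in for per-CPU data (the names here are illustrative, not the kernel's):

#include <pthread.h>
#include <stddef.h>

#define NCPU 4

struct node { struct node *next; };

struct head {
        pthread_mutex_t lock;
        struct node *first;
};

static struct head freelist[NCPU];
static struct head extralist = { PTHREAD_MUTEX_INITIALIZER, NULL };

/* Push without ever spinning: give each per-CPU lock one trylock
 * attempt, and after a full lap also try the shared extralist. This
 * mirrors ___pcpu_freelist_push_nmi() in the hunk above. */
static void push_nmi(struct node *n, int this_cpu)
{
        int cpu = this_cpu;

        for (;;) {
                if (pthread_mutex_trylock(&freelist[cpu].lock) == 0) {
                        n->next = freelist[cpu].first;
                        freelist[cpu].first = n;
                        pthread_mutex_unlock(&freelist[cpu].lock);
                        return;
                }
                cpu = (cpu + 1) % NCPU;
                if (cpu == this_cpu &&
                    pthread_mutex_trylock(&extralist.lock) == 0) {
                        n->next = extralist.first;
                        extralist.first = n;
                        pthread_mutex_unlock(&extralist.lock);
                        return;
                }
        }
}

int main(void)
{
        struct node n;
        int i;

        for (i = 0; i < NCPU; i++) {
                pthread_mutex_init(&freelist[i].lock, NULL);
                freelist[i].first = NULL;
        }
        push_nmi(&n, 0);
        return 0;
}

The pop side (___pcpu_freelist_pop_nmi() above) is the mirror image: one trylock attempt per CPU, then a trylock on the extralist, returning NULL rather than spinning when everything is contended.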
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c +index 0d129421e75fc..7133d5c6e1a6d 100644 +--- a/kernel/dma/mapping.c ++++ b/kernel/dma/mapping.c +@@ -144,6 +144,10 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, + dma_addr_t addr; + + BUG_ON(!valid_dma_direction(dir)); ++ ++ if (WARN_ON_ONCE(!dev->dma_mask)) ++ return DMA_MAPPING_ERROR; ++ + if (dma_map_direct(dev, ops)) + addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); + else +@@ -179,6 +183,10 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, + int ents; + + BUG_ON(!valid_dma_direction(dir)); ++ ++ if (WARN_ON_ONCE(!dev->dma_mask)) ++ return 0; ++ + if (dma_map_direct(dev, ops)) + ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); + else +@@ -213,6 +221,9 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, + + BUG_ON(!valid_dma_direction(dir)); + ++ if (WARN_ON_ONCE(!dev->dma_mask)) ++ return DMA_MAPPING_ERROR; ++ + /* Don't allow RAM to be mapped */ + if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr)))) + return DMA_MAPPING_ERROR; +diff --git a/kernel/events/core.c b/kernel/events/core.c +index e8bf92202542b..6a1ae6a62d489 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -5869,11 +5869,11 @@ static void perf_pmu_output_stop(struct perf_event *event); + static void perf_mmap_close(struct vm_area_struct *vma) + { + struct perf_event *event = vma->vm_file->private_data; +- + struct perf_buffer *rb = ring_buffer_get(event); + struct user_struct *mmap_user = rb->mmap_user; + int mmap_locked = rb->mmap_locked; + unsigned long size = perf_data_size(rb); ++ bool detach_rest = false; + + if (event->pmu->event_unmapped) + event->pmu->event_unmapped(event, vma->vm_mm); +@@ -5904,7 +5904,8 @@ static void perf_mmap_close(struct vm_area_struct *vma) + mutex_unlock(&event->mmap_mutex); + } + +- atomic_dec(&rb->mmap_count); ++ if (atomic_dec_and_test(&rb->mmap_count)) ++ detach_rest = true; + + if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) + goto out_put; +@@ -5913,7 +5914,7 @@ static void perf_mmap_close(struct vm_area_struct *vma) + mutex_unlock(&event->mmap_mutex); + + /* If there's still other mmap()s of this buffer, we're done. */ +- if (atomic_read(&rb->mmap_count)) ++ if (!detach_rest) + goto out_put; + + /* +diff --git a/kernel/fork.c b/kernel/fork.c +index da8d360fb0326..a9ce750578cae 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1810,6 +1810,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk) + free_task(tsk); + } + ++static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) ++{ ++ /* Skip if kernel thread */ ++ if (!tsk->mm) ++ return; ++ ++ /* Skip if spawning a thread or using vfork */ ++ if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM) ++ return; ++ ++ /* We need to synchronize with __set_oom_adj */ ++ mutex_lock(&oom_adj_mutex); ++ set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); ++ /* Update the values in case they were changed after copy_signal */ ++ tsk->signal->oom_score_adj = current->signal->oom_score_adj; ++ tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min; ++ mutex_unlock(&oom_adj_mutex); ++} ++ + /* + * This creates a new process as a copy of the old one, + * but does not actually start it yet. 
+@@ -2282,6 +2301,8 @@ static __latent_entropy struct task_struct *copy_process( + trace_task_newtask(p, clone_flags); + uprobe_copy_process(p, clone_flags); + ++ copy_oom_score_adj(clone_flags, p); ++ + return p; + + bad_fork_cancel_cgroup: +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c +index 2facbbd146ec2..85d15f0362dc5 100644 +--- a/kernel/locking/lockdep.c ++++ b/kernel/locking/lockdep.c +@@ -76,6 +76,23 @@ module_param(lock_stat, int, 0644); + #define lock_stat 0 + #endif + ++DEFINE_PER_CPU(unsigned int, lockdep_recursion); ++EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion); ++ ++static inline bool lockdep_enabled(void) ++{ ++ if (!debug_locks) ++ return false; ++ ++ if (raw_cpu_read(lockdep_recursion)) ++ return false; ++ ++ if (current->lockdep_recursion) ++ return false; ++ ++ return true; ++} ++ + /* + * lockdep_lock: protects the lockdep graph, the hashes and the + * class/list/hash allocators. +@@ -93,7 +110,7 @@ static inline void lockdep_lock(void) + + arch_spin_lock(&__lock); + __owner = current; +- current->lockdep_recursion++; ++ __this_cpu_inc(lockdep_recursion); + } + + static inline void lockdep_unlock(void) +@@ -101,7 +118,7 @@ static inline void lockdep_unlock(void) + if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current)) + return; + +- current->lockdep_recursion--; ++ __this_cpu_dec(lockdep_recursion); + __owner = NULL; + arch_spin_unlock(&__lock); + } +@@ -393,10 +410,15 @@ void lockdep_init_task(struct task_struct *task) + task->lockdep_recursion = 0; + } + ++static __always_inline void lockdep_recursion_inc(void) ++{ ++ __this_cpu_inc(lockdep_recursion); ++} ++ + static __always_inline void lockdep_recursion_finish(void) + { +- if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK)) +- current->lockdep_recursion = 0; ++ if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion))) ++ __this_cpu_write(lockdep_recursion, 0); + } + + void lockdep_set_selftest_task(struct task_struct *task) +@@ -585,6 +607,8 @@ static const char *usage_str[] = + #include "lockdep_states.h" + #undef LOCKDEP_STATE + [LOCK_USED] = "INITIAL USE", ++ [LOCK_USED_READ] = "INITIAL READ USE", ++ /* abused as string storage for verify_lock_unused() */ + [LOCK_USAGE_STATES] = "IN-NMI", + }; + #endif +@@ -1939,7 +1963,7 @@ static void print_lock_class_header(struct lock_class *class, int depth) + #endif + printk(KERN_CONT " {\n"); + +- for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { ++ for (bit = 0; bit < LOCK_TRACE_STATES; bit++) { + if (class->usage_mask & (1 << bit)) { + int len = depth; + +@@ -3657,7 +3681,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip) + if (unlikely(in_nmi())) + return; + +- if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) ++ if (unlikely(__this_cpu_read(lockdep_recursion))) + return; + + if (unlikely(lockdep_hardirqs_enabled())) { +@@ -3693,7 +3717,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip) + + current->hardirq_chain_key = current->curr_chain_key; + +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + __trace_hardirqs_on_caller(); + lockdep_recursion_finish(); + } +@@ -3726,7 +3750,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip) + goto skip_checks; + } + +- if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) ++ if (unlikely(__this_cpu_read(lockdep_recursion))) + return; + + if (lockdep_hardirqs_enabled()) { +@@ -3779,7 +3803,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip) + if (in_nmi()) { + if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI)) + return; +- } else if 
(current->lockdep_recursion & LOCKDEP_RECURSION_MASK) ++ } else if (__this_cpu_read(lockdep_recursion)) + return; + + /* +@@ -3812,7 +3836,7 @@ void lockdep_softirqs_on(unsigned long ip) + { + struct irqtrace_events *trace = ¤t->irqtrace; + +- if (unlikely(!debug_locks || current->lockdep_recursion)) ++ if (unlikely(!lockdep_enabled())) + return; + + /* +@@ -3827,7 +3851,7 @@ void lockdep_softirqs_on(unsigned long ip) + return; + } + +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + /* + * We'll do an OFF -> ON transition: + */ +@@ -3850,7 +3874,7 @@ void lockdep_softirqs_on(unsigned long ip) + */ + void lockdep_softirqs_off(unsigned long ip) + { +- if (unlikely(!debug_locks || current->lockdep_recursion)) ++ if (unlikely(!lockdep_enabled())) + return; + + /* +@@ -3969,7 +3993,7 @@ static int separate_irq_context(struct task_struct *curr, + static int mark_lock(struct task_struct *curr, struct held_lock *this, + enum lock_usage_bit new_bit) + { +- unsigned int old_mask, new_mask, ret = 1; ++ unsigned int new_mask, ret = 1; + + if (new_bit >= LOCK_USAGE_STATES) { + DEBUG_LOCKS_WARN_ON(1); +@@ -3996,30 +4020,26 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, + if (unlikely(hlock_class(this)->usage_mask & new_mask)) + goto unlock; + +- old_mask = hlock_class(this)->usage_mask; + hlock_class(this)->usage_mask |= new_mask; + +- /* +- * Save one usage_traces[] entry and map both LOCK_USED and +- * LOCK_USED_READ onto the same entry. +- */ +- if (new_bit == LOCK_USED || new_bit == LOCK_USED_READ) { +- if (old_mask & (LOCKF_USED | LOCKF_USED_READ)) +- goto unlock; +- new_bit = LOCK_USED; ++ if (new_bit < LOCK_TRACE_STATES) { ++ if (!(hlock_class(this)->usage_traces[new_bit] = save_trace())) ++ return 0; + } + +- if (!(hlock_class(this)->usage_traces[new_bit] = save_trace())) +- return 0; +- + switch (new_bit) { ++ case 0 ... 
LOCK_USED-1: ++ ret = mark_lock_irq(curr, this, new_bit); ++ if (!ret) ++ return 0; ++ break; ++ + case LOCK_USED: + debug_atomic_dec(nr_unused_locks); + break; ++ + default: +- ret = mark_lock_irq(curr, this, new_bit); +- if (!ret) +- return 0; ++ break; + } + + unlock: +@@ -4235,11 +4255,11 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name, + if (subclass) { + unsigned long flags; + +- if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) ++ if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled())) + return; + + raw_local_irq_save(flags); +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + register_lock_class(lock, subclass, 1); + lockdep_recursion_finish(); + raw_local_irq_restore(flags); +@@ -4922,11 +4942,11 @@ void lock_set_class(struct lockdep_map *lock, const char *name, + { + unsigned long flags; + +- if (unlikely(current->lockdep_recursion)) ++ if (unlikely(!lockdep_enabled())) + return; + + raw_local_irq_save(flags); +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + check_flags(flags); + if (__lock_set_class(lock, name, key, subclass, ip)) + check_chain_key(current); +@@ -4939,11 +4959,11 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip) + { + unsigned long flags; + +- if (unlikely(current->lockdep_recursion)) ++ if (unlikely(!lockdep_enabled())) + return; + + raw_local_irq_save(flags); +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + check_flags(flags); + if (__lock_downgrade(lock, ip)) + check_chain_key(current); +@@ -4981,7 +5001,7 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock + + static bool lockdep_nmi(void) + { +- if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK) ++ if (raw_cpu_read(lockdep_recursion)) + return false; + + if (!in_nmi()) +@@ -5002,7 +5022,10 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, + + trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); + +- if (unlikely(current->lockdep_recursion)) { ++ if (!debug_locks) ++ return; ++ ++ if (unlikely(!lockdep_enabled())) { + /* XXX allow trylock from NMI ?!? 
*/ + if (lockdep_nmi() && !trylock) { + struct held_lock hlock; +@@ -5025,7 +5048,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, + raw_local_irq_save(flags); + check_flags(flags); + +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + __lock_acquire(lock, subclass, trylock, read, check, + irqs_disabled_flags(flags), nest_lock, ip, 0, 0); + lockdep_recursion_finish(); +@@ -5039,13 +5062,13 @@ void lock_release(struct lockdep_map *lock, unsigned long ip) + + trace_lock_release(lock, ip); + +- if (unlikely(current->lockdep_recursion)) ++ if (unlikely(!lockdep_enabled())) + return; + + raw_local_irq_save(flags); + check_flags(flags); + +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + if (__lock_release(lock, ip)) + check_chain_key(current); + lockdep_recursion_finish(); +@@ -5058,13 +5081,13 @@ noinstr int lock_is_held_type(const struct lockdep_map *lock, int read) + unsigned long flags; + int ret = 0; + +- if (unlikely(current->lockdep_recursion)) ++ if (unlikely(!lockdep_enabled())) + return 1; /* avoid false negative lockdep_assert_held() */ + + raw_local_irq_save(flags); + check_flags(flags); + +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + ret = __lock_is_held(lock, read); + lockdep_recursion_finish(); + raw_local_irq_restore(flags); +@@ -5079,13 +5102,13 @@ struct pin_cookie lock_pin_lock(struct lockdep_map *lock) + struct pin_cookie cookie = NIL_COOKIE; + unsigned long flags; + +- if (unlikely(current->lockdep_recursion)) ++ if (unlikely(!lockdep_enabled())) + return cookie; + + raw_local_irq_save(flags); + check_flags(flags); + +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + cookie = __lock_pin_lock(lock); + lockdep_recursion_finish(); + raw_local_irq_restore(flags); +@@ -5098,13 +5121,13 @@ void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie) + { + unsigned long flags; + +- if (unlikely(current->lockdep_recursion)) ++ if (unlikely(!lockdep_enabled())) + return; + + raw_local_irq_save(flags); + check_flags(flags); + +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + __lock_repin_lock(lock, cookie); + lockdep_recursion_finish(); + raw_local_irq_restore(flags); +@@ -5115,13 +5138,13 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie) + { + unsigned long flags; + +- if (unlikely(current->lockdep_recursion)) ++ if (unlikely(!lockdep_enabled())) + return; + + raw_local_irq_save(flags); + check_flags(flags); + +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + __lock_unpin_lock(lock, cookie); + lockdep_recursion_finish(); + raw_local_irq_restore(flags); +@@ -5251,15 +5274,12 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) + + trace_lock_acquired(lock, ip); + +- if (unlikely(!lock_stat || !debug_locks)) +- return; +- +- if (unlikely(current->lockdep_recursion)) ++ if (unlikely(!lock_stat || !lockdep_enabled())) + return; + + raw_local_irq_save(flags); + check_flags(flags); +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + __lock_contended(lock, ip); + lockdep_recursion_finish(); + raw_local_irq_restore(flags); +@@ -5272,15 +5292,12 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip) + + trace_lock_contended(lock, ip); + +- if (unlikely(!lock_stat || !debug_locks)) +- return; +- +- if (unlikely(current->lockdep_recursion)) ++ if (unlikely(!lock_stat || !lockdep_enabled())) + return; + + raw_local_irq_save(flags); + check_flags(flags); +- current->lockdep_recursion++; ++ lockdep_recursion_inc(); + 
__lock_acquired(lock, ip); + lockdep_recursion_finish(); + raw_local_irq_restore(flags); +diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h +index b0be1560ed17a..de49f9e1c11ba 100644 +--- a/kernel/locking/lockdep_internals.h ++++ b/kernel/locking/lockdep_internals.h +@@ -20,9 +20,12 @@ enum lock_usage_bit { + #undef LOCKDEP_STATE + LOCK_USED, + LOCK_USED_READ, +- LOCK_USAGE_STATES ++ LOCK_USAGE_STATES, + }; + ++/* states after LOCK_USED_READ are not traced and printed */ ++static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES); ++ + #define LOCK_USAGE_READ_MASK 1 + #define LOCK_USAGE_DIR_MASK 2 + #define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK)) +@@ -121,7 +124,7 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ = + extern struct list_head all_lock_classes; + extern struct lock_chain lock_chains[]; + +-#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2) ++#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1) + + extern void get_usage_chars(struct lock_class *class, + char usage[LOCK_USAGE_CHARS]); +diff --git a/kernel/module.c b/kernel/module.c +index 1c5cff34d9f28..8486123ffd7af 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -91,8 +91,9 @@ EXPORT_SYMBOL_GPL(module_mutex); + static LIST_HEAD(modules); + + /* Work queue for freeing init sections in success case */ +-static struct work_struct init_free_wq; +-static struct llist_head init_free_list; ++static void do_free_init(struct work_struct *w); ++static DECLARE_WORK(init_free_wq, do_free_init); ++static LLIST_HEAD(init_free_list); + + #ifdef CONFIG_MODULES_TREE_LOOKUP + +@@ -3579,14 +3580,6 @@ static void do_free_init(struct work_struct *w) + } + } + +-static int __init modules_wq_init(void) +-{ +- INIT_WORK(&init_free_wq, do_free_init); +- init_llist_head(&init_free_list); +- return 0; +-} +-module_init(modules_wq_init); +- + /* + * This is where the real work happens. + * +diff --git a/kernel/notifier.c b/kernel/notifier.c +index 84c987dfbe036..1b019cbca594a 100644 +--- a/kernel/notifier.c ++++ b/kernel/notifier.c +@@ -94,6 +94,34 @@ static int notifier_call_chain(struct notifier_block **nl, + } + NOKPROBE_SYMBOL(notifier_call_chain); + ++/** ++ * notifier_call_chain_robust - Inform the registered notifiers about an event ++ * and rollback on error. ++ * @nl: Pointer to head of the blocking notifier chain ++ * @val_up: Value passed unmodified to the notifier function ++ * @val_down: Value passed unmodified to the notifier function when recovering ++ * from an error on @val_up ++ * @v Pointer passed unmodified to the notifier function ++ * ++ * NOTE: It is important the @nl chain doesn't change between the two ++ * invocations of notifier_call_chain() such that we visit the ++ * exact same notifier callbacks; this rules out any RCU usage. ++ * ++ * Returns: the return value of the @val_up call. ++ */ ++static int notifier_call_chain_robust(struct notifier_block **nl, ++ unsigned long val_up, unsigned long val_down, ++ void *v) ++{ ++ int ret, nr = 0; ++ ++ ret = notifier_call_chain(nl, val_up, v, -1, &nr); ++ if (ret & NOTIFY_STOP_MASK) ++ notifier_call_chain(nl, val_down, v, nr-1, NULL); ++ ++ return ret; ++} ++ + /* + * Atomic notifier chain routines. Registration and unregistration + * use a spinlock, and call_chain is synchronized by RCU (no locks). 
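The notifier_call_chain_robust() helper added above is the heart of this series' notifier rework: run the chain with the "up" event while counting how many callbacks fired, and if one of them refuses, replay the "down" event to exactly the callbacks that already saw "up". A self-contained sketch of that bookkeeping, with a fixed function array in place of the kernel's linked chain and plain nonzero-means-stop return values in place of NOTIFY_STOP_MASK (all names illustrative):

#include <stdio.h>

#define NLISTENERS 3

typedef int (*notifier_fn)(unsigned long event);

/* Call up to nr_to_call listeners (-1 means all), counting how many
 * actually ran in *nr_calls, and stop at the first failure. */
static int call_chain(notifier_fn *fns, int n, unsigned long event,
                      int nr_to_call, int *nr_calls)
{
        int i, ret = 0;

        for (i = 0; i < n && nr_to_call; i++, nr_to_call--) {
                if (nr_calls)
                        (*nr_calls)++;
                ret = fns[i](event);
                if (ret)
                        break;
        }
        return ret;
}

/* The robust variant: one helper does the up/down bookkeeping that
 * every caller previously open-coded with its own nr_calls counter. */
static int call_chain_robust(notifier_fn *fns, int n,
                             unsigned long up, unsigned long down)
{
        int nr = 0;
        int ret = call_chain(fns, n, up, -1, &nr);

        if (ret)
                call_chain(fns, n, down, nr - 1, NULL);
        return ret;
}

static int ok(unsigned long e)   { printf("ok: event %lu\n", e); return 0; }
static int veto(unsigned long e) { return e == 1 ? -1 : 0; }

int main(void)
{
        notifier_fn chain[NLISTENERS] = { ok, ok, veto };

        /* Event 1 is "up", event 2 is "down": veto fails, so the two ok
         * listeners each see event 2 and can undo their preparation. */
        return call_chain_robust(chain, NLISTENERS, 1, 2) ? 1 : 0;
}

Note the nr - 1 in the rollback pass: the callback that failed never completed its "up" work, so it is skipped on the way down, just as the kernel helper skips it.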
+@@ -144,13 +172,30 @@ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, + } + EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); + ++int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh, ++ unsigned long val_up, unsigned long val_down, void *v) ++{ ++ unsigned long flags; ++ int ret; ++ ++ /* ++ * Musn't use RCU; because then the notifier list can ++ * change between the up and down traversal. ++ */ ++ spin_lock_irqsave(&nh->lock, flags); ++ ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v); ++ spin_unlock_irqrestore(&nh->lock, flags); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(atomic_notifier_call_chain_robust); ++NOKPROBE_SYMBOL(atomic_notifier_call_chain_robust); ++ + /** +- * __atomic_notifier_call_chain - Call functions in an atomic notifier chain ++ * atomic_notifier_call_chain - Call functions in an atomic notifier chain + * @nh: Pointer to head of the atomic notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function +- * @nr_to_call: See the comment for notifier_call_chain. +- * @nr_calls: See the comment for notifier_call_chain. + * + * Calls each function in a notifier chain in turn. The functions + * run in an atomic context, so they must not block. +@@ -163,24 +208,16 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); + * Otherwise the return value is the return value + * of the last notifier function called. + */ +-int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, +- unsigned long val, void *v, +- int nr_to_call, int *nr_calls) ++int atomic_notifier_call_chain(struct atomic_notifier_head *nh, ++ unsigned long val, void *v) + { + int ret; + + rcu_read_lock(); +- ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); ++ ret = notifier_call_chain(&nh->head, val, v, -1, NULL); + rcu_read_unlock(); +- return ret; +-} +-EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); +-NOKPROBE_SYMBOL(__atomic_notifier_call_chain); + +-int atomic_notifier_call_chain(struct atomic_notifier_head *nh, +- unsigned long val, void *v) +-{ +- return __atomic_notifier_call_chain(nh, val, v, -1, NULL); ++ return ret; + } + EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); + NOKPROBE_SYMBOL(atomic_notifier_call_chain); +@@ -250,13 +287,30 @@ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, + } + EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); + ++int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh, ++ unsigned long val_up, unsigned long val_down, void *v) ++{ ++ int ret = NOTIFY_DONE; ++ ++ /* ++ * We check the head outside the lock, but if this access is ++ * racy then it does not matter what the result of the test ++ * is, we re-check the list after having taken the lock anyway: ++ */ ++ if (rcu_access_pointer(nh->head)) { ++ down_read(&nh->rwsem); ++ ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v); ++ up_read(&nh->rwsem); ++ } ++ return ret; ++} ++EXPORT_SYMBOL_GPL(blocking_notifier_call_chain_robust); ++ + /** +- * __blocking_notifier_call_chain - Call functions in a blocking notifier chain ++ * blocking_notifier_call_chain - Call functions in a blocking notifier chain + * @nh: Pointer to head of the blocking notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function +- * @nr_to_call: See comment for notifier_call_chain. +- * @nr_calls: See comment for notifier_call_chain. 
+ * + * Calls each function in a notifier chain in turn. The functions + * run in a process context, so they are allowed to block. +@@ -268,9 +322,8 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); + * Otherwise the return value is the return value + * of the last notifier function called. + */ +-int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, +- unsigned long val, void *v, +- int nr_to_call, int *nr_calls) ++int blocking_notifier_call_chain(struct blocking_notifier_head *nh, ++ unsigned long val, void *v) + { + int ret = NOTIFY_DONE; + +@@ -281,19 +334,11 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, + */ + if (rcu_access_pointer(nh->head)) { + down_read(&nh->rwsem); +- ret = notifier_call_chain(&nh->head, val, v, nr_to_call, +- nr_calls); ++ ret = notifier_call_chain(&nh->head, val, v, -1, NULL); + up_read(&nh->rwsem); + } + return ret; + } +-EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain); +- +-int blocking_notifier_call_chain(struct blocking_notifier_head *nh, +- unsigned long val, void *v) +-{ +- return __blocking_notifier_call_chain(nh, val, v, -1, NULL); +-} + EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); + + /* +@@ -335,13 +380,18 @@ int raw_notifier_chain_unregister(struct raw_notifier_head *nh, + } + EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); + ++int raw_notifier_call_chain_robust(struct raw_notifier_head *nh, ++ unsigned long val_up, unsigned long val_down, void *v) ++{ ++ return notifier_call_chain_robust(&nh->head, val_up, val_down, v); ++} ++EXPORT_SYMBOL_GPL(raw_notifier_call_chain_robust); ++ + /** +- * __raw_notifier_call_chain - Call functions in a raw notifier chain ++ * raw_notifier_call_chain - Call functions in a raw notifier chain + * @nh: Pointer to head of the raw notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function +- * @nr_to_call: See comment for notifier_call_chain. +- * @nr_calls: See comment for notifier_call_chain + * + * Calls each function in a notifier chain in turn. The functions + * run in an undefined context. +@@ -354,18 +404,10 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); + * Otherwise the return value is the return value + * of the last notifier function called. + */ +-int __raw_notifier_call_chain(struct raw_notifier_head *nh, +- unsigned long val, void *v, +- int nr_to_call, int *nr_calls) +-{ +- return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); +-} +-EXPORT_SYMBOL_GPL(__raw_notifier_call_chain); +- + int raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v) + { +- return __raw_notifier_call_chain(nh, val, v, -1, NULL); ++ return notifier_call_chain(&nh->head, val, v, -1, NULL); + } + EXPORT_SYMBOL_GPL(raw_notifier_call_chain); + +@@ -437,12 +479,10 @@ int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, + EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); + + /** +- * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain ++ * srcu_notifier_call_chain - Call functions in an SRCU notifier chain + * @nh: Pointer to head of the SRCU notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function +- * @nr_to_call: See comment for notifier_call_chain. +- * @nr_calls: See comment for notifier_call_chain + * + * Calls each function in a notifier chain in turn. The functions + * run in a process context, so they are allowed to block. 
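One detail worth calling out in the blocking_notifier_call_chain_robust() hunk above: the rwsem is held across both traversals, not taken once per pass. The kernel-doc NOTE earlier explains why - the rollback pass must visit exactly the listeners the "up" pass visited, which rules out RCU, whose readers tolerate concurrent list mutation. A rough sketch of that locking scope, using a pthread rwlock in place of the rwsem and a fixed array in place of the chain (names are illustrative, error handling elided):

#include <pthread.h>

#define MAXL 8

typedef int (*notifier_fn)(unsigned long event);

static pthread_rwlock_t chain_lock = PTHREAD_RWLOCK_INITIALIZER;
static notifier_fn chain[MAXL];
static int nchain;

/* Registration is a writer: it is excluded for the whole up+down
 * rollback below, not merely for each individual pass. */
static int register_listener(notifier_fn fn)
{
        int ret = -1;

        pthread_rwlock_wrlock(&chain_lock);
        if (nchain < MAXL) {
                chain[nchain++] = fn;
                ret = 0;
        }
        pthread_rwlock_unlock(&chain_lock);
        return ret;
}

static int call_chain_robust(unsigned long up, unsigned long down)
{
        int i, nr = 0, ret = 0;

        /* One read-side critical section spans both traversals, so the
         * "down" pass sees the same listener set as the "up" pass. */
        pthread_rwlock_rdlock(&chain_lock);
        for (i = 0; i < nchain; i++) {
                nr++;
                ret = chain[i](up);
                if (ret)
                        break;
        }
        if (ret)
                for (i = 0; i < nr - 1; i++)
                        chain[i](down);
        pthread_rwlock_unlock(&chain_lock);
        return ret;
}

static int nop(unsigned long e) { (void)e; return 0; }

int main(void)
{
        register_listener(nop);
        return call_chain_robust(1, 2);
}

The atomic variant above makes the same trade the other way: it swaps its usual RCU read section for the registration spinlock, accepting the extra contention to keep the two passes coherent.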
+@@ -454,25 +494,17 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); + * Otherwise the return value is the return value + * of the last notifier function called. + */ +-int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, +- unsigned long val, void *v, +- int nr_to_call, int *nr_calls) ++int srcu_notifier_call_chain(struct srcu_notifier_head *nh, ++ unsigned long val, void *v) + { + int ret; + int idx; + + idx = srcu_read_lock(&nh->srcu); +- ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); ++ ret = notifier_call_chain(&nh->head, val, v, -1, NULL); + srcu_read_unlock(&nh->srcu, idx); + return ret; + } +-EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain); +- +-int srcu_notifier_call_chain(struct srcu_notifier_head *nh, +- unsigned long val, void *v) +-{ +- return __srcu_notifier_call_chain(nh, val, v, -1, NULL); +-} + EXPORT_SYMBOL_GPL(srcu_notifier_call_chain); + + /** +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c +index e7aa57fb2fdc3..2fc7d509a34fc 100644 +--- a/kernel/power/hibernate.c ++++ b/kernel/power/hibernate.c +@@ -706,8 +706,8 @@ static int load_image_and_restore(void) + */ + int hibernate(void) + { +- int error, nr_calls = 0; + bool snapshot_test = false; ++ int error; + + if (!hibernation_available()) { + pm_pr_dbg("Hibernation not available.\n"); +@@ -723,11 +723,9 @@ int hibernate(void) + + pr_info("hibernation entry\n"); + pm_prepare_console(); +- error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); +- if (error) { +- nr_calls--; +- goto Exit; +- } ++ error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION); ++ if (error) ++ goto Restore; + + ksys_sync_helper(); + +@@ -785,7 +783,8 @@ int hibernate(void) + /* Don't bother checking whether freezer_test_done is true */ + freezer_test_done = false; + Exit: +- __pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL); ++ pm_notifier_call_chain(PM_POST_HIBERNATION); ++ Restore: + pm_restore_console(); + hibernate_release(); + Unlock: +@@ -804,7 +803,7 @@ int hibernate(void) + */ + int hibernate_quiet_exec(int (*func)(void *data), void *data) + { +- int error, nr_calls = 0; ++ int error; + + lock_system_sleep(); + +@@ -815,11 +814,9 @@ int hibernate_quiet_exec(int (*func)(void *data), void *data) + + pm_prepare_console(); + +- error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); +- if (error) { +- nr_calls--; +- goto exit; +- } ++ error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION); ++ if (error) ++ goto restore; + + error = freeze_processes(); + if (error) +@@ -880,8 +877,9 @@ thaw: + thaw_processes(); + + exit: +- __pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL); ++ pm_notifier_call_chain(PM_POST_HIBERNATION); + ++restore: + pm_restore_console(); + + hibernate_release(); +@@ -910,7 +908,7 @@ EXPORT_SYMBOL_GPL(hibernate_quiet_exec); + */ + static int software_resume(void) + { +- int error, nr_calls = 0; ++ int error; + + /* + * If the user said "noresume".. bail out early. +@@ -948,17 +946,6 @@ static int software_resume(void) + + /* Check if the device is there */ + swsusp_resume_device = name_to_dev_t(resume_file); +- +- /* +- * name_to_dev_t is ineffective to verify parition if resume_file is in +- * integer format. (e.g. 
major:minor) +- */ +- if (isdigit(resume_file[0]) && resume_wait) { +- int partno; +- while (!get_gendisk(swsusp_resume_device, &partno)) +- msleep(10); +- } +- + if (!swsusp_resume_device) { + /* + * Some device discovery might still be in progress; we need +@@ -997,11 +984,9 @@ static int software_resume(void) + + pr_info("resume from hibernation\n"); + pm_prepare_console(); +- error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls); +- if (error) { +- nr_calls--; +- goto Close_Finish; +- } ++ error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE); ++ if (error) ++ goto Restore; + + pm_pr_dbg("Preparing processes for hibernation restore.\n"); + error = freeze_processes(); +@@ -1017,7 +1002,8 @@ static int software_resume(void) + error = load_image_and_restore(); + thaw_processes(); + Finish: +- __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL); ++ pm_notifier_call_chain(PM_POST_RESTORE); ++ Restore: + pm_restore_console(); + pr_info("resume failed (%d)\n", error); + hibernate_release(); +diff --git a/kernel/power/main.c b/kernel/power/main.c +index 40f86ec4ab30d..0aefd6f57e0ac 100644 +--- a/kernel/power/main.c ++++ b/kernel/power/main.c +@@ -80,18 +80,18 @@ int unregister_pm_notifier(struct notifier_block *nb) + } + EXPORT_SYMBOL_GPL(unregister_pm_notifier); + +-int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls) ++int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down) + { + int ret; + +- ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL, +- nr_to_call, nr_calls); ++ ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL); + + return notifier_to_errno(ret); + } ++ + int pm_notifier_call_chain(unsigned long val) + { +- return __pm_notifier_call_chain(val, -1, NULL); ++ return blocking_notifier_call_chain(&pm_chain_head, val, NULL); + } + + /* If set, devices may be suspended and resumed asynchronously. 
*/ +diff --git a/kernel/power/power.h b/kernel/power/power.h +index 32fc89ac96c30..24f12d534515f 100644 +--- a/kernel/power/power.h ++++ b/kernel/power/power.h +@@ -210,8 +210,7 @@ static inline void suspend_test_finish(const char *label) {} + + #ifdef CONFIG_PM_SLEEP + /* kernel/power/main.c */ +-extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call, +- int *nr_calls); ++extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down); + extern int pm_notifier_call_chain(unsigned long val); + #endif + +diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c +index 8b1bb5ee7e5d6..32391acc806bf 100644 +--- a/kernel/power/suspend.c ++++ b/kernel/power/suspend.c +@@ -342,18 +342,16 @@ static int suspend_test(int level) + */ + static int suspend_prepare(suspend_state_t state) + { +- int error, nr_calls = 0; ++ int error; + + if (!sleep_state_supported(state)) + return -EPERM; + + pm_prepare_console(); + +- error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls); +- if (error) { +- nr_calls--; +- goto Finish; +- } ++ error = pm_notifier_call_chain_robust(PM_SUSPEND_PREPARE, PM_POST_SUSPEND); ++ if (error) ++ goto Restore; + + trace_suspend_resume(TPS("freeze_processes"), 0, true); + error = suspend_freeze_processes(); +@@ -363,8 +361,8 @@ static int suspend_prepare(suspend_state_t state) + + suspend_stats.failed_freeze++; + dpm_save_failed_step(SUSPEND_FREEZE); +- Finish: +- __pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL); ++ pm_notifier_call_chain(PM_POST_SUSPEND); ++ Restore: + pm_restore_console(); + return error; + } +diff --git a/kernel/power/user.c b/kernel/power/user.c +index d5eedc2baa2a1..047f598f89a5c 100644 +--- a/kernel/power/user.c ++++ b/kernel/power/user.c +@@ -46,7 +46,7 @@ int is_hibernate_resume_dev(const struct inode *bd_inode) + static int snapshot_open(struct inode *inode, struct file *filp) + { + struct snapshot_data *data; +- int error, nr_calls = 0; ++ int error; + + if (!hibernation_available()) + return -EPERM; +@@ -73,9 +73,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) + swap_type_of(swsusp_resume_device, 0, NULL) : -1; + data->mode = O_RDONLY; + data->free_bitmaps = false; +- error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); +- if (error) +- __pm_notifier_call_chain(PM_POST_HIBERNATION, --nr_calls, NULL); ++ error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION); + } else { + /* + * Resuming. 
We may need to wait for the image device to +@@ -85,15 +83,11 @@ static int snapshot_open(struct inode *inode, struct file *filp) + + data->swap = -1; + data->mode = O_WRONLY; +- error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls); ++ error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE); + if (!error) { + error = create_basic_memory_bitmaps(); + data->free_bitmaps = !error; +- } else +- nr_calls--; +- +- if (error) +- __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL); ++ } + } + if (error) + hibernate_release(); +diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c +index f453bf8d2f1ef..49202099692be 100644 +--- a/kernel/rcu/rcutorture.c ++++ b/kernel/rcu/rcutorture.c +@@ -2160,9 +2160,20 @@ static int __init rcu_torture_fwd_prog_init(void) + return -ENOMEM; + spin_lock_init(&rfp->rcu_fwd_lock); + rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; ++ rcu_fwds = rfp; + return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task); + } + ++static void rcu_torture_fwd_prog_cleanup(void) ++{ ++ struct rcu_fwd *rfp; ++ ++ torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); ++ rfp = rcu_fwds; ++ rcu_fwds = NULL; ++ kfree(rfp); ++} ++ + /* Callback function for RCU barrier testing. */ + static void rcu_torture_barrier_cbf(struct rcu_head *rcu) + { +@@ -2460,7 +2471,7 @@ rcu_torture_cleanup(void) + show_rcu_gp_kthreads(); + rcu_torture_read_exit_cleanup(); + rcu_torture_barrier_cleanup(); +- torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); ++ rcu_torture_fwd_prog_cleanup(); + torture_stop_kthread(rcu_torture_stall, stall_task); + torture_stop_kthread(rcu_torture_writer, writer_task); + +diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c +index d9291f883b542..952595c678b37 100644 +--- a/kernel/rcu/refscale.c ++++ b/kernel/rcu/refscale.c +@@ -546,9 +546,11 @@ static int main_func(void *arg) + // Print the average of all experiments + SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n"); + +- buf[0] = 0; +- strcat(buf, "\n"); +- strcat(buf, "Runs\tTime(ns)\n"); ++ if (!errexit) { ++ buf[0] = 0; ++ strcat(buf, "\n"); ++ strcat(buf, "Runs\tTime(ns)\n"); ++ } + + for (exp = 0; exp < nruns; exp++) { + u64 avg; +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index f78ee759af9cb..388a2ad292bf4 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -1898,7 +1898,7 @@ static void rcu_gp_fqs_loop(void) + break; + /* If time for quiescent-state forcing, do it. 
*/ + if (!time_after(rcu_state.jiffies_force_qs, jiffies) || +- (gf & RCU_GP_FLAG_FQS)) { ++ (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) { + trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, + TPS("fqsstart")); + rcu_gp_fqs(first_gp_fqs); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 2d95dc3f46444..b1e0da56abcac 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -43,7 +43,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp); + + DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); + +-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) ++#ifdef CONFIG_SCHED_DEBUG + /* + * Debugging: various feature bits + * +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 1a68a0536adda..48a6d442b4443 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -1548,7 +1548,7 @@ struct task_numa_env { + + static unsigned long cpu_load(struct rq *rq); + static unsigned long cpu_util(int cpu); +-static inline long adjust_numa_imbalance(int imbalance, int src_nr_running); ++static inline long adjust_numa_imbalance(int imbalance, int nr_running); + + static inline enum + numa_type numa_classify(unsigned int imbalance_pct, +@@ -1925,7 +1925,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, + src_running = env->src_stats.nr_running - 1; + dst_running = env->dst_stats.nr_running + 1; + imbalance = max(0, dst_running - src_running); +- imbalance = adjust_numa_imbalance(imbalance, src_running); ++ imbalance = adjust_numa_imbalance(imbalance, dst_running); + + /* Use idle CPU if there is no imbalance */ + if (!imbalance) { +@@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int + /* + * Scan the local SMT mask for idle CPUs. + */ +-static int select_idle_smt(struct task_struct *p, int target) ++static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) + { + int cpu; + +@@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target) + return -1; + + for_each_cpu(cpu, cpu_smt_mask(target)) { +- if (!cpumask_test_cpu(cpu, p->cpus_ptr)) ++ if (!cpumask_test_cpu(cpu, p->cpus_ptr) || ++ !cpumask_test_cpu(cpu, sched_domain_span(sd))) + continue; + if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) + return cpu; +@@ -6099,7 +6100,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s + return -1; + } + +-static inline int select_idle_smt(struct task_struct *p, int target) ++static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) + { + return -1; + } +@@ -6274,7 +6275,7 @@ symmetric: + if ((unsigned)i < nr_cpumask_bits) + return i; + +- i = select_idle_smt(p, target); ++ i = select_idle_smt(p, sd, target); + if ((unsigned)i < nr_cpumask_bits) + return i; + +@@ -6594,7 +6595,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) + + util = cpu_util_next(cpu, p, cpu); + cpu_cap = capacity_of(cpu); +- spare_cap = cpu_cap - util; ++ spare_cap = cpu_cap; ++ lsub_positive(&spare_cap, util); + + /* + * Skip CPUs that cannot satisfy the capacity request. +@@ -8957,7 +8959,7 @@ next_group: + } + } + +-static inline long adjust_numa_imbalance(int imbalance, int src_nr_running) ++static inline long adjust_numa_imbalance(int imbalance, int nr_running) + { + unsigned int imbalance_min; + +@@ -8966,7 +8968,7 @@ static inline long adjust_numa_imbalance(int imbalance, int src_nr_running) + * tasks that remain local when the source domain is almost idle. 
+ */ + imbalance_min = 2; +- if (src_nr_running <= imbalance_min) ++ if (nr_running <= imbalance_min) + return 0; + + return imbalance; +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 28709f6b0975c..8d1ca65db3b0d 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1629,7 +1629,7 @@ enum { + + #undef SCHED_FEAT + +-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) ++#ifdef CONFIG_SCHED_DEBUG + + /* + * To support run-time toggling of sched features, all the translation units +@@ -1637,6 +1637,7 @@ enum { + */ + extern const_debug unsigned int sysctl_sched_features; + ++#ifdef CONFIG_JUMP_LABEL + #define SCHED_FEAT(name, enabled) \ + static __always_inline bool static_branch_##name(struct static_key *key) \ + { \ +@@ -1649,7 +1650,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \ + extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; + #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) + +-#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ ++#else /* !CONFIG_JUMP_LABEL */ ++ ++#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) ++ ++#endif /* CONFIG_JUMP_LABEL */ ++ ++#else /* !SCHED_DEBUG */ + + /* + * Each translation unit has its own copy of sysctl_sched_features to allow +@@ -1665,7 +1672,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features = + + #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) + +-#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ ++#endif /* SCHED_DEBUG */ + + extern struct static_key_false sched_numa_balancing; + extern struct static_key_false sched_schedstats; +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index a50364df10543..401fcb9d73886 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -1715,13 +1715,6 @@ void update_process_times(int user_tick) + scheduler_tick(); + if (IS_ENABLED(CONFIG_POSIX_TIMERS)) + run_posix_cpu_timers(); +- +- /* The current CPU might make use of net randoms without receiving IRQs +- * to renew them often enough. Let's update the net_rand_state from a +- * non-constant value that's not affine to the number of calls to make +- * sure it's updated when there's some activity (we don't care in idle). 
+- */ +- this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick); + } + + /** +diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c +index c6cca0d1d5840..c8892156db341 100644 +--- a/kernel/trace/trace_events_synth.c ++++ b/kernel/trace/trace_events_synth.c +@@ -132,7 +132,7 @@ static int synth_field_string_size(char *type) + start += sizeof("char[") - 1; + + end = strchr(type, ']'); +- if (!end || end < start) ++ if (!end || end < start || type + strlen(type) > end + 1) + return -EINVAL; + + len = end - start; +@@ -465,6 +465,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv, + struct synth_field *field; + const char *prefix = NULL, *field_type = argv[0], *field_name, *array; + int len, ret = 0; ++ ssize_t size; + + if (field_type[0] == ';') + field_type++; +@@ -501,8 +502,14 @@ static struct synth_field *parse_synth_field(int argc, const char **argv, + if (field_type[0] == ';') + field_type++; + len = strlen(field_type) + 1; +- if (array) +- len += strlen(array); ++ ++ if (array) { ++ int l = strlen(array); ++ ++ if (l && array[l - 1] == ';') ++ l--; ++ len += l; ++ } + if (prefix) + len += strlen(prefix); + +@@ -520,11 +527,12 @@ static struct synth_field *parse_synth_field(int argc, const char **argv, + field->type[len - 1] = '\0'; + } + +- field->size = synth_field_size(field->type); +- if (!field->size) { ++ size = synth_field_size(field->type); ++ if (size <= 0) { + ret = -EINVAL; + goto free; + } ++ field->size = size; + + if (synth_field_is_string(field->type)) + field->is_string = true; +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 0c781f912f9f0..491789a793ae5 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -2367,6 +2367,15 @@ config TEST_HMM + + If unsure, say N. + ++config TEST_FREE_PAGES ++ tristate "Test freeing pages" ++ help ++ Test that a memory leak does not occur due to a race between ++ freeing a block of pages and a speculative page reference. ++ Loading this module is safe if your kernel has the bug fixed. ++ If the bug is not fixed, it will leak gigabytes of memory and ++ probably OOM your system. ++ + config TEST_FPU + tristate "Test floating point operations in kernel space" + depends on X86 && !KCOV_INSTRUMENT_ALL +diff --git a/lib/Makefile b/lib/Makefile +index a4a4c6864f518..071b687b7363f 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -99,6 +99,7 @@ obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o + obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o + obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o + obj-$(CONFIG_TEST_HMM) += test_hmm.o ++obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o + + # + # CFLAGS for compiling floating point code inside the kernel. 
x86/Makefile turns +diff --git a/lib/crc32.c b/lib/crc32.c +index 35a03d03f9738..2a68dfd3b96c8 100644 +--- a/lib/crc32.c ++++ b/lib/crc32.c +@@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p, + return crc; + } + +-#if CRC_LE_BITS == 1 ++#if CRC_BE_BITS == 1 + u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) + { + return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE); +diff --git a/lib/idr.c b/lib/idr.c +index c2cf2c52bbde5..4d2eef0259d2c 100644 +--- a/lib/idr.c ++++ b/lib/idr.c +@@ -470,6 +470,7 @@ alloc: + goto retry; + nospc: + xas_unlock_irqrestore(&xas, flags); ++ kfree(alloc); + return -ENOSPC; + } + EXPORT_SYMBOL(ida_alloc_range); +diff --git a/lib/random32.c b/lib/random32.c +index dfb9981ab7989..be9f242a42075 100644 +--- a/lib/random32.c ++++ b/lib/random32.c +@@ -41,16 +41,6 @@ + #include + #include + +-#ifdef CONFIG_RANDOM32_SELFTEST +-static void __init prandom_state_selftest(void); +-#else +-static inline void prandom_state_selftest(void) +-{ +-} +-#endif +- +-DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +- + /** + * prandom_u32_state - seeded pseudo-random number generator. + * @state: pointer to state structure holding seeded state. +@@ -70,26 +60,6 @@ u32 prandom_u32_state(struct rnd_state *state) + } + EXPORT_SYMBOL(prandom_u32_state); + +-/** +- * prandom_u32 - pseudo random number generator +- * +- * A 32 bit pseudo-random number is generated using a fast +- * algorithm suitable for simulation. This algorithm is NOT +- * considered safe for cryptographic use. +- */ +-u32 prandom_u32(void) +-{ +- struct rnd_state *state = &get_cpu_var(net_rand_state); +- u32 res; +- +- res = prandom_u32_state(state); +- trace_prandom_u32(res); +- put_cpu_var(net_rand_state); +- +- return res; +-} +-EXPORT_SYMBOL(prandom_u32); +- + /** + * prandom_bytes_state - get the requested number of pseudo-random bytes + * +@@ -121,20 +91,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes) + } + EXPORT_SYMBOL(prandom_bytes_state); + +-/** +- * prandom_bytes - get the requested number of pseudo-random bytes +- * @buf: where to copy the pseudo-random bytes to +- * @bytes: the requested number of bytes +- */ +-void prandom_bytes(void *buf, size_t bytes) +-{ +- struct rnd_state *state = &get_cpu_var(net_rand_state); +- +- prandom_bytes_state(state, buf, bytes); +- put_cpu_var(net_rand_state); +-} +-EXPORT_SYMBOL(prandom_bytes); +- + static void prandom_warmup(struct rnd_state *state) + { + /* Calling RNG ten times to satisfy recurrence condition */ +@@ -150,96 +106,6 @@ static void prandom_warmup(struct rnd_state *state) + prandom_u32_state(state); + } + +-static u32 __extract_hwseed(void) +-{ +- unsigned int val = 0; +- +- (void)(arch_get_random_seed_int(&val) || +- arch_get_random_int(&val)); +- +- return val; +-} +- +-static void prandom_seed_early(struct rnd_state *state, u32 seed, +- bool mix_with_hwseed) +-{ +-#define LCG(x) ((x) * 69069U) /* super-duper LCG */ +-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0) +- state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); +- state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); +- state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); +- state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); +-} +- +-/** +- * prandom_seed - add entropy to pseudo random number generator +- * @entropy: entropy value +- * +- * Add some additional entropy to the prandom pool. 
+- */ +-void prandom_seed(u32 entropy) +-{ +- int i; +- /* +- * No locking on the CPUs, but then somewhat random results are, well, +- * expected. +- */ +- for_each_possible_cpu(i) { +- struct rnd_state *state = &per_cpu(net_rand_state, i); +- +- state->s1 = __seed(state->s1 ^ entropy, 2U); +- prandom_warmup(state); +- } +-} +-EXPORT_SYMBOL(prandom_seed); +- +-/* +- * Generate some initially weak seeding values to allow +- * to start the prandom_u32() engine. +- */ +-static int __init prandom_init(void) +-{ +- int i; +- +- prandom_state_selftest(); +- +- for_each_possible_cpu(i) { +- struct rnd_state *state = &per_cpu(net_rand_state, i); +- u32 weak_seed = (i + jiffies) ^ random_get_entropy(); +- +- prandom_seed_early(state, weak_seed, true); +- prandom_warmup(state); +- } +- +- return 0; +-} +-core_initcall(prandom_init); +- +-static void __prandom_timer(struct timer_list *unused); +- +-static DEFINE_TIMER(seed_timer, __prandom_timer); +- +-static void __prandom_timer(struct timer_list *unused) +-{ +- u32 entropy; +- unsigned long expires; +- +- get_random_bytes(&entropy, sizeof(entropy)); +- prandom_seed(entropy); +- +- /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ +- expires = 40 + prandom_u32_max(40); +- seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC); +- +- add_timer(&seed_timer); +-} +- +-static void __init __prandom_start_seed_timer(void) +-{ +- seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC); +- add_timer(&seed_timer); +-} +- + void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state) + { + int i; +@@ -259,51 +125,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state) + } + EXPORT_SYMBOL(prandom_seed_full_state); + +-/* +- * Generate better values after random number generator +- * is fully initialized. +- */ +-static void __prandom_reseed(bool late) +-{ +- unsigned long flags; +- static bool latch = false; +- static DEFINE_SPINLOCK(lock); +- +- /* Asking for random bytes might result in bytes getting +- * moved into the nonblocking pool and thus marking it +- * as initialized. In this case we would double back into +- * this function and attempt to do a late reseed. +- * Ignore the pointless attempt to reseed again if we're +- * already waiting for bytes when the nonblocking pool +- * got initialized. +- */ +- +- /* only allow initial seeding (late == false) once */ +- if (!spin_trylock_irqsave(&lock, flags)) +- return; +- +- if (latch && !late) +- goto out; +- +- latch = true; +- prandom_seed_full_state(&net_rand_state); +-out: +- spin_unlock_irqrestore(&lock, flags); +-} +- +-void prandom_reseed_late(void) +-{ +- __prandom_reseed(true); +-} +- +-static int __init prandom_reseed(void) +-{ +- __prandom_reseed(false); +- __prandom_start_seed_timer(); +- return 0; +-} +-late_initcall(prandom_reseed); +- + #ifdef CONFIG_RANDOM32_SELFTEST + static struct prandom_test1 { + u32 seed; +@@ -423,7 +244,28 @@ static struct prandom_test2 { + { 407983964U, 921U, 728767059U }, + }; + +-static void __init prandom_state_selftest(void) ++static u32 __extract_hwseed(void) ++{ ++ unsigned int val = 0; ++ ++ (void)(arch_get_random_seed_int(&val) || ++ arch_get_random_int(&val)); ++ ++ return val; ++} ++ ++static void prandom_seed_early(struct rnd_state *state, u32 seed, ++ bool mix_with_hwseed) ++{ ++#define LCG(x) ((x) * 69069U) /* super-duper LCG */ ++#define HWSEED() (mix_with_hwseed ? 
__extract_hwseed() : 0) ++ state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); ++ state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); ++ state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); ++ state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); ++} ++ ++static int __init prandom_state_selftest(void) + { + int i, j, errors = 0, runs = 0; + bool error = false; +@@ -463,5 +305,267 @@ static void __init prandom_state_selftest(void) + pr_warn("prandom: %d/%d self tests failed\n", errors, runs); + else + pr_info("prandom: %d self tests passed\n", runs); ++ return 0; + } ++core_initcall(prandom_state_selftest); + #endif ++ ++/* ++ * The prandom_u32() implementation is now completely separate from the ++ * prandom_state() functions, which are retained (for now) for compatibility. ++ * ++ * Because of (ab)use in the networking code for choosing random TCP/UDP port ++ * numbers, which open DoS possibilities if guessable, we want something ++ * stronger than a standard PRNG. But the performance requirements of ++ * the network code do not allow robust crypto for this application. ++ * ++ * So this is a homebrew Junior Spaceman implementation, based on the ++ * lowest-latency trustworthy crypto primitive available, SipHash. ++ * (The authors of SipHash have not been consulted about this abuse of ++ * their work.) ++ * ++ * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to ++ * one word of output. This abbreviated version uses 2 rounds per word ++ * of output. ++ */ ++ ++struct siprand_state { ++ unsigned long v0; ++ unsigned long v1; ++ unsigned long v2; ++ unsigned long v3; ++}; ++ ++static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy; ++ ++/* ++ * This is the core CPRNG function. As "pseudorandom", this is not used ++ * for truly valuable things, just intended to be a PITA to guess. ++ * For maximum speed, we do just two SipHash rounds per word. This is ++ * the same rate as 4 rounds per 64 bits that SipHash normally uses, ++ * so hopefully it's reasonably secure. ++ * ++ * There are two changes from the official SipHash finalization: ++ * - We omit some constants XORed with v2 in the SipHash spec as irrelevant; ++ * they are there only to make the output rounds distinct from the input ++ * rounds, and this application has no input rounds. ++ * - Rather than returning v0^v1^v2^v3, return v1+v3. ++ * If you look at the SipHash round, the last operation on v3 is ++ * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time. ++ * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but ++ * it still cancels out half of the bits in v2 for no benefit.) ++ * Second, since the last combining operation was xor, continue the ++ * pattern of alternating xor/add for a tiny bit of extra non-linearity. ++ */ ++static inline u32 siprand_u32(struct siprand_state *s) ++{ ++ unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3; ++ ++ PRND_SIPROUND(v0, v1, v2, v3); ++ PRND_SIPROUND(v0, v1, v2, v3); ++ s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3; ++ return v1 + v3; ++} ++ ++ ++/** ++ * prandom_u32 - pseudo random number generator ++ * ++ * A 32 bit pseudo-random number is generated using a fast ++ * algorithm suitable for simulation. This algorithm is NOT ++ * considered safe for cryptographic use. 
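++ *
++ * Typical non-cryptographic use, e.g. picking a port in the dynamic
++ * range (illustrative only):
++ *
++ *   u16 port = 49152 + (prandom_u32() & 0x3fff);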
++ */ ++u32 prandom_u32(void) ++{ ++ struct siprand_state *state = get_cpu_ptr(&net_rand_state); ++ u32 res = siprand_u32(state); ++ ++ trace_prandom_u32(res); ++ put_cpu_ptr(&net_rand_state); ++ return res; ++} ++EXPORT_SYMBOL(prandom_u32); ++ ++/** ++ * prandom_bytes - get the requested number of pseudo-random bytes ++ * @buf: where to copy the pseudo-random bytes to ++ * @bytes: the requested number of bytes ++ */ ++void prandom_bytes(void *buf, size_t bytes) ++{ ++ struct siprand_state *state = get_cpu_ptr(&net_rand_state); ++ u8 *ptr = buf; ++ ++ while (bytes >= sizeof(u32)) { ++ put_unaligned(siprand_u32(state), (u32 *)ptr); ++ ptr += sizeof(u32); ++ bytes -= sizeof(u32); ++ } ++ ++ if (bytes > 0) { ++ u32 rem = siprand_u32(state); ++ ++ do { ++ *ptr++ = (u8)rem; ++ rem >>= BITS_PER_BYTE; ++ } while (--bytes > 0); ++ } ++ put_cpu_ptr(&net_rand_state); ++} ++EXPORT_SYMBOL(prandom_bytes); ++ ++/** ++ * prandom_seed - add entropy to pseudo random number generator ++ * @entropy: entropy value ++ * ++ * Add some additional seed material to the prandom pool. ++ * The "entropy" is actually our IP address (the only caller is ++ * the network code), not for unpredictability, but to ensure that ++ * different machines are initialized differently. ++ */ ++void prandom_seed(u32 entropy) ++{ ++ int i; ++ ++ add_device_randomness(&entropy, sizeof(entropy)); ++ ++ for_each_possible_cpu(i) { ++ struct siprand_state *state = per_cpu_ptr(&net_rand_state, i); ++ unsigned long v0 = state->v0, v1 = state->v1; ++ unsigned long v2 = state->v2, v3 = state->v3; ++ ++ do { ++ v3 ^= entropy; ++ PRND_SIPROUND(v0, v1, v2, v3); ++ PRND_SIPROUND(v0, v1, v2, v3); ++ v0 ^= entropy; ++ } while (unlikely(!v0 || !v1 || !v2 || !v3)); ++ ++ WRITE_ONCE(state->v0, v0); ++ WRITE_ONCE(state->v1, v1); ++ WRITE_ONCE(state->v2, v2); ++ WRITE_ONCE(state->v3, v3); ++ } ++} ++EXPORT_SYMBOL(prandom_seed); ++ ++/* ++ * Generate some initially weak seeding values to allow ++ * the prandom_u32() engine to be started. ++ */ ++static int __init prandom_init_early(void) ++{ ++ int i; ++ unsigned long v0, v1, v2, v3; ++ ++ if (!arch_get_random_long(&v0)) ++ v0 = jiffies; ++ if (!arch_get_random_long(&v1)) ++ v1 = random_get_entropy(); ++ v2 = v0 ^ PRND_K0; ++ v3 = v1 ^ PRND_K1; ++ ++ for_each_possible_cpu(i) { ++ struct siprand_state *state; ++ ++ v3 ^= i; ++ PRND_SIPROUND(v0, v1, v2, v3); ++ PRND_SIPROUND(v0, v1, v2, v3); ++ v0 ^= i; ++ ++ state = per_cpu_ptr(&net_rand_state, i); ++ state->v0 = v0; state->v1 = v1; ++ state->v2 = v2; state->v3 = v3; ++ } ++ ++ return 0; ++} ++core_initcall(prandom_init_early); ++ ++ ++/* Stronger reseeding when available, and periodically thereafter. */ ++static void prandom_reseed(struct timer_list *unused); ++ ++static DEFINE_TIMER(seed_timer, prandom_reseed); ++ ++static void prandom_reseed(struct timer_list *unused) ++{ ++ unsigned long expires; ++ int i; ++ ++ /* ++ * Reinitialize each CPU's PRNG with 128 bits of key. ++ * No locking on the CPUs, but then somewhat random results are, ++ * well, expected. ++ */ ++ for_each_possible_cpu(i) { ++ struct siprand_state *state; ++ unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0; ++ unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1; ++#if BITS_PER_LONG == 32 ++ int j; ++ ++ /* ++ * On 32-bit machines, hash in two extra words to ++ * approximate 128-bit key length. Not that the hash ++ * has that much security, but this prevents a trivial ++ * 64-bit brute force. 
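++ * (v0 and v1 above already carry 2 x 32 random bits; the two extra
++ * words below bring the total key material to 128 bits.)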
++ */ ++ for (j = 0; j < 2; j++) { ++ unsigned long m = get_random_long(); ++ ++ v3 ^= m; ++ PRND_SIPROUND(v0, v1, v2, v3); ++ PRND_SIPROUND(v0, v1, v2, v3); ++ v0 ^= m; ++ } ++#endif ++ /* ++ * Probably impossible in practice, but there is a ++ * theoretical risk that a race between this reseeding ++ * and the target CPU writing its state back could ++ * create the all-zero SipHash fixed point. ++ * ++ * To ensure that never happens, ensure the state ++ * we write contains no zero words. ++ */ ++ state = per_cpu_ptr(&net_rand_state, i); ++ WRITE_ONCE(state->v0, v0 ? v0 : -1ul); ++ WRITE_ONCE(state->v1, v1 ? v1 : -1ul); ++ WRITE_ONCE(state->v2, v2 ? v2 : -1ul); ++ WRITE_ONCE(state->v3, v3 ? v3 : -1ul); ++ } ++ ++ /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ ++ expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ)); ++ mod_timer(&seed_timer, expires); ++} ++ ++/* ++ * The random ready callback can be called from almost any interrupt. ++ * To avoid worrying about whether it's safe to delay that interrupt ++ * long enough to seed all CPUs, just schedule an immediate timer event. ++ */ ++static void prandom_timer_start(struct random_ready_callback *unused) ++{ ++ mod_timer(&seed_timer, jiffies); ++} ++ ++/* ++ * Start periodic full reseeding as soon as strong ++ * random numbers are available. ++ */ ++static int __init prandom_init_late(void) ++{ ++ static struct random_ready_callback random_ready = { ++ .func = prandom_timer_start ++ }; ++ int ret = add_random_ready_callback(&random_ready); ++ ++ if (ret == -EALREADY) { ++ prandom_timer_start(&random_ready); ++ ret = 0; ++ } ++ return ret; ++} ++late_initcall(prandom_init_late); +diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c +new file mode 100644 +index 0000000000000..074e76bd76b2b +--- /dev/null ++++ b/lib/test_free_pages.c +@@ -0,0 +1,42 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * test_free_pages.c: Check that free_pages() doesn't leak memory ++ * Copyright (c) 2020 Oracle ++ * Author: Matthew Wilcox ++ */ ++ ++#include ++#include ++#include ++ ++static void test_free_pages(gfp_t gfp) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < 1000 * 1000; i++) { ++ unsigned long addr = __get_free_pages(gfp, 3); ++ struct page *page = virt_to_page(addr); ++ ++ /* Simulate page cache getting a speculative reference */ ++ get_page(page); ++ free_pages(addr, 3); ++ put_page(page); ++ } ++} ++ ++static int m_in(void) ++{ ++ test_free_pages(GFP_KERNEL); ++ test_free_pages(GFP_KERNEL | __GFP_COMP); ++ ++ return 0; ++} ++ ++static void m_ex(void) ++{ ++} ++ ++module_init(m_in); ++module_exit(m_ex); ++MODULE_AUTHOR("Matthew Wilcox "); ++MODULE_LICENSE("GPL"); +diff --git a/mm/filemap.c b/mm/filemap.c +index 99c49eeae71b8..f6d36ccc23515 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -827,10 +827,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) + } + EXPORT_SYMBOL_GPL(replace_page_cache_page); + +-static int __add_to_page_cache_locked(struct page *page, +- struct address_space *mapping, +- pgoff_t offset, gfp_t gfp_mask, +- void **shadowp) ++noinline int __add_to_page_cache_locked(struct page *page, ++ struct address_space *mapping, ++ pgoff_t offset, gfp_t gfp_mask, ++ void **shadowp) + { + XA_STATE(xas, &mapping->i_pages, offset); + int huge = PageHuge(page); +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index da397779a6d43..18a6f8c8b2844 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -2335,13 +2335,13 @@ static void unmap_page(struct page *page) + 
VM_BUG_ON_PAGE(!unmap_success, page); + } + +-static void remap_page(struct page *page) ++static void remap_page(struct page *page, unsigned int nr) + { + int i; + if (PageTransHuge(page)) { + remove_migration_ptes(page, page, true); + } else { +- for (i = 0; i < HPAGE_PMD_NR; i++) ++ for (i = 0; i < nr; i++) + remove_migration_ptes(page + i, page + i, true); + } + } +@@ -2416,6 +2416,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, + struct lruvec *lruvec; + struct address_space *swap_cache = NULL; + unsigned long offset = 0; ++ unsigned int nr = thp_nr_pages(head); + int i; + + lruvec = mem_cgroup_page_lruvec(head, pgdat); +@@ -2431,7 +2432,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, + xa_lock(&swap_cache->i_pages); + } + +- for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { ++ for (i = nr - 1; i >= 1; i--) { + __split_huge_page_tail(head, i, lruvec, list); + /* Some pages can be beyond i_size: drop them from page cache */ + if (head[i].index >= end) { +@@ -2451,7 +2452,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, + + ClearPageCompound(head); + +- split_page_owner(head, HPAGE_PMD_ORDER); ++ split_page_owner(head, nr); + + /* See comment in __split_huge_page_tail() */ + if (PageAnon(head)) { +@@ -2470,9 +2471,15 @@ static void __split_huge_page(struct page *page, struct list_head *list, + + spin_unlock_irqrestore(&pgdat->lru_lock, flags); + +- remap_page(head); ++ remap_page(head, nr); + +- for (i = 0; i < HPAGE_PMD_NR; i++) { ++ if (PageSwapCache(head)) { ++ swp_entry_t entry = { .val = page_private(head) }; ++ ++ split_swap_cluster(entry); ++ } ++ ++ for (i = 0; i < nr; i++) { + struct page *subpage = head + i; + if (subpage == page) + continue; +@@ -2706,12 +2713,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) + } + + __split_huge_page(page, list, end, flags); +- if (PageSwapCache(head)) { +- swp_entry_t entry = { .val = page_private(head) }; +- +- ret = split_swap_cluster(entry); +- } else +- ret = 0; ++ ret = 0; + } else { + if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { + pr_alert("total_mapcount: %u, page_count(): %u\n", +@@ -2725,7 +2727,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) + fail: if (mapping) + xa_unlock(&mapping->i_pages); + spin_unlock_irqrestore(&pgdata->lru_lock, flags); +- remap_page(head); ++ remap_page(head, thp_nr_pages(head)); + ret = -EBUSY; + } + +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 6877c765b8d03..9eefdb9cc2303 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -2887,6 +2887,17 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p) + + page = virt_to_head_page(p); + ++ /* ++ * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer ++ * or a pointer to obj_cgroup vector. In the latter case the lowest ++ * bit of the pointer is set. ++ * The page->mem_cgroup pointer can be asynchronously changed ++ * from NULL to (obj_cgroup_vec | 0x1UL), but can't be changed ++ * from a valid memcg pointer to objcg vector or back. ++ */ ++ if (!page->mem_cgroup) ++ return NULL; ++ + /* + * Slab objects are accounted individually, not per-page. 
+ * Memcg membership data for each individual object is saved in +@@ -5500,7 +5511,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, + struct page *page = NULL; + swp_entry_t ent = pte_to_swp_entry(ptent); + +- if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) ++ if (!(mc.flags & MOVE_ANON)) + return NULL; + + /* +@@ -5519,6 +5530,9 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, + return page; + } + ++ if (non_swap_entry(ent)) ++ return NULL; ++ + /* + * Because lookup_swap_cache() updates some statistics counter, + * we call find_get_page() with swapper_space directly. +diff --git a/mm/mmap.c b/mm/mmap.c +index bdd19f5b994e0..7a8987aa69962 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -3227,7 +3227,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) + * By setting it to reflect the virtual start address of the + * vma, merges and splits can happen in a seamless way, just + * using the existing file pgoff checks and manipulations. +- * Similarly in do_mmap and in do_brk. ++ * Similarly in do_mmap and in do_brk_flags. + */ + if (vma_is_anonymous(vma)) { + BUG_ON(vma->anon_vma); +diff --git a/mm/oom_kill.c b/mm/oom_kill.c +index e90f25d6385d7..8b84661a64109 100644 +--- a/mm/oom_kill.c ++++ b/mm/oom_kill.c +@@ -64,6 +64,8 @@ int sysctl_oom_dump_tasks = 1; + * and mark_oom_victim + */ + DEFINE_MUTEX(oom_lock); ++/* Serializes oom_score_adj and oom_score_adj_min updates */ ++DEFINE_MUTEX(oom_adj_mutex); + + static inline bool is_memcg_oom(struct oom_control *oc) + { +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 780c8f023b282..3fb35fe6a9e44 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -3209,7 +3209,7 @@ void split_page(struct page *page, unsigned int order) + + for (i = 1; i < (1 << order); i++) + set_page_refcounted(page + i); +- split_page_owner(page, order); ++ split_page_owner(page, 1 << order); + } + EXPORT_SYMBOL_GPL(split_page); + +@@ -3496,7 +3496,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) + + #endif /* CONFIG_FAIL_PAGE_ALLOC */ + +-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) ++noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) + { + return __should_fail_alloc_page(gfp_mask, order); + } +@@ -4961,6 +4961,9 @@ void __free_pages(struct page *page, unsigned int order) + { + if (put_page_testzero(page)) + free_the_page(page, order); ++ else if (!PageHead(page)) ++ while (order-- > 0) ++ free_the_page(page + (1 << order), order); + } + EXPORT_SYMBOL(__free_pages); + +diff --git a/mm/page_owner.c b/mm/page_owner.c +index 3604615094235..4ca3051a10358 100644 +--- a/mm/page_owner.c ++++ b/mm/page_owner.c +@@ -204,7 +204,7 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) + page_owner->last_migrate_reason = reason; + } + +-void __split_page_owner(struct page *page, unsigned int order) ++void __split_page_owner(struct page *page, unsigned int nr) + { + int i; + struct page_ext *page_ext = lookup_page_ext(page); +@@ -213,7 +213,7 @@ void __split_page_owner(struct page *page, unsigned int order) + if (unlikely(!page_ext)) + return; + +- for (i = 0; i < (1 << order); i++) { ++ for (i = 0; i < nr; i++) { + page_owner = get_page_owner(page_ext); + page_owner->order = 0; + page_ext = page_ext_next(page_ext); +diff --git a/mm/swapfile.c b/mm/swapfile.c +index debc94155f74d..b877c1504e00b 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -3343,7 +3343,7 @@ SYSCALL_DEFINE2(swapon, const char 
__user *, specialfile, int, swap_flags) + error = inode_drain_writes(inode); + if (error) { + inode->i_flags &= ~S_SWAPFILE; +- goto bad_swap_unlock_inode; ++ goto free_swap_address_space; + } + + mutex_lock(&swapon_mutex); +@@ -3368,6 +3368,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) + + error = 0; + goto out; ++free_swap_address_space: ++ exit_swap_address_space(p->type); + bad_swap_unlock_inode: + inode_unlock(inode); + bad_swap: +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c +index 68bfe57b66250..be9cdf5dabe5d 100644 +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -3442,6 +3442,16 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, + } + } + ++static void hci_suspend_clear_tasks(struct hci_dev *hdev) ++{ ++ int i; ++ ++ for (i = 0; i < __SUSPEND_NUM_TASKS; i++) ++ clear_bit(i, hdev->suspend_tasks); ++ ++ wake_up(&hdev->suspend_wait_q); ++} ++ + static int hci_suspend_wait_event(struct hci_dev *hdev) + { + #define WAKE_COND \ +@@ -3784,6 +3794,7 @@ void hci_unregister_dev(struct hci_dev *hdev) + + cancel_work_sync(&hdev->power_on); + ++ hci_suspend_clear_tasks(hdev); + unregister_pm_notifier(&hdev->suspend_notifier); + cancel_work_sync(&hdev->suspend_prepare); + +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index 4b7fc430793cf..7cf42b9d3dfc8 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -2569,7 +2569,6 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) + static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) + { + struct hci_ev_conn_complete *ev = (void *) skb->data; +- struct inquiry_entry *ie; + struct hci_conn *conn; + + BT_DBG("%s", hdev->name); +@@ -2578,13 +2577,19 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) + + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); + if (!conn) { +- /* Connection may not exist if auto-connected. Check the inquiry +- * cache to see if we've already discovered this bdaddr before. +- * If found and link is an ACL type, create a connection class ++ /* Connection may not exist if auto-connected. Check the bredr ++ * allowlist to see if this device is allowed to auto connect. ++ * If link is an ACL type, create a connection class + * automatically. ++ * ++ * Auto-connect will only occur if the event filter is ++ * programmed with a given address. Right now, event filter is ++ * only used during suspend. 
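++ * (The old inquiry-cache lookup would have created a connection
++ * object for any previously discovered device; checking the
++ * allowlist instead matches what the event filter was actually
++ * programmed with.)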
+ */ +- ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); +- if (ie && ev->link_type == ACL_LINK) { ++ if (ev->link_type == ACL_LINK && ++ hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, ++ &ev->bdaddr, ++ BDADDR_BREDR)) { + conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, + HCI_ROLE_SLAVE); + if (!conn) { +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c +index 79b4c01c515b9..f1b1edd0b6974 100644 +--- a/net/bluetooth/l2cap_sock.c ++++ b/net/bluetooth/l2cap_sock.c +@@ -1521,8 +1521,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) + + parent = bt_sk(sk)->parent; + +- sock_set_flag(sk, SOCK_ZAPPED); +- + switch (chan->state) { + case BT_OPEN: + case BT_BOUND: +@@ -1549,8 +1547,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) + + break; + } +- + release_sock(sk); ++ ++ /* Only zap after cleanup to avoid use after free race */ ++ sock_set_flag(sk, SOCK_ZAPPED); ++ + } + + static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state, +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c +index 5758ccb524ef7..12a7cc9840b4d 100644 +--- a/net/bluetooth/mgmt.c ++++ b/net/bluetooth/mgmt.c +@@ -4162,7 +4162,7 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, + { + struct adv_monitor *monitor = NULL; + struct mgmt_rp_read_adv_monitor_features *rp = NULL; +- int handle; ++ int handle, err; + size_t rp_size = 0; + __u32 supported = 0; + __u16 num_handles = 0; +@@ -4197,9 +4197,13 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, + if (num_handles) + memcpy(&rp->handles, &handles, (num_handles * sizeof(u16))); + +- return mgmt_cmd_complete(sk, hdev->id, +- MGMT_OP_READ_ADV_MONITOR_FEATURES, +- MGMT_STATUS_SUCCESS, rp, rp_size); ++ err = mgmt_cmd_complete(sk, hdev->id, ++ MGMT_OP_READ_ADV_MONITOR_FEATURES, ++ MGMT_STATUS_SUCCESS, rp, rp_size); ++ ++ kfree(rp); ++ ++ return err; + } + + static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, +diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c +index 12a4f4d936810..3fda71a8579d1 100644 +--- a/net/bridge/netfilter/ebt_dnat.c ++++ b/net/bridge/netfilter/ebt_dnat.c +@@ -21,7 +21,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par) + { + const struct ebt_nat_info *info = par->targinfo; + +- if (skb_ensure_writable(skb, ETH_ALEN)) ++ if (skb_ensure_writable(skb, 0)) + return EBT_DROP; + + ether_addr_copy(eth_hdr(skb)->h_dest, info->mac); +diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c +index 0cad62a4052b9..307790562b492 100644 +--- a/net/bridge/netfilter/ebt_redirect.c ++++ b/net/bridge/netfilter/ebt_redirect.c +@@ -21,7 +21,7 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par) + { + const struct ebt_redirect_info *info = par->targinfo; + +- if (skb_ensure_writable(skb, ETH_ALEN)) ++ if (skb_ensure_writable(skb, 0)) + return EBT_DROP; + + if (xt_hooknum(par) != NF_BR_BROUTING) +diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c +index 27443bf229a3b..7dfbcdfc30e5d 100644 +--- a/net/bridge/netfilter/ebt_snat.c ++++ b/net/bridge/netfilter/ebt_snat.c +@@ -22,7 +22,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par) + { + const struct ebt_nat_info *info = par->targinfo; + +- if (skb_ensure_writable(skb, ETH_ALEN * 2)) ++ if (skb_ensure_writable(skb, 0)) + return EBT_DROP; + + ether_addr_copy(eth_hdr(skb)->h_source, info->mac); +diff --git 
a/net/can/j1939/transport.c b/net/can/j1939/transport.c +index 0cec4152f9797..e09d087ba2409 100644 +--- a/net/can/j1939/transport.c ++++ b/net/can/j1939/transport.c +@@ -580,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv, + skb->dev = priv->ndev; + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = priv->ndev->ifindex; ++ can_skb_prv(skb)->skbcnt = 0; + /* reserve CAN header */ + skb_reserve(skb, offsetof(struct can_frame, data)); + +@@ -1487,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv, + skb->dev = priv->ndev; + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = priv->ndev->ifindex; ++ can_skb_prv(skb)->skbcnt = 0; + skcb = j1939_skb_to_cb(skb); + memcpy(skcb, rel_skcb, sizeof(*skcb)); + +diff --git a/net/core/filter.c b/net/core/filter.c +index b5f3faac5e3b6..150650c53829e 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -4354,7 +4354,8 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, + cmpxchg(&sk->sk_pacing_status, + SK_PACING_NONE, + SK_PACING_NEEDED); +- sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; ++ sk->sk_max_pacing_rate = (val == ~0U) ? ++ ~0UL : (unsigned int)val; + sk->sk_pacing_rate = min(sk->sk_pacing_rate, + sk->sk_max_pacing_rate); + break; +diff --git a/net/core/skmsg.c b/net/core/skmsg.c +index 649583158983a..30ddca6db6c6b 100644 +--- a/net/core/skmsg.c ++++ b/net/core/skmsg.c +@@ -662,15 +662,16 @@ static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog, + { + int ret; + ++ /* strparser clones the skb before handing it to a upper layer, ++ * meaning we have the same data, but sk is NULL. We do want an ++ * sk pointer though when we run the BPF program. So we set it ++ * here and then NULL it to ensure we don't trigger a BUG_ON() ++ * in skb/sk operations later if kfree_skb is called with a ++ * valid skb->sk pointer and no destructor assigned. ++ */ + skb->sk = psock->sk; + bpf_compute_data_end_sk_skb(skb); + ret = bpf_prog_run_pin_on_cpu(prog, skb); +- /* strparser clones the skb before handing it to a upper layer, +- * meaning skb_orphan has been called. We NULL sk on the way out +- * to ensure we don't trigger a BUG_ON() in skb/sk operations +- * later and because we are not charging the memory of this skb +- * to any socket yet. 
+- */ + skb->sk = NULL; + return ret; + } +@@ -794,7 +795,6 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) + } + prog = READ_ONCE(psock->progs.skb_verdict); + if (likely(prog)) { +- skb_orphan(skb); + tcp_skb_bpf_redirect_clear(skb); + ret = sk_psock_bpf_run(psock, prog, skb); + ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); +diff --git a/net/core/sock.c b/net/core/sock.c +index 6c5c6b18eff4c..669f686ace801 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -769,7 +769,6 @@ static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns) + } else { + sock_reset_flag(sk, SOCK_RCVTSTAMP); + sock_reset_flag(sk, SOCK_RCVTSTAMPNS); +- sock_reset_flag(sk, SOCK_TSTAMP_NEW); + } + } + +@@ -1007,8 +1006,6 @@ set_sndbuf: + __sock_set_timestamps(sk, valbool, true, true); + break; + case SO_TIMESTAMPING_NEW: +- sock_set_flag(sk, SOCK_TSTAMP_NEW); +- fallthrough; + case SO_TIMESTAMPING_OLD: + if (val & ~SOF_TIMESTAMPING_MASK) { + ret = -EINVAL; +@@ -1037,16 +1034,14 @@ set_sndbuf: + } + + sk->sk_tsflags = val; ++ sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW); ++ + if (val & SOF_TIMESTAMPING_RX_SOFTWARE) + sock_enable_timestamp(sk, + SOCK_TIMESTAMPING_RX_SOFTWARE); +- else { +- if (optname == SO_TIMESTAMPING_NEW) +- sock_reset_flag(sk, SOCK_TSTAMP_NEW); +- ++ else + sock_disable_timestamp(sk, + (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)); +- } + break; + + case SO_RCVLOWAT: +@@ -1181,7 +1176,7 @@ set_sndbuf: + + case SO_MAX_PACING_RATE: + { +- unsigned long ulval = (val == ~0U) ? ~0UL : val; ++ unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val; + + if (sizeof(ulval) != sizeof(val) && + optlen >= sizeof(ulval) && +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c +index cf36f955bfe62..650f0391e22a1 100644 +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -239,7 +239,7 @@ static struct { + /** + * icmp_global_allow - Are we allowed to send one more ICMP message ? + * +- * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec. ++ * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec. + * Returns false if we reached the limit and can not send another packet. + * Note: called with BH disabled + */ +@@ -267,7 +267,10 @@ bool icmp_global_allow(void) + } + credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst); + if (credit) { +- credit--; ++ /* We want to use a credit of one in average, but need to randomize ++ * it for security reasons. 
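++ * prandom_u32_max(3) below returns 0, 1 or 2 with a mean of 1, so
++ * the average cost per allowed packet stays one credit, while the
++ * exact bucket state becomes much harder for a remote party to probe.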
++ */ ++ credit = max_t(int, credit - prandom_u32_max(3), 0); + rc = true; + } + WRITE_ONCE(icmp_global.credit, credit); +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c +index 4e31f23e4117e..e70291748889b 100644 +--- a/net/ipv4/ip_gre.c ++++ b/net/ipv4/ip_gre.c +@@ -625,9 +625,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, + } + + if (dev->header_ops) { +- /* Need space for new headers */ +- if (skb_cow_head(skb, dev->needed_headroom - +- (tunnel->hlen + sizeof(struct iphdr)))) ++ if (skb_cow_head(skb, 0)) + goto free_skb; + + tnl_params = (const struct iphdr *)skb->data; +@@ -748,7 +746,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu) + len = tunnel->tun_hlen - len; + tunnel->hlen = tunnel->hlen + len; + +- dev->needed_headroom = dev->needed_headroom + len; ++ if (dev->header_ops) ++ dev->hard_header_len += len; ++ else ++ dev->needed_headroom += len; ++ + if (set_mtu) + dev->mtu = max_t(int, dev->mtu - len, 68); + +@@ -944,6 +946,7 @@ static void __gre_tunnel_init(struct net_device *dev) + tunnel->parms.iph.protocol = IPPROTO_GRE; + + tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; ++ dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph); + + dev->features |= GRE_FEATURES; + dev->hw_features |= GRE_FEATURES; +@@ -987,10 +990,14 @@ static int ipgre_tunnel_init(struct net_device *dev) + return -EINVAL; + dev->flags = IFF_BROADCAST; + dev->header_ops = &ipgre_header_ops; ++ dev->hard_header_len = tunnel->hlen + sizeof(*iph); ++ dev->needed_headroom = 0; + } + #endif + } else if (!tunnel->collect_md) { + dev->header_ops = &ipgre_header_ops; ++ dev->hard_header_len = tunnel->hlen + sizeof(*iph); ++ dev->needed_headroom = 0; + } + + return ip_tunnel_init(dev); +diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c +index 7a83f881efa9e..136030ad2e546 100644 +--- a/net/ipv4/netfilter/nf_log_arp.c ++++ b/net/ipv4/netfilter/nf_log_arp.c +@@ -43,16 +43,31 @@ static void dump_arp_packet(struct nf_log_buf *m, + const struct nf_loginfo *info, + const struct sk_buff *skb, unsigned int nhoff) + { +- const struct arphdr *ah; +- struct arphdr _arph; + const struct arppayload *ap; + struct arppayload _arpp; ++ const struct arphdr *ah; ++ unsigned int logflags; ++ struct arphdr _arph; + + ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); + if (ah == NULL) { + nf_log_buf_add(m, "TRUNCATED"); + return; + } ++ ++ if (info->type == NF_LOG_TYPE_LOG) ++ logflags = info->u.log.logflags; ++ else ++ logflags = NF_LOG_DEFAULT_MASK; ++ ++ if (logflags & NF_LOG_MACDECODE) { ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); ++ nf_log_dump_vlan(m, skb); ++ nf_log_buf_add(m, "MACPROTO=%04x ", ++ ntohs(eth_hdr(skb)->h_proto)); ++ } ++ + nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d", + ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op)); + +diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c +index 0c72156130b68..d07583fac8f8c 100644 +--- a/net/ipv4/netfilter/nf_log_ipv4.c ++++ b/net/ipv4/netfilter/nf_log_ipv4.c +@@ -284,8 +284,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m, + + switch (dev->type) { + case ARPHRD_ETHER: +- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ", +- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); ++ nf_log_dump_vlan(m, skb); ++ nf_log_buf_add(m, "MACPROTO=%04x ", + ntohs(eth_hdr(skb)->h_proto)); + return; + 
default: +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c +index 134e923822750..355c4499fa1b5 100644 +--- a/net/ipv4/nexthop.c ++++ b/net/ipv4/nexthop.c +@@ -842,7 +842,7 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, + remove_nh_grp_entry(net, nhge, nlinfo); + + /* make sure all see the newly published array before releasing rtnl */ +- synchronize_rcu(); ++ synchronize_net(); + } + + static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 58642b29a499d..9bd30fd4de4b4 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -2769,10 +2769,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, + if (IS_ERR(rt)) + return rt; + +- if (flp4->flowi4_proto) ++ if (flp4->flowi4_proto) { ++ flp4->flowi4_oif = rt->dst.dev->ifindex; + rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, + flowi4_to_flowi(flp4), + sk, 0); ++ } + + return rt; + } +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index b1ce2054291d4..75be97f6a7da1 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -5766,6 +5766,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) + tcp_data_snd_check(sk); + if (!inet_csk_ack_scheduled(sk)) + goto no_ack; ++ } else { ++ tcp_update_wl(tp, TCP_SKB_CB(skb)->seq); + } + + __tcp_ack_snd_check(sk, 0); +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index 4a664ad4f4d4b..f88693929e8d0 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -2618,8 +2618,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos) + iter->skip = *pos; + + if (iter->tbl) { ++ loff_t p = 0; ++ + ipv6_route_seq_setup_walk(iter, net); +- return ipv6_route_seq_next(seq, NULL, pos); ++ return ipv6_route_seq_next(seq, NULL, &p); + } else { + return NULL; + } +diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c +index da64550a57075..8210ff34ed9b7 100644 +--- a/net/ipv6/netfilter/nf_log_ipv6.c ++++ b/net/ipv6/netfilter/nf_log_ipv6.c +@@ -297,9 +297,11 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m, + + switch (dev->type) { + case ARPHRD_ETHER: +- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ", +- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, +- ntohs(eth_hdr(skb)->h_proto)); ++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", ++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); ++ nf_log_dump_vlan(m, skb); ++ nf_log_buf_add(m, "MACPROTO=%04x ", ++ ntohs(eth_hdr(skb)->h_proto)); + return; + default: + break; +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index 87fddd84c621e..82d516d117385 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -709,7 +709,8 @@ void sta_set_rate_info_tx(struct sta_info *sta, + u16 brate; + + sband = ieee80211_get_sband(sta->sdata); +- if (sband) { ++ WARN_ON_ONCE(sband && !sband->bitrates); ++ if (sband && sband->bitrates) { + brate = sband->bitrates[rate->idx].bitrate; + rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); + } +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c +index f2840d1d95cfb..fb4f2b9b294f0 100644 +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -2122,6 +2122,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, + int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); + + sband = local->hw.wiphy->bands[band]; ++ ++ if (WARN_ON_ONCE(!sband->bitrates)) ++ break; ++ + brate = sband->bitrates[rate_idx].bitrate; + if (rinfo->bw == RATE_INFO_BW_5) + shift = 2; 
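Stepping back to the lib/random32.c rewrite above: the PRND_SIPROUND macro and the PRND_K0/PRND_K1 key constants are defined outside this patch (in include/linux/prandom.h), so the hunks never show the round itself. The following is a minimal, self-contained userspace sketch of the construction the comments describe, assuming the round is the standard SipHash round; the seed values are the well-known SipHash IV constants, chosen purely for illustration, and none of the kernel's actual keying or per-CPU handling is reproduced here.

    #include <stdint.h>
    #include <stdio.h>

    static inline uint64_t rotl64(uint64_t x, int r)
    {
        return (x << r) | (x >> (64 - r));
    }

    /* One standard SipHash round; the kernel hides this behind PRND_SIPROUND. */
    #define SIPROUND(v0, v1, v2, v3) do {                                  \
        v0 += v1; v1 = rotl64(v1, 13); v1 ^= v0; v0 = rotl64(v0, 32);      \
        v2 += v3; v3 = rotl64(v3, 16); v3 ^= v2;                           \
        v0 += v3; v3 = rotl64(v3, 21); v3 ^= v0;                           \
        v2 += v1; v1 = rotl64(v1, 17); v1 ^= v2; v2 = rotl64(v2, 32);      \
    } while (0)

    struct siprand_state {
        uint64_t v0, v1, v2, v3;
    };

    /* Two rounds per 32-bit word, v1 + v3 finalization, as described above. */
    static uint32_t siprand_u32(struct siprand_state *s)
    {
        uint64_t v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;

        SIPROUND(v0, v1, v2, v3);
        SIPROUND(v0, v1, v2, v3);
        s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
        return (uint32_t)(v1 + v3);
    }

    int main(void)
    {
        /* Illustrative seed only: the classic SipHash IV constants. */
        struct siprand_state s = {
            0x736f6d6570736575ULL, 0x646f72616e646f6dULL,
            0x6c7967656e657261ULL, 0x7465646279746573ULL,
        };
        int i;

        for (i = 0; i < 8; i++)
            printf("%08x\n", (unsigned int)siprand_u32(&s));
        return 0;
    }
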
+diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig +index 698bc35251609..abb0a992d4a08 100644 +--- a/net/mptcp/Kconfig ++++ b/net/mptcp/Kconfig +@@ -22,11 +22,8 @@ config MPTCP_IPV6 + select IPV6 + default y + +-endif +- + config MPTCP_KUNIT_TESTS + tristate "This builds the MPTCP KUnit tests" if !KUNIT_ALL_TESTS +- select MPTCP + depends on KUNIT + default KUNIT_ALL_TESTS + help +@@ -39,3 +36,4 @@ config MPTCP_KUNIT_TESTS + + If unsure, say N. + ++endif +diff --git a/net/mptcp/options.c b/net/mptcp/options.c +index 888bbbbb3e8a4..3127527fc7ac0 100644 +--- a/net/mptcp/options.c ++++ b/net/mptcp/options.c +@@ -296,6 +296,7 @@ void mptcp_get_options(const struct sk_buff *skb, + mp_opt->mp_capable = 0; + mp_opt->mp_join = 0; + mp_opt->add_addr = 0; ++ mp_opt->ahmac = 0; + mp_opt->rm_addr = 0; + mp_opt->dss = 0; + +@@ -516,7 +517,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, + return ret; + } + +- if (subflow->use_64bit_ack) { ++ if (READ_ONCE(msk->use_64bit_ack)) { + ack_size = TCPOLEN_MPTCP_DSS_ACK64; + opts->ext_copy.data_ack = READ_ONCE(msk->ack_seq); + opts->ext_copy.ack64 = 1; +@@ -626,6 +627,12 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, + if (unlikely(mptcp_check_fallback(sk))) + return false; + ++ /* prevent adding of any MPTCP related options on reset packet ++ * until we support MP_TCPRST/MP_FASTCLOSE ++ */ ++ if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) ++ return false; ++ + if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts)) + ret = true; + else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining, +@@ -676,7 +683,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, + return false; + } + +-static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk, ++static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, + struct mptcp_subflow_context *subflow, + struct sk_buff *skb, + struct mptcp_options_received *mp_opt) +@@ -693,15 +700,20 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk, + TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq && + subflow->mp_join && mp_opt->mp_join && + READ_ONCE(msk->pm.server_side)) +- tcp_send_ack(sk); ++ tcp_send_ack(ssk); + goto fully_established; + } + +- /* we should process OoO packets before the first subflow is fully +- * established, but not expected for MP_JOIN subflows ++ /* we must process OoO packets before the first subflow is fully ++ * established. OoO packets are instead a protocol violation ++ * for MP_JOIN subflows as the peer must not send any data ++ * before receiving the forth ack - cfr. RFC 8684 section 3.2. + */ +- if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) ++ if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) { ++ if (subflow->mp_join) ++ goto reset; + return subflow->mp_capable; ++ } + + if (mp_opt->dss && mp_opt->use_ack) { + /* subflows are fully established as soon as we get any +@@ -713,9 +725,12 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk, + } + + /* If the first established packet does not contain MP_CAPABLE + data +- * then fallback to TCP ++ * then fallback to TCP. Fallback scenarios requires a reset for ++ * MP_JOIN subflows. 
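++ * (There is no TCP fallback path for a join: the extra subflow is
++ * useless as plain TCP, so it is torn down with a reset instead.)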
+ */ + if (!mp_opt->mp_capable) { ++ if (subflow->mp_join) ++ goto reset; + subflow->mp_capable = 0; + pr_fallback(msk); + __mptcp_do_fallback(msk); +@@ -732,12 +747,16 @@ fully_established: + + subflow->pm_notified = 1; + if (subflow->mp_join) { +- clear_3rdack_retransmission(sk); ++ clear_3rdack_retransmission(ssk); + mptcp_pm_subflow_established(msk, subflow); + } else { + mptcp_pm_fully_established(msk); + } + return true; ++ ++reset: ++ mptcp_subflow_reset(ssk); ++ return false; + } + + static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit) +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index 5d747c6a610e8..b295eb6e9580b 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -1383,6 +1383,20 @@ static void pm_work(struct mptcp_sock *msk) + spin_unlock_bh(&msk->pm.lock); + } + ++static void __mptcp_close_subflow(struct mptcp_sock *msk) ++{ ++ struct mptcp_subflow_context *subflow, *tmp; ++ ++ list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { ++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow); ++ ++ if (inet_sk_state_load(ssk) != TCP_CLOSE) ++ continue; ++ ++ __mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0); ++ } ++} ++ + static void mptcp_worker(struct work_struct *work) + { + struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); +@@ -1400,6 +1414,9 @@ static void mptcp_worker(struct work_struct *work) + mptcp_clean_una(sk); + mptcp_check_data_fin_ack(sk); + __mptcp_flush_join_list(msk); ++ if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) ++ __mptcp_close_subflow(msk); ++ + __mptcp_move_skbs(msk); + + if (msk->pm.status) +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index 20f04ac85409e..9724636426905 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -90,6 +90,7 @@ + #define MPTCP_WORK_RTX 2 + #define MPTCP_WORK_EOF 3 + #define MPTCP_FALLBACK_DONE 4 ++#define MPTCP_WORK_CLOSE_SUBFLOW 5 + + struct mptcp_options_received { + u64 sndr_key; +@@ -202,6 +203,7 @@ struct mptcp_sock { + bool fully_established; + bool rcv_data_fin; + bool snd_data_fin_enable; ++ bool use_64bit_ack; /* Set when we received a 64-bit DSN */ + spinlock_t join_list_lock; + struct work_struct work; + struct list_head conn_list; +@@ -294,7 +296,6 @@ struct mptcp_subflow_context { + backup : 1, + data_avail : 1, + rx_eof : 1, +- use_64bit_ack : 1, /* Set when we received a 64-bit DSN */ + can_ack : 1; /* only after processing the remote a key */ + u32 remote_nonce; + u64 thmac; +@@ -348,6 +349,7 @@ void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow, + struct mptcp_options_received *mp_opt); + bool mptcp_subflow_data_available(struct sock *sk); + void __init mptcp_subflow_init(void); ++void mptcp_subflow_reset(struct sock *ssk); + + /* called with sk socket lock held */ + int __mptcp_subflow_connect(struct sock *sk, int ifindex, +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index 6f035af1c9d25..559f5bbd96229 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -270,6 +270,19 @@ static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow) + return thmac == subflow->thmac; + } + ++void mptcp_subflow_reset(struct sock *ssk) ++{ ++ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); ++ struct sock *sk = subflow->conn; ++ ++ tcp_set_state(ssk, TCP_CLOSE); ++ tcp_send_active_reset(ssk, GFP_ATOMIC); ++ tcp_done(ssk); ++ if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) && ++ schedule_work(&mptcp_sk(sk)->work)) ++ 
sock_hold(sk); ++} ++ + static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) + { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); +@@ -342,8 +355,7 @@ fallback: + return; + + do_reset: +- tcp_send_active_reset(sk, GFP_ATOMIC); +- tcp_done(sk); ++ mptcp_subflow_reset(sk); + } + + struct request_sock_ops mptcp_subflow_request_sock_ops; +@@ -769,12 +781,11 @@ static enum mapping_status get_mapping_status(struct sock *ssk, + if (!mpext->dsn64) { + map_seq = expand_seq(subflow->map_seq, subflow->map_data_len, + mpext->data_seq); +- subflow->use_64bit_ack = 0; + pr_debug("expanded seq=%llu", subflow->map_seq); + } else { + map_seq = mpext->data_seq; +- subflow->use_64bit_ack = 1; + } ++ WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64); + + if (subflow->map_valid) { + /* Allow replacing only with an identical map */ +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c +index 678c5b14841c1..8dbfd84322a88 100644 +--- a/net/netfilter/ipvs/ip_vs_ctl.c ++++ b/net/netfilter/ipvs/ip_vs_ctl.c +@@ -2508,6 +2508,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len) + /* Set timeout values for (tcp tcpfin udp) */ + ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg); + goto out_unlock; ++ } else if (!len) { ++ /* No more commands with len == 0 below */ ++ ret = -EINVAL; ++ goto out_unlock; + } + + usvc_compat = (struct ip_vs_service_user *)arg; +@@ -2584,9 +2588,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len) + break; + case IP_VS_SO_SET_DELDEST: + ret = ip_vs_del_dest(svc, &udest); +- break; +- default: +- ret = -EINVAL; + } + + out_unlock: +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c +index b00866d777fe0..d2e5a8f644b80 100644 +--- a/net/netfilter/ipvs/ip_vs_xmit.c ++++ b/net/netfilter/ipvs/ip_vs_xmit.c +@@ -609,6 +609,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb, + if (ret == NF_ACCEPT) { + nf_reset_ct(skb); + skb_forward_csum(skb); ++ if (skb->dev) ++ skb->tstamp = 0; + } + return ret; + } +@@ -649,6 +651,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, + + if (!local) { + skb_forward_csum(skb); ++ if (skb->dev) ++ skb->tstamp = 0; + NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb, + NULL, skb_dst(skb)->dev, dst_output); + } else +@@ -669,6 +673,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb, + if (!local) { + ip_vs_drop_early_demux_sk(skb); + skb_forward_csum(skb); ++ if (skb->dev) ++ skb->tstamp = 0; + NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb, + NULL, skb_dst(skb)->dev, dst_output); + } else +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c +index e8c86ee4c1c48..c8fb2187ad4b2 100644 +--- a/net/netfilter/nf_conntrack_proto_tcp.c ++++ b/net/netfilter/nf_conntrack_proto_tcp.c +@@ -541,13 +541,20 @@ static bool tcp_in_window(const struct nf_conn *ct, + swin = win << sender->td_scale; + sender->td_maxwin = (swin == 0 ? 1 : swin); + sender->td_maxend = end + sender->td_maxwin; +- /* +- * We haven't seen traffic in the other direction yet +- * but we have to tweak window tracking to pass III +- * and IV until that happens. +- */ +- if (receiver->td_maxwin == 0) ++ if (receiver->td_maxwin == 0) { ++ /* We haven't seen traffic in the other ++ * direction yet but we have to tweak window ++ * tracking to pass III and IV until that ++ * happens. 
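++ * (III and IV are the upper and lower bounds on a valid (s)ack in
++ * the I-IV window-tracking conditions listed earlier in this file.)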
++ */
+ receiver->td_end = receiver->td_maxend = sack;
++ } else if (sack == receiver->td_end + 1) {
++ /* Likely a reply to a keepalive.
++ * Needed for III.
++ */
++ receiver->td_end++;
++ }
++
+ }
+ } else if (((state->state == TCP_CONNTRACK_SYN_SENT
+ && dir == IP_CT_DIR_ORIGINAL)
+diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
+index 2b01a151eaa80..a579e59ee5c5e 100644
+--- a/net/netfilter/nf_dup_netdev.c
++++ b/net/netfilter/nf_dup_netdev.c
+@@ -19,6 +19,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
+ skb_push(skb, skb->mac_len);
+
+ skb->dev = dev;
++ skb->tstamp = 0;
+ dev_queue_xmit(skb);
+ }
+
+diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
+index ae5628ddbe6d7..fd7c5f0f5c25b 100644
+--- a/net/netfilter/nf_log_common.c
++++ b/net/netfilter/nf_log_common.c
+@@ -171,6 +171,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
+ }
+ EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
+
++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
++{
++ u16 vid;
++
++ if (!skb_vlan_tag_present(skb))
++ return;
++
++ vid = skb_vlan_tag_get(skb);
++ nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid);
++}
++EXPORT_SYMBOL_GPL(nf_log_dump_vlan);
++
+ /* bridge and netdev logging families share this code. */
+ void nf_log_l2packet(struct net *net, u_int8_t pf,
+ __be16 protocol,
+diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
+index 3087e23297dbf..b77985986b24e 100644
+--- a/net/netfilter/nft_fwd_netdev.c
++++ b/net/netfilter/nft_fwd_netdev.c
+@@ -138,6 +138,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
+ return;
+
+ skb->dev = dev;
++ skb->tstamp = 0;
+ neigh_xmit(neigh_table, dev, addr, skb);
+ out:
+ regs->verdict.code = verdict;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index e894254c17d43..8709f3d4e7c4b 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1217,7 +1217,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
+ u32 idx;
+ char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+
+- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
+index e2235849a57e9..7d50c45fea376 100644
+--- a/net/openvswitch/flow_table.c
++++ b/net/openvswitch/flow_table.c
+@@ -172,7 +172,7 @@ static struct table_instance *table_instance_alloc(int new_size)
+
+ static void __mask_array_destroy(struct mask_array *ma)
+ {
+- free_percpu(ma->masks_usage_cntr);
++ free_percpu(ma->masks_usage_stats);
+ kfree(ma);
+ }
+
+@@ -196,15 +196,15 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
+ ma->masks_usage_zero_cntr[i] = 0;
+
+ for_each_possible_cpu(cpu) {
+- u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
+- cpu);
++ struct mask_array_stats *stats;
+ unsigned int start;
+ u64 counter;
+
++ stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
+ do {
+- start = u64_stats_fetch_begin_irq(&ma->syncp);
+- counter = usage_counters[i];
+- } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
++ start = u64_stats_fetch_begin_irq(&stats->syncp);
++ counter = stats->usage_cntrs[i];
++ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ ma->masks_usage_zero_cntr[i] += counter;
+ }
+@@ -227,9 +227,10 @@ static struct mask_array *tbl_mask_array_alloc(int size)
+ sizeof(struct sw_flow_mask *) *
+ size);
+
+- new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
+- __alignof__(u64));
+- if (!new->masks_usage_cntr) {
++ new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
++ sizeof(u64) * size,
++ __alignof__(u64));
++ if (!new->masks_usage_stats) {
+ kfree(new);
+ return NULL;
+ }
+@@ -723,6 +724,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
+
+ /* Flow lookup does full lookup on flow table. It starts with
+ * mask from index passed in *index.
++ * This function MUST be called with BH disabled due to the use
++ * of CPU specific variables.
+ */
+ static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ struct table_instance *ti,
+@@ -732,7 +735,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ u32 *n_cache_hit,
+ u32 *index)
+ {
+- u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
++ struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
+ struct sw_flow *flow;
+ struct sw_flow_mask *mask;
+ int i;
+@@ -742,9 +745,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ if (mask) {
+ flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+ if (flow) {
+- u64_stats_update_begin(&ma->syncp);
+- usage_counters[*index]++;
+- u64_stats_update_end(&ma->syncp);
++ u64_stats_update_begin(&stats->syncp);
++ stats->usage_cntrs[*index]++;
++ u64_stats_update_end(&stats->syncp);
+ (*n_cache_hit)++;
+ return flow;
+ }
+@@ -763,9 +766,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+ if (flow) { /* Found */
+ *index = i;
+- u64_stats_update_begin(&ma->syncp);
+- usage_counters[*index]++;
+- u64_stats_update_end(&ma->syncp);
++ u64_stats_update_begin(&stats->syncp);
++ stats->usage_cntrs[*index]++;
++ u64_stats_update_end(&stats->syncp);
+ return flow;
+ }
+ }
+@@ -851,9 +854,17 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+ struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
+ u32 __always_unused n_mask_hit;
+ u32 __always_unused n_cache_hit;
++ struct sw_flow *flow;
+ u32 index = 0;
+
+- return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
++ /* This function gets called trough the netlink interface and therefore
++ * is preemptible. However, flow_lookup() function needs to be called
++ * with BH disabled due to CPU specific variables.
++ */
++ local_bh_disable();
++ flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
++ local_bh_enable();
++ return flow;
+ }
+
+ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+@@ -1109,7 +1120,6 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
+
+ for (i = 0; i < ma->max; i++) {
+ struct sw_flow_mask *mask;
+- unsigned int start;
+ int cpu;
+
+ mask = rcu_dereference_ovsl(ma->masks[i]);
+@@ -1120,14 +1130,16 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
+ masks_and_count[i].counter = 0;
+
+ for_each_possible_cpu(cpu) {
+- u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
+- cpu);
++ struct mask_array_stats *stats;
++ unsigned int start;
+ u64 counter;
+
++ stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
+ do {
+- start = u64_stats_fetch_begin_irq(&ma->syncp);
+- counter = usage_counters[i];
+- } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
++ start = u64_stats_fetch_begin_irq(&stats->syncp);
++ counter = stats->usage_cntrs[i];
++ } while (u64_stats_fetch_retry_irq(&stats->syncp,
++ start));
+
+ masks_and_count[i].counter += counter;
+ }
+diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
+index 6e7d4ac593531..43144396e192c 100644
+--- a/net/openvswitch/flow_table.h
++++ b/net/openvswitch/flow_table.h
+@@ -38,12 +38,16 @@ struct mask_count {
+ u64 counter;
+ };
+
++struct mask_array_stats {
++ struct u64_stats_sync syncp;
++ u64 usage_cntrs[];
++};
++
+ struct mask_array {
+ struct rcu_head rcu;
+ int count, max;
+- u64 __percpu *masks_usage_cntr;
++ struct mask_array_stats __percpu *masks_usage_stats;
+ u64 *masks_usage_zero_cntr;
+- struct u64_stats_sync syncp;
+ struct sw_flow_mask __rcu *masks[];
+ };
+
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index a780afdf570d2..0bac241a41235 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -156,11 +156,11 @@ tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
+ __be16 target_dst = target.dst.u.udp.port;
+
+ if (target_src != tuple->src.u.udp.port)
+- tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
++ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
+ offsetof(struct udphdr, source),
+ 0xFFFF, be16_to_cpu(target_src));
+ if (target_dst != tuple->dst.u.udp.port)
+- tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
++ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
+ offsetof(struct udphdr, dest),
+ 0xFFFF, be16_to_cpu(target_dst));
+ }
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index a229751ee8c46..85c0d0d5b9da5 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -459,7 +459,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+
+ metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
+ 0, flags,
+- key_id, 0);
++ key_id, opts_len);
+ } else {
+ NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
+ ret = -EINVAL;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 41a55c6cbeb8f..faeabff283a2b 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -3712,7 +3712,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
+ entry->gate.num_entries = tcf_gate_num_entries(act);
+ err = tcf_gate_get_entries(entry, act);
+ if (err)
+- goto err_out;
++ goto err_out_locked;
+ } else {
+ err = -EOPNOTSUPP;
+ goto err_out_locked;
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index a406627b1d552..7c0e4fac9748d 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1597,7 +1597,7 @@ out:
+ return rc;
+ }
+
+-#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
++#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
+
+ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
+ bool is_dmb, int bufsize)
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index 3ea33466ebe98..da9332de26c5d 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -233,8 +233,6 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
+ default:
+ flow->type = SMC_LLC_FLOW_NONE;
+ }
+- if (qentry == lgr->delayed_event)
+- lgr->delayed_event = NULL;
+ smc_llc_flow_qentry_set(flow, qentry);
+ spin_unlock_bh(&lgr->llc_flow_lock);
+ return true;
+@@ -1603,13 +1601,12 @@ static void smc_llc_event_work(struct work_struct *work)
+ struct smc_llc_qentry *qentry;
+
+ if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
+- if (smc_link_usable(lgr->delayed_event->link)) {
+- smc_llc_event_handler(lgr->delayed_event);
+- } else {
+- qentry = lgr->delayed_event;
+- lgr->delayed_event = NULL;
++ qentry = lgr->delayed_event;
++ lgr->delayed_event = NULL;
++ if (smc_link_usable(qentry->link))
++ smc_llc_event_handler(qentry);
++ else
+ kfree(qentry);
+- }
+ }
+
+ again:
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 258b04372f854..bd4678db9d76b 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1147,9 +1147,9 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ struct gssp_in_token *in_token)
+ {
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+- unsigned int page_base, length;
+- int pages, i, res;
+- size_t inlen;
++ unsigned int length, pgto_offs, pgfrom_offs;
++ int pages, i, res, pgto, pgfrom;
++ size_t inlen, to_offs, from_offs;
+
+ res = gss_read_common_verf(gc, argv, authp, in_handle);
+ if (res)
+@@ -1177,17 +1177,24 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
+ inlen -= length;
+
+- i = 1;
+- page_base = rqstp->rq_arg.page_base;
++ to_offs = length;
++ from_offs = rqstp->rq_arg.page_base;
+ while (inlen) {
+- length = min_t(unsigned int, inlen, PAGE_SIZE);
+- memcpy(page_address(in_token->pages[i]),
+- page_address(rqstp->rq_arg.pages[i]) + page_base,
++ pgto = to_offs >> PAGE_SHIFT;
++ pgfrom = from_offs >> PAGE_SHIFT;
++ pgto_offs = to_offs & ~PAGE_MASK;
++ pgfrom_offs = from_offs & ~PAGE_MASK;
++
++ length = min_t(unsigned int, inlen,
++ min_t(unsigned int, PAGE_SIZE - pgto_offs,
++ PAGE_SIZE - pgfrom_offs));
++ memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
++ page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
+ length);
+
++ to_offs += length;
++ from_offs += length;
+ inlen -= length;
+- page_base = 0;
+- i++;
+ }
+ return 0;
+ }
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 7b94d971feb3b..c3d588b149aaa 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -638,10 +638,11 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+ while (remaining) {
+ len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+
+- memcpy(dst, page_address(*ppages), len);
++ memcpy(dst, page_address(*ppages) + pageoff, len);
+ remaining -= len;
+ dst += len;
+ pageoff = 0;
++ ppages++;
+ }
+ }
+
+diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
+index 940d176e0e872..d4beca895992d 100644
+--- a/net/tipc/bcast.c
++++ b/net/tipc/bcast.c
+@@ -108,6 +108,8 @@ static void tipc_bcbase_select_primary(struct net *net)
+ {
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+ int all_dests = tipc_link_bc_peers(bb->link);
++ int max_win = tipc_link_max_win(bb->link);
++ int min_win = tipc_link_min_win(bb->link);
+ int i, mtu, prim;
+
+ bb->primary_bearer = INVALID_BEARER_ID;
+@@ -121,8 +123,12 @@ static void tipc_bcbase_select_primary(struct net *net)
+ continue;
+
+ mtu = tipc_bearer_mtu(net, i);
+- if (mtu < tipc_link_mtu(bb->link))
++ if (mtu < tipc_link_mtu(bb->link)) {
+ tipc_link_set_mtu(bb->link, mtu);
++ tipc_link_set_queue_limits(bb->link,
++ min_win,
++ max_win);
++ }
+ bb->bcast_support &= tipc_bearer_bcast_support(net, i);
+ if (bb->dests[i] < all_dests)
+ continue;
+@@ -585,7 +591,7 @@ static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win)
+ if (max_win > TIPC_MAX_LINK_WIN)
+ return -EINVAL;
+ tipc_bcast_lock(net);
+- tipc_link_set_queue_limits(l, BCLINK_WIN_MIN, max_win);
++ tipc_link_set_queue_limits(l, tipc_link_min_win(l), max_win);
+ tipc_bcast_unlock(net);
+ return 0;
+ }
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 52e93ba4d8e2c..6812244018714 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ if (fragid == FIRST_FRAGMENT) {
+ if (unlikely(head))
+ goto err;
+- frag = skb_unshare(frag, GFP_ATOMIC);
++ if (skb_cloned(frag))
++ frag = skb_copy(frag, GFP_ATOMIC);
+ if (unlikely(!frag))
+ goto err;
+ head = *headbuf = frag;
+diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
+index 2f9c148f17e27..fe4edce459ad4 100644
+--- a/net/tipc/name_distr.c
++++ b/net/tipc/name_distr.c
+@@ -327,8 +327,13 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
+ struct tipc_msg *hdr;
+ u16 seqno;
+
++ spin_lock_bh(&namedq->lock);
+ skb_queue_walk_safe(namedq, skb, tmp) {
+- skb_linearize(skb);
++ if (unlikely(skb_linearize(skb))) {
++ __skb_unlink(skb, namedq);
++ kfree_skb(skb);
++ continue;
++ }
+ hdr = buf_msg(skb);
+ seqno = msg_named_seqno(hdr);
+ if (msg_is_last_bulk(hdr)) {
+@@ -338,12 +343,14 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
+
+ if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
+ __skb_unlink(skb, namedq);
++ spin_unlock_bh(&namedq->lock);
+ return skb;
+ }
+
+ if (*open && (*rcv_nxt == seqno)) {
+ (*rcv_nxt)++;
+ __skb_unlink(skb, namedq);
++ spin_unlock_bh(&namedq->lock);
+ return skb;
+ }
+
+@@ -353,6 +360,7 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
+ continue;
+ }
+ }
++ spin_unlock_bh(&namedq->lock);
+ return NULL;
+ }
+
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 4edcee3088da1..e4cf515e323f3 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -1485,7 +1485,7 @@ static void node_lost_contact(struct tipc_node *n,
+
+ /* Clean up broadcast state */
+ tipc_bcast_remove_peer(n->net, n->bc_entry.link);
+- __skb_queue_purge(&n->bc_entry.namedq);
++ skb_queue_purge(&n->bc_entry.namedq);
+
+ /* Abort any ongoing link failover */
+ for (i = 0; i < MAX_BEARERS; i++) {
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index b74e2741f74f4..cec86229a6a02 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -418,14 +418,14 @@ static int tls_push_data(struct sock *sk,
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+- int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
+ struct tls_record_info *record = ctx->open_record;
+ int tls_push_record_flags;
+ struct page_frag *pfrag;
+ size_t orig_size = size;
+ u32 max_open_record_len;
+- int copy, rc = 0;
++ bool more = false;
+ bool done = false;
++ int copy, rc = 0;
+ long timeo;
+
+ if (flags &
+@@ -492,9 +492,8 @@ handle_error:
+ if (!size) {
+ last_record:
+ tls_push_record_flags = flags;
+- if (more) {
+- tls_ctx->pending_open_record_frags =
+- !!record->num_frags;
++ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
++ more = true;
+ break;
+ }
+
+@@ -526,6 +525,8 @@ last_record:
+ }
+ } while (!done);
+
++ tls_ctx->pending_open_record_frags = more;
++
+ if (orig_size - size > 0)
+ rc = orig_size - size;
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 7fd45f6ddb058..e14307f2bddcc 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -2355,7 +2355,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
+ * case we'll continue with more data in the next round,
+ * but break unconditionally so unsplit data stops here.
+ */
+- state->split_start++;
++ if (state->split)
++ state->split_start++;
++ else
++ state->split_start = 0;
+ break;
+ case 9:
+ if (rdev->wiphy.extended_capabilities &&
+@@ -4683,16 +4686,14 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs,
+ if (err)
+ return err;
+
+- if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] ||
+- !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
+- return -EINVAL;
+-
+- he_obss_pd->min_offset =
+- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
+- he_obss_pd->max_offset =
+- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
++ if (tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET])
++ he_obss_pd->min_offset =
++ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
++ if (tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
++ he_obss_pd->max_offset =
++ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
+
+- if (he_obss_pd->min_offset >= he_obss_pd->max_offset)
++ if (he_obss_pd->min_offset > he_obss_pd->max_offset)
+ return -EINVAL;
+
+ he_obss_pd->enable = true;
+diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
+index 19c679456a0e2..c821e98671393 100644
+--- a/samples/bpf/xdpsock_user.c
++++ b/samples/bpf/xdpsock_user.c
+@@ -1004,7 +1004,7 @@ static void rx_drop_all(void)
+ }
+ }
+
+-static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
++static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
+ {
+ u32 idx;
+ unsigned int i;
+@@ -1017,14 +1017,14 @@ static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
+ for (i = 0; i < batch_size; i++) {
+ struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
+ idx + i);
+- tx_desc->addr = (frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
++ tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
+ tx_desc->len = PKT_SIZE;
+ }
+
+ xsk_ring_prod__submit(&xsk->tx, batch_size);
+ xsk->outstanding_tx += batch_size;
+- frame_nb += batch_size;
+- frame_nb %= NUM_FRAMES;
++ *frame_nb += batch_size;
++ *frame_nb %= NUM_FRAMES;
+ complete_tx_only(xsk, batch_size);
+ }
+
+@@ -1080,7 +1080,7 @@ static void tx_only_all(void)
+ }
+
+ for (i = 0; i < num_socks; i++)
+- tx_only(xsks[i], frame_nb[i], batch_size);
++ tx_only(xsks[i], &frame_nb[i], batch_size);
+
+ pkt_cnt += batch_size;
+
+diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
+index a11bf6c5b53b4..cd3f16a6f5caf 100644
+--- a/samples/mic/mpssd/mpssd.c
++++ b/samples/mic/mpssd/mpssd.c
+@@ -403,9 +403,9 @@ mic_virtio_copy(struct mic_info *mic, int fd,
+
+ static inline unsigned _vring_size(unsigned int num, unsigned long align)
+ {
+- return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
++ return _ALIGN_UP(((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
+ + align - 1) & ~(align - 1))
+- + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
++ + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num, 4);
+ }
+
+ /*
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index 011c3c76af865..21989fa0c1074 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -829,6 +829,8 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+ /* now accumulate with current aggregate */
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
++ if (rc != 0)
++ return rc;
+ }
+ /*
+ * Extend cumulative digest over TPM registers 8-9, which contain
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 8a91711ca79b2..4c86cd4eece0c 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -531,6 +531,16 @@ int ima_file_hash(struct file *file, char *buf, size_t buf_size)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&iint->mutex);
++
++ /*
++ * ima_file_hash can be called when ima_collect_measurement has still
++ * not been called, we might not always have a hash.
++ */
++ if (!iint->ima_hash) {
++ mutex_unlock(&iint->mutex);
++ return -EOPNOTSUPP;
++ }
++
+ if (buf) {
+ size_t copied_size;
+
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index b4de33074b37d..4a7a4b6bf79b2 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -59,6 +59,11 @@ enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
+
+ enum policy_rule_list { IMA_DEFAULT_POLICY = 1, IMA_CUSTOM_POLICY };
+
++struct ima_rule_opt_list {
++ size_t count;
++ char *items[];
++};
++
+ struct ima_rule_entry {
+ struct list_head list;
+ int action;
+@@ -78,7 +83,7 @@ struct ima_rule_entry {
+ int type; /* audit type */
+ } lsm[MAX_LSM_RULES];
+ char *fsname;
+- char *keyrings; /* Measure keys added to these keyrings */
++ struct ima_rule_opt_list *keyrings; /* Measure keys added to these keyrings */
+ struct ima_template_desc *template;
+ };
+
+@@ -206,10 +211,6 @@ static LIST_HEAD(ima_policy_rules);
+ static LIST_HEAD(ima_temp_rules);
+ static struct list_head *ima_rules = &ima_default_rules;
+
+-/* Pre-allocated buffer used for matching keyrings. */
+-static char *ima_keyrings;
+-static size_t ima_keyrings_len;
+-
+ static int ima_policy __initdata;
+
+ static int __init default_measure_policy_setup(char *str)
+@@ -253,6 +254,72 @@ static int __init default_appraise_policy_setup(char *str)
+ }
+ __setup("ima_appraise_tcb", default_appraise_policy_setup);
+
++static struct ima_rule_opt_list *ima_alloc_rule_opt_list(const substring_t *src)
++{
++ struct ima_rule_opt_list *opt_list;
++ size_t count = 0;
++ char *src_copy;
++ char *cur, *next;
++ size_t i;
++
++ src_copy = match_strdup(src);
++ if (!src_copy)
++ return ERR_PTR(-ENOMEM);
++
++ next = src_copy;
++ while ((cur = strsep(&next, "|"))) {
++ /* Don't accept an empty list item */
++ if (!(*cur)) {
++ kfree(src_copy);
++ return ERR_PTR(-EINVAL);
++ }
++ count++;
++ }
++
++ /* Don't accept an empty list */
++ if (!count) {
++ kfree(src_copy);
++ return ERR_PTR(-EINVAL);
++ }
++
++ opt_list = kzalloc(struct_size(opt_list, items, count), GFP_KERNEL);
++ if (!opt_list) {
++ kfree(src_copy);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ /*
++ * strsep() has already replaced all instances of '|' with '\0',
++ * leaving a byte sequence of NUL-terminated strings. Reference each
++ * string with the array of items.
++ *
++ * IMPORTANT: Ownership of the allocated buffer is transferred from
++ * src_copy to the first element in the items array. To free the
++ * buffer, kfree() must only be called on the first element of the
++ * array.
++ */
++ for (i = 0, cur = src_copy; i < count; i++) {
++ opt_list->items[i] = cur;
++ cur = strchr(cur, '\0') + 1;
++ }
++ opt_list->count = count;
++
++ return opt_list;
++}
++
++static void ima_free_rule_opt_list(struct ima_rule_opt_list *opt_list)
++{
++ if (!opt_list)
++ return;
++
++ if (opt_list->count) {
++ kfree(opt_list->items[0]);
++ opt_list->count = 0;
++ }
++
++ kfree(opt_list);
++}
++
+ static void ima_lsm_free_rule(struct ima_rule_entry *entry)
+ {
+ int i;
+@@ -274,7 +341,7 @@ static void ima_free_rule(struct ima_rule_entry *entry)
+ * the defined_templates list and cannot be freed here
+ */
+ kfree(entry->fsname);
+- kfree(entry->keyrings);
++ ima_free_rule_opt_list(entry->keyrings);
+ ima_lsm_free_rule(entry);
+ kfree(entry);
+ }
+@@ -394,8 +461,8 @@ int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
+ static bool ima_match_keyring(struct ima_rule_entry *rule,
+ const char *keyring, const struct cred *cred)
+ {
+- char *next_keyring, *keyrings_ptr;
+ bool matched = false;
++ size_t i;
+
+ if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
+ return false;
+@@ -406,15 +473,8 @@ static bool ima_match_keyring(struct ima_rule_entry *rule,
+ if (!keyring)
+ return false;
+
+- strcpy(ima_keyrings, rule->keyrings);
+-
+- /*
+- * "keyrings=" is specified in the policy in the format below:
+- * keyrings=.builtin_trusted_keys|.ima|.evm
+- */
+- keyrings_ptr = ima_keyrings;
+- while ((next_keyring = strsep(&keyrings_ptr, "|")) != NULL) {
+- if (!strcmp(next_keyring, keyring)) {
++ for (i = 0; i < rule->keyrings->count; i++) {
++ if (!strcmp(rule->keyrings->items[i], keyring)) {
+ matched = true;
+ break;
+ }
+@@ -1065,7 +1125,6 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ bool uid_token;
+ struct ima_template_desc *template_desc;
+ int result = 0;
+- size_t keyrings_len;
+
+ ab = integrity_audit_log_start(audit_context(), GFP_KERNEL,
+ AUDIT_INTEGRITY_POLICY_RULE);
+@@ -1174,7 +1233,8 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ entry->func = POLICY_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_CMDLINE") == 0)
+ entry->func = KEXEC_CMDLINE;
+- else if (strcmp(args[0].from, "KEY_CHECK") == 0)
++ else if (IS_ENABLED(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) &&
++ strcmp(args[0].from, "KEY_CHECK") == 0)
+ entry->func = KEY_CHECK;
+ else
+ result = -EINVAL;
+@@ -1231,37 +1291,19 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ case Opt_keyrings:
+ ima_log_string(ab, "keyrings", args[0].from);
+
+- keyrings_len = strlen(args[0].from) + 1;
+-
+- if ((entry->keyrings) ||
+- (keyrings_len < 2)) {
++ if (!IS_ENABLED(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) ||
++ entry->keyrings) {
+ result = -EINVAL;
+ break;
+ }
+
+- if (keyrings_len > ima_keyrings_len) {
+- char *tmpbuf;
+-
+- tmpbuf = krealloc(ima_keyrings, keyrings_len,
+- GFP_KERNEL);
+- if (!tmpbuf) {
+- result = -ENOMEM;
+- break;
+- }
+-
+- ima_keyrings = tmpbuf;
+- ima_keyrings_len = keyrings_len;
+- }
+-
+- entry->keyrings = kstrdup(args[0].from, GFP_KERNEL);
+- if (!entry->keyrings) {
+- kfree(ima_keyrings);
+- ima_keyrings = NULL;
+- ima_keyrings_len = 0;
+- result = -ENOMEM;
++ entry->keyrings = ima_alloc_rule_opt_list(args);
++ if (IS_ERR(entry->keyrings)) {
++ result = PTR_ERR(entry->keyrings);
++ entry->keyrings = NULL;
+ break;
+ }
+- result = 0;
++
+ entry->flags |= IMA_KEYRINGS;
+ break;
+ case Opt_fsuuid:
+@@ -1574,6 +1616,15 @@ static void policy_func_show(struct seq_file *m, enum ima_hooks func)
+ seq_printf(m, "func=%d ", func);
+ }
+
++static void ima_show_rule_opt_list(struct seq_file *m,
++ const struct ima_rule_opt_list *opt_list)
++{
++ size_t i;
++
++ for (i = 0; i < opt_list->count; i++)
++ seq_printf(m, "%s%s", i ? "|" : "", opt_list->items[i]);
++}
++
+ int ima_policy_show(struct seq_file *m, void *v)
+ {
+ struct ima_rule_entry *entry = v;
+@@ -1630,9 +1681,8 @@ int ima_policy_show(struct seq_file *m, void *v)
+ }
+
+ if (entry->flags & IMA_KEYRINGS) {
+- if (entry->keyrings != NULL)
+- snprintf(tbuf, sizeof(tbuf), "%s", entry->keyrings);
+- seq_printf(m, pt(Opt_keyrings), tbuf);
++ seq_puts(m, "keyrings=");
++ ima_show_rule_opt_list(m, entry->keyrings);
+ seq_puts(m, " ");
+ }
+
+diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
+index c8b9c0b315d8f..250a92b187265 100644
+--- a/sound/core/seq/oss/seq_oss.c
++++ b/sound/core/seq/oss/seq_oss.c
+@@ -174,9 +174,12 @@ odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ if (snd_BUG_ON(!dp))
+ return -ENXIO;
+
+- mutex_lock(&register_mutex);
++ if (cmd != SNDCTL_SEQ_SYNC &&
++ mutex_lock_interruptible(&register_mutex))
++ return -ERESTARTSYS;
+ rc = snd_seq_oss_ioctl(dp, cmd, arg);
+- mutex_unlock(&register_mutex);
++ if (cmd != SNDCTL_SEQ_SYNC)
++ mutex_unlock(&register_mutex);
+ return rc;
+ }
+
+diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
+index 45b740f44c459..c362eb38ab906 100644
+--- a/sound/firewire/bebob/bebob_hwdep.c
++++ b/sound/firewire/bebob/bebob_hwdep.c
+@@ -36,12 +36,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
+ }
+
+ memset(&event, 0, sizeof(event));
++ count = min_t(long, count, sizeof(event.lock_status));
+ if (bebob->dev_lock_changed) {
+ event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
+ event.lock_status.status = (bebob->dev_lock_count > 0);
+ bebob->dev_lock_changed = false;
+-
+- count = min_t(long, count, sizeof(event.lock_status));
+ }
+
+ spin_unlock_irq(&bebob->lock);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 36a9dbc33aa01..476a8b871daa1 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1001,12 +1001,14 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
+ azx_init_pci(chip);
+ hda_intel_init_chip(chip, true);
+
+- if (status && from_rt) {
+- list_for_each_codec(codec, &chip->bus)
+- if (!codec->relaxed_resume &&
+- (status & (1 << codec->addr)))
+- schedule_delayed_work(&codec->jackpoll_work,
+- codec->jackpoll_interval);
++ if (from_rt) {
++ list_for_each_codec(codec, &chip->bus) {
++ if (codec->relaxed_resume)
++ continue;
++
++ if (codec->forced_resume || (status & (1 << codec->addr)))
++ pm_request_resume(hda_codec_dev(codec));
++ }
+ }
+
+ /* power down again for link-controlled chips */
+diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
+index 02cc682caa55a..588059428d8f5 100644
+--- a/sound/pci/hda/hda_jack.c
++++ b/sound/pci/hda/hda_jack.c
+@@ -275,6 +275,23 @@ int snd_hda_jack_detect_state_mst(struct hda_codec *codec,
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state_mst);
+
++static struct hda_jack_callback *
++find_callback_from_list(struct hda_jack_tbl *jack,
++ hda_jack_callback_fn func)
++{
++ struct hda_jack_callback *cb;
++
++ if (!func)
++ return NULL;
++
++ for (cb = jack->callback; cb; cb = cb->next) {
++ if (cb->func == func)
++ return cb;
++ }
++
++ return NULL;
++}
++
+ /**
+ * snd_hda_jack_detect_enable_mst - enable the jack-detection
+ * @codec: the HDA codec
+@@ -297,7 +314,10 @@ snd_hda_jack_detect_enable_callback_mst(struct hda_codec *codec, hda_nid_t nid,
+ jack = snd_hda_jack_tbl_new(codec, nid, dev_id);
+ if (!jack)
+ return ERR_PTR(-ENOMEM);
+- if (func) {
++
++ callback = find_callback_from_list(jack, func);
++
++ if (func && !callback) {
+ callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+ if (!callback)
+ return ERR_PTR(-ENOMEM);
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index b7dbf2e7f77af..a3eecdf9185e8 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1065,6 +1065,7 @@ enum {
+ QUIRK_R3DI,
+ QUIRK_R3D,
+ QUIRK_AE5,
++ QUIRK_AE7,
+ };
+
+ #ifdef CONFIG_PCI
+@@ -1184,6 +1185,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
+ SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
+ SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
++ SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7),
+ {}
+ };
+
+@@ -4675,6 +4677,15 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
+ tmp = FLOAT_THREE;
+ break;
++ case QUIRK_AE7:
++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
++ tmp = FLOAT_THREE;
++ chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
++ SR_96_000);
++ chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
++ SR_96_000);
++ dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
++ break;
+ default:
+ tmp = FLOAT_ONE;
+ break;
+@@ -4720,6 +4731,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ case QUIRK_AE5:
+ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
+ break;
++ case QUIRK_AE7:
++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f);
++ chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
++ SR_96_000);
++ chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
++ SR_96_000);
++ dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
++ break;
+ default:
+ break;
+ }
+@@ -4729,7 +4748,10 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ if (ca0132_quirk(spec) == QUIRK_R3DI)
+ chipio_set_conn_rate(codec, 0x0F, SR_96_000);
+
+- tmp = FLOAT_ZERO;
++ if (ca0132_quirk(spec) == QUIRK_AE7)
++ tmp = FLOAT_THREE;
++ else
++ tmp = FLOAT_ZERO;
+ dspio_set_uint_param(codec, 0x80, 0x00, tmp);
+
+ switch (ca0132_quirk(spec)) {
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 4020500880905..56a8643adbdcd 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2046,22 +2046,25 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ int pinctl;
+ int err = 0;
+
++ mutex_lock(&spec->pcm_lock);
+ if (hinfo->nid) {
+ pcm_idx = hinfo_to_pcm_index(codec, hinfo);
+- if (snd_BUG_ON(pcm_idx < 0))
+- return -EINVAL;
++ if (snd_BUG_ON(pcm_idx < 0)) {
++ err = -EINVAL;
++ goto unlock;
++ }
+ cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid);
+- if (snd_BUG_ON(cvt_idx < 0))
+- return -EINVAL;
++ if (snd_BUG_ON(cvt_idx < 0)) {
++ err = -EINVAL;
++ goto unlock;
++ }
+ per_cvt = get_cvt(spec, cvt_idx);
+-
+ snd_BUG_ON(!per_cvt->assigned);
+ per_cvt->assigned = 0;
+ hinfo->nid = 0;
+
+ azx_stream(get_azx_dev(substream))->stripe = 0;
+
+- mutex_lock(&spec->pcm_lock);
+ snd_hda_spdif_ctls_unassign(codec, pcm_idx);
+ clear_bit(pcm_idx, &spec->pcm_in_use);
+ pin_idx = hinfo_to_pin_index(codec, hinfo);
+@@ -2091,10 +2094,11 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ per_pin->setup = false;
+ per_pin->channels = 0;
+ mutex_unlock(&per_pin->lock);
+- unlock:
+- mutex_unlock(&spec->pcm_lock);
+ }
+
++unlock:
++ mutex_unlock(&spec->pcm_lock);
++
+ return err;
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d4f17b4658927..f2398721ac1ef 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1150,6 +1150,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
+ codec->single_adc_amp = 1;
+ /* FIXME: do we need this for all Realtek codec models? */
+ codec->spdif_status_reset = 1;
++ codec->forced_resume = 1;
+ codec->patch_ops = alc_patch_ops;
+
+ err = alc_codec_rename_from_preset(codec);
+@@ -1929,6 +1930,8 @@ enum {
+ ALC1220_FIXUP_CLEVO_P950,
+ ALC1220_FIXUP_CLEVO_PB51ED,
+ ALC1220_FIXUP_CLEVO_PB51ED_PINS,
++ ALC887_FIXUP_ASUS_AUDIO,
++ ALC887_FIXUP_ASUS_HMIC,
+ };
+
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2141,6 +2144,31 @@ static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
+ alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
+ }
+
++static void alc887_asus_hp_automute_hook(struct hda_codec *codec,
++ struct hda_jack_callback *jack)
++{
++ struct alc_spec *spec = codec->spec;
++ unsigned int vref;
++
++ snd_hda_gen_hp_automute(codec, jack);
++
++ if (spec->gen.hp_jack_present)
++ vref = AC_PINCTL_VREF_80;
++ else
++ vref = AC_PINCTL_VREF_HIZ;
++ snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref);
++}
++
++static void alc887_fixup_asus_jack(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++ if (action != HDA_FIXUP_ACT_PROBE)
++ return;
++ snd_hda_set_pin_ctl_cache(codec, 0x1b, PIN_HP);
++ spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook;
++}
++
+ static const struct hda_fixup alc882_fixups[] = {
+ [ALC882_FIXUP_ABIT_AW9D_MAX] = {
+ .type = HDA_FIXUP_PINS,
+@@ -2398,6 +2426,20 @@ static const struct hda_fixup alc882_fixups[] = {
+ .chained = true,
+ .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
+ },
++ [ALC887_FIXUP_ASUS_AUDIO] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */
++ { 0x19, 0x22219420 },
++ {}
++ },
++ },
++ [ALC887_FIXUP_ASUS_HMIC] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc887_fixup_asus_jack,
++ .chained = true,
++ .chain_id = ALC887_FIXUP_ASUS_AUDIO,
++ },
+ };
+
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2431,6 +2473,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
+ SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
++ SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC),
+ SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
+ SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
+ SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+@@ -6233,6 +6276,7 @@ enum {
+ ALC269_FIXUP_LEMOTE_A190X,
+ ALC256_FIXUP_INTEL_NUC8_RUGGED,
+ ALC255_FIXUP_XIAOMI_HEADSET_MIC,
++ ALC274_FIXUP_HP_MIC,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7612,6 +7656,14 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC289_FIXUP_ASUS_GA401
+ },
++ [ALC274_FIXUP_HP_MIC] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
++ { }
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7763,6 +7815,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
++ SND_PCI_QUIRK(0x103c, 0x874e, "HP", ALC274_FIXUP_HP_MIC),
++ SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+@@ -8088,6 +8142,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
+ {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
+ {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
++ {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
+ {}
+ };
+ #define ALC225_STANDARD_PINS \
+@@ -9622,6 +9677,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++ SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
+ SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
+ SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 946a70210f492..601ea45d3ea66 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -540,6 +540,7 @@ config SND_SOC_CQ0093VC
+ config SND_SOC_CROS_EC_CODEC
+ tristate "codec driver for ChromeOS EC"
+ depends on CROS_EC
++ select CRYPTO
+ select CRYPTO_LIB_SHA256
+ help
+ If you say yes here you will get support for the
+diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
+index c098518343959..3226c6d4493eb 100644
+--- a/sound/soc/codecs/tas2770.c
++++ b/sound/soc/codecs/tas2770.c
+@@ -16,7 +16,6 @@
+ #include 
+ #include 
+ #include 
+-#include 
+ #include 
+ #include 
+ #include 
+@@ -57,7 +56,12 @@ static int tas2770_set_bias_level(struct snd_soc_component *component,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_ACTIVE);
+ break;
+-
++ case SND_SOC_BIAS_STANDBY:
++ case SND_SOC_BIAS_PREPARE:
++ snd_soc_component_update_bits(component,
++ TAS2770_PWR_CTRL,
++ TAS2770_PWR_CTRL_MASK, TAS2770_PWR_CTRL_MUTE);
++ break;
+ case SND_SOC_BIAS_OFF:
+ snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+@@ -135,23 +139,18 @@ static int tas2770_dac_event(struct snd_soc_dapm_widget *w,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_MUTE);
+- if (ret)
+- goto end;
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_SHUTDOWN);
+- if (ret)
+- goto end;
+ break;
+ default:
+ dev_err(tas2770->dev, "Not supported evevt\n");
+ return -EINVAL;
+ }
+
+-end:
+ if (ret < 0)
+ return ret;
+
+@@ -243,6 +242,9 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
+ return -EINVAL;
+ }
+
++ if (ret < 0)
++ return ret;
++
+ tas2770->channel_size = bitwidth;
+
+ ret = snd_soc_component_update_bits(component,
+@@ -251,16 +253,15 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
+ TAS2770_TDM_CFG_REG5_50_MASK,
+ TAS2770_TDM_CFG_REG5_VSNS_ENABLE |
+ tas2770->v_sense_slot);
+- if (ret)
+- goto end;
++ if (ret < 0)
++ return ret;
++
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG6,
+ TAS2770_TDM_CFG_REG6_ISNS_MASK |
+ TAS2770_TDM_CFG_REG6_50_MASK,
+ TAS2770_TDM_CFG_REG6_ISNS_ENABLE |
+ tas2770->i_sense_slot);
+-
+-end:
+ if (ret < 0)
+ return ret;
+
+@@ -278,36 +279,35 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+- if (ret)
+- goto end;
++ if (ret < 0)
++ return ret;
++
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
+- if (ret)
+- goto end;
+ break;
+ case 44100:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+- if (ret)
+- goto end;
++ if (ret < 0)
++ return ret;
++
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
+- if (ret)
+- goto end;
+ break;
+ case 96000:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+- if (ret)
+- goto end;
++ if (ret < 0)
++ return ret;
++
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+@@ -318,8 +318,9 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+- if (ret)
+- goto end;
++ if (ret < 0)
++ return ret;
++
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+@@ -330,22 +331,22 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+- if (ret)
+- goto end;
++ if (ret < 0)
++ return ret;
++
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_176_4_192KHZ);
+- if (ret)
+- goto end;
+ break;
+ case 17640:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+- if (ret)
+- goto end;
++ if (ret < 0)
++ return ret;
++
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+@@ -355,7 +356,6 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
+ ret = -EINVAL;
+ }
+
+-end:
+ if (ret < 0)
+ return ret;
+
+@@ -575,6 +575,8 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
+
+ tas2770->component = component;
+
++ tas2770_reset(tas2770);
++
+ return 0;
+ }
+
+@@ -701,29 +703,28 @@ static int tas2770_parse_dt(struct device *dev, struct tas2770_priv *tas2770)
+ rc = fwnode_property_read_u32(dev->fwnode, "ti,asi-format",
+ &tas2770->asi_format);
+ if (rc) {
+- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+- "ti,asi-format", rc);
+- goto end;
++ dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
++ "ti,asi-format");
++ tas2770->asi_format = 0;
+ }
+
+ rc = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
+ &tas2770->i_sense_slot);
+ if (rc) {
+- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+- "ti,imon-slot-no", rc);
+- goto end;
++ dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
++ "ti,imon-slot-no");
++ tas2770->i_sense_slot = 0;
+ }
+
+ rc = fwnode_property_read_u32(dev->fwnode, "ti,vmon-slot-no",
+ &tas2770->v_sense_slot);
+ if (rc) {
+- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+- "ti,vmon-slot-no", rc);
+- goto end;
++ dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
++ "ti,vmon-slot-no");
++ tas2770->v_sense_slot = 2;
+ }
+
+-end:
+- return rc;
++ return 0;
+ }
+
+ static int tas2770_i2c_probe(struct i2c_client *client,
+@@ -771,8 +772,6 @@ static int tas2770_i2c_probe(struct i2c_client *client,
+ tas2770->channel_size = 0;
+ tas2770->slot_width = 0;
+
+- tas2770_reset(tas2770);
+-
+ result = tas2770_register_codec(tas2770);
+ if (result)
+ dev_err(tas2770->dev, "Register codec failed.\n");
+@@ -781,13 +780,6 @@ end:
+ return result;
+ }
+
+-static int tas2770_i2c_remove(struct i2c_client *client)
+-{
+- pm_runtime_disable(&client->dev);
+- return 0;
+-}
+-
+-
+ static const struct i2c_device_id tas2770_i2c_id[] = {
+ { "tas2770", 0},
+ { }
+@@ -808,7 +800,6 @@ static struct i2c_driver tas2770_i2c_driver = {
+ .of_match_table = of_match_ptr(tas2770_of_match),
+ },
+ .probe = tas2770_i2c_probe,
+- .remove = tas2770_i2c_remove,
+ .id_table = tas2770_i2c_id,
+ };
+
+diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
+index 8efe20605f9be..c7c782d279d0d 100644
+--- a/sound/soc/codecs/tlv320adcx140.c
++++ b/sound/soc/codecs/tlv320adcx140.c
+@@ -161,7 +161,7 @@ static const struct regmap_config adcx140_i2c_regmap = {
+ };
+
+ /* Digital Volume control. From -100 to 27 dB in 0.5 dB steps */
+-static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10000, 50, 0);
++static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10050, 50, 0);
+
+ /* ADC gain. From 0 to 42 dB in 1 dB steps */
+ static DECLARE_TLV_DB_SCALE(adc_tlv, 0, 100, 0);
+diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
+index 467802875c133..2e2d8e463655a 100644
+--- a/sound/soc/codecs/tlv320aic32x4.c
++++ b/sound/soc/codecs/tlv320aic32x4.c
+@@ -665,7 +665,7 @@ static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
+ }
+
+ static int aic32x4_setup_clocks(struct snd_soc_component *component,
+- unsigned int sample_rate)
++ unsigned int sample_rate, unsigned int channels)
+ {
+ u8 aosr;
+ u16 dosr;
+@@ -753,7 +753,9 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
+ dosr);
+
+ clk_set_rate(clocks[5].clk,
+- sample_rate * 32);
++ sample_rate * 32 *
++ channels);
++
+ return 0;
+ }
+ }
+@@ -775,7 +777,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
+ u8 iface1_reg = 0;
+ u8 dacsetup_reg = 0;
+
+- aic32x4_setup_clocks(component, params_rate(params));
++ aic32x4_setup_clocks(component, params_rate(params),
++ params_channels(params));
+
+ switch (params_width(params)) {
+ case 16:
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 410cca57da52d..344bd2c33bea1 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -2049,6 +2049,7 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ {
+ struct wm_coeff_ctl *ctl;
+ struct snd_kcontrol *kcontrol;
++ char ctl_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ int ret;
+
+ ctl = wm_adsp_get_ctl(dsp, name, type, alg);
+@@ -2059,8 +2060,25 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ return -EINVAL;
+
+ ret = wm_coeff_write_ctrl(ctl, buf, len);
++ if (ret)
++ return ret;
++
++ if (ctl->flags & WMFW_CTL_FLAG_SYS)
++ return 0;
++
++ if (dsp->component->name_prefix)
++ snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s",
++ dsp->component->name_prefix, ctl->name);
++ else
++ snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s",
++ ctl->name);
++
++ kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl_name);
++ if (!kcontrol) {
++ adsp_err(dsp, "Can't find kcontrol %s\n", ctl_name);
++ return -EINVAL;
++ }
+
+- kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl->name);
+ snd_ctl_notify(dsp->component->card->snd_card,
+ SNDRV_CTL_EVENT_MASK_VALUE, &kcontrol->id);
+
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index cdff739924e2e..2ea354dd5434f 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -694,7 +694,7 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
+ return 0;
+ }
+
+-static struct snd_soc_dai_driver fsl_sai_dai = {
++static struct snd_soc_dai_driver fsl_sai_dai_template = {
+ .probe = fsl_sai_dai_probe,
+ .playback = {
+ .stream_name = "CPU-Playback",
+@@ -966,12 +966,15 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ memcpy(&sai->cpu_dai_drv, &fsl_sai_dai_template,
++ sizeof(fsl_sai_dai_template));
++
+ /* Sync Tx with Rx as default by following old DT binding */
+ sai->synchronous[RX] = true;
+ sai->synchronous[TX] = false;
+- fsl_sai_dai.symmetric_rates = 1;
+- fsl_sai_dai.symmetric_channels = 1;
+- fsl_sai_dai.symmetric_samplebits = 1;
++ sai->cpu_dai_drv.symmetric_rates = 1;
++ sai->cpu_dai_drv.symmetric_channels = 1;
++ sai->cpu_dai_drv.symmetric_samplebits = 1;
+
+ if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) &&
+ of_find_property(np, "fsl,sai-asynchronous", NULL)) {
+@@ -988,9 +991,9 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ /* Discard all settings for asynchronous mode */
+ sai->synchronous[RX] = false;
+ sai->synchronous[TX] = false;
+- fsl_sai_dai.symmetric_rates = 0;
+- fsl_sai_dai.symmetric_channels = 0;
+- fsl_sai_dai.symmetric_samplebits = 0;
++ sai->cpu_dai_drv.symmetric_rates = 0;
++ sai->cpu_dai_drv.symmetric_channels = 0;
++ sai->cpu_dai_drv.symmetric_samplebits = 0;
+ }
+
+ if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
+@@ -1020,7 +1023,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ regcache_cache_only(sai->regmap, true);
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+- &fsl_sai_dai, 1);
++ &sai->cpu_dai_drv, 1);
+ if (ret)
+ goto err_pm_disable;
+
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index 6aba7d28f5f34..677ecfc1ec68f 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -180,6 +180,7 @@ struct fsl_sai {
+ unsigned int bclk_ratio;
+
+ const struct fsl_sai_soc_data *soc_data;
++ struct snd_soc_dai_driver cpu_dai_drv;
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
+ struct snd_dmaengine_dai_dma_data dma_params_tx;
+ };
+diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
+index 15a27a2cd0cae..fad1eb6253d53 100644
+--- a/sound/soc/fsl/imx-es8328.c
++++ b/sound/soc/fsl/imx-es8328.c
+@@ -145,13 +145,13 @@ static int imx_es8328_probe(struct platform_device *pdev)
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+- goto fail;
++ goto put_device;
+ }
+
+ comp = devm_kzalloc(dev, 3 * sizeof(*comp), GFP_KERNEL);
+ if (!comp) {
+ ret = -ENOMEM;
+- goto fail;
++ goto put_device;
+ }
+
+ data->dev = dev;
+@@ -182,12 +182,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
+ ret = snd_soc_of_parse_card_name(&data->card, "model");
+ if (ret) {
+ dev_err(dev, "Unable to parse card name\n");
+- goto fail;
++ goto put_device;
+ }
+ ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
+ if (ret) {
+ dev_err(dev, "Unable to parse routing: %d\n", ret);
+- goto fail;
++ goto put_device;
+ }
+ data->card.num_links = 1;
+ data->card.owner = THIS_MODULE;
+@@ -196,10 +196,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
+ ret = snd_soc_register_card(&data->card);
+ if (ret) {
+ dev_err(dev, "Unable to register: %d\n", ret);
+- goto fail;
++ goto put_device;
+ }
+
+ platform_set_drvdata(pdev, data);
++put_device:
++ put_device(&ssi_pdev->dev);
+ fail:
+ of_node_put(ssi_np);
+ of_node_put(codec_np);
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index 0129d23694ed5..9a6f10ede427e 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -119,6 +119,19 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
+ .driver_data = (void *)(SOF_RT5682_MCLK_EN |
+ SOF_RT5682_SSP_CODEC(0)),
+ },
++ {
++ .callback = sof_rt5682_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Terrador"),
++ },
++ .driver_data = (void *)(SOF_RT5682_MCLK_EN |
++ SOF_RT5682_SSP_CODEC(0) |
++ SOF_SPEAKER_AMP_PRESENT |
++ SOF_MAX98373_SPEAKER_AMP_PRESENT |
++ SOF_RT5682_SSP_AMP(2) |
++ SOF_RT5682_NUM_HDMIDEV(4)),
++ },
+ {}
+ };
+
+diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+index 06d0a4f80fc17..a6c690c5308d3 100644
+--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
++++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+@@ -673,7 +673,7 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
+ if (card == &mt8183_da7219_max98357_card) {
+ dai_link->be_hw_params_fixup =
+ mt8183_i2s_hw_params_fixup;
+- dai_link->ops = &mt8183_mt6358_i2s_ops;
++ dai_link->ops = &mt8183_da7219_i2s_ops;
+ dai_link->cpus = i2s3_max98357a_cpus;
+ dai_link->num_cpus =
+ ARRAY_SIZE(i2s3_max98357a_cpus);
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index e00a4af29c13f..f25da84f175ac 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -209,21 +209,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
+ return 0;
+ }
+
+-static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream,
+- struct snd_soc_dai *dai)
+-{
+- struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+- int ret;
+-
+- ret = regmap_write(drvdata->lpaif_map,
+- LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id),
+- 0);
+- if (ret)
+- dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
+-
+- return ret;
+-}
+-
+ static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+ {
+@@ -304,7 +289,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
+ .startup = lpass_cpu_daiops_startup,
+ .shutdown = lpass_cpu_daiops_shutdown,
+ .hw_params = lpass_cpu_daiops_hw_params,
+- .hw_free = lpass_cpu_daiops_hw_free,
+ .prepare = lpass_cpu_daiops_prepare,
+ .trigger = lpass_cpu_daiops_trigger,
+ };
+diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
+index 01179bc0e5e57..e62ac7e650785 100644
+--- a/sound/soc/qcom/lpass-platform.c
++++ b/sound/soc/qcom/lpass-platform.c
+@@ -61,7 +61,7 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
+ int ret, dma_ch, dir = substream->stream;
+ struct lpass_pcm_data *data;
+
+- data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+@@ -118,6 +118,7 @@ static int lpass_platform_pcmops_close(struct snd_soc_component *component,
+ if (v->free_dma_channel)
+ v->free_dma_channel(drvdata, data->dma_ch);
+
++ kfree(data);
+ return 0;
+ }
+
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 5b60379237bff..d1e7dbb9fea36 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -592,6 +592,17 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
+ k->info = snd_soc_bytes_info_ext;
+ k->tlv.c = snd_soc_bytes_tlv_callback;
+
++ /*
++ * When a topology-based implementation abuses the
++ * control interface and uses bytes_ext controls of
++ * more than 512 bytes, we need to disable the size
++ * checks, otherwise accesses to such controls will
++ * return an -EINVAL error and prevent the card from
++ * being configured.
++ */
++ if (IS_ENABLED(CONFIG_SND_CTL_VALIDATION) && sbe->max > 512)
++ k->access |= SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK;
++
+ ext_ops = tplg->bytes_ext_ops;
+ num_ops = tplg->bytes_ext_ops_count;
+ for (i = 0; i < num_ops; i++) {
+diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
+index 186eea105bb15..009938d45ddd9 100644
+--- a/sound/soc/sof/control.c
++++ b/sound/soc/sof/control.c
+@@ -298,6 +298,10 @@ int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ const struct snd_ctl_tlv __user *tlvd =
+ (const struct snd_ctl_tlv __user *)binary_data;
+
++ /* make sure we have at least a header */
++ if (size < sizeof(struct snd_ctl_tlv))
++ return -EINVAL;
++
+ /*
+ * The beginning of bytes data contains a header from where
+ * the length (as bytes) is needed to know the correct copy
+@@ -306,6 +310,13 @@ int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ if (copy_from_user(&header, tlvd, sizeof(const struct snd_ctl_tlv)))
+ return -EFAULT;
+
++ /* make sure TLV info is consistent */
++ if (header.length + sizeof(struct snd_ctl_tlv) > size) {
++ dev_err_ratelimited(scomp->dev, "error: inconsistent TLV, data %d + header %zu > %d\n",
++ header.length, sizeof(struct snd_ctl_tlv), size);
++ return -EINVAL;
++ }
++
+ /* be->max is coming from topology */
+ if (header.length > be->max) {
+ dev_err_ratelimited(scomp->dev, "error: Bytes data size %d exceeds max %d.\n",
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index 63ca920c8e6e0..7152e6d1cf673 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -1179,7 +1179,13 @@ void hda_machine_select(struct snd_sof_dev *sdev)
+
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (mach) {
+- sof_pdata->tplg_filename = mach->sof_tplg_filename;
++ /*
++ * If tplg file name is overridden, use it instead of
++ * the one set in mach table
++ */
++ if (!sof_pdata->tplg_filename)
++ sof_pdata->tplg_filename = mach->sof_tplg_filename;
++
+ sof_pdata->machine = mach;
+
+ if (mach->link_mask) {
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index aa3532ba14349..f3a8140773db5 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -35,8 +35,28 @@ static int sof_pci_debug;
+ module_param_named(sof_pci_debug, sof_pci_debug, int, 0444);
+ MODULE_PARM_DESC(sof_pci_debug, "SOF PCI debug options (0x0 all off)");
+
++static const char *sof_override_tplg_name;
++
+ #define SOF_PCI_DISABLE_PM_RUNTIME BIT(0)
+
++static int sof_tplg_cb(const struct dmi_system_id *id)
++{
++ sof_override_tplg_name = id->driver_data;
++ return 1;
++}
++
++static const struct dmi_system_id sof_tplg_table[] = {
++ {
++ .callback = sof_tplg_cb,
++ .matches = {
++ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Terrador"),
++ },
++ .driver_data = "sof-tgl-rt5682-ssp0-max98373-ssp2.tplg",
++ },
++ {}
++};
++
+ static const struct dmi_system_id community_key_platforms[] = {
+ {
+ .ident = "Up Squared",
+@@ -347,6 +367,10 @@ static int sof_pci_probe(struct pci_dev *pci,
+ sof_pdata->tplg_filename_prefix =
+ sof_pdata->desc->default_tplg_path;
+
++ dmi_check_system(sof_tplg_table);
++ if (sof_override_tplg_name)
++ sof_pdata->tplg_filename = sof_override_tplg_name;
++
+ #if IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ /* set callback to enable runtime_pm */
+ sof_pdata->sof_probe_complete = sof_pci_probe_complete;
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 1b28d01d1f4cd..3bfead393aa34 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -406,6 +406,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
+ case USB_ID(0x0e41, 0x4242): /* Line6 Helix Rack */
+ case USB_ID(0x0e41, 0x4244): /* Line6 Helix LT */
+ case USB_ID(0x0e41, 0x4246): /* Line6 HX-Stomp */
++ case USB_ID(0x0e41, 0x4247): /* Line6 Pod Go */
+ case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
+ case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
+ case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
+diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
+index c1daf4d57518c..3b218fd068b0e 100644
+--- a/tools/build/Makefile.feature
++++ b/tools/build/Makefile.feature
+@@ -38,8 +38,6 @@ FEATURE_TESTS_BASIC := \
+ get_current_dir_name \
+ gettid \
+ glibc \
+- gtk2 \
+- gtk2-infobar \
+ libbfd \
+ libcap \
+ libelf \
+@@ -81,6 +79,8 @@ FEATURE_TESTS_EXTRA := \
+ compile-32 \
+ compile-x32 \
+ cplus-demangle \
++ gtk2 \
++ gtk2-infobar \
+ hello \
+ libbabeltrace \
+ libbfd-liberty \
+@@ -111,7 +111,6 @@ FEATURE_DISPLAY ?= \
+ dwarf \
+ dwarf_getlocations \
+ glibc \
+- gtk2 \
+ libbfd \
+ libcap \
+ libelf \
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index d220fe9527470..8da2556cdbfac 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -90,7 +90,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
+ ###############################
+
+ $(OUTPUT)test-all.bin:
+- $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
++ $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd
+
+ $(OUTPUT)test-hello.bin:
+ $(BUILD)
+diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
+index 5479e543b1947..d2623992ccd61 100644
+--- a/tools/build/feature/test-all.c
++++ b/tools/build/feature/test-all.c
+@@ -78,14 +78,6 @@
+ # include "test-libslang.c"
+ #undef main
+
+-#define main main_test_gtk2
+-# include "test-gtk2.c"
+-#undef main
+-
+-#define main main_test_gtk2_infobar
+-# include "test-gtk2-infobar.c"
+-#undef main
+-
+ #define main main_test_libbfd
+ # include "test-libbfd.c"
+ #undef main
+@@ -205,8 +197,6 @@ int main(int argc, char *argv[])
+ main_test_libelf_getshdrstrndx();
+ main_test_libunwind();
+ main_test_libslang();
+- main_test_gtk2(argc, argv); +- main_test_gtk2_infobar(argc, argv); + main_test_libbfd(); + main_test_backtrace(); + main_test_libnuma(); +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c +index e493d6048143f..edd6f7b7d9b82 100644 +--- a/tools/lib/bpf/libbpf.c ++++ b/tools/lib/bpf/libbpf.c +@@ -3841,6 +3841,36 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) + return 0; + } + ++static int init_map_slots(struct bpf_map *map) ++{ ++ const struct bpf_map *targ_map; ++ unsigned int i; ++ int fd, err; ++ ++ for (i = 0; i < map->init_slots_sz; i++) { ++ if (!map->init_slots[i]) ++ continue; ++ ++ targ_map = map->init_slots[i]; ++ fd = bpf_map__fd(targ_map); ++ err = bpf_map_update_elem(map->fd, &i, &fd, 0); ++ if (err) { ++ err = -errno; ++ pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", ++ map->name, i, targ_map->name, ++ fd, err); ++ return err; ++ } ++ pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", ++ map->name, i, targ_map->name, fd); ++ } ++ ++ zfree(&map->init_slots); ++ map->init_slots_sz = 0; ++ ++ return 0; ++} ++ + static int + bpf_object__create_maps(struct bpf_object *obj) + { +@@ -3883,28 +3913,11 @@ bpf_object__create_maps(struct bpf_object *obj) + } + + if (map->init_slots_sz) { +- for (j = 0; j < map->init_slots_sz; j++) { +- const struct bpf_map *targ_map; +- int fd; +- +- if (!map->init_slots[j]) +- continue; +- +- targ_map = map->init_slots[j]; +- fd = bpf_map__fd(targ_map); +- err = bpf_map_update_elem(map->fd, &j, &fd, 0); +- if (err) { +- err = -errno; +- pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", +- map->name, j, targ_map->name, +- fd, err); +- goto err_out; +- } +- pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", +- map->name, j, targ_map->name, fd); ++ err = init_map_slots(map); ++ if (err < 0) { ++ zclose(map->fd); ++ goto err_out; + } +- zfree(&map->init_slots); +- map->init_slots_sz = 0; + } + + if (map->pin_path && !map->pinned) { +@@ -5425,7 +5438,7 @@ retry_load: + free(log_buf); + goto retry_load; + } +- ret = -errno; ++ ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); + pr_warn("load bpf program failed: %s\n", cp); + pr_perm_msg(ret); +diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c +index 2208444ecb448..cfcdbd7be066e 100644 +--- a/tools/lib/perf/evlist.c ++++ b/tools/lib/perf/evlist.c +@@ -45,6 +45,9 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, + if (!evsel->own_cpus || evlist->has_user_cpus) { + perf_cpu_map__put(evsel->cpus); + evsel->cpus = perf_cpu_map__get(evlist->cpus); ++ } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) { ++ perf_cpu_map__put(evsel->cpus); ++ evsel->cpus = perf_cpu_map__get(evlist->cpus); + } else if (evsel->cpus != evsel->own_cpus) { + perf_cpu_map__put(evsel->cpus); + evsel->cpus = perf_cpu_map__get(evsel->own_cpus); +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config +index 190be4fa5c218..2d6690b308564 100644 +--- a/tools/perf/Makefile.config ++++ b/tools/perf/Makefile.config +@@ -724,12 +724,14 @@ ifndef NO_SLANG + endif + endif + +-ifndef NO_GTK2 ++ifdef GTK2 + FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) ++ $(call feature_check,gtk2) + ifneq ($(feature-gtk2), 1) + msg := $(warning GTK2 not found, disables GTK2 support. 
Please install gtk2-devel or libgtk2.0-dev); + NO_GTK2 := 1 + else ++ $(call feature_check,gtk2-infobar) + ifeq ($(feature-gtk2-infobar), 1) + GTK_CFLAGS := -DHAVE_GTK_INFO_BAR_SUPPORT + endif +diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf +index 6031167939ae6..515e6ed635f1a 100644 +--- a/tools/perf/Makefile.perf ++++ b/tools/perf/Makefile.perf +@@ -48,7 +48,7 @@ include ../scripts/utilities.mak + # + # Define NO_SLANG if you do not want TUI support. + # +-# Define NO_GTK2 if you do not want GTK+ GUI support. ++# Define GTK2 if you want GTK+ GUI support. + # + # Define NO_DEMANGLE if you do not want C++ symbol demangling. + # +@@ -386,7 +386,7 @@ ifneq ($(OUTPUT),) + CFLAGS += -I$(OUTPUT) + endif + +-ifndef NO_GTK2 ++ifdef GTK2 + ALL_PROGRAMS += $(OUTPUT)libperf-gtk.so + GTK_IN := $(OUTPUT)gtk-in.o + endif +@@ -886,7 +886,7 @@ check: $(OUTPUT)common-cmds.h + + ### Installation rules + +-ifndef NO_GTK2 ++ifdef GTK2 + install-gtk: $(OUTPUT)libperf-gtk.so + $(call QUIET_INSTALL, 'GTK UI') \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \ +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c +index fddc97cac9841..eef64b1411a4a 100644 +--- a/tools/perf/builtin-stat.c ++++ b/tools/perf/builtin-stat.c +@@ -2063,8 +2063,10 @@ static void setup_system_wide(int forks) + struct evsel *counter; + + evlist__for_each_entry(evsel_list, counter) { +- if (!counter->core.system_wide) ++ if (!counter->core.system_wide && ++ strcmp(counter->name, "duration_time")) { + return; ++ } + } + + if (evsel_list->core.nr_entries) +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c +index bea461b6f937b..44a75f234db17 100644 +--- a/tools/perf/builtin-trace.c ++++ b/tools/perf/builtin-trace.c +@@ -1762,7 +1762,11 @@ static int trace__read_syscall_info(struct trace *trace, int id) + if (table == NULL) + return -ENOMEM; + +- memset(table + trace->sctbl->syscalls.max_id, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc)); ++ // Need to memset from offset 0 and +1 members if brand new ++ if (trace->syscalls.table == NULL) ++ memset(table, 0, (id + 1) * sizeof(*sc)); ++ else ++ memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc)); + + trace->syscalls.table = table; + trace->sctbl->syscalls.max_id = id; +diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c +index 05cf2af9e2c27..d09ec2f030719 100644 +--- a/tools/perf/builtin-version.c ++++ b/tools/perf/builtin-version.c +@@ -60,7 +60,6 @@ static void library_status(void) + STATUS(HAVE_DWARF_SUPPORT, dwarf); + STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations); + STATUS(HAVE_GLIBC_SUPPORT, glibc); +- STATUS(HAVE_GTK2_SUPPORT, gtk2); + #ifndef HAVE_SYSCALL_TABLE_SUPPORT + STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit); + #endif +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c +index 0af4e81c46e2b..3a0348caec7d6 100644 +--- a/tools/perf/util/intel-pt.c ++++ b/tools/perf/util/intel-pt.c +@@ -1101,6 +1101,8 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt, + + if (queue->tid == -1 || pt->have_sched_switch) { + ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu); ++ if (ptq->tid == -1) ++ ptq->pid = -1; + thread__zput(ptq->thread); + } + +@@ -2603,10 +2605,8 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, + tid = sample->tid; + } + +- if (tid == -1) { +- pr_err("context_switch event has no tid\n"); +- return -EINVAL; +- } ++ if (tid == -1) ++ intel_pt_log("context_switch event 
has no tid\n"); + + ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); + if (ret <= 0) +diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c +index ab5030fcfed4e..d948a7f910cfa 100644 +--- a/tools/perf/util/metricgroup.c ++++ b/tools/perf/util/metricgroup.c +@@ -150,6 +150,18 @@ static void expr_ids__exit(struct expr_ids *ids) + free(ids->id[i].id); + } + ++static bool contains_event(struct evsel **metric_events, int num_events, ++ const char *event_name) ++{ ++ int i; ++ ++ for (i = 0; i < num_events; i++) { ++ if (!strcmp(metric_events[i]->name, event_name)) ++ return true; ++ } ++ return false; ++} ++ + /** + * Find a group of events in perf_evlist that correpond to those from a parsed + * metric expression. Note, as find_evsel_group is called in the same order as +@@ -180,7 +192,11 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, + int i = 0, matched_events = 0, events_to_match; + const int idnum = (int)hashmap__size(&pctx->ids); + +- /* duration_time is grouped separately. */ ++ /* ++ * duration_time is always grouped separately, when events are grouped ++ * (ie has_constraint is false) then ignore it in the matching loop and ++ * add it to metric_events at the end. ++ */ + if (!has_constraint && + hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr)) + events_to_match = idnum - 1; +@@ -207,23 +223,20 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, + sizeof(struct evsel *) * idnum); + current_leader = ev->leader; + } +- if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr)) { +- if (has_constraint) { +- /* +- * Events aren't grouped, ensure the same event +- * isn't matched from two groups. +- */ +- for (i = 0; i < matched_events; i++) { +- if (!strcmp(ev->name, +- metric_events[i]->name)) { +- break; +- } +- } +- if (i != matched_events) +- continue; +- } ++ /* ++ * Check for duplicate events with the same name. For example, ++ * uncore_imc/cas_count_read/ will turn into 6 events per socket ++ * on skylakex. Only the first such event is placed in ++ * metric_events. If events aren't grouped then this also ++ * ensures that the same event in different sibling groups ++ * aren't both added to metric_events. ++ */ ++ if (contains_event(metric_events, matched_events, ev->name)) ++ continue; ++ /* Does this event belong to the parse context? */ ++ if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr)) + metric_events[matched_events++] = ev; +- } ++ + if (matched_events == events_to_match) + break; + } +@@ -239,7 +252,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, + } + + if (matched_events != idnum) { +- /* Not whole match */ ++ /* Not a whole match */ + return NULL; + } + +@@ -247,8 +260,32 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, + + for (i = 0; i < idnum; i++) { + ev = metric_events[i]; +- ev->metric_leader = ev; ++ /* Don't free the used events. */ + set_bit(ev->idx, evlist_used); ++ /* ++ * The metric leader points to the identically named event in ++ * metric_events. ++ */ ++ ev->metric_leader = ev; ++ /* ++ * Mark two events with identical names in the same group (or ++ * globally) as being in use as uncore events may be duplicated ++ * for each pmu. Set the metric leader of such events to be the ++ * event that appears in metric_events. ++ */ ++ evlist__for_each_entry_continue(perf_evlist, ev) { ++ /* ++ * If events are grouped then the search can terminate ++ * when then group is left. 
++ */ ++ if (!has_constraint && ++ ev->leader != metric_events[i]->leader) ++ break; ++ if (!strcmp(metric_events[i]->name, ev->name)) { ++ set_bit(ev->idx, evlist_used); ++ ev->metric_leader = metric_events[i]; ++ } ++ } + } + + return metric_events[0]; +diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py +index 46ff97e909c6f..1bc36a1db14f6 100755 +--- a/tools/power/pm-graph/sleepgraph.py ++++ b/tools/power/pm-graph/sleepgraph.py +@@ -171,7 +171,7 @@ class SystemValues: + tracefuncs = { + 'sys_sync': {}, + 'ksys_sync': {}, +- '__pm_notifier_call_chain': {}, ++ 'pm_notifier_call_chain_robust': {}, + 'pm_prepare_console': {}, + 'pm_notifier_call_chain': {}, + 'freeze_processes': {}, +diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c +index 8995092d541ec..3b796dd5e5772 100644 +--- a/tools/testing/radix-tree/idr-test.c ++++ b/tools/testing/radix-tree/idr-test.c +@@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg) + return NULL; + } + ++static void *ida_leak_fn(void *arg) ++{ ++ struct ida *ida = arg; ++ time_t s = time(NULL); ++ int i, ret; ++ ++ rcu_register_thread(); ++ ++ do for (i = 0; i < 1000; i++) { ++ ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL); ++ if (ret >= 0) ++ ida_free(ida, 128); ++ } while (time(NULL) < s + 2); ++ ++ rcu_unregister_thread(); ++ return NULL; ++} ++ + void ida_thread_tests(void) + { ++ DEFINE_IDA(ida); + pthread_t threads[20]; + int i; + +@@ -536,6 +555,16 @@ void ida_thread_tests(void) + + while (i--) + pthread_join(threads[i], NULL); ++ ++ for (i = 0; i < ARRAY_SIZE(threads); i++) ++ if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) { ++ perror("creating ida thread"); ++ exit(1); ++ } ++ ++ while (i--) ++ pthread_join(threads[i], NULL); ++ assert(ida_is_empty(&ida)); + } + + void ida_tests(void) +diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c +index 944ad4721c83c..da14eaac71d03 100644 +--- a/tools/testing/selftests/bpf/bench.c ++++ b/tools/testing/selftests/bpf/bench.c +@@ -311,7 +311,6 @@ extern const struct bench bench_rename_kretprobe; + extern const struct bench bench_rename_rawtp; + extern const struct bench bench_rename_fentry; + extern const struct bench bench_rename_fexit; +-extern const struct bench bench_rename_fmodret; + extern const struct bench bench_trig_base; + extern const struct bench bench_trig_tp; + extern const struct bench bench_trig_rawtp; +@@ -332,7 +331,6 @@ static const struct bench *benchs[] = { + &bench_rename_rawtp, + &bench_rename_fentry, + &bench_rename_fexit, +- &bench_rename_fmodret, + &bench_trig_base, + &bench_trig_tp, + &bench_trig_rawtp, +@@ -462,4 +460,3 @@ int main(int argc, char **argv) + + return 0; + } +- +diff --git a/tools/testing/selftests/bpf/benchs/bench_rename.c b/tools/testing/selftests/bpf/benchs/bench_rename.c +index e74cff40f4fea..a967674098ada 100644 +--- a/tools/testing/selftests/bpf/benchs/bench_rename.c ++++ b/tools/testing/selftests/bpf/benchs/bench_rename.c +@@ -106,12 +106,6 @@ static void setup_fexit() + attach_bpf(ctx.skel->progs.prog5); + } + +-static void setup_fmodret() +-{ +- setup_ctx(); +- attach_bpf(ctx.skel->progs.prog6); +-} +- + static void *consumer(void *input) + { + return NULL; +@@ -182,14 +176,3 @@ const struct bench bench_rename_fexit = { + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, + }; +- +-const struct bench bench_rename_fmodret = { +- .name = "rename-fmodret", +- .validate = validate, +- .setup = 
setup_fmodret, +- .producer_thread = producer, +- .consumer_thread = consumer, +- .measure = measure, +- .report_progress = hits_drops_report_progress, +- .report_final = hits_drops_report_final, +-}; +diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c +index 47fa04adc1471..21c2d265c3e8e 100644 +--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c ++++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c +@@ -265,7 +265,7 @@ void test_sk_assign(void) + TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false), + TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true), + }; +- int server = -1; ++ __s64 server = -1; + int server_map; + int self_net; + +diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +index 5f54c6aec7f07..b25c9c45c1484 100644 +--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c ++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +@@ -45,9 +45,9 @@ static int getsetsockopt(void) + goto err; + } + +- if (*(int *)big_buf != 0x08) { ++ if (*big_buf != 0x08) { + log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08", +- *(int *)big_buf); ++ (int)*big_buf); + goto err; + } + +diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c +index 2702df2b23433..9966685866fdf 100644 +--- a/tools/testing/selftests/bpf/prog_tests/test_overhead.c ++++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c +@@ -61,10 +61,9 @@ void test_test_overhead(void) + const char *raw_tp_name = "raw_tp/task_rename"; + const char *fentry_name = "fentry/__set_task_comm"; + const char *fexit_name = "fexit/__set_task_comm"; +- const char *fmodret_name = "fmod_ret/__set_task_comm"; + const char *kprobe_func = "__set_task_comm"; + struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog; +- struct bpf_program *fentry_prog, *fexit_prog, *fmodret_prog; ++ struct bpf_program *fentry_prog, *fexit_prog; + struct bpf_object *obj; + struct bpf_link *link; + int err, duration = 0; +@@ -97,11 +96,6 @@ void test_test_overhead(void) + if (CHECK(!fexit_prog, "find_probe", + "prog '%s' not found\n", fexit_name)) + goto cleanup; +- fmodret_prog = bpf_object__find_program_by_title(obj, fmodret_name); +- if (CHECK(!fmodret_prog, "find_probe", +- "prog '%s' not found\n", fmodret_name)) +- goto cleanup; +- + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto cleanup; +@@ -148,12 +142,6 @@ void test_test_overhead(void) + test_run("fexit"); + bpf_link__destroy(link); + +- /* attach fmod_ret */ +- link = bpf_program__attach_trace(fmodret_prog); +- if (CHECK(IS_ERR(link), "attach fmod_ret", "err %ld\n", PTR_ERR(link))) +- goto cleanup; +- test_run("fmod_ret"); +- bpf_link__destroy(link); + cleanup: + prctl(PR_SET_NAME, comm, 0L, 0L, 0L); + bpf_object__close(obj); +diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c +index 42403d088abc9..abb7344b531f4 100644 +--- a/tools/testing/selftests/bpf/progs/test_overhead.c ++++ b/tools/testing/selftests/bpf/progs/test_overhead.c +@@ -39,10 +39,4 @@ int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec) + return 0; + } + +-SEC("fmod_ret/__set_task_comm") +-int BPF_PROG(prog6, struct task_struct *tsk, const char *buf, bool exec) +-{ +- return !tsk; +-} +- + char _license[] SEC("license") = "GPL"; +diff --git 
a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c +index bbf8296f4d663..1032b292af5b7 100644 +--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c ++++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c +@@ -19,6 +19,17 @@ + #define IP6(aaaa, bbbb, cccc, dddd) \ + { bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) } + ++/* Macros for least-significant byte and word accesses. */ ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ++#define LSE_INDEX(index, size) (index) ++#else ++#define LSE_INDEX(index, size) ((size) - (index) - 1) ++#endif ++#define LSB(value, index) \ ++ (((__u8 *)&(value))[LSE_INDEX((index), sizeof(value))]) ++#define LSW(value, index) \ ++ (((__u16 *)&(value))[LSE_INDEX((index), sizeof(value) / 2)]) ++ + #define MAX_SOCKS 32 + + struct { +@@ -369,171 +380,146 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx) + { + struct bpf_sock *sk; + int err, family; +- __u16 *half; +- __u8 *byte; + bool v4; + + v4 = (ctx->family == AF_INET); + + /* Narrow loads from family field */ +- byte = (__u8 *)&ctx->family; +- half = (__u16 *)&ctx->family; +- if (byte[0] != (v4 ? AF_INET : AF_INET6) || +- byte[1] != 0 || byte[2] != 0 || byte[3] != 0) ++ if (LSB(ctx->family, 0) != (v4 ? AF_INET : AF_INET6) || ++ LSB(ctx->family, 1) != 0 || LSB(ctx->family, 2) != 0 || LSB(ctx->family, 3) != 0) + return SK_DROP; +- if (half[0] != (v4 ? AF_INET : AF_INET6)) ++ if (LSW(ctx->family, 0) != (v4 ? AF_INET : AF_INET6)) + return SK_DROP; + +- byte = (__u8 *)&ctx->protocol; +- if (byte[0] != IPPROTO_TCP || +- byte[1] != 0 || byte[2] != 0 || byte[3] != 0) ++ /* Narrow loads from protocol field */ ++ if (LSB(ctx->protocol, 0) != IPPROTO_TCP || ++ LSB(ctx->protocol, 1) != 0 || LSB(ctx->protocol, 2) != 0 || LSB(ctx->protocol, 3) != 0) + return SK_DROP; +- half = (__u16 *)&ctx->protocol; +- if (half[0] != IPPROTO_TCP) ++ if (LSW(ctx->protocol, 0) != IPPROTO_TCP) + return SK_DROP; + + /* Narrow loads from remote_port field. Expect non-0 value. */ +- byte = (__u8 *)&ctx->remote_port; +- if (byte[0] == 0 && byte[1] == 0 && byte[2] == 0 && byte[3] == 0) ++ if (LSB(ctx->remote_port, 0) == 0 && LSB(ctx->remote_port, 1) == 0 && ++ LSB(ctx->remote_port, 2) == 0 && LSB(ctx->remote_port, 3) == 0) + return SK_DROP; +- half = (__u16 *)&ctx->remote_port; +- if (half[0] == 0) ++ if (LSW(ctx->remote_port, 0) == 0) + return SK_DROP; + + /* Narrow loads from local_port field. Expect DST_PORT. 
*/ +- byte = (__u8 *)&ctx->local_port; +- if (byte[0] != ((DST_PORT >> 0) & 0xff) || +- byte[1] != ((DST_PORT >> 8) & 0xff) || +- byte[2] != 0 || byte[3] != 0) ++ if (LSB(ctx->local_port, 0) != ((DST_PORT >> 0) & 0xff) || ++ LSB(ctx->local_port, 1) != ((DST_PORT >> 8) & 0xff) || ++ LSB(ctx->local_port, 2) != 0 || LSB(ctx->local_port, 3) != 0) + return SK_DROP; +- half = (__u16 *)&ctx->local_port; +- if (half[0] != DST_PORT) ++ if (LSW(ctx->local_port, 0) != DST_PORT) + return SK_DROP; + + /* Narrow loads from IPv4 fields */ + if (v4) { + /* Expect non-0.0.0.0 in remote_ip4 */ +- byte = (__u8 *)&ctx->remote_ip4; +- if (byte[0] == 0 && byte[1] == 0 && +- byte[2] == 0 && byte[3] == 0) ++ if (LSB(ctx->remote_ip4, 0) == 0 && LSB(ctx->remote_ip4, 1) == 0 && ++ LSB(ctx->remote_ip4, 2) == 0 && LSB(ctx->remote_ip4, 3) == 0) + return SK_DROP; +- half = (__u16 *)&ctx->remote_ip4; +- if (half[0] == 0 && half[1] == 0) ++ if (LSW(ctx->remote_ip4, 0) == 0 && LSW(ctx->remote_ip4, 1) == 0) + return SK_DROP; + + /* Expect DST_IP4 in local_ip4 */ +- byte = (__u8 *)&ctx->local_ip4; +- if (byte[0] != ((DST_IP4 >> 0) & 0xff) || +- byte[1] != ((DST_IP4 >> 8) & 0xff) || +- byte[2] != ((DST_IP4 >> 16) & 0xff) || +- byte[3] != ((DST_IP4 >> 24) & 0xff)) ++ if (LSB(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xff) || ++ LSB(ctx->local_ip4, 1) != ((DST_IP4 >> 8) & 0xff) || ++ LSB(ctx->local_ip4, 2) != ((DST_IP4 >> 16) & 0xff) || ++ LSB(ctx->local_ip4, 3) != ((DST_IP4 >> 24) & 0xff)) + return SK_DROP; +- half = (__u16 *)&ctx->local_ip4; +- if (half[0] != ((DST_IP4 >> 0) & 0xffff) || +- half[1] != ((DST_IP4 >> 16) & 0xffff)) ++ if (LSW(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xffff) || ++ LSW(ctx->local_ip4, 1) != ((DST_IP4 >> 16) & 0xffff)) + return SK_DROP; + } else { + /* Expect 0.0.0.0 IPs when family != AF_INET */ +- byte = (__u8 *)&ctx->remote_ip4; +- if (byte[0] != 0 || byte[1] != 0 && +- byte[2] != 0 || byte[3] != 0) ++ if (LSB(ctx->remote_ip4, 0) != 0 || LSB(ctx->remote_ip4, 1) != 0 || ++ LSB(ctx->remote_ip4, 2) != 0 || LSB(ctx->remote_ip4, 3) != 0) + return SK_DROP; +- half = (__u16 *)&ctx->remote_ip4; +- if (half[0] != 0 || half[1] != 0) ++ if (LSW(ctx->remote_ip4, 0) != 0 || LSW(ctx->remote_ip4, 1) != 0) + return SK_DROP; + +- byte = (__u8 *)&ctx->local_ip4; +- if (byte[0] != 0 || byte[1] != 0 && +- byte[2] != 0 || byte[3] != 0) ++ if (LSB(ctx->local_ip4, 0) != 0 || LSB(ctx->local_ip4, 1) != 0 || ++ LSB(ctx->local_ip4, 2) != 0 || LSB(ctx->local_ip4, 3) != 0) + return SK_DROP; +- half = (__u16 *)&ctx->local_ip4; +- if (half[0] != 0 || half[1] != 0) ++ if (LSW(ctx->local_ip4, 0) != 0 || LSW(ctx->local_ip4, 1) != 0) + return SK_DROP; + } + + /* Narrow loads from IPv6 fields */ + if (!v4) { +- /* Expenct non-:: IP in remote_ip6 */ +- byte = (__u8 *)&ctx->remote_ip6; +- if (byte[0] == 0 && byte[1] == 0 && +- byte[2] == 0 && byte[3] == 0 && +- byte[4] == 0 && byte[5] == 0 && +- byte[6] == 0 && byte[7] == 0 && +- byte[8] == 0 && byte[9] == 0 && +- byte[10] == 0 && byte[11] == 0 && +- byte[12] == 0 && byte[13] == 0 && +- byte[14] == 0 && byte[15] == 0) ++ /* Expect non-:: IP in remote_ip6 */ ++ if (LSB(ctx->remote_ip6[0], 0) == 0 && LSB(ctx->remote_ip6[0], 1) == 0 && ++ LSB(ctx->remote_ip6[0], 2) == 0 && LSB(ctx->remote_ip6[0], 3) == 0 && ++ LSB(ctx->remote_ip6[1], 0) == 0 && LSB(ctx->remote_ip6[1], 1) == 0 && ++ LSB(ctx->remote_ip6[1], 2) == 0 && LSB(ctx->remote_ip6[1], 3) == 0 && ++ LSB(ctx->remote_ip6[2], 0) == 0 && LSB(ctx->remote_ip6[2], 1) == 0 && ++ LSB(ctx->remote_ip6[2], 2) == 0 && LSB(ctx->remote_ip6[2], 3) == 
0 && ++ LSB(ctx->remote_ip6[3], 0) == 0 && LSB(ctx->remote_ip6[3], 1) == 0 && ++ LSB(ctx->remote_ip6[3], 2) == 0 && LSB(ctx->remote_ip6[3], 3) == 0) + return SK_DROP; +- half = (__u16 *)&ctx->remote_ip6; +- if (half[0] == 0 && half[1] == 0 && +- half[2] == 0 && half[3] == 0 && +- half[4] == 0 && half[5] == 0 && +- half[6] == 0 && half[7] == 0) ++ if (LSW(ctx->remote_ip6[0], 0) == 0 && LSW(ctx->remote_ip6[0], 1) == 0 && ++ LSW(ctx->remote_ip6[1], 0) == 0 && LSW(ctx->remote_ip6[1], 1) == 0 && ++ LSW(ctx->remote_ip6[2], 0) == 0 && LSW(ctx->remote_ip6[2], 1) == 0 && ++ LSW(ctx->remote_ip6[3], 0) == 0 && LSW(ctx->remote_ip6[3], 1) == 0) + return SK_DROP; +- + /* Expect DST_IP6 in local_ip6 */ +- byte = (__u8 *)&ctx->local_ip6; +- if (byte[0] != ((DST_IP6[0] >> 0) & 0xff) || +- byte[1] != ((DST_IP6[0] >> 8) & 0xff) || +- byte[2] != ((DST_IP6[0] >> 16) & 0xff) || +- byte[3] != ((DST_IP6[0] >> 24) & 0xff) || +- byte[4] != ((DST_IP6[1] >> 0) & 0xff) || +- byte[5] != ((DST_IP6[1] >> 8) & 0xff) || +- byte[6] != ((DST_IP6[1] >> 16) & 0xff) || +- byte[7] != ((DST_IP6[1] >> 24) & 0xff) || +- byte[8] != ((DST_IP6[2] >> 0) & 0xff) || +- byte[9] != ((DST_IP6[2] >> 8) & 0xff) || +- byte[10] != ((DST_IP6[2] >> 16) & 0xff) || +- byte[11] != ((DST_IP6[2] >> 24) & 0xff) || +- byte[12] != ((DST_IP6[3] >> 0) & 0xff) || +- byte[13] != ((DST_IP6[3] >> 8) & 0xff) || +- byte[14] != ((DST_IP6[3] >> 16) & 0xff) || +- byte[15] != ((DST_IP6[3] >> 24) & 0xff)) ++ if (LSB(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xff) || ++ LSB(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 8) & 0xff) || ++ LSB(ctx->local_ip6[0], 2) != ((DST_IP6[0] >> 16) & 0xff) || ++ LSB(ctx->local_ip6[0], 3) != ((DST_IP6[0] >> 24) & 0xff) || ++ LSB(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xff) || ++ LSB(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 8) & 0xff) || ++ LSB(ctx->local_ip6[1], 2) != ((DST_IP6[1] >> 16) & 0xff) || ++ LSB(ctx->local_ip6[1], 3) != ((DST_IP6[1] >> 24) & 0xff) || ++ LSB(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xff) || ++ LSB(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 8) & 0xff) || ++ LSB(ctx->local_ip6[2], 2) != ((DST_IP6[2] >> 16) & 0xff) || ++ LSB(ctx->local_ip6[2], 3) != ((DST_IP6[2] >> 24) & 0xff) || ++ LSB(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xff) || ++ LSB(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 8) & 0xff) || ++ LSB(ctx->local_ip6[3], 2) != ((DST_IP6[3] >> 16) & 0xff) || ++ LSB(ctx->local_ip6[3], 3) != ((DST_IP6[3] >> 24) & 0xff)) + return SK_DROP; +- half = (__u16 *)&ctx->local_ip6; +- if (half[0] != ((DST_IP6[0] >> 0) & 0xffff) || +- half[1] != ((DST_IP6[0] >> 16) & 0xffff) || +- half[2] != ((DST_IP6[1] >> 0) & 0xffff) || +- half[3] != ((DST_IP6[1] >> 16) & 0xffff) || +- half[4] != ((DST_IP6[2] >> 0) & 0xffff) || +- half[5] != ((DST_IP6[2] >> 16) & 0xffff) || +- half[6] != ((DST_IP6[3] >> 0) & 0xffff) || +- half[7] != ((DST_IP6[3] >> 16) & 0xffff)) ++ if (LSW(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xffff) || ++ LSW(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 16) & 0xffff) || ++ LSW(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xffff) || ++ LSW(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 16) & 0xffff) || ++ LSW(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xffff) || ++ LSW(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 16) & 0xffff) || ++ LSW(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xffff) || ++ LSW(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 16) & 0xffff)) + return SK_DROP; + } else { + /* Expect :: IPs when family != AF_INET6 */ +- byte = (__u8 *)&ctx->remote_ip6; +- if (byte[0] != 0 || byte[1] != 0 || +- 
byte[2] != 0 || byte[3] != 0 || +- byte[4] != 0 || byte[5] != 0 || +- byte[6] != 0 || byte[7] != 0 || +- byte[8] != 0 || byte[9] != 0 || +- byte[10] != 0 || byte[11] != 0 || +- byte[12] != 0 || byte[13] != 0 || +- byte[14] != 0 || byte[15] != 0) ++ if (LSB(ctx->remote_ip6[0], 0) != 0 || LSB(ctx->remote_ip6[0], 1) != 0 || ++ LSB(ctx->remote_ip6[0], 2) != 0 || LSB(ctx->remote_ip6[0], 3) != 0 || ++ LSB(ctx->remote_ip6[1], 0) != 0 || LSB(ctx->remote_ip6[1], 1) != 0 || ++ LSB(ctx->remote_ip6[1], 2) != 0 || LSB(ctx->remote_ip6[1], 3) != 0 || ++ LSB(ctx->remote_ip6[2], 0) != 0 || LSB(ctx->remote_ip6[2], 1) != 0 || ++ LSB(ctx->remote_ip6[2], 2) != 0 || LSB(ctx->remote_ip6[2], 3) != 0 || ++ LSB(ctx->remote_ip6[3], 0) != 0 || LSB(ctx->remote_ip6[3], 1) != 0 || ++ LSB(ctx->remote_ip6[3], 2) != 0 || LSB(ctx->remote_ip6[3], 3) != 0) + return SK_DROP; +- half = (__u16 *)&ctx->remote_ip6; +- if (half[0] != 0 || half[1] != 0 || +- half[2] != 0 || half[3] != 0 || +- half[4] != 0 || half[5] != 0 || +- half[6] != 0 || half[7] != 0) ++ if (LSW(ctx->remote_ip6[0], 0) != 0 || LSW(ctx->remote_ip6[0], 1) != 0 || ++ LSW(ctx->remote_ip6[1], 0) != 0 || LSW(ctx->remote_ip6[1], 1) != 0 || ++ LSW(ctx->remote_ip6[2], 0) != 0 || LSW(ctx->remote_ip6[2], 1) != 0 || ++ LSW(ctx->remote_ip6[3], 0) != 0 || LSW(ctx->remote_ip6[3], 1) != 0) + return SK_DROP; + +- byte = (__u8 *)&ctx->local_ip6; +- if (byte[0] != 0 || byte[1] != 0 || +- byte[2] != 0 || byte[3] != 0 || +- byte[4] != 0 || byte[5] != 0 || +- byte[6] != 0 || byte[7] != 0 || +- byte[8] != 0 || byte[9] != 0 || +- byte[10] != 0 || byte[11] != 0 || +- byte[12] != 0 || byte[13] != 0 || +- byte[14] != 0 || byte[15] != 0) ++ if (LSB(ctx->local_ip6[0], 0) != 0 || LSB(ctx->local_ip6[0], 1) != 0 || ++ LSB(ctx->local_ip6[0], 2) != 0 || LSB(ctx->local_ip6[0], 3) != 0 || ++ LSB(ctx->local_ip6[1], 0) != 0 || LSB(ctx->local_ip6[1], 1) != 0 || ++ LSB(ctx->local_ip6[1], 2) != 0 || LSB(ctx->local_ip6[1], 3) != 0 || ++ LSB(ctx->local_ip6[2], 0) != 0 || LSB(ctx->local_ip6[2], 1) != 0 || ++ LSB(ctx->local_ip6[2], 2) != 0 || LSB(ctx->local_ip6[2], 3) != 0 || ++ LSB(ctx->local_ip6[3], 0) != 0 || LSB(ctx->local_ip6[3], 1) != 0 || ++ LSB(ctx->local_ip6[3], 2) != 0 || LSB(ctx->local_ip6[3], 3) != 0) + return SK_DROP; +- half = (__u16 *)&ctx->local_ip6; +- if (half[0] != 0 || half[1] != 0 || +- half[2] != 0 || half[3] != 0 || +- half[4] != 0 || half[5] != 0 || +- half[6] != 0 || half[7] != 0) ++ if (LSW(ctx->remote_ip6[0], 0) != 0 || LSW(ctx->remote_ip6[0], 1) != 0 || ++ LSW(ctx->remote_ip6[1], 0) != 0 || LSW(ctx->remote_ip6[1], 1) != 0 || ++ LSW(ctx->remote_ip6[2], 0) != 0 || LSW(ctx->remote_ip6[2], 1) != 0 || ++ LSW(ctx->remote_ip6[3], 0) != 0 || LSW(ctx->remote_ip6[3], 1) != 0) + return SK_DROP; + } + +diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c +index 458b0d69133e4..553a282d816ab 100644 +--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c ++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c +@@ -18,11 +18,11 @@ + #define MAX_ULONG_STR_LEN 7 + #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN) + ++const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string"; + static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) + { +- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string"; + unsigned char i; +- char name[64]; ++ char name[sizeof(tcp_mem_name)]; + int ret; + + memset(name, 0, sizeof(name)); +diff --git 
a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c +index b2e6f9b0894d8..2b64bc563a12e 100644 +--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c ++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c +@@ -18,11 +18,11 @@ + #define MAX_ULONG_STR_LEN 7 + #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN) + ++const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop"; + static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx) + { +- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop"; + unsigned char i; +- char name[64]; ++ char name[sizeof(tcp_mem_name)]; + int ret; + + memset(name, 0, sizeof(name)); +diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c +index 29fa09d6a6c6d..e9dfa0313d1bb 100644 +--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c ++++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c +@@ -19,12 +19,14 @@ SEC("tp/syscalls/sys_enter_nanosleep") + int handle__tp(struct trace_event_raw_sys_enter *args) + { + struct __kernel_timespec *ts; ++ long tv_nsec; + + if (args->id != __NR_nanosleep) + return 0; + + ts = (void *)args->args[0]; +- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC) ++ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) || ++ tv_nsec != MY_TV_NSEC) + return 0; + + tp_called = true; +@@ -35,12 +37,14 @@ SEC("raw_tp/sys_enter") + int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id) + { + struct __kernel_timespec *ts; ++ long tv_nsec; + + if (id != __NR_nanosleep) + return 0; + + ts = (void *)PT_REGS_PARM1_CORE(regs); +- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC) ++ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) || ++ tv_nsec != MY_TV_NSEC) + return 0; + + raw_tp_called = true; +@@ -51,12 +55,14 @@ SEC("tp_btf/sys_enter") + int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id) + { + struct __kernel_timespec *ts; ++ long tv_nsec; + + if (id != __NR_nanosleep) + return 0; + + ts = (void *)PT_REGS_PARM1_CORE(regs); +- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC) ++ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) || ++ tv_nsec != MY_TV_NSEC) + return 0; + + tp_btf_called = true; +diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc +index 7449a4b8f1f9a..9098f1e7433fd 100644 +--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc ++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc +@@ -25,12 +25,12 @@ echo 'wakeup_latency u64 lat pid_t pid' >> synthetic_events + echo 'hist:keys=pid:ts1=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger + echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts1:onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,next_pid) if next_comm=="ping"' > events/sched/sched_switch/trigger + +-echo 'waking+wakeup_latency u64 lat; pid_t pid' >> synthetic_events +-echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking+wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger +-echo 'hist:keys=pid,lat:sort=pid,lat' >> 
events/synthetic/waking+wakeup_latency/trigger ++echo 'waking_plus_wakeup_latency u64 lat; pid_t pid' >> synthetic_events ++echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking_plus_wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger ++echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking_plus_wakeup_latency/trigger + + ping $LOCALHOST -c 3 +-if ! grep -q "pid:" events/synthetic/waking+wakeup_latency/hist; then ++if ! grep -q "pid:" events/synthetic/waking_plus_wakeup_latency/hist; then + fail "Failed to create combined histogram" + fi + +diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh +index 1aba83c87ad32..846c7ed71556f 100644 +--- a/tools/testing/selftests/livepatch/functions.sh ++++ b/tools/testing/selftests/livepatch/functions.sh +@@ -278,7 +278,7 @@ function check_result { + # help differentiate repeated testing runs. Remove them with a + # post-comparison sed filter. + +- result=$(dmesg | comm -13 "$SAVED_DMESG" - | \ ++ result=$(dmesg | comm --nocheck-order -13 "$SAVED_DMESG" - | \ + grep -e 'livepatch:' -e 'test_klp' | \ + grep -v '\(tainting\|taints\) kernel' | \ + sed 's/^\[[ 0-9.]*\] //') +diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh +index 8383eb89d88a9..bb7a1775307b8 100755 +--- a/tools/testing/selftests/lkdtm/run.sh ++++ b/tools/testing/selftests/lkdtm/run.sh +@@ -82,7 +82,7 @@ dmesg > "$DMESG" + ($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true + + # Record and dump the results +-dmesg | diff --changed-group-format='%>' --unchanged-group-format='' "$DMESG" - > "$LOG" || true ++dmesg | comm --nocheck-order -13 "$DMESG" - > "$LOG" || true + + cat "$LOG" + # Check for expected output +diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config +index 3b42c06b59858..c5e50ab2ced60 100644 +--- a/tools/testing/selftests/net/config ++++ b/tools/testing/selftests/net/config +@@ -31,3 +31,4 @@ CONFIG_NET_SCH_ETF=m + CONFIG_NET_SCH_NETEM=y + CONFIG_TEST_BLACKHOLE_DEV=m + CONFIG_KALLSYMS=y ++CONFIG_NET_FOU=m +diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh +index a0b5f57d6bd31..0727e2012b685 100755 +--- a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh ++++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh +@@ -215,10 +215,16 @@ switch_create() + + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20 ++ ++ sysctl_set net.ipv4.conf.all.rp_filter 0 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0 + } + + switch_destroy() + { ++ sysctl_restore net.ipv4.conf.all.rp_filter ++ + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20 + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10 + +@@ -359,6 +365,10 @@ ns_switch_create() + + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20 ++ ++ sysctl_set net.ipv4.conf.all.rp_filter 0 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0 + } + export -f ns_switch_create + +diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh +index 1209031bc794d..5d97fa347d75a 100755 +--- 
a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh ++++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh +@@ -237,10 +237,16 @@ switch_create() + + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20 ++ ++ sysctl_set net.ipv4.conf.all.rp_filter 0 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0 + } + + switch_destroy() + { ++ sysctl_restore net.ipv4.conf.all.rp_filter ++ + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20 + bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10 + +@@ -402,6 +408,10 @@ ns_switch_create() + + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10 + bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20 ++ ++ sysctl_set net.ipv4.conf.all.rp_filter 0 ++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0 ++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0 + } + export -f ns_switch_create + +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh +index 57d75b7f62203..e9449430f98df 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh +@@ -444,9 +444,9 @@ do_transfer() + duration=$(printf "(duration %05sms)" $duration) + if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then + echo "$duration [ FAIL ] client exit code $retc, server $rets" 1>&2 +- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2 ++ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2 + ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port" +- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2 ++ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2 + ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port" + + cat "$capout" +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index f39c1129ce5f0..c2943e4dfcfe6 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -176,9 +176,9 @@ do_transfer() + + if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then + echo " client exit code $retc, server $rets" 1>&2 +- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2 ++ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2 + ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port" +- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2 ++ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2 + ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port" + + cat "$capout" +diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh +index 8a2fe6d64bf24..c9ce3dfa42ee7 100755 +--- a/tools/testing/selftests/net/rtnetlink.sh ++++ b/tools/testing/selftests/net/rtnetlink.sh +@@ -520,6 +520,11 @@ kci_test_encap_fou() + return $ksft_skip + fi + ++ if ! /sbin/modprobe -q -n fou; then ++ echo "SKIP: module fou is not found" ++ return $ksft_skip ++ fi ++ /sbin/modprobe -q fou + ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null + if [ $? 
-ne 0 ];then + echo "FAIL: can't add fou port 7777, skipping test" +diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c +index 55ef15184057d..386bca731e581 100644 +--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c ++++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c +@@ -64,6 +64,7 @@ int bufsize; + int debug; + int testing; + volatile int gotsig; ++bool prefixes_enabled; + char *cipath = "/dev/fb0"; + long cioffset; + +@@ -77,7 +78,12 @@ void sighandler(int sig, siginfo_t *info, void *ctx) + } + gotsig = sig; + #ifdef __powerpc64__ +- ucp->uc_mcontext.gp_regs[PT_NIP] += 4; ++ if (prefixes_enabled) { ++ u32 inst = *(u32 *)ucp->uc_mcontext.gp_regs[PT_NIP]; ++ ucp->uc_mcontext.gp_regs[PT_NIP] += ((inst >> 26 == 1) ? 8 : 4); ++ } else { ++ ucp->uc_mcontext.gp_regs[PT_NIP] += 4; ++ } + #else + ucp->uc_mcontext.uc_regs->gregs[PT_NIP] += 4; + #endif +@@ -648,6 +654,8 @@ int main(int argc, char *argv[]) + exit(1); + } + ++ prefixes_enabled = have_hwcap2(PPC_FEATURE2_ARCH_3_1); ++ + rc |= test_harness(test_alignment_handler_vsx_206, + "test_alignment_handler_vsx_206"); + rc |= test_harness(test_alignment_handler_vsx_207, +diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh +index 8a8d0f456946c..0d783e1065c86 100755 +--- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh ++++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh +@@ -1,17 +1,19 @@ + #!/bin/sh + # SPDX-License-Identifier: GPL-2.0-only + ++KSELFTESTS_SKIP=4 ++ + . ./eeh-functions.sh + + if ! eeh_supported ; then + echo "EEH not supported on this system, skipping" +- exit 0; ++ exit $KSELFTESTS_SKIP; + fi + + if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \ + [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then + echo "debugfs EEH testing files are missing. Is debugfs mounted?" +- exit 1; ++ exit $KSELFTESTS_SKIP; + fi + + pre_lspci=`mktemp` +@@ -84,4 +86,5 @@ echo "$failed devices failed to recover ($dev_count tested)" + lspci | diff -u $pre_lspci - + rm -f $pre_lspci + +-exit $failed ++test "$failed" == 0 ++exit $? 
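The alignment_handler.c hunk above steps the NIP past the trapping instruction, and on ISA 3.1 (POWER10) machines a prefixed instruction is 8 bytes rather than 4; the handler detects one by primary opcode 1 in the top 6 bits of the first instruction word. A minimal compilable sketch of that decode follows; the function name is illustrative, not part of the patch:

#include <stdint.h>

/* Advance past a trapping powerpc instruction: ISA 3.1 prefixed
 * instructions are 8 bytes and carry primary opcode 1 in the top
 * 6 bits of the first word; everything else is a fixed 4 bytes. */
static inline unsigned long step_over_insn(unsigned long nip,
                                           int prefixes_enabled)
{
	uint32_t word = *(uint32_t *)nip;

	if (prefixes_enabled && (word >> 26) == 1)
		return nip + 8;
	return nip + 4;
}
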
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c +index 7a6d40286a421..9a9eb02539fb4 100644 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c +@@ -1667,64 +1667,79 @@ TEST_F(TRACE_poke, getpid_runs_normally) + } + + #if defined(__x86_64__) +-# define ARCH_REGS struct user_regs_struct +-# define SYSCALL_NUM orig_rax +-# define SYSCALL_RET rax ++# define ARCH_REGS struct user_regs_struct ++# define SYSCALL_NUM(_regs) (_regs).orig_rax ++# define SYSCALL_RET(_regs) (_regs).rax + #elif defined(__i386__) +-# define ARCH_REGS struct user_regs_struct +-# define SYSCALL_NUM orig_eax +-# define SYSCALL_RET eax ++# define ARCH_REGS struct user_regs_struct ++# define SYSCALL_NUM(_regs) (_regs).orig_eax ++# define SYSCALL_RET(_regs) (_regs).eax + #elif defined(__arm__) +-# define ARCH_REGS struct pt_regs +-# define SYSCALL_NUM ARM_r7 +-# define SYSCALL_RET ARM_r0 ++# define ARCH_REGS struct pt_regs ++# define SYSCALL_NUM(_regs) (_regs).ARM_r7 ++# define SYSCALL_RET(_regs) (_regs).ARM_r0 + #elif defined(__aarch64__) +-# define ARCH_REGS struct user_pt_regs +-# define SYSCALL_NUM regs[8] +-# define SYSCALL_RET regs[0] ++# define ARCH_REGS struct user_pt_regs ++# define SYSCALL_NUM(_regs) (_regs).regs[8] ++# define SYSCALL_RET(_regs) (_regs).regs[0] + #elif defined(__riscv) && __riscv_xlen == 64 +-# define ARCH_REGS struct user_regs_struct +-# define SYSCALL_NUM a7 +-# define SYSCALL_RET a0 ++# define ARCH_REGS struct user_regs_struct ++# define SYSCALL_NUM(_regs) (_regs).a7 ++# define SYSCALL_RET(_regs) (_regs).a0 + #elif defined(__csky__) +-# define ARCH_REGS struct pt_regs +-#if defined(__CSKYABIV2__) +-# define SYSCALL_NUM regs[3] +-#else +-# define SYSCALL_NUM regs[9] +-#endif +-# define SYSCALL_RET a0 ++# define ARCH_REGS struct pt_regs ++# if defined(__CSKYABIV2__) ++# define SYSCALL_NUM(_regs) (_regs).regs[3] ++# else ++# define SYSCALL_NUM(_regs) (_regs).regs[9] ++# endif ++# define SYSCALL_RET(_regs) (_regs).a0 + #elif defined(__hppa__) +-# define ARCH_REGS struct user_regs_struct +-# define SYSCALL_NUM gr[20] +-# define SYSCALL_RET gr[28] ++# define ARCH_REGS struct user_regs_struct ++# define SYSCALL_NUM(_regs) (_regs).gr[20] ++# define SYSCALL_RET(_regs) (_regs).gr[28] + #elif defined(__powerpc__) +-# define ARCH_REGS struct pt_regs +-# define SYSCALL_NUM gpr[0] +-# define SYSCALL_RET gpr[3] ++# define ARCH_REGS struct pt_regs ++# define SYSCALL_NUM(_regs) (_regs).gpr[0] ++# define SYSCALL_RET(_regs) (_regs).gpr[3] ++# define SYSCALL_RET_SET(_regs, _val) \ ++ do { \ ++ typeof(_val) _result = (_val); \ ++ /* \ ++ * A syscall error is signaled by CR0 SO bit \ ++ * and the code is stored as a positive value. 
\
++ */ \
++ if (_result < 0) { \
++ SYSCALL_RET(_regs) = -_result; \
++ (_regs).ccr |= 0x10000000; \
++ } else { \
++ SYSCALL_RET(_regs) = _result; \
++ (_regs).ccr &= ~0x10000000; \
++ } \
++ } while (0)
+ #elif defined(__s390__)
+-# define ARCH_REGS s390_regs
+-# define SYSCALL_NUM gprs[2]
+-# define SYSCALL_RET gprs[2]
++# define ARCH_REGS s390_regs
++# define SYSCALL_NUM(_regs) (_regs).gprs[2]
++# define SYSCALL_RET(_regs) (_regs).gprs[2]
+ # define SYSCALL_NUM_RET_SHARE_REG
+ #elif defined(__mips__)
+-# define ARCH_REGS struct pt_regs
+-# define SYSCALL_NUM regs[2]
+-# define SYSCALL_SYSCALL_NUM regs[4]
+-# define SYSCALL_RET regs[2]
++# define ARCH_REGS struct pt_regs
++# define SYSCALL_NUM(_regs) (_regs).regs[2]
++# define SYSCALL_SYSCALL_NUM regs[4]
++# define SYSCALL_RET(_regs) (_regs).regs[2]
+ # define SYSCALL_NUM_RET_SHARE_REG
+ #elif defined(__xtensa__)
+-# define ARCH_REGS struct user_pt_regs
+-# define SYSCALL_NUM syscall
++# define ARCH_REGS struct user_pt_regs
++# define SYSCALL_NUM(_regs) (_regs).syscall
+ /*
+ * On xtensa syscall return value is in the register
+ * a2 of the current window which is not fixed.
+ */
+-#define SYSCALL_RET(reg) a[(reg).windowbase * 4 + 2]
++#define SYSCALL_RET(_regs) (_regs).a[(_regs).windowbase * 4 + 2]
+ #elif defined(__sh__)
+-# define ARCH_REGS struct pt_regs
+-# define SYSCALL_NUM gpr[3]
+-# define SYSCALL_RET gpr[0]
++# define ARCH_REGS struct pt_regs
++# define SYSCALL_NUM(_regs) (_regs).gpr[3]
++# define SYSCALL_RET(_regs) (_regs).gpr[0]
+ #else
+ # error "Do not know how to find your architecture's registers and syscalls"
+ #endif
+@@ -1773,10 +1788,10 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
+ #endif
+ 
+ #if defined(__mips__)
+- if (regs.SYSCALL_NUM == __NR_O32_Linux)
++ if (SYSCALL_NUM(regs) == __NR_O32_Linux)
+ return regs.SYSCALL_SYSCALL_NUM;
+ #endif
+- return regs.SYSCALL_NUM;
++ return SYSCALL_NUM(regs);
+ }
+ 
+ /* Architecture-specific syscall changing routine. */
+@@ -1799,14 +1814,14 @@ void change_syscall(struct __test_metadata *_metadata,
+ defined(__s390__) || defined(__hppa__) || defined(__riscv) || \
+ defined(__xtensa__) || defined(__csky__) || defined(__sh__)
+ {
+- regs.SYSCALL_NUM = syscall;
++ SYSCALL_NUM(regs) = syscall;
+ }
+ #elif defined(__mips__)
+ {
+- if (regs.SYSCALL_NUM == __NR_O32_Linux)
++ if (SYSCALL_NUM(regs) == __NR_O32_Linux)
+ regs.SYSCALL_SYSCALL_NUM = syscall;
+ else
+- regs.SYSCALL_NUM = syscall;
++ SYSCALL_NUM(regs) = syscall;
+ }
+ 
+ #elif defined(__arm__)
+@@ -1840,11 +1855,8 @@ void change_syscall(struct __test_metadata *_metadata,
+ if (syscall == -1)
+ #ifdef SYSCALL_NUM_RET_SHARE_REG
+ TH_LOG("Can't modify syscall return on this architecture");
+-
+-#elif defined(__xtensa__)
+- regs.SYSCALL_RET(regs) = result;
+ #else
+- regs.SYSCALL_RET = result;
++ SYSCALL_RET(regs) = result;
+ #endif
+ 
+ #ifdef HAVE_GETREGS
+@@ -3715,7 +3727,7 @@ TEST(user_notification_filter_empty)
+ if (pid == 0) {
+ int listener;
+ 
+- listener = user_notif_syscall(__NR_mknod, SECCOMP_FILTER_FLAG_NEW_LISTENER);
++ listener = user_notif_syscall(__NR_mknodat, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ if (listener < 0)
+ _exit(EXIT_FAILURE);
+ 
+diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
+index 3ba674b64fa9f..69dd0d1aa30b2 100644
+--- a/tools/testing/selftests/vm/config
++++ b/tools/testing/selftests/vm/config
+@@ -3,3 +3,4 @@ CONFIG_USERFAULTFD=y
+ CONFIG_TEST_VMALLOC=m
+ CONFIG_DEVICE_PRIVATE=y
+ CONFIG_TEST_HMM=m
++CONFIG_GUP_BENCHMARK=y
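
The seccomp_bpf.c hunks above turn the per-architecture SYSCALL_NUM/SYSCALL_RET register names into function-like macros that expand to lvalues, so the harness can read and assign the syscall slot the same way on every architecture, and the new powerpc SYSCALL_RET_SET encodes the ABI rule that errors are returned as a positive code with the CR0 SO bit set. A standalone sketch of that pattern, assuming an illustrative register layout rather than the kernel's:

/* Sketch of the accessor-macro pattern: SYSCALL_RET() expands to an
 * lvalue, so shared harness code can read or assign the return register
 * without knowing where a given architecture keeps it. */
struct fake_regs { unsigned long gpr[32]; unsigned long ccr; };

#define SYSCALL_RET(_regs)	(_regs).gpr[3]	/* powerpc keeps it in r3 */

static void syscall_ret_set(struct fake_regs *regs, long val)
{
	/* powerpc reports an error as a positive code plus the CR0 SO
	 * bit, mirroring what SYSCALL_RET_SET() in the patch encodes */
	if (val < 0) {
		SYSCALL_RET(*regs) = -val;
		regs->ccr |= 0x10000000;
	} else {
		SYSCALL_RET(*regs) = val;
		regs->ccr &= ~0x10000000;
	}
}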