mirror of https://github.com/Fishwaldo/build.git, synced 2025-07-18 19:08:48 +00:00
Diff: 21962 lines, 716 KiB
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fb95fad81c79a..6746f91ebc490 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -577,7 +577,7 @@
loops can be debugged more effectively on production
systems.

- clearcpuid=BITNUM [X86]
+ clearcpuid=BITNUM[,BITNUM...] [X86]
Disable CPUID feature X for the kernel. See
arch/x86/include/asm/cpufeatures.h for the valid bit
numbers. Note the Linux specific bits are not necessarily
diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
index fc823572bcff2..90c6d039b91b0 100644
--- a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
+++ b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
@@ -23,8 +23,7 @@ properties:
- items:
- const: allwinner,sun7i-a20-crypto
- const: allwinner,sun4i-a10-crypto
- - items:
- - const: allwinner,sun8i-a33-crypto
+ - const: allwinner,sun8i-a33-crypto

reg:
maxItems: 1
@@ -59,7 +58,9 @@ if:
properties:
compatible:
contains:
- const: allwinner,sun6i-a31-crypto
+ enum:
+ - allwinner,sun6i-a31-crypto
+ - allwinner,sun8i-a33-crypto

then:
required:
diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt
index 9d6c9feb12ff1..a3c1dffaa4bb4 100644
--- a/Documentation/devicetree/bindings/net/socionext-netsec.txt
+++ b/Documentation/devicetree/bindings/net/socionext-netsec.txt
@@ -30,7 +30,9 @@ Optional properties: (See ethernet.txt file in the same directory)
- max-frame-size: See ethernet.txt in the same directory.

The MAC address will be determined using the optional properties
-defined in ethernet.txt.
+defined in ethernet.txt. The 'phy-mode' property is required, but may
+be set to the empty string if the PHY configuration is programmed by
+the firmware or set by hardware straps, and needs to be preserved.

Example:
eth0: ethernet@522d0000 {
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 837d51f9e1fab..25e6673a085a0 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -1142,13 +1142,15 @@ icmp_ratelimit - INTEGER
icmp_msgs_per_sec - INTEGER
Limit maximal number of ICMP packets sent per second from this host.
Only messages whose type matches icmp_ratemask (see below) are
- controlled by this limit.
+ controlled by this limit. For security reasons, the precise count
+ of messages per second is randomized.

Default: 1000

icmp_msgs_burst - INTEGER
icmp_msgs_per_sec controls number of ICMP packets sent per second,
while icmp_msgs_burst controls the burst size of these packets.
+ For security reasons, the precise burst size is randomized.

Default: 50

diff --git a/Makefile b/Makefile
index a4622ef65436e..9bdb93053ee93 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 8
-SUBLEVEL = 16
+SUBLEVEL = 17
EXTRAVERSION =
NAME = Kleptomaniac Octopus

diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index ce81018345184..6b5c54576f54d 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK
select ARC_HAS_ACCL_REGS
select ARC_IRQ_NO_AUTOSAVE
select CLK_HSDK
+ select RESET_CONTROLLER
select RESET_HSDK
select HAVE_PCI
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 911d8cf77f2c6..0339a46fa71c5 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -939,8 +939,10 @@
};

rngb: rngb@21b4000 {
+ compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb";
reg = <0x021b4000 0x4000>;
interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_DUMMY>;
};

weim: weim@21b8000 {
diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
index ebbe1518ef8a6..63cafd220dba1 100644
--- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
@@ -57,7 +57,7 @@

lvds-receiver {
compatible = "ti,ds90cf384a", "lvds-decoder";
- powerdown-gpios = <&gpio7 25 GPIO_ACTIVE_LOW>;
+ power-supply = <&vcc_3v3_tft1>;

ports {
#address-cells = <1>;
@@ -81,6 +81,7 @@
panel {
compatible = "edt,etm0700g0dh6";
backlight = <&lcd_backlight>;
+ power-supply = <&vcc_3v3_tft1>;

port {
panel_in: endpoint {
@@ -113,6 +114,17 @@
};
};

+ vcc_3v3_tft1: regulator-panel {
+ compatible = "regulator-fixed";
+
+ regulator-name = "vcc-3v3-tft1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ startup-delay-us = <500>;
+ gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
+ };
+
vcc_sdhi1: regulator-vcc-sdhi1 {
compatible = "regulator-fixed";

@@ -207,6 +219,7 @@
reg = <0x38>;
interrupt-parent = <&gpio2>;
interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+ vcc-supply = <&vcc_3v3_tft1>;
};
};

diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index eedb92526968a..a4ab8b96d0eb6 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -239,8 +239,6 @@
<GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi
index 5ceb6cc4451d2..1dbe4e8b38ac7 100644
--- a/arch/arm/boot/dts/owl-s500.dtsi
+++ b/arch/arm/boot/dts/owl-s500.dtsi
@@ -84,21 +84,21 @@
global_timer: timer@b0020200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0xb0020200 0x100>;
- interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};

twd_timer: timer@b0020600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0xb0020600 0x20>;
- interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};

twd_wdt: wdt@b0020620 {
compatible = "arm,cortex-a9-twd-wdt";
reg = <0xb0020620 0xe0>;
- interrupts = <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};

diff --git a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
index 5700e6b700d36..b85025d009437 100644
--- a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
@@ -121,8 +121,6 @@
reset-gpios = <&gpiog 0 GPIO_ACTIVE_LOW>; /* ETH_RST# */
interrupt-parent = <&gpioa>;
interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* ETH_MDINT# */
- rxc-skew-ps = <1860>;
- txc-skew-ps = <1860>;
reset-assert-us = <10000>;
reset-deassert-us = <300>;
micrel,force-master;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
|
|
index 7c4bd615b3115..e4e3c92eb30d3 100644
|
|
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
|
|
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
|
|
@@ -11,7 +11,6 @@
|
|
serial0 = &uart4;
|
|
serial1 = &usart3;
|
|
serial2 = &uart8;
|
|
- ethernet0 = &ethernet0;
|
|
};
|
|
|
|
chosen {
|
|
@@ -26,23 +25,13 @@
|
|
|
|
display_bl: display-bl {
|
|
compatible = "pwm-backlight";
|
|
- pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
|
|
+ pwms = <&pwm2 3 500000 PWM_POLARITY_INVERTED>;
|
|
brightness-levels = <0 16 22 30 40 55 75 102 138 188 255>;
|
|
default-brightness-level = <8>;
|
|
enable-gpios = <&gpioi 0 GPIO_ACTIVE_HIGH>;
|
|
status = "okay";
|
|
};
|
|
|
|
- ethernet_vio: vioregulator {
|
|
- compatible = "regulator-fixed";
|
|
- regulator-name = "vio";
|
|
- regulator-min-microvolt = <3300000>;
|
|
- regulator-max-microvolt = <3300000>;
|
|
- gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
|
|
- regulator-always-on;
|
|
- regulator-boot-on;
|
|
- };
|
|
-
|
|
gpio-keys-polled {
|
|
compatible = "gpio-keys-polled";
|
|
#size-cells = <0>;
|
|
@@ -141,28 +130,6 @@
|
|
status = "okay";
|
|
};
|
|
|
|
-&ethernet0 {
|
|
- status = "okay";
|
|
- pinctrl-0 = <&ethernet0_rmii_pins_a>;
|
|
- pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
|
|
- pinctrl-names = "default", "sleep";
|
|
- phy-mode = "rmii";
|
|
- max-speed = <100>;
|
|
- phy-handle = <&phy0>;
|
|
- st,eth-ref-clk-sel;
|
|
- phy-reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>;
|
|
-
|
|
- mdio0 {
|
|
- #address-cells = <1>;
|
|
- #size-cells = <0>;
|
|
- compatible = "snps,dwmac-mdio";
|
|
-
|
|
- phy0: ethernet-phy@1 {
|
|
- reg = <1>;
|
|
- };
|
|
- };
|
|
-};
|
|
-
|
|
&i2c2 { /* Header X22 */
|
|
pinctrl-names = "default";
|
|
pinctrl-0 = <&i2c2_pins_a>;
|
|
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
|
|
index ba905196fb549..a87ebc4843963 100644
|
|
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
|
|
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
|
|
@@ -9,6 +9,10 @@
|
|
#include <dt-bindings/mfd/st,stpmic1.h>
|
|
|
|
/ {
|
|
+ aliases {
|
|
+ ethernet0 = &ethernet0;
|
|
+ };
|
|
+
|
|
memory@c0000000 {
|
|
device_type = "memory";
|
|
reg = <0xC0000000 0x40000000>;
|
|
@@ -55,6 +59,16 @@
|
|
no-map;
|
|
};
|
|
};
|
|
+
|
|
+ ethernet_vio: vioregulator {
|
|
+ compatible = "regulator-fixed";
|
|
+ regulator-name = "vio";
|
|
+ regulator-min-microvolt = <3300000>;
|
|
+ regulator-max-microvolt = <3300000>;
|
|
+ gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
|
|
+ regulator-always-on;
|
|
+ regulator-boot-on;
|
|
+ };
|
|
};
|
|
|
|
&adc {
|
|
@@ -94,6 +108,28 @@
|
|
status = "okay";
|
|
};
|
|
|
|
+&ethernet0 {
|
|
+ status = "okay";
|
|
+ pinctrl-0 = <&ethernet0_rmii_pins_a>;
|
|
+ pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
|
|
+ pinctrl-names = "default", "sleep";
|
|
+ phy-mode = "rmii";
|
|
+ max-speed = <100>;
|
|
+ phy-handle = <&phy0>;
|
|
+ st,eth-ref-clk-sel;
|
|
+ phy-reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
|
|
+
|
|
+ mdio0 {
|
|
+ #address-cells = <1>;
|
|
+ #size-cells = <0>;
|
|
+ compatible = "snps,dwmac-mdio";
|
|
+
|
|
+ phy0: ethernet-phy@1 {
|
|
+ reg = <1>;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
&i2c4 {
|
|
pinctrl-names = "default";
|
|
pinctrl-0 = <&i2c4_pins_a>;
|
|
@@ -249,7 +285,7 @@
|
|
compatible = "ti,tsc2004";
|
|
reg = <0x49>;
|
|
vio-supply = <&v3v3>;
|
|
- interrupts-extended = <&gpioh 3 IRQ_TYPE_EDGE_FALLING>;
|
|
+ interrupts-extended = <&gpioh 15 IRQ_TYPE_EDGE_FALLING>;
|
|
};
|
|
|
|
eeprom@50 {
|
|
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
|
|
index 930202742a3f6..905cd7bb98cf0 100644
|
|
--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
|
|
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
|
|
@@ -295,9 +295,9 @@
|
|
|
|
&sdmmc2 {
|
|
pinctrl-names = "default", "opendrain", "sleep";
|
|
- pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_b>;
|
|
- pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_b>;
|
|
- pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_b>;
|
|
+ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_c>;
|
|
+ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_c>;
|
|
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_c>;
|
|
bus-width = <8>;
|
|
mmc-ddr-1_8v;
|
|
no-sd;
|
|
diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
|
|
index 42d62d1ba1dc7..ea15073f0c79c 100644
|
|
--- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
|
|
+++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
|
|
@@ -223,16 +223,16 @@
|
|
};
|
|
|
|
&reg_dc1sw {
|
|
- regulator-min-microvolt = <3000000>;
|
|
- regulator-max-microvolt = <3000000>;
|
|
+ regulator-min-microvolt = <3300000>;
|
|
+ regulator-max-microvolt = <3300000>;
|
|
regulator-name = "vcc-gmac-phy";
|
|
};
|
|
|
|
&reg_dcdc1 {
|
|
regulator-always-on;
|
|
- regulator-min-microvolt = <3000000>;
|
|
- regulator-max-microvolt = <3000000>;
|
|
- regulator-name = "vcc-3v0";
|
|
+ regulator-min-microvolt = <3300000>;
|
|
+ regulator-max-microvolt = <3300000>;
|
|
+ regulator-name = "vcc-3v3";
|
|
};
|
|
|
|
&reg_dcdc2 {
|
|
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
|
|
index 2aab043441e8f..eae8aaaadc3bf 100644
|
|
--- a/arch/arm/mach-at91/pm.c
|
|
+++ b/arch/arm/mach-at91/pm.c
|
|
@@ -800,6 +800,7 @@ static void __init at91_pm_init(void (*pm_idle)(void))
|
|
|
|
pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
|
|
soc_pm.data.pmc = of_iomap(pmc_np, 0);
|
|
+ of_node_put(pmc_np);
|
|
if (!soc_pm.data.pmc) {
|
|
pr_err("AT91: PM not supported, PMC not found\n");
|
|
return;
|
|
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
|
|
index 6f5f89711f256..a92d277f81a08 100644
|
|
--- a/arch/arm/mach-omap2/cpuidle44xx.c
|
|
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
|
|
@@ -174,8 +174,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
|
|
*/
|
|
if (mpuss_can_lose_context) {
|
|
error = cpu_cluster_pm_enter();
|
|
- if (error)
|
|
+ if (error) {
|
|
+ omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
|
|
goto cpu_cluster_pm_out;
|
|
+ }
|
|
}
|
|
}
|
|
|
|
diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c
|
|
index 58c5ef3cf1d7e..2d370f7f75fa2 100644
|
|
--- a/arch/arm/mach-s3c24xx/mach-at2440evb.c
|
|
+++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c
|
|
@@ -143,7 +143,7 @@ static struct gpiod_lookup_table at2440evb_mci_gpio_table = {
|
|
.dev_id = "s3c2410-sdi",
|
|
.table = {
|
|
/* Card detect S3C2410_GPG(10) */
|
|
- GPIO_LOOKUP("GPG", 10, "cd", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("GPIOG", 10, "cd", GPIO_ACTIVE_LOW),
|
|
{ },
|
|
},
|
|
};
|
|
diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c
|
|
index e1c372e5447b6..82cc37513779c 100644
|
|
--- a/arch/arm/mach-s3c24xx/mach-h1940.c
|
|
+++ b/arch/arm/mach-s3c24xx/mach-h1940.c
|
|
@@ -468,9 +468,9 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = {
|
|
.dev_id = "s3c2410-sdi",
|
|
.table = {
|
|
/* Card detect S3C2410_GPF(5) */
|
|
- GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
|
|
/* Write protect S3C2410_GPH(8) */
|
|
- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
|
|
{ },
|
|
},
|
|
};
|
|
diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
|
|
index 9035f868fb34e..3a5b1124037b2 100644
|
|
--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
|
|
+++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
|
|
@@ -244,9 +244,9 @@ static struct gpiod_lookup_table mini2440_mmc_gpio_table = {
|
|
.dev_id = "s3c2410-sdi",
|
|
.table = {
|
|
/* Card detect S3C2410_GPG(8) */
|
|
- GPIO_LOOKUP("GPG", 8, "cd", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("GPIOG", 8, "cd", GPIO_ACTIVE_LOW),
|
|
/* Write protect S3C2410_GPH(8) */
|
|
- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_HIGH),
|
|
+ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_HIGH),
|
|
{ },
|
|
},
|
|
};
|
|
diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c
|
|
index d856f23939aff..ffa20f52aa832 100644
|
|
--- a/arch/arm/mach-s3c24xx/mach-n30.c
|
|
+++ b/arch/arm/mach-s3c24xx/mach-n30.c
|
|
@@ -359,9 +359,9 @@ static struct gpiod_lookup_table n30_mci_gpio_table = {
|
|
.dev_id = "s3c2410-sdi",
|
|
.table = {
|
|
/* Card detect S3C2410_GPF(1) */
|
|
- GPIO_LOOKUP("GPF", 1, "cd", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("GPIOF", 1, "cd", GPIO_ACTIVE_LOW),
|
|
/* Write protect S3C2410_GPG(10) */
|
|
- GPIO_LOOKUP("GPG", 10, "wp", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("GPIOG", 10, "wp", GPIO_ACTIVE_LOW),
|
|
{ },
|
|
},
|
|
};
|
|
diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
|
|
index fde98b175c752..c0a06f123cfea 100644
|
|
--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
|
|
+++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
|
|
@@ -571,9 +571,9 @@ static struct gpiod_lookup_table rx1950_mmc_gpio_table = {
|
|
.dev_id = "s3c2410-sdi",
|
|
.table = {
|
|
/* Card detect S3C2410_GPF(5) */
|
|
- GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
|
|
/* Write protect S3C2410_GPH(8) */
|
|
- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
|
|
{ },
|
|
},
|
|
};
|
|
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
|
|
index 12c26eb88afbc..43d91bfd23600 100644
|
|
--- a/arch/arm/mm/cache-l2x0.c
|
|
+++ b/arch/arm/mm/cache-l2x0.c
|
|
@@ -1249,20 +1249,28 @@ static void __init l2c310_of_parse(const struct device_node *np,
|
|
|
|
ret = of_property_read_u32(np, "prefetch-data", &val);
|
|
if (ret == 0) {
|
|
- if (val)
|
|
+ if (val) {
|
|
prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
|
|
- else
|
|
+ *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
|
|
+ } else {
|
|
prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
|
|
+ *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
|
|
+ }
|
|
+ *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
|
|
} else if (ret != -EINVAL) {
|
|
pr_err("L2C-310 OF prefetch-data property value is missing\n");
|
|
}
|
|
|
|
ret = of_property_read_u32(np, "prefetch-instr", &val);
|
|
if (ret == 0) {
|
|
- if (val)
|
|
+ if (val) {
|
|
prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
|
|
- else
|
|
+ *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
|
|
+ } else {
|
|
prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
|
|
+ *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
|
|
+ }
|
|
+ *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
|
|
} else if (ret != -EINVAL) {
|
|
pr_err("L2C-310 OF prefetch-instr property value is missing\n");
|
|
}
|
|
diff --git a/arch/arm64/boot/dts/actions/s700.dtsi b/arch/arm64/boot/dts/actions/s700.dtsi
|
|
index 2006ad5424fa6..f8eb72bb41254 100644
|
|
--- a/arch/arm64/boot/dts/actions/s700.dtsi
|
|
+++ b/arch/arm64/boot/dts/actions/s700.dtsi
|
|
@@ -231,7 +231,7 @@
|
|
|
|
pinctrl: pinctrl@e01b0000 {
|
|
compatible = "actions,s700-pinctrl";
|
|
- reg = <0x0 0xe01b0000 0x0 0x1000>;
|
|
+ reg = <0x0 0xe01b0000 0x0 0x100>;
|
|
clocks = <&cmu CLK_GPIO>;
|
|
gpio-controller;
|
|
gpio-ranges = <&pinctrl 0 0 136>;
|
|
diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
|
|
index ff5ba85b7562e..833bbc3359c44 100644
|
|
--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
|
|
+++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
|
|
@@ -41,13 +41,13 @@
|
|
|
|
led-white {
|
|
label = "vim3:white:sys";
|
|
- gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
|
|
+ gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>;
|
|
linux,default-trigger = "heartbeat";
|
|
};
|
|
|
|
led-red {
|
|
label = "vim3:red";
|
|
- gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>;
|
|
+ gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>;
|
|
};
|
|
};
|
|
|
|
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
|
|
index 66ac66856e7e8..077e12a0de3f9 100644
|
|
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
|
|
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
|
|
@@ -614,6 +614,7 @@
|
|
gpc: gpc@303a0000 {
|
|
compatible = "fsl,imx8mq-gpc";
|
|
reg = <0x303a0000 0x10000>;
|
|
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
|
|
interrupt-parent = <&gic>;
|
|
interrupt-controller;
|
|
#interrupt-cells = <3>;
|
|
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
|
|
index a5a12b2599a4a..01522dd10603e 100644
|
|
--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
|
|
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
|
|
@@ -431,12 +431,11 @@
|
|
status = "okay";
|
|
pinctrl-names = "default";
|
|
pinctrl-0 = <&nor_gpio1_pins>;
|
|
- bus-width = <8>;
|
|
- max-frequency = <50000000>;
|
|
- non-removable;
|
|
+
|
|
flash@0 {
|
|
compatible = "jedec,spi-nor";
|
|
reg = <0>;
|
|
+ spi-max-frequency = <50000000>;
|
|
};
|
|
};
|
|
|
|
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
|
|
index 32bd140ac9fd4..103d2226c579b 100644
|
|
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
|
|
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
|
|
@@ -228,14 +228,14 @@
|
|
};
|
|
|
|
thermal-zones {
|
|
- cpu0_1-thermal {
|
|
+ cpu0-1-thermal {
|
|
polling-delay-passive = <250>;
|
|
polling-delay = <1000>;
|
|
|
|
thermal-sensors = <&tsens 5>;
|
|
|
|
trips {
|
|
- cpu0_1_alert0: trip-point@0 {
|
|
+ cpu0_1_alert0: trip-point0 {
|
|
temperature = <75000>;
|
|
hysteresis = <2000>;
|
|
type = "passive";
|
|
@@ -258,7 +258,7 @@
|
|
};
|
|
};
|
|
|
|
- cpu2_3-thermal {
|
|
+ cpu2-3-thermal {
|
|
polling-delay-passive = <250>;
|
|
polling-delay = <1000>;
|
|
|
|
@@ -1021,7 +1021,7 @@
|
|
reg-names = "mdp_phys";
|
|
|
|
interrupt-parent = <&mdss>;
|
|
- interrupts = <0 0>;
|
|
+ interrupts = <0>;
|
|
|
|
clocks = <&gcc GCC_MDSS_AHB_CLK>,
|
|
<&gcc GCC_MDSS_AXI_CLK>,
|
|
@@ -1053,7 +1053,7 @@
|
|
reg-names = "dsi_ctrl";
|
|
|
|
interrupt-parent = <&mdss>;
|
|
- interrupts = <4 0>;
|
|
+ interrupts = <4>;
|
|
|
|
assigned-clocks = <&gcc BYTE0_CLK_SRC>,
|
|
<&gcc PCLK0_CLK_SRC>;
|
|
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
|
|
index 0bcdf04711079..adf9a5988cdc2 100644
|
|
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
|
|
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
|
|
@@ -119,7 +119,7 @@
|
|
|
|
wcd_codec: codec@f000 {
|
|
compatible = "qcom,pm8916-wcd-analog-codec";
|
|
- reg = <0xf000 0x200>;
|
|
+ reg = <0xf000>;
|
|
reg-names = "pmic-codec-core";
|
|
clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
|
|
clock-names = "mclk";
|
|
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
|
|
index 31b9217bb5bfe..7f1b75b2bcee3 100644
|
|
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
|
|
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
|
|
@@ -2193,7 +2193,7 @@
|
|
|
|
system-cache-controller@9200000 {
|
|
compatible = "qcom,sc7180-llcc";
|
|
- reg = <0 0x09200000 0 0x200000>, <0 0x09600000 0 0x50000>;
|
|
+ reg = <0 0x09200000 0 0x50000>, <0 0x09600000 0 0x50000>;
|
|
reg-names = "llcc_base", "llcc_broadcast_base";
|
|
interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
|
|
};
|
|
@@ -2357,7 +2357,7 @@
|
|
<19200000>;
|
|
|
|
interrupt-parent = <&mdss>;
|
|
- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ interrupts = <0>;
|
|
|
|
status = "disabled";
|
|
|
|
@@ -2380,7 +2380,7 @@
|
|
reg-names = "dsi_ctrl";
|
|
|
|
interrupt-parent = <&mdss>;
|
|
- interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ interrupts = <4>;
|
|
|
|
clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
|
|
<&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
|
|
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
|
|
index 42171190cce46..065e8fe3a071c 100644
|
|
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
|
|
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
|
|
@@ -1214,9 +1214,8 @@
|
|
reg = <0 0xe6ea0000 0 0x0064>;
|
|
interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
|
|
clocks = <&cpg CPG_MOD 210>;
|
|
- dmas = <&dmac1 0x43>, <&dmac1 0x42>,
|
|
- <&dmac2 0x43>, <&dmac2 0x42>;
|
|
- dma-names = "tx", "rx", "tx", "rx";
|
|
+ dmas = <&dmac0 0x43>, <&dmac0 0x42>;
|
|
+ dma-names = "tx", "rx";
|
|
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
|
|
resets = <&cpg 210>;
|
|
#address-cells = <1>;
|
|
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
|
|
index 1991bdc36792f..27f74df8efbde 100644
|
|
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
|
|
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
|
|
@@ -1192,9 +1192,8 @@
|
|
reg = <0 0xe6ea0000 0 0x0064>;
|
|
interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
|
|
clocks = <&cpg CPG_MOD 210>;
|
|
- dmas = <&dmac1 0x43>, <&dmac1 0x42>,
|
|
- <&dmac2 0x43>, <&dmac2 0x42>;
|
|
- dma-names = "tx", "rx", "tx", "rx";
|
|
+ dmas = <&dmac0 0x43>, <&dmac0 0x42>;
|
|
+ dma-names = "tx", "rx";
|
|
power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
|
|
resets = <&cpg 210>;
|
|
#address-cells = <1>;
|
|
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
|
|
index 9174ddc76bdc3..b8d04c5748bf3 100644
|
|
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
|
|
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
|
|
@@ -500,7 +500,7 @@
|
|
};
|
|
|
|
i2c0: i2c@ff020000 {
|
|
- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
|
|
+ compatible = "cdns,i2c-r1p14";
|
|
status = "disabled";
|
|
interrupt-parent = <&gic>;
|
|
interrupts = <0 17 4>;
|
|
@@ -511,7 +511,7 @@
|
|
};
|
|
|
|
i2c1: i2c@ff030000 {
|
|
- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
|
|
+ compatible = "cdns,i2c-r1p14";
|
|
status = "disabled";
|
|
interrupt-parent = <&gic>;
|
|
interrupts = <0 18 4>;
|
|
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
|
|
index 0bc46149e4917..4b39293d0f72d 100644
|
|
--- a/arch/arm64/include/asm/insn.h
|
|
+++ b/arch/arm64/include/asm/insn.h
|
|
@@ -359,9 +359,13 @@ __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
|
|
__AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000)
|
|
__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
|
|
__AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
|
|
+__AARCH64_INSN_FUNCS(br_auth, 0xFEFFF800, 0xD61F0800)
|
|
__AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
|
|
+__AARCH64_INSN_FUNCS(blr_auth, 0xFEFFF800, 0xD63F0800)
|
|
__AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
|
|
+__AARCH64_INSN_FUNCS(ret_auth, 0xFFFFFBFF, 0xD65F0BFF)
|
|
__AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0)
|
|
+__AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF)
|
|
__AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
|
|
__AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
|
|
__AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
|
|
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
|
|
index a1871bb32bb17..d207f63eb68e1 100644
|
|
--- a/arch/arm64/include/asm/memory.h
|
|
+++ b/arch/arm64/include/asm/memory.h
|
|
@@ -163,7 +163,6 @@ extern u64 vabits_actual;
|
|
#include <linux/bitops.h>
|
|
#include <linux/mmdebug.h>
|
|
|
|
-extern s64 physvirt_offset;
|
|
extern s64 memstart_addr;
|
|
/* PHYS_OFFSET - the physical address of the start of memory. */
|
|
#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
|
|
@@ -239,7 +238,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
|
|
*/
|
|
#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
|
|
|
|
-#define __lm_to_phys(addr) (((addr) + physvirt_offset))
|
|
+#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
|
|
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
|
|
|
|
#define __virt_to_phys_nodebug(x) ({ \
|
|
@@ -257,7 +256,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
|
|
#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
|
|
#endif /* CONFIG_DEBUG_VIRTUAL */
|
|
|
|
-#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset))
|
|
+#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
|
|
#define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
|
|
|
|
/*
|
|
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
|
|
index 758e2d1577d0c..a1745d6ea4b58 100644
|
|
--- a/arch/arm64/include/asm/pgtable.h
|
|
+++ b/arch/arm64/include/asm/pgtable.h
|
|
@@ -23,6 +23,8 @@
|
|
#define VMALLOC_START (MODULES_END)
|
|
#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
|
|
|
|
+#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
|
|
+
|
|
#define FIRST_USER_ADDRESS 0UL
|
|
|
|
#ifndef __ASSEMBLY__
|
|
@@ -33,8 +35,6 @@
|
|
#include <linux/mm_types.h>
|
|
#include <linux/sched.h>
|
|
|
|
-extern struct page *vmemmap;
|
|
-
|
|
extern void __pte_error(const char *file, int line, unsigned long val);
|
|
extern void __pmd_error(const char *file, int line, unsigned long val);
|
|
extern void __pud_error(const char *file, int line, unsigned long val);
|
|
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
|
|
index 422ed2e38a6c8..6e8a7eec667e8 100644
|
|
--- a/arch/arm64/kernel/cpu_errata.c
|
|
+++ b/arch/arm64/kernel/cpu_errata.c
|
|
@@ -234,14 +234,17 @@ static int detect_harden_bp_fw(void)
|
|
smccc_end = NULL;
|
|
break;
|
|
|
|
-#if IS_ENABLED(CONFIG_KVM)
|
|
case SMCCC_CONDUIT_SMC:
|
|
cb = call_smc_arch_workaround_1;
|
|
+#if IS_ENABLED(CONFIG_KVM)
|
|
smccc_start = __smccc_workaround_1_smc;
|
|
smccc_end = __smccc_workaround_1_smc +
|
|
__SMCCC_WORKAROUND_1_SMC_SZ;
|
|
- break;
|
|
+#else
|
|
+ smccc_start = NULL;
|
|
+ smccc_end = NULL;
|
|
#endif
|
|
+ break;
|
|
|
|
default:
|
|
return -1;
|
|
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
|
|
index a107375005bc9..ccc8c9e22b258 100644
|
|
--- a/arch/arm64/kernel/insn.c
|
|
+++ b/arch/arm64/kernel/insn.c
|
|
@@ -176,7 +176,7 @@ bool __kprobes aarch64_insn_uses_literal(u32 insn)
|
|
|
|
bool __kprobes aarch64_insn_is_branch(u32 insn)
|
|
{
|
|
- /* b, bl, cb*, tb*, b.cond, br, blr */
|
|
+ /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
|
|
|
|
return aarch64_insn_is_b(insn) ||
|
|
aarch64_insn_is_bl(insn) ||
|
|
@@ -185,8 +185,11 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
|
|
aarch64_insn_is_tbz(insn) ||
|
|
aarch64_insn_is_tbnz(insn) ||
|
|
aarch64_insn_is_ret(insn) ||
|
|
+ aarch64_insn_is_ret_auth(insn) ||
|
|
aarch64_insn_is_br(insn) ||
|
|
+ aarch64_insn_is_br_auth(insn) ||
|
|
aarch64_insn_is_blr(insn) ||
|
|
+ aarch64_insn_is_blr_auth(insn) ||
|
|
aarch64_insn_is_bcond(insn);
|
|
}
|
|
|
|
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
|
|
index 581602413a130..c26d84ff0e224 100644
|
|
--- a/arch/arm64/kernel/perf_event.c
|
|
+++ b/arch/arm64/kernel/perf_event.c
|
|
@@ -510,6 +510,11 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
|
|
|
|
static inline void armv8pmu_enable_counter(u32 mask)
|
|
{
|
|
+ /*
|
|
+ * Make sure event configuration register writes are visible before we
|
|
+ * enable the counter.
|
|
+ * */
|
|
+ isb();
|
|
write_sysreg(mask, pmcntenset_el0);
|
|
}
|
|
|
|
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
|
|
index 263d5fba4c8a3..c541fb48886e3 100644
|
|
--- a/arch/arm64/kernel/probes/decode-insn.c
|
|
+++ b/arch/arm64/kernel/probes/decode-insn.c
|
|
@@ -29,7 +29,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
|
|
aarch64_insn_is_msr_imm(insn) ||
|
|
aarch64_insn_is_msr_reg(insn) ||
|
|
aarch64_insn_is_exception(insn) ||
|
|
- aarch64_insn_is_eret(insn))
|
|
+ aarch64_insn_is_eret(insn) ||
|
|
+ aarch64_insn_is_eret_auth(insn))
|
|
return false;
|
|
|
|
/*
|
|
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
|
|
index 1e93cfc7c47ad..ca4410eb230a3 100644
|
|
--- a/arch/arm64/mm/init.c
|
|
+++ b/arch/arm64/mm/init.c
|
|
@@ -54,12 +54,6 @@
|
|
s64 memstart_addr __ro_after_init = -1;
|
|
EXPORT_SYMBOL(memstart_addr);
|
|
|
|
-s64 physvirt_offset __ro_after_init;
|
|
-EXPORT_SYMBOL(physvirt_offset);
|
|
-
|
|
-struct page *vmemmap __ro_after_init;
|
|
-EXPORT_SYMBOL(vmemmap);
|
|
-
|
|
/*
|
|
* We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
|
|
* memory as some devices, namely the Raspberry Pi 4, have peripherals with
|
|
@@ -290,20 +284,6 @@ void __init arm64_memblock_init(void)
|
|
memstart_addr = round_down(memblock_start_of_DRAM(),
|
|
ARM64_MEMSTART_ALIGN);
|
|
|
|
- physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
|
|
-
|
|
- vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
|
|
-
|
|
- /*
|
|
- * If we are running with a 52-bit kernel VA config on a system that
|
|
- * does not support it, we have to offset our vmemmap and physvirt_offset
|
|
- * s.t. we avoid the 52-bit portion of the direct linear map
|
|
- */
|
|
- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
|
|
- vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
|
|
- physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
|
|
- }
|
|
-
|
|
/*
|
|
* Remove the memory that we will not be able to cover with the
|
|
* linear mapping. Take care not to clip the kernel which may be
|
|
@@ -318,6 +298,16 @@ void __init arm64_memblock_init(void)
|
|
memblock_remove(0, memstart_addr);
|
|
}
|
|
|
|
+ /*
|
|
+ * If we are running with a 52-bit kernel VA config on a system that
|
|
+ * does not support it, we have to place the available physical
|
|
+ * memory in the 48-bit addressable part of the linear region, i.e.,
|
|
+ * we have to move it upward. Since memstart_addr represents the
|
|
+ * physical address of PAGE_OFFSET, we have to *subtract* from it.
|
|
+ */
|
|
+ if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
|
|
+ memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
|
|
+
|
|
/*
|
|
* Apply the memory limit if it was set. Since the kernel may be loaded
|
|
* high up in memory, add back the kernel region that must be accessible
|
|
diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
|
|
index 9ef4ec0aea008..59f7dfe50a4d0 100644
|
|
--- a/arch/m68k/coldfire/device.c
|
|
+++ b/arch/m68k/coldfire/device.c
|
|
@@ -554,7 +554,7 @@ static struct platform_device mcf_edma = {
|
|
};
|
|
#endif /* IS_ENABLED(CONFIG_MCF_EDMA) */
|
|
|
|
-#if IS_ENABLED(CONFIG_MMC)
|
|
+#ifdef MCFSDHC_BASE
|
|
static struct mcf_esdhc_platform_data mcf_esdhc_data = {
|
|
.max_bus_width = 4,
|
|
.cd_type = ESDHC_CD_NONE,
|
|
@@ -579,7 +579,7 @@ static struct platform_device mcf_esdhc = {
|
|
.resource = mcf_esdhc_resources,
|
|
.dev.platform_data = &mcf_esdhc_data,
|
|
};
|
|
-#endif /* IS_ENABLED(CONFIG_MMC) */
|
|
+#endif /* MCFSDHC_BASE */
|
|
|
|
static struct platform_device *mcf_devices[] __initdata = {
|
|
&mcf_uart,
|
|
@@ -613,7 +613,7 @@ static struct platform_device *mcf_devices[] __initdata = {
|
|
#if IS_ENABLED(CONFIG_MCF_EDMA)
|
|
&mcf_edma,
|
|
#endif
|
|
-#if IS_ENABLED(CONFIG_MMC)
|
|
+#ifdef MCFSDHC_BASE
|
|
&mcf_esdhc,
|
|
#endif
|
|
};
|
|
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
|
|
index 2e87a9b6d312f..63bce836b9f10 100644
|
|
--- a/arch/microblaze/include/asm/Kbuild
|
|
+++ b/arch/microblaze/include/asm/Kbuild
|
|
@@ -1,7 +1,6 @@
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
generated-y += syscall_table.h
|
|
generic-y += extable.h
|
|
-generic-y += hw_irq.h
|
|
generic-y += kvm_para.h
|
|
generic-y += local64.h
|
|
generic-y += mcs_spinlock.h
|
|
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
|
|
index 3f9ae3585ab98..80c9534148821 100644
|
|
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
|
|
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
|
|
@@ -13,20 +13,19 @@
|
|
*/
|
|
#define MAX_EA_BITS_PER_CONTEXT 46
|
|
|
|
-#define REGION_SHIFT (MAX_EA_BITS_PER_CONTEXT - 2)
|
|
|
|
/*
|
|
- * Our page table limit us to 64TB. Hence for the kernel mapping,
|
|
- * each MAP area is limited to 16 TB.
|
|
- * The four map areas are: linear mapping, vmap, IO and vmemmap
|
|
+ * Our page table limit us to 64TB. For 64TB physical memory, we only need 64GB
|
|
+ * of vmemmap space. To better support sparse memory layout, we use 61TB
|
|
+ * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmememmap.
|
|
*/
|
|
+#define REGION_SHIFT (40)
|
|
#define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT)
|
|
|
|
/*
|
|
- * Define the address range of the kernel non-linear virtual area
|
|
- * 16TB
|
|
+ * Define the address range of the kernel non-linear virtual area (61TB)
|
|
*/
|
|
-#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000)
|
|
+#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000)
|
|
|
|
#ifndef __ASSEMBLY__
|
|
#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
|
|
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
|
|
index 414d209f45bbe..c711fe8901109 100644
|
|
--- a/arch/powerpc/include/asm/drmem.h
|
|
+++ b/arch/powerpc/include/asm/drmem.h
|
|
@@ -8,14 +8,13 @@
|
|
#ifndef _ASM_POWERPC_LMB_H
|
|
#define _ASM_POWERPC_LMB_H
|
|
|
|
+#include <linux/sched.h>
|
|
+
|
|
struct drmem_lmb {
|
|
u64 base_addr;
|
|
u32 drc_index;
|
|
u32 aa_index;
|
|
u32 flags;
|
|
-#ifdef CONFIG_MEMORY_HOTPLUG
|
|
- int nid;
|
|
-#endif
|
|
};
|
|
|
|
struct drmem_lmb_info {
|
|
@@ -26,8 +25,22 @@ struct drmem_lmb_info {
|
|
|
|
extern struct drmem_lmb_info *drmem_info;
|
|
|
|
+static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
|
|
+ const struct drmem_lmb *start)
|
|
+{
|
|
+ /*
|
|
+ * DLPAR code paths can take several milliseconds per element
|
|
+ * when interacting with firmware. Ensure that we don't
|
|
+ * unfairly monopolize the CPU.
|
|
+ */
|
|
+ if (((++lmb - start) % 16) == 0)
|
|
+ cond_resched();
|
|
+
|
|
+ return lmb;
|
|
+}
|
|
+
|
|
#define for_each_drmem_lmb_in_range(lmb, start, end) \
|
|
- for ((lmb) = (start); (lmb) < (end); (lmb)++)
|
|
+ for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
|
|
|
|
#define for_each_drmem_lmb(lmb) \
|
|
for_each_drmem_lmb_in_range((lmb), \
|
|
@@ -104,22 +117,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
|
|
lmb->aa_index = 0xffffffff;
|
|
}
|
|
|
|
-#ifdef CONFIG_MEMORY_HOTPLUG
|
|
-static inline void lmb_set_nid(struct drmem_lmb *lmb)
|
|
-{
|
|
- lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
|
|
-}
|
|
-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
|
|
-{
|
|
- lmb->nid = -1;
|
|
-}
|
|
-#else
|
|
-static inline void lmb_set_nid(struct drmem_lmb *lmb)
|
|
-{
|
|
-}
|
|
-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
|
|
-{
|
|
-}
|
|
-#endif
|
|
-
|
|
#endif /* _ASM_POWERPC_LMB_H */
|
|
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
|
|
index cb424799da0dc..5a00da670a407 100644
|
|
--- a/arch/powerpc/include/asm/hw_breakpoint.h
|
|
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
|
|
@@ -40,6 +40,7 @@ struct arch_hw_breakpoint {
|
|
#else
|
|
#define HW_BREAKPOINT_SIZE 0x8
|
|
#endif
|
|
+#define HW_BREAKPOINT_SIZE_QUADWORD 0x10
|
|
|
|
#define DABR_MAX_LEN 8
|
|
#define DAWR_MAX_LEN 512
|
|
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
|
|
index 88e6c78100d9b..c750afc62887c 100644
|
|
--- a/arch/powerpc/include/asm/reg.h
|
|
+++ b/arch/powerpc/include/asm/reg.h
|
|
@@ -815,7 +815,7 @@
|
|
#define THRM1_TIN (1 << 31)
|
|
#define THRM1_TIV (1 << 30)
|
|
#define THRM1_THRES(x) ((x&0x7f)<<23)
|
|
-#define THRM3_SITV(x) ((x&0x3fff)<<1)
|
|
+#define THRM3_SITV(x) ((x & 0x1fff) << 1)
|
|
#define THRM1_TID (1<<2)
|
|
#define THRM1_TIE (1<<1)
|
|
#define THRM1_V (1<<0)
|
|
diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
|
|
index 85580b30aba48..7546402d796af 100644
|
|
--- a/arch/powerpc/include/asm/svm.h
|
|
+++ b/arch/powerpc/include/asm/svm.h
|
|
@@ -15,6 +15,8 @@ static inline bool is_secure_guest(void)
|
|
return mfmsr() & MSR_S;
|
|
}
|
|
|
|
+void __init svm_swiotlb_init(void);
|
|
+
|
|
void dtl_cache_ctor(void *addr);
|
|
#define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL)
|
|
|
|
@@ -25,6 +27,8 @@ static inline bool is_secure_guest(void)
|
|
return false;
|
|
}
|
|
|
|
+static inline void svm_swiotlb_init(void) {}
|
|
+
|
|
#define get_dtl_cache_ctor() NULL
|
|
|
|
#endif /* CONFIG_PPC_SVM */
|
|
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
|
|
index 862985cf51804..cf87bbdcfdcb2 100644
|
|
--- a/arch/powerpc/include/asm/tlb.h
|
|
+++ b/arch/powerpc/include/asm/tlb.h
|
|
@@ -67,19 +67,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
|
|
return false;
|
|
return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
|
|
}
|
|
-static inline void mm_reset_thread_local(struct mm_struct *mm)
|
|
-{
|
|
- WARN_ON(atomic_read(&mm->context.copros) > 0);
|
|
- /*
|
|
- * It's possible for mm_access to take a reference on mm_users to
|
|
- * access the remote mm from another thread, but it's not allowed
|
|
- * to set mm_cpumask, so mm_users may be > 1 here.
|
|
- */
|
|
- WARN_ON(current->mm != mm);
|
|
- atomic_set(&mm->context.active_cpus, 1);
|
|
- cpumask_clear(mm_cpumask(mm));
|
|
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
|
|
-}
|
|
#else /* CONFIG_PPC_BOOK3S_64 */
|
|
static inline int mm_is_thread_local(struct mm_struct *mm)
|
|
{
|
|
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
|
|
index c55e67bab2710..2190be70c7fd9 100644
|
|
--- a/arch/powerpc/kernel/hw_breakpoint.c
|
|
+++ b/arch/powerpc/kernel/hw_breakpoint.c
|
|
@@ -519,9 +519,17 @@ static bool ea_hw_range_overlaps(unsigned long ea, int size,
|
|
struct arch_hw_breakpoint *info)
|
|
{
|
|
unsigned long hw_start_addr, hw_end_addr;
|
|
+ unsigned long align_size = HW_BREAKPOINT_SIZE;
|
|
|
|
- hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
|
|
- hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
|
|
+ /*
|
|
+ * On p10 predecessors, quadword is handle differently then
|
|
+ * other instructions.
|
|
+ */
|
|
+ if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
|
|
+ align_size = HW_BREAKPOINT_SIZE_QUADWORD;
|
|
+
|
|
+ hw_start_addr = ALIGN_DOWN(info->address, align_size);
|
|
+ hw_end_addr = ALIGN(info->address + info->len, align_size);
|
|
|
|
return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
|
|
}
|
|
@@ -635,6 +643,8 @@ static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
|
|
if (*type == CACHEOP) {
|
|
*size = cache_op_size();
|
|
*ea &= ~(*size - 1);
|
|
+ } else if (*type == LOAD_VMX || *type == STORE_VMX) {
|
|
+ *ea &= ~(*size - 1);
|
|
}
|
|
}
|
|
|
|
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
|
|
index 05b1cc0e009e4..3a22281a8264e 100644
|
|
--- a/arch/powerpc/kernel/irq.c
|
|
+++ b/arch/powerpc/kernel/irq.c
|
|
@@ -214,7 +214,7 @@ void replay_soft_interrupts(void)
|
|
struct pt_regs regs;
|
|
|
|
ppc_save_regs(&regs);
|
|
- regs.softe = IRQS_ALL_DISABLED;
|
|
+ regs.softe = IRQS_ENABLED;
|
|
|
|
again:
|
|
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
|
|
@@ -368,6 +368,12 @@ notrace void arch_local_irq_restore(unsigned long mask)
|
|
}
|
|
}
|
|
|
|
+ /*
|
|
+ * Disable preempt here, so that the below preempt_enable will
|
|
+ * perform resched if required (a replayed interrupt may set
|
|
+ * need_resched).
|
|
+ */
|
|
+ preempt_disable();
|
|
irq_soft_mask_set(IRQS_ALL_DISABLED);
|
|
trace_hardirqs_off();
|
|
|
|
@@ -377,6 +383,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
|
|
trace_hardirqs_on();
|
|
irq_soft_mask_set(IRQS_ENABLED);
|
|
__hard_irq_enable();
|
|
+ preempt_enable();
|
|
}
|
|
EXPORT_SYMBOL(arch_local_irq_restore);
|
|
|
|
diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
|
|
index 697c7e4b5877f..8bd8d8de5c40b 100644
|
|
--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
|
|
+++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
|
|
@@ -219,6 +219,7 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf
|
|
brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE);
|
|
brk.type = HW_BRK_TYPE_TRANSLATE;
|
|
brk.len = DABR_MAX_LEN;
|
|
+ brk.hw_len = DABR_MAX_LEN;
|
|
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
|
|
brk.type |= HW_BRK_TYPE_READ;
|
|
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
|
|
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
|
|
index e2ab8a111b693..0b4694b8d2482 100644
|
|
--- a/arch/powerpc/kernel/tau_6xx.c
|
|
+++ b/arch/powerpc/kernel/tau_6xx.c
|
|
@@ -13,13 +13,14 @@
|
|
*/
|
|
|
|
#include <linux/errno.h>
|
|
-#include <linux/jiffies.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/param.h>
|
|
#include <linux/string.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/init.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/workqueue.h>
|
|
|
|
#include <asm/io.h>
|
|
#include <asm/reg.h>
|
|
@@ -39,9 +40,7 @@ static struct tau_temp
|
|
unsigned char grew;
|
|
} tau[NR_CPUS];
|
|
|
|
-struct timer_list tau_timer;
|
|
-
|
|
-#undef DEBUG
|
|
+static bool tau_int_enable;
|
|
|
|
/* TODO: put these in a /proc interface, with some sanity checks, and maybe
|
|
* dynamic adjustment to minimize # of interrupts */
|
|
@@ -50,72 +49,49 @@ struct timer_list tau_timer;
|
|
#define step_size 2 /* step size when temp goes out of range */
|
|
#define window_expand 1 /* expand the window by this much */
|
|
/* configurable values for shrinking the window */
|
|
-#define shrink_timer 2*HZ /* period between shrinking the window */
|
|
+#define shrink_timer 2000 /* period between shrinking the window */
|
|
#define min_window 2 /* minimum window size, degrees C */
|
|
|
|
static void set_thresholds(unsigned long cpu)
|
|
{
|
|
-#ifdef CONFIG_TAU_INT
|
|
- /*
|
|
- * setup THRM1,
|
|
- * threshold, valid bit, enable interrupts, interrupt when below threshold
|
|
- */
|
|
- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
|
|
+ u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
|
|
|
|
- /* setup THRM2,
|
|
- * threshold, valid bit, enable interrupts, interrupt when above threshold
|
|
- */
|
|
- mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
|
|
-#else
|
|
- /* same thing but don't enable interrupts */
|
|
- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
|
|
- mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
|
|
-#endif
|
|
+ /* setup THRM1, threshold, valid bit, interrupt when below threshold */
|
|
+ mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
|
|
+
|
|
+ /* setup THRM2, threshold, valid bit, interrupt when above threshold */
|
|
+ mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
|
|
}
|
|
|
|
static void TAUupdate(int cpu)
|
|
{
|
|
- unsigned thrm;
|
|
-
|
|
-#ifdef DEBUG
|
|
- printk("TAUupdate ");
|
|
-#endif
|
|
+ u32 thrm;
|
|
+ u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
|
|
|
|
/* if both thresholds are crossed, the step_sizes cancel out
|
|
* and the window winds up getting expanded twice. */
|
|
- if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
|
|
- if(thrm & THRM1_TIN){ /* crossed low threshold */
|
|
- if (tau[cpu].low >= step_size){
|
|
- tau[cpu].low -= step_size;
|
|
- tau[cpu].high -= (step_size - window_expand);
|
|
- }
|
|
- tau[cpu].grew = 1;
|
|
-#ifdef DEBUG
|
|
- printk("low threshold crossed ");
|
|
-#endif
|
|
+ thrm = mfspr(SPRN_THRM1);
|
|
+ if ((thrm & bits) == bits) {
|
|
+ mtspr(SPRN_THRM1, 0);
|
|
+
|
|
+ if (tau[cpu].low >= step_size) {
|
|
+ tau[cpu].low -= step_size;
|
|
+ tau[cpu].high -= (step_size - window_expand);
|
|
}
|
|
+ tau[cpu].grew = 1;
|
|
+ pr_debug("%s: low threshold crossed\n", __func__);
|
|
}
|
|
- if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
|
|
- if(thrm & THRM1_TIN){ /* crossed high threshold */
|
|
- if (tau[cpu].high <= 127-step_size){
|
|
- tau[cpu].low += (step_size - window_expand);
|
|
- tau[cpu].high += step_size;
|
|
- }
|
|
- tau[cpu].grew = 1;
|
|
-#ifdef DEBUG
|
|
- printk("high threshold crossed ");
|
|
-#endif
|
|
+ thrm = mfspr(SPRN_THRM2);
|
|
+ if ((thrm & bits) == bits) {
|
|
+ mtspr(SPRN_THRM2, 0);
|
|
+
|
|
+ if (tau[cpu].high <= 127 - step_size) {
|
|
+ tau[cpu].low += (step_size - window_expand);
|
|
+ tau[cpu].high += step_size;
|
|
}
|
|
+ tau[cpu].grew = 1;
|
|
+ pr_debug("%s: high threshold crossed\n", __func__);
|
|
}
|
|
-
|
|
-#ifdef DEBUG
|
|
- printk("grew = %d\n", tau[cpu].grew);
|
|
-#endif
|
|
-
|
|
-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
|
|
- set_thresholds(cpu);
|
|
-#endif
|
|
-
|
|
}
|
|
|
|
#ifdef CONFIG_TAU_INT
|
|
@@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs)
|
|
static void tau_timeout(void * info)
|
|
{
|
|
int cpu;
|
|
- unsigned long flags;
|
|
int size;
|
|
int shrink;
|
|
|
|
- /* disabling interrupts *should* be okay */
|
|
- local_irq_save(flags);
|
|
cpu = smp_processor_id();
|
|
|
|
-#ifndef CONFIG_TAU_INT
|
|
- TAUupdate(cpu);
|
|
-#endif
|
|
+ if (!tau_int_enable)
|
|
+ TAUupdate(cpu);
|
|
+
|
|
+ /* Stop thermal sensor comparisons and interrupts */
|
|
+ mtspr(SPRN_THRM3, 0);
|
|
|
|
size = tau[cpu].high - tau[cpu].low;
|
|
if (size > min_window && ! tau[cpu].grew) {
|
|
@@ -173,32 +148,26 @@ static void tau_timeout(void * info)
|
|
|
|
set_thresholds(cpu);
|
|
|
|
- /*
|
|
- * Do the enable every time, since otherwise a bunch of (relatively)
|
|
- * complex sleep code needs to be added. One mtspr every time
|
|
- * tau_timeout is called is probably not a big deal.
|
|
- *
|
|
- * Enable thermal sensor and set up sample interval timer
|
|
- * need 20 us to do the compare.. until a nice 'cpu_speed' function
|
|
- * call is implemented, just assume a 500 mhz clock. It doesn't really
|
|
- * matter if we take too long for a compare since it's all interrupt
|
|
- * driven anyway.
|
|
- *
|
|
- * use a extra long time.. (60 us @ 500 mhz)
|
|
+ /* Restart thermal sensor comparisons and interrupts.
|
|
+ * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
|
|
+ * recommends that "the maximum value be set in THRM3 under all
|
|
+ * conditions."
|
|
*/
|
|
- mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
|
|
-
|
|
- local_irq_restore(flags);
|
|
+ mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
|
|
}
|
|
|
|
-static void tau_timeout_smp(struct timer_list *unused)
|
|
-{
|
|
+static struct workqueue_struct *tau_workq;
|
|
|
|
- /* schedule ourselves to be run again */
|
|
- mod_timer(&tau_timer, jiffies + shrink_timer) ;
|
|
+static void tau_work_func(struct work_struct *work)
|
|
+{
|
|
+ msleep(shrink_timer);
|
|
on_each_cpu(tau_timeout, NULL, 0);
|
|
+ /* schedule ourselves to be run again */
|
|
+ queue_work(tau_workq, work);
|
|
}
|
|
|
|
+DECLARE_WORK(tau_work, tau_work_func);
|
|
+
|
|
/*
|
|
* setup the TAU
|
|
*
|
|
@@ -231,21 +200,19 @@ static int __init TAU_init(void)
|
|
return 1;
|
|
}
|
|
|
|
+ tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
|
|
+ !strcmp(cur_cpu_spec->platform, "ppc750");
|
|
|
|
- /* first, set up the window shrinking timer */
|
|
- timer_setup(&tau_timer, tau_timeout_smp, 0);
|
|
- tau_timer.expires = jiffies + shrink_timer;
|
|
- add_timer(&tau_timer);
|
|
+ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
|
|
+ if (!tau_workq)
|
|
+ return -ENOMEM;
|
|
|
|
on_each_cpu(TAU_init_smp, NULL, 0);
|
|
|
|
- printk("Thermal assist unit ");
|
|
-#ifdef CONFIG_TAU_INT
|
|
- printk("using interrupts, ");
|
|
-#else
|
|
- printk("using timers, ");
|
|
-#endif
|
|
- printk("shrink_timer: %d jiffies\n", shrink_timer);
|
|
+ queue_work(tau_workq, &tau_work);
|
|
+
|
|
+ pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
|
|
+ tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
|
|
tau_initialized = 1;
|
|
|
|
return 0;
|
|
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
|
|
index b5cc9b23cf024..277a07772e7d6 100644
|
|
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
|
|
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
|
|
@@ -644,19 +644,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
|
|
struct mm_struct *mm = arg;
|
|
unsigned long pid = mm->context.id;
|
|
|
|
+ /*
|
|
+ * A kthread could have done a mmget_not_zero() after the flushing CPU
|
|
+ * checked mm_is_singlethreaded, and be in the process of
|
|
+ * kthread_use_mm when interrupted here. In that case, current->mm will
|
|
+ * be set to mm, because kthread_use_mm() setting ->mm and switching to
|
|
+ * the mm is done with interrupts off.
|
|
+ */
|
|
if (current->mm == mm)
|
|
- return; /* Local CPU */
|
|
+ goto out_flush;
|
|
|
|
if (current->active_mm == mm) {
|
|
- /*
|
|
- * Must be a kernel thread because sender is single-threaded.
|
|
- */
|
|
- BUG_ON(current->mm);
|
|
+ WARN_ON_ONCE(current->mm != NULL);
|
|
+ /* Is a kernel thread and is using mm as the lazy tlb */
|
|
mmgrab(&init_mm);
|
|
- switch_mm(mm, &init_mm, current);
|
|
current->active_mm = &init_mm;
|
|
+ switch_mm_irqs_off(mm, &init_mm, current);
|
|
mmdrop(mm);
|
|
}
|
|
+
|
|
+ atomic_dec(&mm->context.active_cpus);
|
|
+ cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
|
|
+
|
|
+out_flush:
|
|
_tlbiel_pid(pid, RIC_FLUSH_ALL);
|
|
}
|
|
|
|
@@ -671,7 +681,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
|
|
*/
|
|
smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
|
|
(void *)mm, 1);
|
|
- mm_reset_thread_local(mm);
|
|
}
|
|
|
|
void radix__flush_tlb_mm(struct mm_struct *mm)
|
|
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
|
|
index 59327cefbc6a6..873fcfc7b8756 100644
|
|
--- a/arch/powerpc/mm/drmem.c
|
|
+++ b/arch/powerpc/mm/drmem.c
|
|
@@ -362,10 +362,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
|
|
if (!drmem_info->lmbs)
|
|
return;
|
|
|
|
- for_each_drmem_lmb(lmb) {
|
|
+ for_each_drmem_lmb(lmb)
|
|
read_drconf_v1_cell(lmb, &prop);
|
|
- lmb_set_nid(lmb);
|
|
- }
|
|
}
|
|
|
|
static void __init init_drmem_v2_lmbs(const __be32 *prop)
|
|
@@ -410,8 +408,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
|
|
|
|
lmb->aa_index = dr_cell.aa_index;
|
|
lmb->flags = dr_cell.flags;
|
|
-
|
|
- lmb_set_nid(lmb);
|
|
}
|
|
}
|
|
}
|
|
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
|
|
index 019b0c0bbbf31..ca91d04d0a7ae 100644
|
|
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
|
|
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
|
|
@@ -121,8 +121,7 @@ void __init kasan_mmu_init(void)
|
|
{
|
|
int ret;
|
|
|
|
- if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
|
|
- IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
|
|
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
|
|
ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
|
|
|
|
if (ret)
|
|
@@ -133,11 +132,11 @@ void __init kasan_mmu_init(void)
|
|
void __init kasan_init(void)
|
|
{
|
|
struct memblock_region *reg;
|
|
+ int ret;
|
|
|
|
for_each_memblock(memory, reg) {
|
|
phys_addr_t base = reg->base;
|
|
phys_addr_t top = min(base + reg->size, total_lowmem);
|
|
- int ret;
|
|
|
|
if (base >= top)
|
|
continue;
|
|
@@ -147,6 +146,13 @@ void __init kasan_init(void)
|
|
panic("kasan: kasan_init_region() failed");
|
|
}
|
|
|
|
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
|
|
+ ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
|
|
+
|
|
+ if (ret)
|
|
+ panic("kasan: kasan_init_shadow_page_tables() failed");
|
|
+ }
|
|
+
|
|
kasan_remap_early_shadow_ro();
|
|
|
|
clear_page(kasan_early_shadow_page);
|
|
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
|
|
index c2c11eb8dcfca..0f21bcb16405a 100644
|
|
--- a/arch/powerpc/mm/mem.c
|
|
+++ b/arch/powerpc/mm/mem.c
|
|
@@ -50,6 +50,7 @@
|
|
#include <asm/swiotlb.h>
|
|
#include <asm/rtas.h>
|
|
#include <asm/kasan.h>
|
|
+#include <asm/svm.h>
|
|
|
|
#include <mm/mmu_decl.h>
|
|
|
|
@@ -290,7 +291,10 @@ void __init mem_init(void)
|
|
* back to to-down.
|
|
*/
|
|
memblock_set_bottom_up(true);
|
|
- swiotlb_init(0);
|
|
+ if (is_secure_guest())
|
|
+ svm_swiotlb_init();
|
|
+ else
|
|
+ swiotlb_init(0);
|
|
#endif
|
|
|
|
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
|
|
diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
|
|
index e608f9db12ddc..8965b4463d433 100644
|
|
--- a/arch/powerpc/perf/hv-gpci-requests.h
|
|
+++ b/arch/powerpc/perf/hv-gpci-requests.h
|
|
@@ -95,7 +95,7 @@ REQUEST(__field(0, 8, partition_id)
|
|
|
|
#define REQUEST_NAME system_performance_capabilities
|
|
#define REQUEST_NUM 0x40
|
|
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__field(0, 1, perf_collect_privileged)
__field(0x1, 1, capability_mask)
@@ -223,7 +223,7 @@ REQUEST(__field(0, 2, partition_id)
#define REQUEST_NAME system_hypervisor_times
#define REQUEST_NUM 0xF0
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
__count(0x8, 8, time_spent_processing_virtual_processor_timers)
@@ -234,7 +234,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
#define REQUEST_NAME system_tlbie_count_and_time
#define REQUEST_NUM 0xF4
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__count(0, 8, tlbie_instructions_issued)
/*
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
|
|
index 4c86da5eb28ab..0b5c8f4fbdbfd 100644
|
|
--- a/arch/powerpc/perf/isa207-common.c
|
|
+++ b/arch/powerpc/perf/isa207-common.c
|
|
@@ -269,6 +269,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
|
|
|
|
mask |= CNST_PMC_MASK(pmc);
|
|
value |= CNST_PMC_VAL(pmc);
|
|
+
|
|
+ /*
|
|
+ * PMC5 and PMC6 are used to count cycles and instructions and
|
|
+ * they do not support most of the constraint bits. Add a check
|
|
+ * to exclude PMC5/6 from most of the constraints except for
|
|
+ * EBB/BHRB.
|
|
+ */
|
|
+ if (pmc >= 5)
|
|
+ goto ebb_bhrb;
|
|
}
|
|
|
|
if (pmc <= 4) {
|
|
@@ -335,6 +344,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
|
|
}
|
|
}
|
|
|
|
+ebb_bhrb:
|
|
if (!pmc && ebb)
|
|
/* EBB events must specify the PMC */
|
|
return -1;
|
|
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
|
|
index fb7515b4fa9c6..b439b027a42f1 100644
|
|
--- a/arch/powerpc/platforms/Kconfig
|
|
+++ b/arch/powerpc/platforms/Kconfig
|
|
@@ -223,12 +223,11 @@ config TAU
|
|
temperature within 2-4 degrees Celsius. This option shows the current
|
|
on-die temperature in /proc/cpuinfo if the cpu supports it.
|
|
|
|
- Unfortunately, on some chip revisions, this sensor is very inaccurate
|
|
- and in many cases, does not work at all, so don't assume the cpu
|
|
- temp is actually what /proc/cpuinfo says it is.
|
|
+ Unfortunately, this sensor is very inaccurate when uncalibrated, so
|
|
+ don't assume the cpu temp is actually what /proc/cpuinfo says it is.
|
|
|
|
config TAU_INT
|
|
- bool "Interrupt driven TAU driver (DANGEROUS)"
|
|
+ bool "Interrupt driven TAU driver (EXPERIMENTAL)"
|
|
depends on TAU
|
|
help
|
|
The TAU supports an interrupt driven mode which causes an interrupt
|
|
@@ -236,12 +235,7 @@ config TAU_INT
|
|
to get notified the temp has exceeded a range. With this option off,
|
|
a timer is used to re-check the temperature periodically.
|
|
|
|
- However, on some cpus it appears that the TAU interrupt hardware
|
|
- is buggy and can cause a situation which would lead unexplained hard
|
|
- lockups.
|
|
-
|
|
- Unless you are extending the TAU driver, or enjoy kernel/hardware
|
|
- debugging, leave this option off.
|
|
+ If in doubt, say N here.
|
|
|
|
config TAU_AVERAGE
|
|
bool "Average high and low temp"
|
|
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
|
|
index 543c816fa99ef..0e6693bacb7e7 100644
|
|
--- a/arch/powerpc/platforms/powernv/opal-dump.c
|
|
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
|
|
@@ -318,15 +318,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
|
|
return count;
|
|
}
|
|
|
|
-static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
|
|
- uint32_t type)
|
|
+static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
|
|
{
|
|
struct dump_obj *dump;
|
|
int rc;
|
|
|
|
dump = kzalloc(sizeof(*dump), GFP_KERNEL);
|
|
if (!dump)
|
|
- return NULL;
|
|
+ return;
|
|
|
|
dump->kobj.kset = dump_kset;
|
|
|
|
@@ -346,21 +345,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
|
|
rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
|
|
if (rc) {
|
|
kobject_put(&dump->kobj);
|
|
- return NULL;
|
|
+ return;
|
|
}
|
|
|
|
+ /*
|
|
+ * As soon as the sysfs file for this dump is created/activated there is
|
|
+ * a chance the opal_errd daemon (or any userspace) might read and
|
|
+ * acknowledge the dump before kobject_uevent() is called. If that
|
|
+ * happens then there is a potential race between
|
|
+ * dump_ack_store->kobject_put() and kobject_uevent() which leads to a
|
|
+ * use-after-free of a kernfs object resulting in a kernel crash.
|
|
+ *
|
|
+ * To avoid that, we need to take a reference on behalf of the bin file,
|
|
+ * so that our reference remains valid while we call kobject_uevent().
|
|
+ * We then drop our reference before exiting the function, leaving the
|
|
+ * bin file to drop the last reference (if it hasn't already).
|
|
+ */
|
|
+
|
|
+ /* Take a reference for the bin file */
|
|
+ kobject_get(&dump->kobj);
|
|
rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
|
|
- if (rc) {
|
|
+ if (rc == 0) {
|
|
+ kobject_uevent(&dump->kobj, KOBJ_ADD);
|
|
+
|
|
+ pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
|
|
+ __func__, dump->id, dump->size);
|
|
+ } else {
|
|
+ /* Drop reference count taken for bin file */
|
|
kobject_put(&dump->kobj);
|
|
- return NULL;
|
|
}
|
|
|
|
- pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
|
|
- __func__, dump->id, dump->size);
|
|
-
|
|
- kobject_uevent(&dump->kobj, KOBJ_ADD);
|
|
-
|
|
- return dump;
|
|
+ /* Drop our reference */
|
|
+ kobject_put(&dump->kobj);
|
|
+ return;
|
|
}
|
|
|
|
static irqreturn_t process_dump(int irq, void *data)
|
|
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
index 8b748690dac22..9f236149b4027 100644
|
|
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
|
|
@@ -356,25 +356,32 @@ static int dlpar_add_lmb(struct drmem_lmb *);
|
|
|
|
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
|
|
{
|
|
+ struct memory_block *mem_block;
|
|
unsigned long block_sz;
|
|
int rc;
|
|
|
|
if (!lmb_is_removable(lmb))
|
|
return -EINVAL;
|
|
|
|
+ mem_block = lmb_to_memblock(lmb);
|
|
+ if (mem_block == NULL)
|
|
+ return -EINVAL;
|
|
+
|
|
rc = dlpar_offline_lmb(lmb);
|
|
- if (rc)
|
|
+ if (rc) {
|
|
+ put_device(&mem_block->dev);
|
|
return rc;
|
|
+ }
|
|
|
|
block_sz = pseries_memory_block_size();
|
|
|
|
- __remove_memory(lmb->nid, lmb->base_addr, block_sz);
|
|
+ __remove_memory(mem_block->nid, lmb->base_addr, block_sz);
|
|
+ put_device(&mem_block->dev);
|
|
|
|
/* Update memory regions for memory remove */
|
|
memblock_remove(lmb->base_addr, block_sz);
|
|
|
|
invalidate_lmb_associativity_index(lmb);
|
|
- lmb_clear_nid(lmb);
|
|
lmb->flags &= ~DRCONF_MEM_ASSIGNED;
|
|
|
|
return 0;
|
|
@@ -631,7 +638,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
|
|
static int dlpar_add_lmb(struct drmem_lmb *lmb)
|
|
{
|
|
unsigned long block_sz;
|
|
- int rc;
|
|
+ int nid, rc;
|
|
|
|
if (lmb->flags & DRCONF_MEM_ASSIGNED)
|
|
return -EINVAL;
|
|
@@ -642,11 +649,13 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
|
|
return rc;
|
|
}
|
|
|
|
- lmb_set_nid(lmb);
|
|
block_sz = memory_block_size_bytes();
|
|
|
|
+ /* Find the node id for this address. */
|
|
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
|
|
+
|
|
/* Add the memory */
|
|
- rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
|
|
+ rc = __add_memory(nid, lmb->base_addr, block_sz);
|
|
if (rc) {
|
|
invalidate_lmb_associativity_index(lmb);
|
|
return rc;
|
|
@@ -654,9 +663,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
|
|
|
|
rc = dlpar_online_lmb(lmb);
|
|
if (rc) {
|
|
- __remove_memory(lmb->nid, lmb->base_addr, block_sz);
|
|
+ __remove_memory(nid, lmb->base_addr, block_sz);
|
|
invalidate_lmb_associativity_index(lmb);
|
|
- lmb_clear_nid(lmb);
|
|
} else {
|
|
lmb->flags |= DRCONF_MEM_ASSIGNED;
|
|
}
|
|
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
|
|
index 9c569078a09fd..6c2c66450dac8 100644
|
|
--- a/arch/powerpc/platforms/pseries/papr_scm.c
|
|
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
|
|
@@ -702,6 +702,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
|
|
p->bus_desc.of_node = p->pdev->dev.of_node;
|
|
p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
|
|
|
|
+ /* Set the dimm command family mask to accept PDSMs */
|
|
+ set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask);
|
|
+
|
|
if (!p->bus_desc.provider_name)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
|
|
index 13c86a292c6d7..b2b245b25edba 100644
|
|
--- a/arch/powerpc/platforms/pseries/ras.c
|
|
+++ b/arch/powerpc/platforms/pseries/ras.c
|
|
@@ -521,18 +521,55 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
|
|
return 0; /* need to perform reset */
|
|
}
|
|
|
|
+static int mce_handle_err_realmode(int disposition, u8 error_type)
|
|
+{
|
|
+#ifdef CONFIG_PPC_BOOK3S_64
|
|
+ if (disposition == RTAS_DISP_NOT_RECOVERED) {
|
|
+ switch (error_type) {
|
|
+ case MC_ERROR_TYPE_SLB:
|
|
+ case MC_ERROR_TYPE_ERAT:
|
|
+ /*
|
|
+ * Store the old slb content in paca before flushing.
|
|
+ * Print this when we go to virtual mode.
|
|
+ * There are chances that we may hit MCE again if there
|
|
+ * is a parity error on the SLB entry we trying to read
|
|
+ * for saving. Hence limit the slb saving to single
|
|
+ * level of recursion.
|
|
+ */
|
|
+ if (local_paca->in_mce == 1)
|
|
+ slb_save_contents(local_paca->mce_faulty_slbs);
|
|
+ flush_and_reload_slb();
|
|
+ disposition = RTAS_DISP_FULLY_RECOVERED;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+ } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
|
|
+ /* Platform corrected itself but could be degraded */
|
|
+ pr_err("MCE: limited recovery, system may be degraded\n");
|
|
+ disposition = RTAS_DISP_FULLY_RECOVERED;
|
|
+ }
|
|
+#endif
|
|
+ return disposition;
|
|
+}
|
|
|
|
-static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
|
|
+static int mce_handle_err_virtmode(struct pt_regs *regs,
|
|
+ struct rtas_error_log *errp,
|
|
+ struct pseries_mc_errorlog *mce_log,
|
|
+ int disposition)
|
|
{
|
|
struct mce_error_info mce_err = { 0 };
|
|
- unsigned long eaddr = 0, paddr = 0;
|
|
- struct pseries_errorlog *pseries_log;
|
|
- struct pseries_mc_errorlog *mce_log;
|
|
- int disposition = rtas_error_disposition(errp);
|
|
int initiator = rtas_error_initiator(errp);
|
|
int severity = rtas_error_severity(errp);
|
|
+ unsigned long eaddr = 0, paddr = 0;
|
|
u8 error_type, err_sub_type;
|
|
|
|
+ if (!mce_log)
|
|
+ goto out;
|
|
+
|
|
+ error_type = mce_log->error_type;
|
|
+ err_sub_type = rtas_mc_error_sub_type(mce_log);
|
|
+
|
|
if (initiator == RTAS_INITIATOR_UNKNOWN)
|
|
mce_err.initiator = MCE_INITIATOR_UNKNOWN;
|
|
else if (initiator == RTAS_INITIATOR_CPU)
|
|
@@ -571,18 +608,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
|
|
mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
|
|
mce_err.error_class = MCE_ECLASS_UNKNOWN;
|
|
|
|
- if (!rtas_error_extended(errp))
|
|
- goto out;
|
|
-
|
|
- pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
|
|
- if (pseries_log == NULL)
|
|
- goto out;
|
|
-
|
|
- mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
|
|
- error_type = mce_log->error_type;
|
|
- err_sub_type = rtas_mc_error_sub_type(mce_log);
|
|
-
|
|
- switch (mce_log->error_type) {
|
|
+ switch (error_type) {
|
|
case MC_ERROR_TYPE_UE:
|
|
mce_err.error_type = MCE_ERROR_TYPE_UE;
|
|
mce_common_process_ue(regs, &mce_err);
|
|
@@ -682,37 +708,31 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
|
|
mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
|
|
break;
|
|
}
|
|
+out:
|
|
+ save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
|
|
+ &mce_err, regs->nip, eaddr, paddr);
|
|
+ return disposition;
|
|
+}
|
|
|
|
-#ifdef CONFIG_PPC_BOOK3S_64
|
|
- if (disposition == RTAS_DISP_NOT_RECOVERED) {
|
|
- switch (error_type) {
|
|
- case MC_ERROR_TYPE_SLB:
|
|
- case MC_ERROR_TYPE_ERAT:
|
|
- /*
|
|
- * Store the old slb content in paca before flushing.
|
|
- * Print this when we go to virtual mode.
|
|
- * There are chances that we may hit MCE again if there
|
|
- * is a parity error on the SLB entry we trying to read
|
|
- * for saving. Hence limit the slb saving to single
|
|
- * level of recursion.
|
|
- */
|
|
- if (local_paca->in_mce == 1)
|
|
- slb_save_contents(local_paca->mce_faulty_slbs);
|
|
- flush_and_reload_slb();
|
|
- disposition = RTAS_DISP_FULLY_RECOVERED;
|
|
- break;
|
|
- default:
|
|
- break;
|
|
- }
|
|
- } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
|
|
- /* Platform corrected itself but could be degraded */
|
|
- printk(KERN_ERR "MCE: limited recovery, system may "
|
|
- "be degraded\n");
|
|
- disposition = RTAS_DISP_FULLY_RECOVERED;
|
|
- }
|
|
-#endif
|
|
+static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
|
|
+{
|
|
+ struct pseries_errorlog *pseries_log;
|
|
+ struct pseries_mc_errorlog *mce_log = NULL;
|
|
+ int disposition = rtas_error_disposition(errp);
|
|
+ u8 error_type;
|
|
+
|
|
+ if (!rtas_error_extended(errp))
|
|
+ goto out;
|
|
+
|
|
+ pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
|
|
+ if (!pseries_log)
|
|
+ goto out;
|
|
+
|
|
+ mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
|
|
+ error_type = mce_log->error_type;
|
|
+
|
|
+ disposition = mce_handle_err_realmode(disposition, error_type);
|
|
|
|
-out:
|
|
/*
|
|
* Enable translation as we will be accessing per-cpu variables
|
|
* in save_mce_event() which may fall outside RMO region, also
|
|
@@ -723,10 +743,10 @@ out:
|
|
* Note: All the realmode handling like flushing SLB entries for
|
|
* SLB multihit is done by now.
|
|
*/
|
|
+out:
|
|
mtmsr(mfmsr() | MSR_IR | MSR_DR);
|
|
- save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
|
|
- &mce_err, regs->nip, eaddr, paddr);
|
|
-
|
|
+ disposition = mce_handle_err_virtmode(regs, errp, mce_log,
|
|
+ disposition);
|
|
return disposition;
|
|
}
|
|
|
|
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
|
|
index bbb97169bf63e..6268545947b83 100644
|
|
--- a/arch/powerpc/platforms/pseries/rng.c
|
|
+++ b/arch/powerpc/platforms/pseries/rng.c
|
|
@@ -36,6 +36,7 @@ static __init int rng_init(void)
|
|
|
|
ppc_md.get_random_seed = pseries_get_random_long;
|
|
|
|
+ of_node_put(dn);
|
|
return 0;
|
|
}
|
|
machine_subsys_initcall(pseries, rng_init);
|
|
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
|
|
index 40c0637203d5b..81085eb8f2255 100644
|
|
--- a/arch/powerpc/platforms/pseries/svm.c
|
|
+++ b/arch/powerpc/platforms/pseries/svm.c
|
|
@@ -7,6 +7,7 @@
|
|
*/
|
|
|
|
#include <linux/mm.h>
|
|
+#include <linux/memblock.h>
|
|
#include <asm/machdep.h>
|
|
#include <asm/svm.h>
|
|
#include <asm/swiotlb.h>
|
|
@@ -34,6 +35,31 @@ static int __init init_svm(void)
|
|
}
|
|
machine_early_initcall(pseries, init_svm);
|
|
|
|
+/*
|
|
+ * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it
|
|
+ * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have
|
|
+ * any addressing limitation, we don't need to allocate it in low addresses.
|
|
+ */
|
|
+void __init svm_swiotlb_init(void)
|
|
+{
|
|
+ unsigned char *vstart;
|
|
+ unsigned long bytes, io_tlb_nslabs;
|
|
+
|
|
+ io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT);
|
|
+ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
|
|
+
|
|
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
|
|
+
|
|
+ vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
|
|
+ if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
|
|
+ return;
|
|
+
|
|
+ if (io_tlb_start)
|
|
+ memblock_free_early(io_tlb_start,
|
|
+ PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
|
|
+ panic("SVM: Cannot allocate SWIOTLB buffer");
|
|
+}
|
|
+
|
|
int set_memory_encrypted(unsigned long addr, int numpages)
|
|
{
|
|
if (!PAGE_ALIGNED(addr))
|
|
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
|
|
index ad8117148ea3b..21b9d1bf39ff6 100644
|
|
--- a/arch/powerpc/sysdev/xics/icp-hv.c
|
|
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
|
|
@@ -174,6 +174,7 @@ int icp_hv_init(void)
|
|
|
|
icp_ops = &icp_hv_ops;
|
|
|
|
+ of_node_put(np);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
|
|
index 7efe4bc3ccf63..ac5862cee142a 100644
|
|
--- a/arch/powerpc/xmon/xmon.c
|
|
+++ b/arch/powerpc/xmon/xmon.c
|
|
@@ -962,6 +962,7 @@ static void insert_cpu_bpts(void)
|
|
brk.address = dabr[i].address;
|
|
brk.type = (dabr[i].enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
|
|
brk.len = 8;
|
|
+ brk.hw_len = 8;
|
|
__set_breakpoint(i, &brk);
|
|
}
|
|
}
|
|
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
|
|
index 5967f30141563..c93486a9989bc 100644
|
|
--- a/arch/s390/pci/pci_bus.c
|
|
+++ b/arch/s390/pci/pci_bus.c
|
|
@@ -197,9 +197,10 @@ void pcibios_bus_add_device(struct pci_dev *pdev)
|
|
* With pdev->no_vf_scan the common PCI probing code does not
|
|
* perform PF/VF linking.
|
|
*/
|
|
- if (zdev->vfn)
|
|
+ if (zdev->vfn) {
|
|
zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
|
|
-
|
|
+ pdev->no_command_memory = 1;
|
|
+ }
|
|
}
|
|
|
|
static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
|
|
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
|
|
index 8735c468230a5..555203e3e7b45 100644
|
|
--- a/arch/um/drivers/vector_kern.c
|
|
+++ b/arch/um/drivers/vector_kern.c
|
|
@@ -1403,7 +1403,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev,
|
|
kfree(vp->bpf->filter);
|
|
vp->bpf->filter = NULL;
|
|
} else {
|
|
- vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
|
|
+ vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
|
|
if (vp->bpf == NULL) {
|
|
netdev_err(dev, "failed to allocate memory for firmware\n");
|
|
goto flash_fail;
|
|
@@ -1415,7 +1415,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev,
|
|
if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
|
|
goto flash_fail;
|
|
|
|
- vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_KERNEL);
|
|
+ vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
|
|
if (!vp->bpf->filter)
|
|
goto free_buffer;
|
|
|
|
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
|
|
index 25eaa6a0c6583..c07436e89e599 100644
|
|
--- a/arch/um/kernel/time.c
|
|
+++ b/arch/um/kernel/time.c
|
|
@@ -70,13 +70,17 @@ static void time_travel_handle_message(struct um_timetravel_msg *msg,
|
|
* read of the message and write of the ACK.
|
|
*/
|
|
if (mode != TTMH_READ) {
|
|
+ bool disabled = irqs_disabled();
|
|
+
|
|
+ BUG_ON(mode == TTMH_IDLE && !disabled);
|
|
+
|
|
+ if (disabled)
|
|
+ local_irq_enable();
|
|
while (os_poll(1, &time_travel_ext_fd) != 0) {
|
|
- if (mode == TTMH_IDLE) {
|
|
- BUG_ON(!irqs_disabled());
|
|
- local_irq_enable();
|
|
- local_irq_disable();
|
|
- }
|
|
+ /* nothing */
|
|
}
|
|
+ if (disabled)
|
|
+ local_irq_disable();
|
|
}
|
|
|
|
ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
|
|
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
|
|
index c8862696a47b9..7d0394f4ebf97 100644
|
|
--- a/arch/x86/boot/compressed/pgtable_64.c
|
|
+++ b/arch/x86/boot/compressed/pgtable_64.c
|
|
@@ -5,15 +5,6 @@
|
|
#include "pgtable.h"
|
|
#include "../string.h"
|
|
|
|
-/*
|
|
- * __force_order is used by special_insns.h asm code to force instruction
|
|
- * serialization.
|
|
- *
|
|
- * It is not referenced from the code, but GCC < 5 with -fPIE would fail
|
|
- * due to an undefined symbol. Define it to make these ancient GCCs work.
|
|
- */
|
|
-unsigned long __force_order;
|
|
-
|
|
#define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
|
|
#define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
|
|
|
|
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
|
|
index fb616203ce427..be50ef8572cce 100644
|
|
--- a/arch/x86/events/amd/iommu.c
|
|
+++ b/arch/x86/events/amd/iommu.c
|
|
@@ -379,7 +379,7 @@ static __init int _init_events_attrs(void)
|
|
while (amd_iommu_v2_event_descs[i].attr.attr.name)
|
|
i++;
|
|
|
|
- attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
|
|
+ attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
|
|
if (!attrs)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
|
|
index 4103665c6e032..29640b4079af0 100644
|
|
--- a/arch/x86/events/core.c
|
|
+++ b/arch/x86/events/core.c
|
|
@@ -1087,8 +1087,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
|
|
|
|
cpuc->event_list[n] = event;
|
|
n++;
|
|
- if (is_counter_pair(&event->hw))
|
|
+ if (is_counter_pair(&event->hw)) {
|
|
cpuc->n_pair++;
|
|
+ cpuc->n_txn_pair++;
|
|
+ }
|
|
}
|
|
return n;
|
|
}
|
|
@@ -1953,6 +1955,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
|
|
|
|
perf_pmu_disable(pmu);
|
|
__this_cpu_write(cpu_hw_events.n_txn, 0);
|
|
+ __this_cpu_write(cpu_hw_events.n_txn_pair, 0);
|
|
}
|
|
|
|
/*
|
|
@@ -1978,6 +1981,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
|
|
*/
|
|
__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
|
|
__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
|
|
+ __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
|
|
perf_pmu_enable(pmu);
|
|
}
|
|
|
|
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
|
|
index dc43cc124e096..221d1766d6e6c 100644
|
|
--- a/arch/x86/events/intel/ds.c
|
|
+++ b/arch/x86/events/intel/ds.c
|
|
@@ -670,9 +670,7 @@ unlock:
|
|
|
|
static inline void intel_pmu_drain_pebs_buffer(void)
|
|
{
|
|
- struct pt_regs regs;
|
|
-
|
|
- x86_pmu.drain_pebs(®s);
|
|
+ x86_pmu.drain_pebs(NULL);
|
|
}
|
|
|
|
/*
|
|
@@ -1737,6 +1735,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
|
|
struct x86_perf_regs perf_regs;
|
|
struct pt_regs *regs = &perf_regs.regs;
|
|
void *at = get_next_pebs_record_by_bit(base, top, bit);
|
|
+ struct pt_regs dummy_iregs;
|
|
|
|
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
|
|
/*
|
|
@@ -1749,6 +1748,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
|
|
} else if (!intel_pmu_save_and_restart(event))
|
|
return;
|
|
|
|
+ if (!iregs)
|
|
+ iregs = &dummy_iregs;
|
|
+
|
|
while (count > 1) {
|
|
setup_sample(event, iregs, at, &data, regs);
|
|
perf_event_output(event, &data, regs);
|
|
@@ -1758,16 +1760,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
|
|
}
|
|
|
|
setup_sample(event, iregs, at, &data, regs);
|
|
-
|
|
- /*
|
|
- * All but the last records are processed.
|
|
- * The last one is left to be able to call the overflow handler.
|
|
- */
|
|
- if (perf_event_overflow(event, &data, regs)) {
|
|
- x86_pmu_stop(event, 0);
|
|
- return;
|
|
+ if (iregs == &dummy_iregs) {
|
|
+ /*
|
|
+ * The PEBS records may be drained in the non-overflow context,
|
|
+ * e.g., large PEBS + context switch. Perf should treat the
|
|
+ * last record the same as other PEBS records, and doesn't
|
|
+ * invoke the generic overflow handler.
|
|
+ */
|
|
+ perf_event_output(event, &data, regs);
|
|
+ } else {
|
|
+ /*
|
|
+ * All but the last records are processed.
|
|
+ * The last one is left to be able to call the overflow handler.
|
|
+ */
|
|
+ if (perf_event_overflow(event, &data, regs))
|
|
+ x86_pmu_stop(event, 0);
|
|
}
|
|
-
|
|
}
|
|
|
|
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
|
|
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
|
|
index 1038e9f1e3542..3b70c2ff177c0 100644
|
|
--- a/arch/x86/events/intel/uncore_snb.c
|
|
+++ b/arch/x86/events/intel/uncore_snb.c
|
|
@@ -115,6 +115,10 @@
|
|
#define ICL_UNC_CBO_0_PER_CTR0 0x702
|
|
#define ICL_UNC_CBO_MSR_OFFSET 0x8
|
|
|
|
+/* ICL ARB register */
|
|
+#define ICL_UNC_ARB_PER_CTR 0x3b1
|
|
+#define ICL_UNC_ARB_PERFEVTSEL 0x3b3
|
|
+
|
|
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
|
|
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
|
|
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
|
|
@@ -302,15 +306,21 @@ void skl_uncore_cpu_init(void)
|
|
snb_uncore_arb.ops = &skl_uncore_msr_ops;
|
|
}
|
|
|
|
+static struct intel_uncore_ops icl_uncore_msr_ops = {
|
|
+ .disable_event = snb_uncore_msr_disable_event,
|
|
+ .enable_event = snb_uncore_msr_enable_event,
|
|
+ .read_counter = uncore_msr_read_counter,
|
|
+};
|
|
+
|
|
static struct intel_uncore_type icl_uncore_cbox = {
|
|
.name = "cbox",
|
|
- .num_counters = 4,
|
|
+ .num_counters = 2,
|
|
.perf_ctr_bits = 44,
|
|
.perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
|
|
.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
|
|
.event_mask = SNB_UNC_RAW_EVENT_MASK,
|
|
.msr_offset = ICL_UNC_CBO_MSR_OFFSET,
|
|
- .ops = &skl_uncore_msr_ops,
|
|
+ .ops = &icl_uncore_msr_ops,
|
|
.format_group = &snb_uncore_format_group,
|
|
};
|
|
|
|
@@ -339,13 +349,25 @@ static struct intel_uncore_type icl_uncore_clockbox = {
|
|
.single_fixed = 1,
|
|
.event_mask = SNB_UNC_CTL_EV_SEL_MASK,
|
|
.format_group = &icl_uncore_clock_format_group,
|
|
- .ops = &skl_uncore_msr_ops,
|
|
+ .ops = &icl_uncore_msr_ops,
|
|
.event_descs = icl_uncore_events,
|
|
};
|
|
|
|
+static struct intel_uncore_type icl_uncore_arb = {
|
|
+ .name = "arb",
|
|
+ .num_counters = 1,
|
|
+ .num_boxes = 1,
|
|
+ .perf_ctr_bits = 44,
|
|
+ .perf_ctr = ICL_UNC_ARB_PER_CTR,
|
|
+ .event_ctl = ICL_UNC_ARB_PERFEVTSEL,
|
|
+ .event_mask = SNB_UNC_RAW_EVENT_MASK,
|
|
+ .ops = &icl_uncore_msr_ops,
|
|
+ .format_group = &snb_uncore_format_group,
|
|
+};
|
|
+
|
|
static struct intel_uncore_type *icl_msr_uncores[] = {
|
|
&icl_uncore_cbox,
|
|
- &snb_uncore_arb,
|
|
+ &icl_uncore_arb,
|
|
&icl_uncore_clockbox,
|
|
NULL,
|
|
};
|
|
@@ -363,7 +385,6 @@ void icl_uncore_cpu_init(void)
|
|
{
|
|
uncore_msr_uncores = icl_msr_uncores;
|
|
icl_uncore_cbox.num_boxes = icl_get_cbox_num();
|
|
- snb_uncore_arb.ops = &skl_uncore_msr_ops;
|
|
}
|
|
|
|
enum {
|
|
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
|
|
index 07652fa20ebbe..6a03fe8054a81 100644
|
|
--- a/arch/x86/events/intel/uncore_snbep.c
|
|
+++ b/arch/x86/events/intel/uncore_snbep.c
|
|
@@ -4550,10 +4550,10 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
|
|
INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
|
|
|
|
INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
|
|
- INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
|
|
+ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
|
|
INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
|
|
INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
|
|
- INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
|
|
+ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
|
|
INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
|
|
{ /* end: all zeroes */ },
|
|
};
|
|
@@ -5009,17 +5009,17 @@ static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
|
|
INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
|
|
|
|
INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
|
|
- INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"),
|
|
+ INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
|
|
INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
|
|
INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
|
|
- INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
|
|
+ INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
|
|
INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
|
|
|
|
INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
|
|
- INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "3.814697266e-6"),
|
|
+ INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
|
|
INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
|
|
INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
|
|
- INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "3.814697266e-6"),
|
|
+ INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
|
|
INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
|
|
{ /* end: all zeroes */ },
|
|
};
|
|
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
|
|
index e17a3d8a47ede..d4d482d16fe18 100644
|
|
--- a/arch/x86/events/perf_event.h
|
|
+++ b/arch/x86/events/perf_event.h
|
|
@@ -198,6 +198,7 @@ struct cpu_hw_events {
|
|
they've never been enabled yet */
|
|
int n_txn; /* the # last events in the below arrays;
|
|
added in the current transaction */
|
|
+ int n_txn_pair;
|
|
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
|
|
u64 tags[X86_PMC_IDX_MAX];
|
|
|
|
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
|
|
index eb8e781c43539..b8f7c9659ef6b 100644
|
|
--- a/arch/x86/include/asm/special_insns.h
|
|
+++ b/arch/x86/include/asm/special_insns.h
|
|
@@ -11,45 +11,47 @@
|
|
#include <linux/jump_label.h>
|
|
|
|
/*
|
|
- * Volatile isn't enough to prevent the compiler from reordering the
|
|
- * read/write functions for the control registers and messing everything up.
|
|
- * A memory clobber would solve the problem, but would prevent reordering of
|
|
- * all loads stores around it, which can hurt performance. Solution is to
|
|
- * use a variable and mimic reads and writes to it to enforce serialization
|
|
+ * The compiler should not reorder volatile asm statements with respect to each
|
|
+ * other: they should execute in program order. However GCC 4.9.x and 5.x have
|
|
+ * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
|
|
+ * volatile asm. The write functions are not affected since they have memory
|
|
+ * clobbers preventing reordering. To prevent reads from being reordered with
|
|
+ * respect to writes, use a dummy memory operand.
|
|
*/
|
|
-extern unsigned long __force_order;
|
|
+
|
|
+#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
|
|
|
|
void native_write_cr0(unsigned long val);
|
|
|
|
static inline unsigned long native_read_cr0(void)
|
|
{
|
|
unsigned long val;
|
|
- asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
|
|
+ asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
|
|
return val;
|
|
}
|
|
|
|
static __always_inline unsigned long native_read_cr2(void)
|
|
{
|
|
unsigned long val;
|
|
- asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
|
|
+ asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
|
|
return val;
|
|
}
|
|
|
|
static __always_inline void native_write_cr2(unsigned long val)
|
|
{
|
|
- asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
|
|
+ asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
|
|
}
|
|
|
|
static inline unsigned long __native_read_cr3(void)
|
|
{
|
|
unsigned long val;
|
|
- asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
|
|
+ asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
|
|
return val;
|
|
}
|
|
|
|
static inline void native_write_cr3(unsigned long val)
|
|
{
|
|
- asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
|
|
+ asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
|
|
}
|
|
|
|
static inline unsigned long native_read_cr4(void)
|
|
@@ -64,10 +66,10 @@ static inline unsigned long native_read_cr4(void)
|
|
asm volatile("1: mov %%cr4, %0\n"
|
|
"2:\n"
|
|
_ASM_EXTABLE(1b, 2b)
|
|
- : "=r" (val), "=m" (__force_order) : "0" (0));
|
|
+ : "=r" (val) : "0" (0), __FORCE_ORDER);
|
|
#else
|
|
/* CR4 always exists on x86_64. */
|
|
- asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
|
|
+ asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
|
|
#endif
|
|
return val;
|
|
}
|
|
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
|
|
index 95c090a45b4b4..d8ef789e00c15 100644
|
|
--- a/arch/x86/kernel/cpu/common.c
|
|
+++ b/arch/x86/kernel/cpu/common.c
|
|
@@ -358,7 +358,7 @@ void native_write_cr0(unsigned long val)
|
|
unsigned long bits_missing = 0;
|
|
|
|
set_register:
|
|
- asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
|
|
+ asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
|
|
|
|
if (static_branch_likely(&cr_pinning)) {
|
|
if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
|
|
@@ -377,7 +377,7 @@ void native_write_cr4(unsigned long val)
|
|
unsigned long bits_changed = 0;
|
|
|
|
set_register:
|
|
- asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
|
|
+ asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
|
|
|
|
if (static_branch_likely(&cr_pinning)) {
|
|
if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
|
|
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
|
|
index 14e4b4d17ee5b..07673a034d39c 100644
|
|
--- a/arch/x86/kernel/cpu/mce/core.c
|
|
+++ b/arch/x86/kernel/cpu/mce/core.c
|
|
@@ -370,42 +370,105 @@ static int msr_to_offset(u32 msr)
|
|
return -1;
|
|
}
|
|
|
|
+__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
|
|
+ struct pt_regs *regs, int trapnr,
|
|
+ unsigned long error_code,
|
|
+ unsigned long fault_addr)
|
|
+{
|
|
+ pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
|
|
+ (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
|
|
+
|
|
+ show_stack_regs(regs);
|
|
+
|
|
+ panic("MCA architectural violation!\n");
|
|
+
|
|
+ while (true)
|
|
+ cpu_relax();
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
/* MSR access wrappers used for error injection */
|
|
-static u64 mce_rdmsrl(u32 msr)
|
|
+static noinstr u64 mce_rdmsrl(u32 msr)
|
|
{
|
|
- u64 v;
|
|
+ DECLARE_ARGS(val, low, high);
|
|
|
|
if (__this_cpu_read(injectm.finished)) {
|
|
- int offset = msr_to_offset(msr);
|
|
+ int offset;
|
|
+ u64 ret;
|
|
|
|
+ instrumentation_begin();
|
|
+
|
|
+ offset = msr_to_offset(msr);
|
|
if (offset < 0)
|
|
- return 0;
|
|
- return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
|
|
- }
|
|
+ ret = 0;
|
|
+ else
|
|
+ ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
|
|
|
|
- if (rdmsrl_safe(msr, &v)) {
|
|
- WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
|
|
- /*
|
|
- * Return zero in case the access faulted. This should
|
|
- * not happen normally but can happen if the CPU does
|
|
- * something weird, or if the code is buggy.
|
|
- */
|
|
- v = 0;
|
|
+ instrumentation_end();
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
- return v;
|
|
+ /*
|
|
+ * RDMSR on MCA MSRs should not fault. If they do, this is very much an
|
|
+ * architectural violation and needs to be reported to hw vendor. Panic
|
|
+ * the box to not allow any further progress.
|
|
+ */
|
|
+ asm volatile("1: rdmsr\n"
|
|
+ "2:\n"
|
|
+ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
|
|
+ : EAX_EDX_RET(val, low, high) : "c" (msr));
|
|
+
|
|
+
|
|
+ return EAX_EDX_VAL(val, low, high);
|
|
+}
|
|
+
|
|
+__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
|
|
+ struct pt_regs *regs, int trapnr,
|
|
+ unsigned long error_code,
|
|
+ unsigned long fault_addr)
|
|
+{
|
|
+ pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
|
|
+ (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
|
|
+ regs->ip, (void *)regs->ip);
|
|
+
|
|
+ show_stack_regs(regs);
|
|
+
|
|
+ panic("MCA architectural violation!\n");
|
|
+
|
|
+ while (true)
|
|
+ cpu_relax();
|
|
+
|
|
+ return true;
|
|
}
|
|
|
|
-static void mce_wrmsrl(u32 msr, u64 v)
|
|
+static noinstr void mce_wrmsrl(u32 msr, u64 v)
|
|
{
|
|
+ u32 low, high;
|
|
+
|
|
if (__this_cpu_read(injectm.finished)) {
|
|
- int offset = msr_to_offset(msr);
|
|
+ int offset;
|
|
+
|
|
+ instrumentation_begin();
|
|
|
|
+ offset = msr_to_offset(msr);
|
|
if (offset >= 0)
|
|
*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
|
|
+
|
|
+ instrumentation_end();
|
|
+
|
|
return;
|
|
}
|
|
- wrmsrl(msr, v);
|
|
+
|
|
+ low = (u32)v;
|
|
+ high = (u32)(v >> 32);
|
|
+
|
|
+ /* See comment in mce_rdmsrl() */
|
|
+ asm volatile("1: wrmsr\n"
|
|
+ "2:\n"
|
|
+ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
|
|
+ : : "c" (msr), "a"(low), "d" (high) : "memory");
|
|
}
|
|
|
|
/*
|
|
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
|
|
index 6473070b5da49..b122610e9046a 100644
|
|
--- a/arch/x86/kernel/cpu/mce/internal.h
|
|
+++ b/arch/x86/kernel/cpu/mce/internal.h
|
|
@@ -185,4 +185,14 @@ extern bool amd_filter_mce(struct mce *m);
|
|
static inline bool amd_filter_mce(struct mce *m) { return false; };
|
|
#endif
|
|
|
|
+__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
|
|
+ struct pt_regs *regs, int trapnr,
|
|
+ unsigned long error_code,
|
|
+ unsigned long fault_addr);
|
|
+
|
|
+__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
|
|
+ struct pt_regs *regs, int trapnr,
|
|
+ unsigned long error_code,
|
|
+ unsigned long fault_addr);
|
|
+
|
|
#endif /* __X86_MCE_INTERNAL_H__ */
|
|
diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
|
|
index e1da619add192..567ce09a02868 100644
|
|
--- a/arch/x86/kernel/cpu/mce/severity.c
|
|
+++ b/arch/x86/kernel/cpu/mce/severity.c
|
|
@@ -9,9 +9,11 @@
|
|
#include <linux/seq_file.h>
|
|
#include <linux/init.h>
|
|
#include <linux/debugfs.h>
|
|
-#include <asm/mce.h>
|
|
#include <linux/uaccess.h>
|
|
|
|
+#include <asm/mce.h>
|
|
+#include <asm/intel-family.h>
|
|
+
|
|
#include "internal.h"
|
|
|
|
/*
|
|
@@ -40,9 +42,14 @@ static struct severity {
|
|
unsigned char context;
|
|
unsigned char excp;
|
|
unsigned char covered;
|
|
+ unsigned char cpu_model;
|
|
+ unsigned char cpu_minstepping;
|
|
+ unsigned char bank_lo, bank_hi;
|
|
char *msg;
|
|
} severities[] = {
|
|
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
|
|
+#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
|
|
+#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
|
|
#define KERNEL .context = IN_KERNEL
|
|
#define USER .context = IN_USER
|
|
#define KERNEL_RECOV .context = IN_KERNEL_RECOV
|
|
@@ -97,7 +104,6 @@ static struct severity {
|
|
KEEP, "Corrected error",
|
|
NOSER, BITCLR(MCI_STATUS_UC)
|
|
),
|
|
-
|
|
/*
|
|
* known AO MCACODs reported via MCE or CMC:
|
|
*
|
|
@@ -113,6 +119,18 @@ static struct severity {
|
|
AO, "Action optional: last level cache writeback error",
|
|
SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
|
|
),
|
|
+ /*
|
|
+ * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
|
|
+ * to report uncorrected errors using CMCI with a special signature.
|
|
+ * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
|
|
+ * in one of the memory controller banks.
|
|
+ * Set severity to "AO" for same action as normal patrol scrub error.
|
|
+ */
|
|
+ MCESEV(
|
|
+ AO, "Uncorrected Patrol Scrub Error",
|
|
+ SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
|
|
+ MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
|
|
+ ),
|
|
|
|
/* ignore OVER for UCNA */
|
|
MCESEV(
|
|
@@ -324,6 +342,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
|
|
continue;
|
|
if (s->excp && excp != s->excp)
|
|
continue;
|
|
+ if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
|
|
+ continue;
|
|
+ if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
|
|
+ continue;
|
|
+ if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
|
|
+ continue;
|
|
if (msg)
|
|
*msg = s->msg;
|
|
s->covered = 1;
|
|
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
|
|
index 7401cc12c3ccf..42679610c9bea 100644
|
|
--- a/arch/x86/kernel/dumpstack.c
|
|
+++ b/arch/x86/kernel/dumpstack.c
|
|
@@ -115,7 +115,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
|
|
unsigned long prologue = regs->ip - PROLOGUE_SIZE;
|
|
|
|
if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
|
|
- printk("%sCode: Bad RIP value.\n", loglvl);
|
|
+ printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
|
|
+ loglvl, prologue);
|
|
} else {
|
|
printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
|
|
__stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
|
|
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
|
|
index 61ddc3a5e5c2b..f8ff895aaf7e1 100644
|
|
--- a/arch/x86/kernel/fpu/init.c
|
|
+++ b/arch/x86/kernel/fpu/init.c
|
|
@@ -243,9 +243,9 @@ static void __init fpu__init_system_ctx_switch(void)
|
|
*/
|
|
static void __init fpu__init_parse_early_param(void)
|
|
{
|
|
- char arg[32];
|
|
+ char arg[128];
|
|
char *argptr = arg;
|
|
- int bit;
|
|
+ int arglen, res, bit;
|
|
|
|
#ifdef CONFIG_X86_32
|
|
if (cmdline_find_option_bool(boot_command_line, "no387"))
|
|
@@ -268,12 +268,26 @@ static void __init fpu__init_parse_early_param(void)
|
|
if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
|
|
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
|
|
|
|
- if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
|
|
- sizeof(arg)) &&
|
|
- get_option(&argptr, &bit) &&
|
|
- bit >= 0 &&
|
|
- bit < NCAPINTS * 32)
|
|
- setup_clear_cpu_cap(bit);
|
|
+ arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
|
|
+ if (arglen <= 0)
|
|
+ return;
|
|
+
|
|
+ pr_info("Clearing CPUID bits:");
|
|
+ do {
|
|
+ res = get_option(&argptr, &bit);
|
|
+ if (res == 0 || res == 3)
|
|
+ break;
|
|
+
|
|
+ /* If the argument was too long, the last bit may be cut off */
|
|
+ if (res == 1 && arglen >= sizeof(arg))
|
|
+ break;
|
|
+
|
|
+ if (bit >= 0 && bit < NCAPINTS * 32) {
|
|
+ pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
|
|
+ setup_clear_cpu_cap(bit);
|
|
+ }
|
|
+ } while (res == 2);
|
|
+ pr_cont("\n");
|
|
}
|
|
|
|
/*
|
|
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
|
|
index d7c5e44b26f73..091752c3a19e2 100644
|
|
--- a/arch/x86/kernel/nmi.c
|
|
+++ b/arch/x86/kernel/nmi.c
|
|
@@ -102,7 +102,6 @@ fs_initcall(nmi_warning_debugfs);
|
|
|
|
static void nmi_check_duration(struct nmiaction *action, u64 duration)
|
|
{
|
|
- u64 whole_msecs = READ_ONCE(action->max_duration);
|
|
int remainder_ns, decimal_msecs;
|
|
|
|
if (duration < nmi_longest_ns || duration < action->max_duration)
|
|
@@ -110,12 +109,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
|
|
|
|
action->max_duration = duration;
|
|
|
|
- remainder_ns = do_div(whole_msecs, (1000 * 1000));
|
|
+ remainder_ns = do_div(duration, (1000 * 1000));
|
|
decimal_msecs = remainder_ns / 1000;
|
|
|
|
printk_ratelimited(KERN_INFO
|
|
"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
|
|
- action->handler, whole_msecs, decimal_msecs);
|
|
+ action->handler, duration, decimal_msecs);
|
|
}
|
|
|
|
static int nmi_handle(unsigned int type, struct pt_regs *regs)
|
|
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
|
|
index d0e2825ae6174..571cb8657e53e 100644
|
|
--- a/arch/x86/kvm/emulate.c
|
|
+++ b/arch/x86/kvm/emulate.c
|
|
@@ -3594,7 +3594,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
|
|
u64 tsc_aux = 0;
|
|
|
|
if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
|
|
- return emulate_gp(ctxt, 0);
|
|
+ return emulate_ud(ctxt);
|
|
ctxt->dst.val = tsc_aux;
|
|
return X86EMUL_CONTINUE;
|
|
}
|
|
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
|
|
index d057376bd3d33..698969e18fe35 100644
|
|
--- a/arch/x86/kvm/ioapic.c
|
|
+++ b/arch/x86/kvm/ioapic.c
|
|
@@ -197,12 +197,9 @@ static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
|
|
|
|
/*
|
|
* If no longer has pending EOI in LAPICs, update
|
|
- * EOI for this vetor.
|
|
+ * EOI for this vector.
|
|
*/
|
|
rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
|
|
- kvm_ioapic_update_eoi_one(vcpu, ioapic,
|
|
- entry->fields.trig_mode,
|
|
- irq);
|
|
break;
|
|
}
|
|
}
|
|
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
|
|
index cfe83d4ae6252..ca0781b41df9d 100644
|
|
--- a/arch/x86/kvm/kvm_cache_regs.h
|
|
+++ b/arch/x86/kvm/kvm_cache_regs.h
|
|
@@ -7,7 +7,7 @@
|
|
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
|
|
#define KVM_POSSIBLE_CR4_GUEST_BITS \
|
|
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
|
|
- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
|
|
+ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD)
|
|
|
|
#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
|
|
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
|
|
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
|
|
index 4ce2ddd26c0b7..ccb72af1bcb5d 100644
|
|
--- a/arch/x86/kvm/lapic.c
|
|
+++ b/arch/x86/kvm/lapic.c
|
|
@@ -490,6 +490,12 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
|
|
}
|
|
}
|
|
|
|
+void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
|
|
+{
|
|
+ apic_clear_irr(vec, vcpu->arch.apic);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
|
|
+
|
|
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
|
|
{
|
|
struct kvm_vcpu *vcpu;
|
|
@@ -2462,6 +2468,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
|
|
__apic_update_ppr(apic, &ppr);
|
|
return apic_has_interrupt_for_ppr(apic, ppr);
|
|
}
|
|
+EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
|
|
|
|
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
|
|
{
|
|
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
|
|
index 754f29beb83e3..4fb86e3a9dd3d 100644
|
|
--- a/arch/x86/kvm/lapic.h
|
|
+++ b/arch/x86/kvm/lapic.h
|
|
@@ -89,6 +89,7 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
|
|
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
|
|
int shorthand, unsigned int dest, int dest_mode);
|
|
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
|
|
+void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec);
|
|
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr);
|
|
bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr);
|
|
void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
|
|
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
|
|
index 1e6724c30cc05..57cd70801216f 100644
|
|
--- a/arch/x86/kvm/mmu/mmu.c
|
|
+++ b/arch/x86/kvm/mmu/mmu.c
|
|
@@ -6341,6 +6341,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
|
|
cond_resched_lock(&kvm->mmu_lock);
|
|
}
|
|
}
|
|
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
|
|
|
|
spin_unlock(&kvm->mmu_lock);
|
|
srcu_read_unlock(&kvm->srcu, rcu_idx);
|
|
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
|
|
index e80daa98682f5..b74722e0abb53 100644
|
|
--- a/arch/x86/kvm/svm/avic.c
|
|
+++ b/arch/x86/kvm/svm/avic.c
|
|
@@ -868,6 +868,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
|
|
* - Tell IOMMU to use legacy mode for this interrupt.
|
|
* - Retrieve ga_tag of prior interrupt remapping data.
|
|
*/
|
|
+ pi.prev_ga_tag = 0;
|
|
pi.is_guest_mode = false;
|
|
ret = irq_set_vcpu_affinity(host_irq, &pi);
|
|
|
|
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
|
|
index a5810928b011f..27e41fac91965 100644
|
|
--- a/arch/x86/kvm/vmx/nested.c
|
|
+++ b/arch/x86/kvm/vmx/nested.c
|
|
@@ -2402,6 +2402,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
|
|
vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
|
|
vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
|
|
vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
|
|
+
|
|
+ vmx->segment_cache.bitmask = 0;
|
|
}
|
|
|
|
if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
|
|
@@ -3295,8 +3297,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
|
|
prepare_vmcs02_early(vmx, vmcs12);
|
|
|
|
if (from_vmentry) {
|
|
- if (unlikely(!nested_get_vmcs12_pages(vcpu)))
|
|
+ if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
|
|
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
|
|
return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
|
|
+ }
|
|
|
|
if (nested_vmx_check_vmentry_hw(vcpu)) {
|
|
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
|
|
@@ -3480,6 +3484,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
|
|
if (unlikely(status != NVMX_VMENTRY_SUCCESS))
|
|
goto vmentry_failed;
|
|
|
|
+ /* Emulate processing of posted interrupts on VM-Enter. */
|
|
+ if (nested_cpu_has_posted_intr(vmcs12) &&
|
|
+ kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
|
|
+ vmx->nested.pi_pending = true;
|
|
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
|
|
+ kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
|
|
+ }
|
|
+
|
|
/* Hide L1D cache contents from the nested guest. */
|
|
vmx->vcpu.arch.l1tf_flush_l1d = true;
|
|
|
|
diff --git a/block/blk-core.c b/block/blk-core.c
|
|
index 619a3dcd3f5e7..8d6435b731186 100644
|
|
--- a/block/blk-core.c
|
|
+++ b/block/blk-core.c
|
|
@@ -798,11 +798,10 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector)
|
|
{
|
|
char b[BDEVNAME_SIZE];
|
|
|
|
- printk(KERN_INFO "attempt to access beyond end of device\n");
|
|
- printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
|
|
- bio_devname(bio, b), bio->bi_opf,
|
|
- (unsigned long long)bio_end_sector(bio),
|
|
- (long long)maxsector);
|
|
+ pr_info_ratelimited("attempt to access beyond end of device\n"
|
|
+ "%s: rw=%d, want=%llu, limit=%llu\n",
|
|
+ bio_devname(bio, b), bio->bi_opf,
|
|
+ bio_end_sector(bio), maxsector);
|
|
}
|
|
|
|
#ifdef CONFIG_FAIL_MAKE_REQUEST
|
|
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
|
|
index 062229395a507..7b52e7657b2d1 100644
|
|
--- a/block/blk-mq-sysfs.c
|
|
+++ b/block/blk-mq-sysfs.c
|
|
@@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
|
|
struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
|
|
kobj);
|
|
|
|
- cancel_delayed_work_sync(&hctx->run_work);
|
|
-
|
|
if (hctx->flags & BLK_MQ_F_BLOCKING)
|
|
cleanup_srcu_struct(hctx->srcu);
|
|
blk_free_flush_queue(hctx->fq);
|
|
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
|
|
index 02643e149d5e1..95fea6c18baf7 100644
|
|
--- a/block/blk-sysfs.c
|
|
+++ b/block/blk-sysfs.c
|
|
@@ -896,9 +896,16 @@ static void __blk_release_queue(struct work_struct *work)
|
|
|
|
blk_free_queue_stats(q->stats);
|
|
|
|
- if (queue_is_mq(q))
|
|
+ if (queue_is_mq(q)) {
|
|
+ struct blk_mq_hw_ctx *hctx;
|
|
+ int i;
|
|
+
|
|
cancel_delayed_work_sync(&q->requeue_work);
|
|
|
|
+ queue_for_each_hw_ctx(q, hctx, i)
|
|
+ cancel_delayed_work_sync(&hctx->run_work);
|
|
+ }
|
|
+
|
|
blk_exit_queue(q);
|
|
|
|
blk_queue_free_zone_bitmaps(q);
|
|
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
|
|
index 43c6aa784858b..e62d735ed2660 100644
|
|
--- a/crypto/algif_aead.c
|
|
+++ b/crypto/algif_aead.c
|
|
@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
|
|
SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
|
|
|
|
skcipher_request_set_sync_tfm(skreq, null_tfm);
|
|
- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
|
+ skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
|
|
NULL, NULL);
|
|
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
|
|
|
|
@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
|
|
areq->outlen = outlen;
|
|
|
|
aead_request_set_callback(&areq->cra_u.aead_req,
|
|
- CRYPTO_TFM_REQ_MAY_BACKLOG,
|
|
+ CRYPTO_TFM_REQ_MAY_SLEEP,
|
|
af_alg_async_cb, areq);
|
|
err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
|
|
crypto_aead_decrypt(&areq->cra_u.aead_req);
|
|
|
|
/* AIO operation in progress */
|
|
- if (err == -EINPROGRESS || err == -EBUSY)
|
|
+ if (err == -EINPROGRESS)
|
|
return -EIOCBQUEUED;
|
|
|
|
sock_put(sk);
|
|
} else {
|
|
/* Synchronous operation */
|
|
aead_request_set_callback(&areq->cra_u.aead_req,
|
|
+ CRYPTO_TFM_REQ_MAY_SLEEP |
|
|
CRYPTO_TFM_REQ_MAY_BACKLOG,
|
|
crypto_req_done, &ctx->wait);
|
|
err = crypto_wait_req(ctx->enc ?
|
|
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
|
|
index 81c4022285a7c..30069a92a9b22 100644
|
|
--- a/crypto/algif_skcipher.c
|
|
+++ b/crypto/algif_skcipher.c
|
|
@@ -123,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
|
|
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
|
|
|
|
/* AIO operation in progress */
|
|
- if (err == -EINPROGRESS || err == -EBUSY)
|
|
+ if (err == -EINPROGRESS)
|
|
return -EIOCBQUEUED;
|
|
|
|
sock_put(sk);
|
|
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
|
|
index 5b310eea9e527..adab46ca5dff7 100644
|
|
--- a/drivers/android/binder.c
|
|
+++ b/drivers/android/binder.c
|
|
@@ -223,7 +223,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
|
|
struct binder_work {
|
|
struct list_head entry;
|
|
|
|
- enum {
|
|
+ enum binder_work_type {
|
|
BINDER_WORK_TRANSACTION = 1,
|
|
BINDER_WORK_TRANSACTION_COMPLETE,
|
|
BINDER_WORK_RETURN_ERROR,
|
|
@@ -885,27 +885,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
|
|
return w;
|
|
}
|
|
|
|
-/**
|
|
- * binder_dequeue_work_head() - Dequeues the item at head of list
|
|
- * @proc: binder_proc associated with list
|
|
- * @list: list to dequeue head
|
|
- *
|
|
- * Removes the head of the list if there are items on the list
|
|
- *
|
|
- * Return: pointer dequeued binder_work, NULL if list was empty
|
|
- */
|
|
-static struct binder_work *binder_dequeue_work_head(
|
|
- struct binder_proc *proc,
|
|
- struct list_head *list)
|
|
-{
|
|
- struct binder_work *w;
|
|
-
|
|
- binder_inner_proc_lock(proc);
|
|
- w = binder_dequeue_work_head_ilocked(list);
|
|
- binder_inner_proc_unlock(proc);
|
|
- return w;
|
|
-}
|
|
-
|
|
static void
|
|
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
|
|
static void binder_free_thread(struct binder_thread *thread);
|
|
@@ -2345,8 +2324,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
|
|
* file is done when the transaction is torn
|
|
* down.
|
|
*/
|
|
- WARN_ON(failed_at &&
|
|
- proc->tsk == current->group_leader);
|
|
} break;
|
|
case BINDER_TYPE_PTR:
|
|
/*
|
|
@@ -4589,13 +4566,17 @@ static void binder_release_work(struct binder_proc *proc,
|
|
struct list_head *list)
|
|
{
|
|
struct binder_work *w;
|
|
+ enum binder_work_type wtype;
|
|
|
|
while (1) {
|
|
- w = binder_dequeue_work_head(proc, list);
|
|
+ binder_inner_proc_lock(proc);
|
|
+ w = binder_dequeue_work_head_ilocked(list);
|
|
+ wtype = w ? w->type : 0;
|
|
+ binder_inner_proc_unlock(proc);
|
|
if (!w)
|
|
return;
|
|
|
|
- switch (w->type) {
|
|
+ switch (wtype) {
|
|
case BINDER_WORK_TRANSACTION: {
|
|
struct binder_transaction *t;
|
|
|
|
@@ -4629,9 +4610,11 @@ static void binder_release_work(struct binder_proc *proc,
|
|
kfree(death);
|
|
binder_stats_deleted(BINDER_STAT_DEATH);
|
|
} break;
|
|
+ case BINDER_WORK_NODE:
|
|
+ break;
|
|
default:
|
|
pr_err("unexpected work type, %d, not freed\n",
|
|
- w->type);
|
|
+ wtype);
|
|
break;
|
|
}
|
|
}
|
|
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
|
|
index a5fef9aa419fd..91a0c84d55c97 100644
|
|
--- a/drivers/bluetooth/btusb.c
|
|
+++ b/drivers/bluetooth/btusb.c
|
|
@@ -2849,6 +2849,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
|
|
buf = kmalloc(size, GFP_KERNEL);
|
|
if (!buf) {
|
|
kfree(dr);
|
|
+ usb_free_urb(urb);
|
|
return -ENOMEM;
|
|
}
|
|
|
|
diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
|
|
index 66e2700c9032a..bc1469778cf87 100644
|
|
--- a/drivers/bus/mhi/core/Makefile
|
|
+++ b/drivers/bus/mhi/core/Makefile
|
|
@@ -1,3 +1,3 @@
|
|
-obj-$(CONFIG_MHI_BUS) := mhi.o
|
|
+obj-$(CONFIG_MHI_BUS) += mhi.o
|
|
|
|
mhi-y := init.o main.o pm.o boot.o
|
|
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
|
|
index 77b8d551ae7fe..dd559661c15b3 100644
|
|
--- a/drivers/char/ipmi/ipmi_si_intf.c
|
|
+++ b/drivers/char/ipmi/ipmi_si_intf.c
|
|
@@ -1963,7 +1963,7 @@ static int try_smi_init(struct smi_info *new_smi)
|
|
/* Do this early so it's available for logs. */
|
|
if (!new_smi->io.dev) {
|
|
pr_err("IPMI interface added with no device\n");
|
|
- rv = EIO;
|
|
+ rv = -EIO;
|
|
goto out_err;
|
|
}
|
|
|
|
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 37c22667e8319..4313ecb2af5b2 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
                return -EINVAL;

        regmap_read(regmap, AT91_CKGR_MOR, &tmp);
-       tmp &= ~MOR_KEY_MASK;

        if (index && !(tmp & AT91_PMC_MOSCSEL))
-               regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
+               tmp = AT91_PMC_MOSCSEL;
        else if (!index && (tmp & AT91_PMC_MOSCSEL))
-               regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
+               tmp = 0;
+       else
+               return 0;
+
+       regmap_update_bits(regmap, AT91_CKGR_MOR,
+                          AT91_PMC_MOSCSEL | MOR_KEY_MASK,
+                          tmp | AT91_PMC_KEY);

        while (!clk_sam9x5_main_ready(regmap))
                cpu_relax();
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 011802f1a6df9..f18b4d9e9455b 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1337,8 +1337,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
        pll->hw.init = &init;

        ret = devm_clk_hw_register(cprman->dev, &pll->hw);
-       if (ret)
+       if (ret) {
+               kfree(pll);
                return NULL;
+       }
        return &pll->hw;
}

diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index a64aace213c27..7762c5825e77d 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -157,10 +157,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys
                "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };

static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
-               "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
+               "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };

static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
-               "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
+               "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };

static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out",
                "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 7edf8c8432b67..64ea895f1a7df 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -522,7 +522,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
                np = of_find_node_with_property(np, *clk_name);
                if (!np) {
                        clk_name++;
-                       break;
+                       continue;
                }

                if (!of_device_is_available(np))
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 9766cccf5844c..6e0d3a1667291 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = {
                    "pwm_sel", 19),
        GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
                    "pwm_sel", 21),
+       GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
+                   "uart_sel", 22),
        GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
                    "uart_sel", 23),
        GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 53715e36326c6..9918cb375de30 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -1209,13 +1209,132 @@ static struct clk_hw_onecell_data sm1_audio_hw_onecell_data = {
};


-/* Convenience table to populate regmap in .probe()
- * Note that this table is shared between both AXG and G12A,
- * with spdifout_b clocks being exclusive to G12A. Since those
- * clocks are not declared within the AXG onecell table, we do not
- * feel the need to have separate AXG/G12A regmap tables.
- */
+/* Convenience table to populate regmap in .probe(). */
static struct clk_regmap *const axg_clk_regmaps[] = {
+       &ddr_arb,
+       &pdm,
+       &tdmin_a,
+       &tdmin_b,
+       &tdmin_c,
+       &tdmin_lb,
+       &tdmout_a,
+       &tdmout_b,
+       &tdmout_c,
+       &frddr_a,
+       &frddr_b,
+       &frddr_c,
+       &toddr_a,
+       &toddr_b,
+       &toddr_c,
+       &loopback,
+       &spdifin,
+       &spdifout,
+       &resample,
+       &power_detect,
+       &mst_a_mclk_sel,
+       &mst_b_mclk_sel,
+       &mst_c_mclk_sel,
+       &mst_d_mclk_sel,
+       &mst_e_mclk_sel,
+       &mst_f_mclk_sel,
+       &mst_a_mclk_div,
+       &mst_b_mclk_div,
+       &mst_c_mclk_div,
+       &mst_d_mclk_div,
+       &mst_e_mclk_div,
+       &mst_f_mclk_div,
+       &mst_a_mclk,
+       &mst_b_mclk,
+       &mst_c_mclk,
+       &mst_d_mclk,
+       &mst_e_mclk,
+       &mst_f_mclk,
+       &spdifout_clk_sel,
+       &spdifout_clk_div,
+       &spdifout_clk,
+       &spdifin_clk_sel,
+       &spdifin_clk_div,
+       &spdifin_clk,
+       &pdm_dclk_sel,
+       &pdm_dclk_div,
+       &pdm_dclk,
+       &pdm_sysclk_sel,
+       &pdm_sysclk_div,
+       &pdm_sysclk,
+       &mst_a_sclk_pre_en,
+       &mst_b_sclk_pre_en,
+       &mst_c_sclk_pre_en,
+       &mst_d_sclk_pre_en,
+       &mst_e_sclk_pre_en,
+       &mst_f_sclk_pre_en,
+       &mst_a_sclk_div,
+       &mst_b_sclk_div,
+       &mst_c_sclk_div,
+       &mst_d_sclk_div,
+       &mst_e_sclk_div,
+       &mst_f_sclk_div,
+       &mst_a_sclk_post_en,
+       &mst_b_sclk_post_en,
+       &mst_c_sclk_post_en,
+       &mst_d_sclk_post_en,
+       &mst_e_sclk_post_en,
+       &mst_f_sclk_post_en,
+       &mst_a_sclk,
+       &mst_b_sclk,
+       &mst_c_sclk,
+       &mst_d_sclk,
+       &mst_e_sclk,
+       &mst_f_sclk,
+       &mst_a_lrclk_div,
+       &mst_b_lrclk_div,
+       &mst_c_lrclk_div,
+       &mst_d_lrclk_div,
+       &mst_e_lrclk_div,
+       &mst_f_lrclk_div,
+       &mst_a_lrclk,
+       &mst_b_lrclk,
+       &mst_c_lrclk,
+       &mst_d_lrclk,
+       &mst_e_lrclk,
+       &mst_f_lrclk,
+       &tdmin_a_sclk_sel,
+       &tdmin_b_sclk_sel,
+       &tdmin_c_sclk_sel,
+       &tdmin_lb_sclk_sel,
+       &tdmout_a_sclk_sel,
+       &tdmout_b_sclk_sel,
+       &tdmout_c_sclk_sel,
+       &tdmin_a_sclk_pre_en,
+       &tdmin_b_sclk_pre_en,
+       &tdmin_c_sclk_pre_en,
+       &tdmin_lb_sclk_pre_en,
+       &tdmout_a_sclk_pre_en,
+       &tdmout_b_sclk_pre_en,
+       &tdmout_c_sclk_pre_en,
+       &tdmin_a_sclk_post_en,
+       &tdmin_b_sclk_post_en,
+       &tdmin_c_sclk_post_en,
+       &tdmin_lb_sclk_post_en,
+       &tdmout_a_sclk_post_en,
+       &tdmout_b_sclk_post_en,
+       &tdmout_c_sclk_post_en,
+       &tdmin_a_sclk,
+       &tdmin_b_sclk,
+       &tdmin_c_sclk,
+       &tdmin_lb_sclk,
+       &tdmout_a_sclk,
+       &tdmout_b_sclk,
+       &tdmout_c_sclk,
+       &tdmin_a_lrclk,
+       &tdmin_b_lrclk,
+       &tdmin_c_lrclk,
+       &tdmin_lb_lrclk,
+       &tdmout_a_lrclk,
+       &tdmout_b_lrclk,
+       &tdmout_c_lrclk,
+};
+
+static struct clk_regmap *const g12a_clk_regmaps[] = {
        &ddr_arb,
        &pdm,
        &tdmin_a,
@@ -1713,8 +1832,8 @@ static const struct audioclk_data axg_audioclk_data = {
};

static const struct audioclk_data g12a_audioclk_data = {
-       .regmap_clks = axg_clk_regmaps,
-       .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
+       .regmap_clks = g12a_clk_regmaps,
+       .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
        .hw_onecell_data = &g12a_audio_hw_onecell_data,
        .reset_offset = AUDIO_SW_RESET,
        .reset_num = 26,
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 30c15766ebb16..05d032be15c8f 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = {
                        &g12a_fclk_div2_div.hw
                },
                .num_parents = 1,
+               /*
+                * Similar to fclk_div3, it seems that this clock is used by
+                * the resident firmware and is required by the platform to
+                * operate correctly.
+                * Until the following condition are met, we need this clock to
+                * be marked as critical:
+                * a) Mark the clock used by a firmware resource, if possible
+                * b) CCF has a clock hand-off mechanism to make the sure the
+                *    clock stays on until the proper driver comes along
+                */
+               .flags = CLK_IS_CRITICAL,
        },
};

diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index c6fb57cd576f5..aa5c0c6ead017 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
        .cmd_rcgr = 0x48044,
        .mnd_width = 0,
        .hid_width = 5,
-       .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+       .parent_map = gcc_parent_map_xo_gpll0,
        .freq_tbl = ftbl_hmss_rbcpr_clk_src,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "hmss_rbcpr_clk_src",
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index b333fc28c94b6..37c858d689e0d 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -166,7 +166,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
                                unsigned long flags,
                                spinlock_t *lock)
{
-       struct clk *clk;
+       struct clk *clk = ERR_PTR(-ENOMEM);
        struct clk_mux *mux = NULL;
        struct clk_gate *gate = NULL;
        struct clk_divider *div = NULL;
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 09aa44cb8a91d..ba04cb381cd3f 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -341,7 +341,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
        return read_hv_clock_tsc();
}

-static u64 read_hv_sched_clock_tsc(void)
+static u64 notrace read_hv_sched_clock_tsc(void)
{
        return (read_hv_clock_tsc() - hv_sched_clock_offset) *
                (NSEC_PER_SEC / HV_CLOCK_HZ);
@@ -404,7 +404,7 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
        return read_hv_clock_msr();
}

-static u64 read_hv_sched_clock_msr(void)
+static u64 notrace read_hv_sched_clock_msr(void)
{
        return (read_hv_clock_msr() - hv_sched_clock_offset) *
                (NSEC_PER_SEC / HV_CLOCK_HZ);
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index df1c941260d14..b4af4094309b0 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -484,6 +484,12 @@ remove_opp:
/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
late_initcall(armada37xx_cpufreq_driver_init);

+static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
+       { .compatible = "marvell,armada-3700-nb-pm" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
+
MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 8646eb197cd96..31f5c4ebbac9f 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -884,12 +884,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
                                unsigned long action, void *unused)
{
        int cpu;
-       struct cpufreq_policy cpu_policy;
+       struct cpufreq_policy *cpu_policy;

        rebooting = true;
        for_each_online_cpu(cpu) {
-               cpufreq_get_policy(&cpu_policy, cpu);
-               powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
+               cpu_policy = cpufreq_cpu_get(cpu);
+               if (!cpu_policy)
+                       continue;
+               powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
+               cpufreq_cpu_put(cpu_policy);
        }

        return NOTIFY_DONE;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index b957061424a1f..8f3d6d31da52f 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -120,7 +120,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
        /* Be sure all data is written before enabling the task */
        wmb();

-       v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
+       /* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored
+        * on older SoCs, we have no reason to complicate things.
+        */
+       v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8);
        writel(v, ce->base + CE_TLR);
        mutex_unlock(&ce->mlock);

diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index bc35aa0ec07ae..d7f2840cf0a94 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -114,6 +114,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
        select CRYPTO_AUTHENC
        select CRYPTO_SKCIPHER
        select CRYPTO_DES
+       select CRYPTO_XTS
        help
          Selecting this will use CAAM Queue Interface (QI) for sending
          & receiving crypto jobs to/from CAAM. This gives better performance
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
|
|
index 315d53499ce85..829d41a1e5da1 100644
|
|
--- a/drivers/crypto/caam/caamalg_qi.c
|
|
+++ b/drivers/crypto/caam/caamalg_qi.c
|
|
@@ -18,6 +18,8 @@
|
|
#include "qi.h"
|
|
#include "jr.h"
|
|
#include "caamalg_desc.h"
|
|
+#include <crypto/xts.h>
|
|
+#include <asm/unaligned.h>
|
|
|
|
/*
|
|
* crypto alg
|
|
@@ -67,6 +69,12 @@ struct caam_ctx {
|
|
struct device *qidev;
|
|
spinlock_t lock; /* Protects multiple init of driver context */
|
|
struct caam_drv_ctx *drv_ctx[NUM_OP];
|
|
+ bool xts_key_fallback;
|
|
+ struct crypto_skcipher *fallback;
|
|
+};
|
|
+
|
|
+struct caam_skcipher_req_ctx {
|
|
+ struct skcipher_request fallback_req;
|
|
};
|
|
|
|
static int aead_set_sh_desc(struct crypto_aead *aead)
|
|
@@ -726,12 +734,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
|
|
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
|
|
struct device *jrdev = ctx->jrdev;
|
|
int ret = 0;
|
|
+ int err;
|
|
|
|
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
|
|
+ err = xts_verify_key(skcipher, key, keylen);
|
|
+ if (err) {
|
|
dev_dbg(jrdev, "key size mismatch\n");
|
|
- return -EINVAL;
|
|
+ return err;
|
|
}
|
|
|
|
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
|
|
+ ctx->xts_key_fallback = true;
|
|
+
|
|
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
ctx->cdata.keylen = keylen;
|
|
ctx->cdata.key_virt = key;
|
|
ctx->cdata.key_inline = true;
|
|
@@ -1373,6 +1390,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
|
|
return edesc;
|
|
}
|
|
|
|
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
|
|
+{
|
|
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
|
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
|
|
+
|
|
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
|
|
+}
|
|
+
|
|
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
|
|
{
|
|
struct skcipher_edesc *edesc;
|
|
@@ -1383,6 +1408,22 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
|
|
if (!req->cryptlen)
|
|
return 0;
|
|
|
|
+ if (ctx->fallback && (xts_skcipher_ivsize(req) ||
|
|
+ ctx->xts_key_fallback)) {
|
|
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
|
|
+
|
|
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
|
|
+ skcipher_request_set_callback(&rctx->fallback_req,
|
|
+ req->base.flags,
|
|
+ req->base.complete,
|
|
+ req->base.data);
|
|
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
|
|
+ req->dst, req->cryptlen, req->iv);
|
|
+
|
|
+ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
|
|
+ crypto_skcipher_decrypt(&rctx->fallback_req);
|
|
+ }
|
|
+
|
|
if (unlikely(caam_congested))
|
|
return -EAGAIN;
|
|
|
|
@@ -1507,6 +1548,7 @@ static struct caam_skcipher_alg driver_algs[] = {
|
|
.base = {
|
|
.cra_name = "xts(aes)",
|
|
.cra_driver_name = "xts-aes-caam-qi",
|
|
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
|
|
.cra_blocksize = AES_BLOCK_SIZE,
|
|
},
|
|
.setkey = xts_skcipher_setkey,
|
|
@@ -2440,9 +2482,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
|
|
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
|
|
struct caam_skcipher_alg *caam_alg =
|
|
container_of(alg, typeof(*caam_alg), skcipher);
|
|
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
|
|
+ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
|
|
+ int ret = 0;
|
|
+
|
|
+ if (alg_aai == OP_ALG_AAI_XTS) {
|
|
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
|
|
+ struct crypto_skcipher *fallback;
|
|
+
|
|
+ fallback = crypto_alloc_skcipher(tfm_name, 0,
|
|
+ CRYPTO_ALG_NEED_FALLBACK);
|
|
+ if (IS_ERR(fallback)) {
|
|
+ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
|
|
+ tfm_name, PTR_ERR(fallback));
|
|
+ return PTR_ERR(fallback);
|
|
+ }
|
|
+
|
|
+ ctx->fallback = fallback;
|
|
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
|
|
+ crypto_skcipher_reqsize(fallback));
|
|
+ }
|
|
+
|
|
+ ret = caam_init_common(ctx, &caam_alg->caam, false);
|
|
+ if (ret && ctx->fallback)
|
|
+ crypto_free_skcipher(ctx->fallback);
|
|
|
|
- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
|
|
- false);
|
|
+ return ret;
|
|
}
|
|
|
|
static int caam_aead_init(struct crypto_aead *tfm)
|
|
@@ -2468,7 +2533,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
|
|
|
|
static void caam_cra_exit(struct crypto_skcipher *tfm)
|
|
{
|
|
- caam_exit_common(crypto_skcipher_ctx(tfm));
|
|
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
|
|
+
|
|
+ if (ctx->fallback)
|
|
+ crypto_free_skcipher(ctx->fallback);
|
|
+ caam_exit_common(ctx);
|
|
}
|
|
|
|
static void caam_aead_exit(struct crypto_aead *tfm)
|
|
@@ -2502,7 +2571,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
|
|
alg->base.cra_module = THIS_MODULE;
|
|
alg->base.cra_priority = CAAM_CRA_PRIORITY;
|
|
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
|
|
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
|
|
+ alg->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
|
|
|
|
alg->init = caam_cra_init;
|
|
alg->exit = caam_cra_exit;
|
|
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
|
|
index 64112c736810e..7234b95241e91 100644
|
|
--- a/drivers/crypto/ccp/ccp-ops.c
|
|
+++ b/drivers/crypto/ccp/ccp-ops.c
|
|
@@ -1746,7 +1746,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
|
break;
|
|
default:
|
|
ret = -EINVAL;
|
|
- goto e_ctx;
|
|
+ goto e_data;
|
|
}
|
|
} else {
|
|
/* Stash the context */
|
|
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
|
|
index d39e1664fc7ed..3c65bf070c908 100644
|
|
--- a/drivers/crypto/ccree/cc_pm.c
|
|
+++ b/drivers/crypto/ccree/cc_pm.c
|
|
@@ -65,8 +65,12 @@ const struct dev_pm_ops ccree_pm = {
|
|
int cc_pm_get(struct device *dev)
|
|
{
|
|
int rc = pm_runtime_get_sync(dev);
|
|
+ if (rc < 0) {
|
|
+ pm_runtime_put_noidle(dev);
|
|
+ return rc;
|
|
+ }
|
|
|
|
- return (rc == 1 ? 0 : rc);
|
|
+ return 0;
|
|
}
|
|
|
|
void cc_pm_put_suspend(struct device *dev)
|
|
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
|
|
index 54093115eb95d..bad8e90ba168d 100644
|
|
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
|
|
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
|
|
@@ -92,11 +92,13 @@ static void chtls_sock_release(struct kref *ref)
|
|
static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
|
|
struct sock *sk)
|
|
{
|
|
+ struct adapter *adap = pci_get_drvdata(cdev->pdev);
|
|
struct net_device *ndev = cdev->ports[0];
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
struct net_device *temp;
|
|
int addr_type;
|
|
#endif
|
|
+ int i;
|
|
|
|
switch (sk->sk_family) {
|
|
case PF_INET:
|
|
@@ -127,8 +129,12 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
|
|
return NULL;
|
|
|
|
if (is_vlan_dev(ndev))
|
|
- return vlan_dev_real_dev(ndev);
|
|
- return ndev;
|
|
+ ndev = vlan_dev_real_dev(ndev);
|
|
+
|
|
+ for_each_port(adap, i)
|
|
+ if (cdev->ports[i] == ndev)
|
|
+ return ndev;
|
|
+ return NULL;
|
|
}
|
|
|
|
static void assign_rxopt(struct sock *sk, unsigned int opt)
|
|
@@ -477,7 +483,6 @@ void chtls_destroy_sock(struct sock *sk)
|
|
chtls_purge_write_queue(sk);
|
|
free_tls_keyid(sk);
|
|
kref_put(&csk->kref, chtls_sock_release);
|
|
- csk->cdev = NULL;
|
|
if (sk->sk_family == AF_INET)
|
|
sk->sk_prot = &tcp_prot;
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
@@ -736,14 +741,13 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
if (sk->sk_family == PF_INET6) {
|
|
- struct chtls_sock *csk;
|
|
+ struct net_device *ndev = chtls_find_netdev(cdev, sk);
|
|
int addr_type = 0;
|
|
|
|
- csk = rcu_dereference_sk_user_data(sk);
|
|
addr_type = ipv6_addr_type((const struct in6_addr *)
|
|
&sk->sk_v6_rcv_saddr);
|
|
if (addr_type != IPV6_ADDR_ANY)
|
|
- cxgb4_clip_release(csk->egress_dev, (const u32 *)
|
|
+ cxgb4_clip_release(ndev, (const u32 *)
|
|
&sk->sk_v6_rcv_saddr, 1);
|
|
}
|
|
#endif
|
|
@@ -1156,6 +1160,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
|
|
ndev = n->dev;
|
|
if (!ndev)
|
|
goto free_dst;
|
|
+ if (is_vlan_dev(ndev))
|
|
+ ndev = vlan_dev_real_dev(ndev);
|
|
+
|
|
port_id = cxgb4_port_idx(ndev);
|
|
|
|
csk = chtls_sock_create(cdev);
|
|
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
|
|
index 2e9acae1cba3b..9fb5ca6682ea2 100644
|
|
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
|
|
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
|
|
@@ -902,9 +902,9 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk,
|
|
return 0;
|
|
}
|
|
|
|
-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
|
|
+static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
|
|
{
|
|
- return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
|
|
+ return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
|
|
}
|
|
|
|
static int csk_wait_memory(struct chtls_dev *cdev,
|
|
@@ -1240,6 +1240,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
|
|
copied = 0;
|
|
csk = rcu_dereference_sk_user_data(sk);
|
|
cdev = csk->cdev;
|
|
+ lock_sock(sk);
|
|
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
|
|
|
|
err = sk_stream_wait_connect(sk, &timeo);
|
|
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
|
|
index 64614a9bdf219..047826f18bd35 100644
|
|
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
|
|
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
|
|
@@ -332,11 +332,14 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
|
|
ret = sec_alloc_pbuf_resource(dev, res);
|
|
if (ret) {
|
|
dev_err(dev, "fail to alloc pbuf dma resource!\n");
|
|
- goto alloc_fail;
|
|
+ goto alloc_pbuf_fail;
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
+alloc_pbuf_fail:
|
|
+ if (ctx->alg_type == SEC_AEAD)
|
|
+ sec_free_mac_resource(dev, qp_ctx->res);
|
|
alloc_fail:
|
|
sec_free_civ_resource(dev, res);
|
|
|
|
@@ -447,8 +450,10 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
|
|
ctx->fake_req_limit = QM_Q_DEPTH >> 1;
|
|
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
|
|
GFP_KERNEL);
|
|
- if (!ctx->qp_ctx)
|
|
- return -ENOMEM;
|
|
+ if (!ctx->qp_ctx) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_destroy_qps;
|
|
+ }
|
|
|
|
for (i = 0; i < sec->ctx_q_num; i++) {
|
|
ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
|
|
@@ -457,12 +462,15 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
|
|
}
|
|
|
|
return 0;
|
|
+
|
|
err_sec_release_qp_ctx:
|
|
for (i = i - 1; i >= 0; i--)
|
|
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
|
|
|
|
- sec_destroy_qps(ctx->qps, sec->ctx_q_num);
|
|
kfree(ctx->qp_ctx);
|
|
+err_destroy_qps:
|
|
+ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
|
|
index ad73fc9466821..3be6e0db0f9fc 100644
|
|
--- a/drivers/crypto/ixp4xx_crypto.c
|
|
+++ b/drivers/crypto/ixp4xx_crypto.c
|
|
@@ -528,7 +528,7 @@ static void release_ixp_crypto(struct device *dev)
|
|
|
|
if (crypt_virt) {
|
|
dma_free_coherent(dev,
|
|
- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
|
|
+ NPE_QLEN * sizeof(struct crypt_ctl),
|
|
crypt_virt, crypt_phys);
|
|
}
|
|
}
|
|
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
|
|
index 7e3ad085b5bdd..efce3a83b35a8 100644
|
|
--- a/drivers/crypto/mediatek/mtk-platform.c
|
|
+++ b/drivers/crypto/mediatek/mtk-platform.c
|
|
@@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
|
|
static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
|
|
{
|
|
struct mtk_ring **ring = cryp->ring;
|
|
- int i, err = ENOMEM;
|
|
+ int i;
|
|
|
|
for (i = 0; i < MTK_RING_MAX; i++) {
|
|
ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
|
|
@@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
|
|
return 0;
|
|
|
|
err_cleanup:
|
|
- for (; i--; ) {
|
|
+ do {
|
|
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
|
|
ring[i]->res_base, ring[i]->res_dma);
|
|
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
|
|
ring[i]->cmd_base, ring[i]->cmd_dma);
|
|
kfree(ring[i]);
|
|
- }
|
|
- return err;
|
|
+ } while (i--);
|
|
+ return -ENOMEM;
|
|
}
|
|
|
|
static int mtk_crypto_probe(struct platform_device *pdev)
|
|
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
|
|
index 82691a057d2a1..bc956dfb34de6 100644
|
|
--- a/drivers/crypto/omap-sham.c
|
|
+++ b/drivers/crypto/omap-sham.c
|
|
@@ -456,6 +456,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
|
|
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
|
|
u32 val, mask;
|
|
|
|
+ if (likely(ctx->digcnt))
|
|
+ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
|
|
+
|
|
/*
|
|
* Setting ALGO_CONST only for the first iteration and
|
|
* CLOSE_HASH only for the last one. Note that flags mode bits
|
|
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
|
|
index 7384e91c8b32b..0d32b641a7f9d 100644
|
|
--- a/drivers/crypto/picoxcell_crypto.c
|
|
+++ b/drivers/crypto/picoxcell_crypto.c
|
|
@@ -1666,11 +1666,6 @@ static int spacc_probe(struct platform_device *pdev)
|
|
goto err_clk_put;
|
|
}
|
|
|
|
- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
|
|
- if (ret)
|
|
- goto err_clk_disable;
|
|
-
|
|
-
|
|
/*
|
|
* Use an IRQ threshold of 50% as a default. This seems to be a
|
|
* reasonable trade off of latency against throughput but can be
|
|
@@ -1678,6 +1673,10 @@ static int spacc_probe(struct platform_device *pdev)
|
|
*/
|
|
engine->stat_irq_thresh = (engine->fifo_sz / 2);
|
|
|
|
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
|
|
+ if (ret)
|
|
+ goto err_clk_disable;
|
|
+
|
|
/*
|
|
* Configure the interrupts. We only use the STAT_CNT interrupt as we
|
|
* only submit a new packet for processing when we complete another in
|
|
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
|
|
index 4ef3eb11361c2..4a4c3284ae1f3 100644
|
|
--- a/drivers/crypto/stm32/Kconfig
|
|
+++ b/drivers/crypto/stm32/Kconfig
|
|
@@ -3,6 +3,7 @@ config CRYPTO_DEV_STM32_CRC
|
|
tristate "Support for STM32 crc accelerators"
|
|
depends on ARCH_STM32
|
|
select CRYPTO_HASH
|
|
+ select CRC32
|
|
help
|
|
This enables support for the CRC32 hw accelerator which can be found
|
|
on STMicroelectronics STM32 SOC.
|
|
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
|
|
index 3ba41148c2a46..2c13f5214d2cf 100644
|
|
--- a/drivers/crypto/stm32/stm32-crc32.c
|
|
+++ b/drivers/crypto/stm32/stm32-crc32.c
|
|
@@ -6,6 +6,7 @@
|
|
|
|
#include <linux/bitrev.h>
|
|
#include <linux/clk.h>
|
|
+#include <linux/crc32.h>
|
|
#include <linux/crc32poly.h>
|
|
#include <linux/module.h>
|
|
#include <linux/mod_devicetable.h>
|
|
@@ -147,7 +148,6 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
|
|
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
|
|
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
|
|
struct stm32_crc *crc;
|
|
- unsigned long flags;
|
|
|
|
crc = stm32_crc_get_next_crc();
|
|
if (!crc)
|
|
@@ -155,7 +155,15 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
|
|
|
|
pm_runtime_get_sync(crc->dev);
|
|
|
|
- spin_lock_irqsave(&crc->lock, flags);
|
|
+ if (!spin_trylock(&crc->lock)) {
|
|
+ /* Hardware is busy, calculate crc32 by software */
|
|
+ if (mctx->poly == CRC32_POLY_LE)
|
|
+ ctx->partial = crc32_le(ctx->partial, d8, length);
|
|
+ else
|
|
+ ctx->partial = __crc32c_le(ctx->partial, d8, length);
|
|
+
|
|
+ goto pm_out;
|
|
+ }
|
|
|
|
/*
|
|
* Restore previously calculated CRC for this context as init value
|
|
@@ -195,8 +203,9 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
|
|
/* Store partial result */
|
|
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
|
|
|
|
- spin_unlock_irqrestore(&crc->lock, flags);
|
|
+ spin_unlock(&crc->lock);
|
|
|
|
+pm_out:
|
|
pm_runtime_mark_last_busy(crc->dev);
|
|
pm_runtime_put_autosuspend(crc->dev);
|
|
|
|
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
|
|
index 323822372b4ce..7480fc1042093 100644
|
|
--- a/drivers/dma/dmatest.c
|
|
+++ b/drivers/dma/dmatest.c
|
|
@@ -1240,15 +1240,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
|
|
add_threaded_test(info);
|
|
|
|
/* Check if channel was added successfully */
|
|
- dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
|
|
-
|
|
- if (dtc->chan) {
|
|
+ if (!list_empty(&info->channels)) {
|
|
/*
|
|
* if new channel was not successfully added, revert the
|
|
* "test_channel" string to the name of the last successfully
|
|
* added channel. exception for when users issues empty string
|
|
* to channel parameter.
|
|
*/
|
|
+ dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
|
|
if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
|
|
&& (strcmp("", strim(test_channel)) != 0)) {
|
|
ret = -EINVAL;
|
|
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
|
|
index a1b56f52db2f2..5e7fdc0b6e3db 100644
|
|
--- a/drivers/dma/dw/core.c
|
|
+++ b/drivers/dma/dw/core.c
|
|
@@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
|
|
if (dws->dma_dev != chan->device->dev)
|
|
return false;
|
|
|
|
+ /* permit channels in accordance with the channels mask */
|
|
+ if (dws->channels && !(dws->channels & dwc->mask))
|
|
+ return false;
|
|
+
|
|
/* We have to copy data since dws can be temporary storage */
|
|
memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
|
|
|
|
diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
|
|
index 7a085b3c1854c..d9810980920a1 100644
|
|
--- a/drivers/dma/dw/dw.c
|
|
+++ b/drivers/dma/dw/dw.c
|
|
@@ -14,7 +14,7 @@
|
|
static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
|
|
{
|
|
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
|
|
- u32 cfghi = DWC_CFGH_FIFO_MODE;
|
|
+ u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE;
|
|
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
|
|
bool hs_polarity = dwc->dws.hs_polarity;
|
|
|
|
diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
|
|
index 9e27831dee324..43e975fb67142 100644
|
|
--- a/drivers/dma/dw/of.c
|
|
+++ b/drivers/dma/dw/of.c
|
|
@@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
|
|
};
|
|
dma_cap_mask_t cap;
|
|
|
|
- if (dma_spec->args_count != 3)
|
|
+ if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
|
|
return NULL;
|
|
|
|
slave.src_id = dma_spec->args[0];
|
|
slave.dst_id = dma_spec->args[0];
|
|
slave.m_master = dma_spec->args[1];
|
|
slave.p_master = dma_spec->args[2];
|
|
+ if (dma_spec->args_count >= 4)
|
|
+ slave.channels = dma_spec->args[3];
|
|
|
|
if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
|
|
slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
|
|
slave.m_master >= dw->pdata->nr_masters ||
|
|
- slave.p_master >= dw->pdata->nr_masters))
|
|
+ slave.p_master >= dw->pdata->nr_masters ||
|
|
+ slave.channels >= BIT(dw->pdata->nr_channels)))
|
|
return NULL;
|
|
|
|
dma_cap_zero(cap);
|
|
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
|
|
index fd782aee02d92..98c56606ab1a9 100644
|
|
--- a/drivers/dma/ioat/dma.c
|
|
+++ b/drivers/dma/ioat/dma.c
|
|
@@ -389,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
|
|
struct ioat_descs *descs = &ioat_chan->descs[i];
|
|
|
|
descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
|
|
- SZ_2M, &descs->hw, flags);
|
|
+ IOAT_CHUNK_SIZE, &descs->hw, flags);
|
|
if (!descs->virt) {
|
|
int idx;
|
|
|
|
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
|
|
index b194658b8b5c9..fbec28dc661d7 100644
|
|
--- a/drivers/edac/aspeed_edac.c
|
|
+++ b/drivers/edac/aspeed_edac.c
|
|
@@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev)
|
|
/* register interrupt handler */
|
|
irq = platform_get_irq(pdev, 0);
|
|
dev_dbg(&pdev->dev, "got irq %d\n", irq);
|
|
- if (!irq)
|
|
- return -ENODEV;
|
|
+ if (irq < 0)
|
|
+ return irq;
|
|
|
|
rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
|
|
DRV_NAME, ctx);
|
|
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
|
|
index 191aa7c19ded7..324a46b8479b0 100644
|
|
--- a/drivers/edac/i5100_edac.c
|
|
+++ b/drivers/edac/i5100_edac.c
|
|
@@ -1061,16 +1061,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
PCI_DEVICE_ID_INTEL_5100_19, 0);
|
|
if (!einj) {
|
|
ret = -ENODEV;
|
|
- goto bail_einj;
|
|
+ goto bail_mc_free;
|
|
}
|
|
|
|
rc = pci_enable_device(einj);
|
|
if (rc < 0) {
|
|
ret = rc;
|
|
- goto bail_disable_einj;
|
|
+ goto bail_einj;
|
|
}
|
|
|
|
-
|
|
mci->pdev = &pdev->dev;
|
|
|
|
priv = mci->pvt_info;
|
|
@@ -1136,14 +1135,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
bail_scrub:
|
|
priv->scrub_enable = 0;
|
|
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
|
|
- edac_mc_free(mci);
|
|
-
|
|
-bail_disable_einj:
|
|
pci_disable_device(einj);
|
|
|
|
bail_einj:
|
|
pci_dev_put(einj);
|
|
|
|
+bail_mc_free:
|
|
+ edac_mc_free(mci);
|
|
+
|
|
bail_disable_ch1:
|
|
pci_disable_device(ch1mm);
|
|
|
|
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
|
|
index 8be3e89a510e4..d7419a90a2f5b 100644
|
|
--- a/drivers/edac/ti_edac.c
|
|
+++ b/drivers/edac/ti_edac.c
|
|
@@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
|
|
|
|
/* add EMIF ECC error handler */
|
|
error_irq = platform_get_irq(pdev, 0);
|
|
- if (!error_irq) {
|
|
+ if (error_irq < 0) {
|
|
+ ret = error_irq;
|
|
edac_printk(KERN_ERR, EDAC_MOD_NAME,
|
|
"EMIF irq number not defined.\n");
|
|
goto err;
|
|
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
|
|
index 6998dc86b5ce8..b797a713c3313 100644
|
|
--- a/drivers/firmware/arm_scmi/mailbox.c
|
|
+++ b/drivers/firmware/arm_scmi/mailbox.c
|
|
@@ -110,7 +110,7 @@ static int mailbox_chan_free(int id, void *p, void *data)
|
|
struct scmi_chan_info *cinfo = p;
|
|
struct scmi_mailbox *smbox = cinfo->transport_info;
|
|
|
|
- if (!IS_ERR(smbox->chan)) {
|
|
+ if (smbox && !IS_ERR(smbox->chan)) {
|
|
mbox_free_channel(smbox->chan);
|
|
cinfo->transport_info = NULL;
|
|
smbox->chan = NULL;
|
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
index 7c1cc0ba30a55..78cf9e4fddbdf 100644
|
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
|
|
@@ -8178,8 +8178,7 @@ static int dm_update_plane_state(struct dc *dc,
|
|
dm_old_plane_state->dc_state,
|
|
dm_state->context)) {
|
|
|
|
- ret = EINVAL;
|
|
- return ret;
|
|
+ return -EINVAL;
|
|
}
|
|
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
|
|
index d016f50e187c8..d261f425b80ec 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
|
|
@@ -2538,7 +2538,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
|
|
|
|
copy_stream_update_to_stream(dc, context, stream, stream_update);
|
|
|
|
- if (update_type > UPDATE_TYPE_FAST) {
|
|
+ if (update_type >= UPDATE_TYPE_FULL) {
|
|
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
|
|
DC_ERROR("Mode validation failed for stream update!\n");
|
|
dc_release_state(context);
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
|
|
index ebff9b1e312e5..124c081a0f2ca 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
|
|
@@ -75,7 +75,7 @@ static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *d
|
|
else
|
|
bl_pwm &= 0xFFFF;
|
|
|
|
- current_backlight = bl_pwm << (1 + bl_int_count);
|
|
+ current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count);
|
|
|
|
if (bl_period == 0)
|
|
bl_period = 0xFFFF;
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
|
index 20bdabebbc434..76cd4f3de4eaf 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
|
@@ -3165,6 +3165,9 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
|
|
context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
|
|
dc->debug.enable_dram_clock_change_one_display_vactive;
|
|
|
|
+ /*Unsafe due to current pipe merge and split logic*/
|
|
+ ASSERT(context != dc->current_state);
|
|
+
|
|
if (fast_validate) {
|
|
return dcn20_validate_bandwidth_internal(dc, context, true);
|
|
}
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
|
index f00a568350848..c6ab3dee4fd69 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
|
@@ -1184,6 +1184,9 @@ bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
|
|
|
|
BW_VAL_TRACE_COUNT();
|
|
|
|
+ /*Unsafe due to current pipe merge and split logic*/
|
|
+ ASSERT(context != dc->current_state);
|
|
+
|
|
out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel);
|
|
|
|
if (pipe_cnt == 0)
|
|
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
|
|
index 5d67a41f7c3a8..3dd70d813f694 100644
|
|
--- a/drivers/gpu/drm/drm_debugfs_crc.c
|
|
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
|
|
@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
|
|
source[len - 1] = '\0';
|
|
|
|
ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ kfree(source);
|
|
return ret;
|
|
+ }
|
|
|
|
spin_lock_irq(&crc->lock);
|
|
|
|
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
|
|
index 8b2d5c945c95c..1d85af9a481ac 100644
|
|
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
|
|
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
|
|
@@ -175,6 +175,10 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
|
|
}
|
|
}
|
|
|
|
+/*
|
|
+ * Note that on error, drm_gem_vram_init will free the buffer object.
|
|
+ */
|
|
+
|
|
static int drm_gem_vram_init(struct drm_device *dev,
|
|
struct drm_gem_vram_object *gbo,
|
|
size_t size, unsigned long pg_align)
|
|
@@ -184,15 +188,19 @@ static int drm_gem_vram_init(struct drm_device *dev,
|
|
int ret;
|
|
size_t acc_size;
|
|
|
|
- if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
|
|
+ if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
|
|
+ kfree(gbo);
|
|
return -EINVAL;
|
|
+ }
|
|
bdev = &vmm->bdev;
|
|
|
|
gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
|
|
|
|
ret = drm_gem_object_init(dev, &gbo->bo.base, size);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ kfree(gbo);
|
|
return ret;
|
|
+ }
|
|
|
|
acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
|
|
|
|
@@ -203,13 +211,13 @@ static int drm_gem_vram_init(struct drm_device *dev,
|
|
&gbo->placement, pg_align, false, acc_size,
|
|
NULL, NULL, ttm_buffer_object_destroy);
|
|
if (ret)
|
|
- goto err_drm_gem_object_release;
|
|
+ /*
|
|
+ * A failing ttm_bo_init will call ttm_buffer_object_destroy
|
|
+ * to release gbo->bo.base and kfree gbo.
|
|
+ */
|
|
+ return ret;
|
|
|
|
return 0;
|
|
-
|
|
-err_drm_gem_object_release:
|
|
- drm_gem_object_release(&gbo->bo.base);
|
|
- return ret;
|
|
}
|
|
|
|
/**
|
|
@@ -243,13 +251,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
|
|
|
|
ret = drm_gem_vram_init(dev, gbo, size, pg_align);
|
|
if (ret < 0)
|
|
- goto err_kfree;
|
|
+ return ERR_PTR(ret);
|
|
|
|
return gbo;
|
|
-
|
|
-err_kfree:
|
|
- kfree(gbo);
|
|
- return ERR_PTR(ret);
|
|
}
|
|
EXPORT_SYMBOL(drm_gem_vram_create);
|
|
|
|
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
|
|
index f41cbb753bb46..720a767118c9c 100644
|
|
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
|
|
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
|
|
@@ -2078,7 +2078,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
|
|
intel_dp->dpcd,
|
|
sizeof(intel_dp->dpcd));
|
|
cdv_intel_edp_panel_vdd_off(gma_encoder);
|
|
- if (ret == 0) {
|
|
+ if (ret <= 0) {
|
|
/* if this fails, presume the device is a ghost */
|
|
DRM_INFO("failed to retrieve link info, disabling eDP\n");
|
|
drm_encoder_cleanup(encoder);
|
|
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
|
|
index d8b43500f12d1..2d01a293aa782 100644
|
|
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
|
|
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
|
|
@@ -485,7 +485,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
|
|
mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
|
|
cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
|
|
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
|
|
- cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
|
|
+ cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
|
|
mtk_crtc_ddp_config(crtc, cmdq_handle);
|
|
cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
|
|
}
|
|
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
|
|
index d6023ba8033c0..3bb567812b990 100644
|
|
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
|
|
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
|
|
@@ -864,7 +864,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
|
|
int i;
|
|
|
|
a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
|
|
- sizeof(a6xx_state->indexed_regs));
|
|
+ sizeof(*a6xx_state->indexed_regs));
|
|
if (!a6xx_state->indexed_regs)
|
|
return;
|
|
|
|
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
|
|
index a74ccc5b8220d..5b5809c0e44b3 100644
|
|
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
|
|
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
|
|
@@ -189,10 +189,16 @@ struct msm_gem_address_space *
|
|
adreno_iommu_create_address_space(struct msm_gpu *gpu,
|
|
struct platform_device *pdev)
|
|
{
|
|
- struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
|
|
- struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
|
|
+ struct iommu_domain *iommu;
|
|
+ struct msm_mmu *mmu;
|
|
struct msm_gem_address_space *aspace;
|
|
|
|
+ iommu = iommu_domain_alloc(&platform_bus_type);
|
|
+ if (!iommu)
|
|
+ return NULL;
|
|
+
|
|
+ mmu = msm_iommu_new(&pdev->dev, iommu);
|
|
+
|
|
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
|
|
0xffffffff - SZ_16M);
|
|
|
|
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
|
|
index 1026e1e5bec10..4d81a0c73616f 100644
|
|
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
|
|
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
|
|
@@ -881,7 +881,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
|
|
struct drm_plane *plane;
|
|
struct drm_display_mode *mode;
|
|
|
|
- int cnt = 0, rc = 0, mixer_width, i, z_pos;
|
|
+ int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
|
|
|
|
struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
|
|
int multirect_count = 0;
|
|
@@ -914,9 +914,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
|
|
|
|
memset(pipe_staged, 0, sizeof(pipe_staged));
|
|
|
|
- mixer_width = mode->hdisplay / cstate->num_mixers;
|
|
+ if (cstate->num_mixers) {
|
|
+ mixer_width = mode->hdisplay / cstate->num_mixers;
|
|
|
|
- _dpu_crtc_setup_lm_bounds(crtc, state);
|
|
+ _dpu_crtc_setup_lm_bounds(crtc, state);
|
|
+ }
|
|
|
|
crtc_rect.x2 = mode->hdisplay;
|
|
crtc_rect.y2 = mode->vdisplay;
|
|
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
|
|
index 497cf443a9afa..0b02e65a89e79 100644
|
|
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
|
|
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <drm/drm_drv.h>
|
|
#include <drm/drm_fb_cma_helper.h>
|
|
#include <drm/drm_fb_helper.h>
|
|
+#include <drm/drm_fourcc.h>
|
|
#include <drm/drm_gem_cma_helper.h>
|
|
#include <drm/drm_gem_framebuffer_helper.h>
|
|
#include <drm/drm_irq.h>
|
|
@@ -87,8 +88,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
|
|
clk_disable_unprepare(mxsfb->clk_axi);
|
|
}
|
|
|
|
+static struct drm_framebuffer *
|
|
+mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
|
|
+ const struct drm_mode_fb_cmd2 *mode_cmd)
|
|
+{
|
|
+ const struct drm_format_info *info;
|
|
+
|
|
+ info = drm_get_format_info(dev, mode_cmd);
|
|
+ if (!info)
|
|
+ return ERR_PTR(-EINVAL);
|
|
+
|
|
+ if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
|
|
+ dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
+
|
|
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
|
|
+}
|
|
+
|
|
static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
|
|
- .fb_create = drm_gem_fb_create,
|
|
+ .fb_create = mxsfb_fb_create,
|
|
.atomic_check = drm_atomic_helper_check,
|
|
.atomic_commit = drm_atomic_helper_commit,
|
|
};
|
|
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
|
|
index 7debf2ca42522..4b4ca31a2d577 100644
|
|
--- a/drivers/gpu/drm/panel/panel-simple.c
|
|
+++ b/drivers/gpu/drm/panel/panel-simple.c
|
|
@@ -2862,12 +2862,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
|
|
static const struct panel_desc ortustech_com43h4m85ulc = {
|
|
.modes = &ortustech_com43h4m85ulc_mode,
|
|
.num_modes = 1,
|
|
- .bpc = 8,
|
|
+ .bpc = 6,
|
|
.size = {
|
|
.width = 56,
|
|
.height = 93,
|
|
},
|
|
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
|
|
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
|
|
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
|
|
.connector_type = DRM_MODE_CONNECTOR_DPI,
|
|
};
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
|
|
index c30c719a80594..3c4a85213c15f 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
|
|
@@ -69,6 +69,9 @@ struct panfrost_compatible {
|
|
int num_pm_domains;
|
|
/* Only required if num_pm_domains > 1. */
|
|
const char * const *pm_domain_names;
|
|
+
|
|
+ /* Vendor implementation quirks callback */
|
|
+ void (*vendor_quirk)(struct panfrost_device *pfdev);
|
|
};
|
|
|
|
struct panfrost_device {
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
|
|
index 882fecc33fdb1..6e11a73e81aa3 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
|
|
@@ -667,7 +667,18 @@ static const struct panfrost_compatible default_data = {
|
|
.pm_domain_names = NULL,
|
|
};
|
|
|
|
+static const struct panfrost_compatible amlogic_data = {
|
|
+ .num_supplies = ARRAY_SIZE(default_supplies),
|
|
+ .supply_names = default_supplies,
|
|
+ .vendor_quirk = panfrost_gpu_amlogic_quirk,
|
|
+};
|
|
+
|
|
static const struct of_device_id dt_match[] = {
|
|
+ /* Set first to probe before the generic compatibles */
|
|
+ { .compatible = "amlogic,meson-gxm-mali",
|
|
+ .data = &amlogic_data, },
|
|
+ { .compatible = "amlogic,meson-g12a-mali",
|
|
+ .data = &amlogic_data, },
|
|
{ .compatible = "arm,mali-t604", .data = &default_data, },
|
|
{ .compatible = "arm,mali-t624", .data = &default_data, },
|
|
{ .compatible = "arm,mali-t628", .data = &default_data, },
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
|
|
index f2c1ddc41a9bf..165403878ad9b 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
|
|
@@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
|
|
return 0;
|
|
}
|
|
|
|
+void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
|
|
+{
|
|
+ /*
|
|
+ * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 needs
|
|
+ * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
|
|
+ * to operate correctly.
|
|
+ */
|
|
+ gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
|
|
+ gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
|
|
+}
|
|
+
|
|
static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
|
|
{
|
|
u32 quirks = 0;
|
|
@@ -135,6 +146,10 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
|
|
|
|
if (quirks)
|
|
gpu_write(pfdev, GPU_JM_CONFIG, quirks);
|
|
+
|
|
+ /* Here goes platform specific quirks */
|
|
+ if (pfdev->comp->vendor_quirk)
|
|
+ pfdev->comp->vendor_quirk(pfdev);
|
|
}
|
|
|
|
#define MAX_HW_REVS 6
|
|
@@ -304,16 +319,18 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
|
|
int ret;
|
|
u32 val;
|
|
|
|
+ panfrost_gpu_init_quirks(pfdev);
|
|
+
|
|
/* Just turn on everything for now */
|
|
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
|
|
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
|
|
- val, val == pfdev->features.l2_present, 100, 1000);
|
|
+ val, val == pfdev->features.l2_present, 100, 20000);
|
|
if (ret)
|
|
dev_err(pfdev->dev, "error powering up gpu L2");
|
|
|
|
gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
|
|
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
|
|
- val, val == pfdev->features.shader_present, 100, 1000);
|
|
+ val, val == pfdev->features.shader_present, 100, 20000);
|
|
if (ret)
|
|
dev_err(pfdev->dev, "error powering up gpu shader");
|
|
|
|
@@ -355,7 +372,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
|
|
return err;
|
|
}
|
|
|
|
- panfrost_gpu_init_quirks(pfdev);
|
|
panfrost_gpu_power_on(pfdev);
|
|
|
|
return 0;
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
|
|
index 4112412087b27..468c51e7e46db 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
|
|
@@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
|
|
void panfrost_gpu_power_on(struct panfrost_device *pfdev);
|
|
void panfrost_gpu_power_off(struct panfrost_device *pfdev);
|
|
|
|
+void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
|
|
+
|
|
#endif
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
|
|
index ea38ac60581c6..eddaa62ad8b0e 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
|
|
@@ -51,6 +51,10 @@
|
|
#define GPU_STATUS 0x34
|
|
#define GPU_STATUS_PRFCNT_ACTIVE BIT(2)
|
|
#define GPU_LATEST_FLUSH_ID 0x38
|
|
+#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */
|
|
+#define GPU_PWR_KEY_UNLOCK 0x2968A819
|
|
+#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */
|
|
+#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */
|
|
#define GPU_FAULT_STATUS 0x3C
|
|
#define GPU_FAULT_ADDRESS_LO 0x40
|
|
#define GPU_FAULT_ADDRESS_HI 0x44
|
|
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
|
|
index f1a81c9b184d4..fa09b3ae8b9d4 100644
|
|
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
|
|
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
|
|
@@ -13,6 +13,7 @@
|
|
#include <drm/drm_fourcc.h>
|
|
#include <drm/drm_gem_cma_helper.h>
|
|
#include <drm/drm_gem_framebuffer_helper.h>
|
|
+#include <drm/drm_managed.h>
|
|
#include <drm/drm_plane_helper.h>
|
|
#include <drm/drm_vblank.h>
|
|
|
|
@@ -341,6 +342,13 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
|
|
.atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
|
|
};
|
|
|
|
+static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
|
|
+{
|
|
+ struct rcar_du_vsp *vsp = res;
|
|
+
|
|
+ put_device(vsp->vsp);
|
|
+}
|
|
+
|
|
int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
|
|
unsigned int crtcs)
|
|
{
|
|
@@ -357,6 +365,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
|
|
|
|
vsp->vsp = &pdev->dev;
|
|
|
|
+ ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
ret = vsp1_du_init(vsp->vsp);
|
|
if (ret < 0)
|
|
return ret;
|
|
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
|
|
index fa39d140adc6c..94825ec3a09d8 100644
|
|
--- a/drivers/gpu/drm/vgem/vgem_drv.c
|
|
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
|
|
@@ -471,8 +471,8 @@ static int __init vgem_init(void)
|
|
|
|
out_put:
|
|
drm_dev_put(&vgem_device->drm);
|
|
+ platform_device_unregister(vgem_device->platform);
|
|
return ret;
|
|
-
|
|
out_unregister:
|
|
platform_device_unregister(vgem_device->platform);
|
|
out_free:
|
|
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
|
|
index 0a5c8cf409fb8..dc8cb8dfce58e 100644
|
|
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
|
|
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
|
|
@@ -80,8 +80,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
|
|
vgdev->capsets[i].id > 0, 5 * HZ);
|
|
if (ret == 0) {
|
|
DRM_ERROR("timed out waiting for cap set %d\n", i);
|
|
+ spin_lock(&vgdev->display_info_lock);
|
|
kfree(vgdev->capsets);
|
|
vgdev->capsets = NULL;
|
|
+ spin_unlock(&vgdev->display_info_lock);
|
|
return;
|
|
}
|
|
DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
|
|
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
|
|
index 9e663a5d99526..2517450bf46ba 100644
|
|
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
|
|
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
|
|
@@ -684,9 +684,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
|
|
int i = le32_to_cpu(cmd->capset_index);
|
|
|
|
spin_lock(&vgdev->display_info_lock);
|
|
- vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
|
|
- vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
|
|
- vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
|
|
+ if (vgdev->capsets) {
|
|
+ vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
|
|
+ vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
|
|
+ vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
|
|
+ } else {
|
|
+ DRM_ERROR("invalid capset memory.");
|
|
+ }
|
|
spin_unlock(&vgdev->display_info_lock);
|
|
wake_up(&vgdev->resp_wq);
|
|
}
|
|
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 4af2f19480f4f..b8b060354667e 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -33,7 +33,7 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
+ (i * composer->pitch)
+ (j * composer->cpp);
/* XRGB format ignores Alpha channel */
- memset(vaddr_out + src_offset + 24, 0, 8);
+ bitmap_clear(vaddr_out + src_offset, 24, 8);
crc = crc32_le(crc, vaddr_out + src_offset,
sizeof(u32));
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 1e8b2169d8341..e6a3ea1b399a7 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -188,8 +188,8 @@ static int __init vkms_init(void)

out_put:
drm_dev_put(&vkms_device->drm);
+ platform_device_unregister(vkms_device->platform);
return ret;
-
out_unregister:
platform_device_unregister(vkms_device->platform);
out_free:
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b49ec7dde6457..b269c792d25dc 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -726,6 +726,7 @@
#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
+#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
@@ -1122,6 +1123,7 @@
#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7

#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e3d475f4baf66..b2bff932c524f 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -797,7 +797,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x3b: /* Battery Strength */
hidinput_setup_battery(device, HID_INPUT_REPORT, field);
usage->type = EV_PWR;
- goto ignore;
+ return;

case 0x3c: /* Invert */
map_key_clear(BTN_TOOL_RUBBER);
@@ -1059,7 +1059,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case HID_DC_BATTERYSTRENGTH:
hidinput_setup_battery(device, HID_INPUT_REPORT, field);
usage->type = EV_PWR;
- goto ignore;
+ return;
}
goto unknown;

diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 6c55682c59740..044a93f3c1178 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -44,6 +44,10 @@ static const struct hid_device_id ite_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_SYNAPTICS,
USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
+ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_SYNAPTICS,
+ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e3152155c4b85..99f041afd5c0c 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1973,6 +1973,12 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },

+ /* Lenovo X1 TAB Gen 3 */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X1_TAB3) },
+
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 1a6e600197d0b..509b9bb1362cb 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -294,31 +294,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0, difference, old_profile;
+ struct kone_settings *settings = (struct kone_settings *)buf;

/* I need to get my data in one piece */
if (off != 0 || count != sizeof(struct kone_settings))
return -EINVAL;

mutex_lock(&kone->kone_lock);
- difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
+ difference = memcmp(settings, &kone->settings,
+ sizeof(struct kone_settings));
if (difference) {
- retval = kone_set_settings(usb_dev,
- (struct kone_settings const *)buf);
- if (retval) {
- mutex_unlock(&kone->kone_lock);
- return retval;
+ if (settings->startup_profile < 1 ||
+ settings->startup_profile > 5) {
+ retval = -EINVAL;
+ goto unlock;
}

+ retval = kone_set_settings(usb_dev, settings);
+ if (retval)
+ goto unlock;
+
old_profile = kone->settings.startup_profile;
- memcpy(&kone->settings, buf, sizeof(struct kone_settings));
+ memcpy(&kone->settings, settings, sizeof(struct kone_settings));

kone_profile_activated(kone, kone->settings.startup_profile);

if (kone->settings.startup_profile != old_profile)
kone_profile_report(kone, kone->settings.startup_profile);
}
+unlock:
mutex_unlock(&kone->kone_lock);

+ if (retval)
+ return retval;
+
return sizeof(struct kone_settings);
}
static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
|
|
index 94698cae04971..3e1d56585b91a 100644
|
|
--- a/drivers/hwmon/bt1-pvt.c
|
|
+++ b/drivers/hwmon/bt1-pvt.c
|
|
@@ -13,6 +13,7 @@
|
|
#include <linux/bitops.h>
|
|
#include <linux/clk.h>
|
|
#include <linux/completion.h>
|
|
+#include <linux/delay.h>
|
|
#include <linux/device.h>
|
|
#include <linux/hwmon-sysfs.h>
|
|
#include <linux/hwmon.h>
|
|
@@ -476,6 +477,7 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
|
|
long *val)
|
|
{
|
|
struct pvt_cache *cache = &pvt->cache[type];
|
|
+ unsigned long timeout;
|
|
u32 data;
|
|
int ret;
|
|
|
|
@@ -499,7 +501,14 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
|
|
pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0);
|
|
pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
|
|
|
|
- wait_for_completion(&cache->conversion);
|
|
+ /*
|
|
+ * Wait with timeout since in case if the sensor is suddenly powered
|
|
+ * down the request won't be completed and the caller will hang up on
|
|
+ * this procedure until the power is back up again. Multiply the
|
|
+ * timeout by the factor of two to prevent a false timeout.
|
|
+ */
|
|
+ timeout = 2 * usecs_to_jiffies(ktime_to_us(pvt->timeout));
|
|
+ ret = wait_for_completion_timeout(&cache->conversion, timeout);
|
|
|
|
pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
|
|
pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
|
|
@@ -509,6 +518,9 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
|
|
|
|
mutex_unlock(&pvt->iface_mtx);
|
|
|
|
+ if (!ret)
|
|
+ return -ETIMEDOUT;
|
|
+
|
|
if (type == PVT_TEMP)
|
|
*val = pvt_calc_poly(&poly_N_to_temp, data);
|
|
else
|
|
@@ -654,44 +666,16 @@ static int pvt_write_trim(struct pvt_hwmon *pvt, long val)
|
|
|
|
static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
|
|
{
|
|
- unsigned long rate;
|
|
- ktime_t kt;
|
|
- u32 data;
|
|
-
|
|
- rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
|
|
- if (!rate)
|
|
- return -ENODEV;
|
|
-
|
|
- /*
|
|
- * Don't bother with mutex here, since we just read data from MMIO.
|
|
- * We also have to scale the ticks timeout up to compensate the
|
|
- * ms-ns-data translations.
|
|
- */
|
|
- data = readl(pvt->regs + PVT_TTIMEOUT) + 1;
|
|
+ int ret;
|
|
|
|
- /*
|
|
- * Calculate ref-clock based delay (Ttotal) between two consecutive
|
|
- * data samples of the same sensor. So we first must calculate the
|
|
- * delay introduced by the internal ref-clock timer (Tref * Fclk).
|
|
- * Then add the constant timeout cuased by each conversion latency
|
|
- * (Tmin). The basic formulae for each conversion is following:
|
|
- * Ttotal = Tref * Fclk + Tmin
|
|
- * Note if alarms are enabled the sensors are polled one after
|
|
- * another, so in order to have the delay being applicable for each
|
|
- * sensor the requested value must be equally redistirbuted.
|
|
- */
|
|
-#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
|
|
- kt = ktime_set(PVT_SENSORS_NUM * (u64)data, 0);
|
|
- kt = ktime_divns(kt, rate);
|
|
- kt = ktime_add_ns(kt, PVT_SENSORS_NUM * PVT_TOUT_MIN);
|
|
-#else
|
|
- kt = ktime_set(data, 0);
|
|
- kt = ktime_divns(kt, rate);
|
|
- kt = ktime_add_ns(kt, PVT_TOUT_MIN);
|
|
-#endif
|
|
+ ret = mutex_lock_interruptible(&pvt->iface_mtx);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
/* Return the result in msec as hwmon sysfs interface requires. */
|
|
- *val = ktime_to_ms(kt);
|
|
+ *val = ktime_to_ms(pvt->timeout);
|
|
+
|
|
+ mutex_unlock(&pvt->iface_mtx);
|
|
|
|
return 0;
|
|
}
|
|
@@ -699,7 +683,7 @@ static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
|
|
static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
|
|
{
|
|
unsigned long rate;
|
|
- ktime_t kt;
|
|
+ ktime_t kt, cache;
|
|
u32 data;
|
|
int ret;
|
|
|
|
@@ -712,7 +696,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
|
|
* between all available sensors to have the requested delay
|
|
* applicable to each individual sensor.
|
|
*/
|
|
- kt = ms_to_ktime(val);
|
|
+ cache = kt = ms_to_ktime(val);
|
|
#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
|
|
kt = ktime_divns(kt, PVT_SENSORS_NUM);
|
|
#endif
|
|
@@ -741,6 +725,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
|
|
return ret;
|
|
|
|
pvt_set_tout(pvt, data);
|
|
+ pvt->timeout = cache;
|
|
|
|
mutex_unlock(&pvt->iface_mtx);
|
|
|
|
@@ -982,10 +967,52 @@ static int pvt_request_clks(struct pvt_hwmon *pvt)
|
|
return 0;
|
|
}
|
|
|
|
-static void pvt_init_iface(struct pvt_hwmon *pvt)
|
|
+static int pvt_check_pwr(struct pvt_hwmon *pvt)
|
|
{
|
|
+ unsigned long tout;
|
|
+ int ret = 0;
|
|
+ u32 data;
|
|
+
|
|
+ /*
|
|
+ * Test out the sensor conversion functionality. If it is not done on
|
|
+ * time then the domain must have been unpowered and we won't be able
|
|
+ * to use the device later in this driver.
|
|
+ * Note If the power source is lost during the normal driver work the
|
|
+ * data read procedure will either return -ETIMEDOUT (for the
|
|
+ * alarm-less driver configuration) or just stop the repeated
|
|
+ * conversion. In the later case alas we won't be able to detect the
|
|
+ * problem.
|
|
+ */
|
|
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_ALL, PVT_INTR_ALL);
|
|
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
|
|
+ pvt_set_tout(pvt, 0);
|
|
+ readl(pvt->regs + PVT_DATA);
|
|
+
|
|
+ tout = PVT_TOUT_MIN / NSEC_PER_USEC;
|
|
+ usleep_range(tout, 2 * tout);
|
|
+
|
|
+ data = readl(pvt->regs + PVT_DATA);
|
|
+ if (!(data & PVT_DATA_VALID)) {
|
|
+ ret = -ENODEV;
|
|
+ dev_err(pvt->dev, "Sensor is powered down\n");
|
|
+ }
|
|
+
|
|
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int pvt_init_iface(struct pvt_hwmon *pvt)
|
|
+{
|
|
+ unsigned long rate;
|
|
u32 trim, temp;
|
|
|
|
+ rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
|
|
+ if (!rate) {
|
|
+ dev_err(pvt->dev, "Invalid reference clock rate\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
/*
|
|
* Make sure all interrupts and controller are disabled so not to
|
|
* accidentally have ISR executed before the driver data is fully
|
|
@@ -1000,12 +1027,37 @@ static void pvt_init_iface(struct pvt_hwmon *pvt)
|
|
pvt_set_mode(pvt, pvt_info[pvt->sensor].mode);
|
|
pvt_set_tout(pvt, PVT_TOUT_DEF);
|
|
|
|
+ /*
|
|
+ * Preserve the current ref-clock based delay (Ttotal) between the
|
|
+ * sensors data samples in the driver data so not to recalculate it
|
|
+ * each time on the data requests and timeout reads. It consists of the
|
|
+ * delay introduced by the internal ref-clock timer (N / Fclk) and the
|
|
+ * constant timeout caused by each conversion latency (Tmin):
|
|
+ * Ttotal = N / Fclk + Tmin
|
|
+ * If alarms are enabled the sensors are polled one after another and
|
|
+ * in order to get the next measurement of a particular sensor the
|
|
+ * caller will have to wait for at most until all the others are
|
|
+ * polled. In that case the formulae will look a bit different:
|
|
+ * Ttotal = 5 * (N / Fclk + Tmin)
|
|
+ */
|
|
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
|
|
+ pvt->timeout = ktime_set(PVT_SENSORS_NUM * PVT_TOUT_DEF, 0);
|
|
+ pvt->timeout = ktime_divns(pvt->timeout, rate);
|
|
+ pvt->timeout = ktime_add_ns(pvt->timeout, PVT_SENSORS_NUM * PVT_TOUT_MIN);
|
|
+#else
|
|
+ pvt->timeout = ktime_set(PVT_TOUT_DEF, 0);
|
|
+ pvt->timeout = ktime_divns(pvt->timeout, rate);
|
|
+ pvt->timeout = ktime_add_ns(pvt->timeout, PVT_TOUT_MIN);
|
|
+#endif
|
|
+
|
|
trim = PVT_TRIM_DEF;
|
|
if (!of_property_read_u32(pvt->dev->of_node,
|
|
"baikal,pvt-temp-offset-millicelsius", &temp))
|
|
trim = pvt_calc_trim(temp);
|
|
|
|
pvt_set_trim(pvt, trim);
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
static int pvt_request_irq(struct pvt_hwmon *pvt)
|
|
@@ -1109,7 +1161,13 @@ static int pvt_probe(struct platform_device *pdev)
|
|
if (ret)
|
|
return ret;
|
|
|
|
- pvt_init_iface(pvt);
|
|
+ ret = pvt_check_pwr(pvt);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = pvt_init_iface(pvt);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
ret = pvt_request_irq(pvt);
|
|
if (ret)
|
|
diff --git a/drivers/hwmon/bt1-pvt.h b/drivers/hwmon/bt1-pvt.h
index 5eac73e948854..93b8dd5e7c944 100644
--- a/drivers/hwmon/bt1-pvt.h
+++ b/drivers/hwmon/bt1-pvt.h
@@ -10,6 +10,7 @@
#include <linux/completion.h>
#include <linux/hwmon.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/seqlock.h>

@@ -201,6 +202,7 @@ struct pvt_cache {
* if alarms are disabled).
* @sensor: current PVT sensor the data conversion is being performed for.
* @cache: data cache descriptor.
+ * @timeout: conversion timeout cache.
*/
struct pvt_hwmon {
struct device *dev;
@@ -214,6 +216,7 @@ struct pvt_hwmon {
struct mutex iface_mtx;
enum pvt_sensor_type sensor;
struct pvt_cache cache[PVT_SENSORS_NUM];
+ ktime_t timeout;
};

/*
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 18b4e071067f7..de04dff28945b 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -388,7 +388,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
@@ -419,7 +418,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
@@ -455,7 +453,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 5a5120121e507..3964ceab2817c 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1951,8 +1951,12 @@ static int w83627ehf_probe(struct platform_device *pdev)
data,
&w83627ehf_chip_info,
w83627ehf_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto exit_release;
+ }

- return PTR_ERR_OR_ZERO(hwmon_dev);
+ return 0;

exit_release:
release_region(res->start, IOREGION_LENGTH);
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
|
|
index 3ccc703dc9409..167fbc2e7033f 100644
|
|
--- a/drivers/hwtracing/coresight/coresight-cti.c
|
|
+++ b/drivers/hwtracing/coresight/coresight-cti.c
|
|
@@ -86,22 +86,16 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
|
|
CS_LOCK(drvdata->base);
|
|
}
|
|
|
|
-static void cti_enable_hw_smp_call(void *info)
|
|
-{
|
|
- struct cti_drvdata *drvdata = info;
|
|
-
|
|
- cti_write_all_hw_regs(drvdata);
|
|
-}
|
|
-
|
|
/* write regs to hardware and enable */
|
|
static int cti_enable_hw(struct cti_drvdata *drvdata)
|
|
{
|
|
struct cti_config *config = &drvdata->config;
|
|
struct device *dev = &drvdata->csdev->dev;
|
|
+ unsigned long flags;
|
|
int rc = 0;
|
|
|
|
pm_runtime_get_sync(dev->parent);
|
|
- spin_lock(&drvdata->spinlock);
|
|
+ spin_lock_irqsave(&drvdata->spinlock, flags);
|
|
|
|
/* no need to do anything if enabled or unpowered*/
|
|
if (config->hw_enabled || !config->hw_powered)
|
|
@@ -112,19 +106,11 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
|
|
if (rc)
|
|
goto cti_err_not_enabled;
|
|
|
|
- if (drvdata->ctidev.cpu >= 0) {
|
|
- rc = smp_call_function_single(drvdata->ctidev.cpu,
|
|
- cti_enable_hw_smp_call,
|
|
- drvdata, 1);
|
|
- if (rc)
|
|
- goto cti_err_not_enabled;
|
|
- } else {
|
|
- cti_write_all_hw_regs(drvdata);
|
|
- }
|
|
+ cti_write_all_hw_regs(drvdata);
|
|
|
|
config->hw_enabled = true;
|
|
atomic_inc(&drvdata->config.enable_req_count);
|
|
- spin_unlock(&drvdata->spinlock);
|
|
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
|
|
return rc;
|
|
|
|
cti_state_unchanged:
|
|
@@ -132,7 +118,7 @@ cti_state_unchanged:
|
|
|
|
/* cannot enable due to error */
|
|
cti_err_not_enabled:
|
|
- spin_unlock(&drvdata->spinlock);
|
|
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
|
|
pm_runtime_put(dev->parent);
|
|
return rc;
|
|
}
|
|
@@ -141,9 +127,7 @@ cti_err_not_enabled:
|
|
static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
|
|
{
|
|
struct cti_config *config = &drvdata->config;
|
|
- struct device *dev = &drvdata->csdev->dev;
|
|
|
|
- pm_runtime_get_sync(dev->parent);
|
|
spin_lock(&drvdata->spinlock);
|
|
config->hw_powered = true;
|
|
|
|
@@ -163,7 +147,6 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
|
|
/* did not re-enable due to no claim / no request */
|
|
cti_hp_not_enabled:
|
|
spin_unlock(&drvdata->spinlock);
|
|
- pm_runtime_put(dev->parent);
|
|
}
|
|
|
|
/* disable hardware */
|
|
@@ -511,12 +494,15 @@ static bool cti_add_sysfs_link(struct cti_drvdata *drvdata,
|
|
return !link_err;
|
|
}
|
|
|
|
-static void cti_remove_sysfs_link(struct cti_trig_con *tc)
|
|
+static void cti_remove_sysfs_link(struct cti_drvdata *drvdata,
|
|
+ struct cti_trig_con *tc)
|
|
{
|
|
struct coresight_sysfs_link link_info;
|
|
|
|
+ link_info.orig = drvdata->csdev;
|
|
link_info.orig_name = tc->con_dev_name;
|
|
link_info.target = tc->con_dev;
|
|
+ link_info.target_name = dev_name(&drvdata->csdev->dev);
|
|
coresight_remove_sysfs_link(&link_info);
|
|
}
|
|
|
|
@@ -606,8 +592,8 @@ void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
|
|
ctidrv = csdev_to_cti_drvdata(csdev->ect_dev);
|
|
ctidev = &ctidrv->ctidev;
|
|
list_for_each_entry(tc, &ctidev->trig_cons, node) {
|
|
- if (tc->con_dev == csdev->ect_dev) {
|
|
- cti_remove_sysfs_link(tc);
|
|
+ if (tc->con_dev == csdev) {
|
|
+ cti_remove_sysfs_link(ctidrv, tc);
|
|
tc->con_dev = NULL;
|
|
break;
|
|
}
|
|
@@ -651,7 +637,7 @@ static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
|
|
if (tc->con_dev) {
|
|
coresight_set_assoc_ectdev_mutex(tc->con_dev,
|
|
NULL);
|
|
- cti_remove_sysfs_link(tc);
|
|
+ cti_remove_sysfs_link(drvdata, tc);
|
|
tc->con_dev = NULL;
|
|
}
|
|
}
|
|
@@ -742,7 +728,8 @@ static int cti_dying_cpu(unsigned int cpu)
|
|
|
|
spin_lock(&drvdata->spinlock);
|
|
drvdata->config.hw_powered = false;
|
|
- coresight_disclaim_device(drvdata->base);
|
|
+ if (drvdata->config.hw_enabled)
|
|
+ coresight_disclaim_device(drvdata->base);
|
|
spin_unlock(&drvdata->spinlock);
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
|
|
index 84f1dcb698272..9b0c5d719232f 100644
|
|
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
|
|
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
|
|
@@ -126,10 +126,10 @@ static void free_sink_buffer(struct etm_event_data *event_data)
|
|
cpumask_t *mask = &event_data->mask;
|
|
struct coresight_device *sink;
|
|
|
|
- if (WARN_ON(cpumask_empty(mask)))
|
|
+ if (!event_data->snk_config)
|
|
return;
|
|
|
|
- if (!event_data->snk_config)
|
|
+ if (WARN_ON(cpumask_empty(mask)))
|
|
return;
|
|
|
|
cpu = cpumask_first(mask);
|
|
@@ -310,6 +310,16 @@ static void etm_event_start(struct perf_event *event, int flags)
|
|
if (!event_data)
|
|
goto fail;
|
|
|
|
+ /*
|
|
+ * Check if this ETM is allowed to trace, as decided
|
|
+ * at etm_setup_aux(). This could be due to an unreachable
|
|
+ * sink from this ETM. We can't do much in this case if
|
|
+ * the sink was specified or hinted to the driver. For
|
|
+ * now, simply don't record anything on this ETM.
|
|
+ */
|
|
+ if (!cpumask_test_cpu(cpu, &event_data->mask))
|
|
+ goto fail_end_stop;
|
|
+
|
|
path = etm_event_cpu_path(event_data, cpu);
|
|
/* We need a sink, no need to continue without one */
|
|
sink = coresight_get_sink(path);
|
|
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index b673e738bc9a8..a588cd6de01c7 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -206,7 +206,7 @@ static ssize_t reset_store(struct device *dev,
* each trace run.
*/
config->vinst_ctrl = BIT(0);
- if (drvdata->nr_addr_cmp == true) {
+ if (drvdata->nr_addr_cmp > 0) {
config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* SSSTATUS, bit[9] */
config->vinst_ctrl |= BIT(9);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
|
|
index 6089c481f8f19..d4e74b03c1e0f 100644
|
|
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
|
|
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
|
|
@@ -48,12 +48,11 @@ module_param(pm_save_enable, int, 0444);
|
|
MODULE_PARM_DESC(pm_save_enable,
|
|
"Save/restore state on power down: 1 = never, 2 = self-hosted");
|
|
|
|
-/* The number of ETMv4 currently registered */
|
|
-static int etm4_count;
|
|
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
|
|
static void etm4_set_default_config(struct etmv4_config *config);
|
|
static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
|
|
struct perf_event *event);
|
|
+static u64 etm4_get_access_type(struct etmv4_config *config);
|
|
|
|
static enum cpuhp_state hp_online;
|
|
|
|
@@ -781,6 +780,22 @@ static void etm4_init_arch_data(void *info)
|
|
CS_LOCK(drvdata->base);
|
|
}
|
|
|
|
+/* Set ELx trace filter access in the TRCVICTLR register */
|
|
+static void etm4_set_victlr_access(struct etmv4_config *config)
|
|
+{
|
|
+ u64 access_type;
|
|
+
|
|
+ config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK | ETM_EXLEVEL_NS_VICTLR_MASK);
|
|
+
|
|
+ /*
|
|
+ * TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering
|
|
+ * bits in vinst_ctrl, same bit pattern as TRCACATRn values returned by
|
|
+ * etm4_get_access_type() but with a relative shift in this register.
|
|
+ */
|
|
+ access_type = etm4_get_access_type(config) << ETM_EXLEVEL_LSHIFT_TRCVICTLR;
|
|
+ config->vinst_ctrl |= (u32)access_type;
|
|
+}
|
|
+
|
|
static void etm4_set_default_config(struct etmv4_config *config)
|
|
{
|
|
/* disable all events tracing */
|
|
@@ -798,6 +813,9 @@ static void etm4_set_default_config(struct etmv4_config *config)
|
|
|
|
/* TRCVICTLR::EVENT = 0x01, select the always on logic */
|
|
config->vinst_ctrl = BIT(0);
|
|
+
|
|
+ /* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */
|
|
+ etm4_set_victlr_access(config);
|
|
}
|
|
|
|
static u64 etm4_get_ns_access_type(struct etmv4_config *config)
|
|
@@ -1062,7 +1080,7 @@ out:
|
|
|
|
void etm4_config_trace_mode(struct etmv4_config *config)
|
|
{
|
|
- u32 addr_acc, mode;
|
|
+ u32 mode;
|
|
|
|
mode = config->mode;
|
|
mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
|
|
@@ -1074,15 +1092,7 @@ void etm4_config_trace_mode(struct etmv4_config *config)
|
|
if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
|
|
return;
|
|
|
|
- addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
|
|
- /* clear default config */
|
|
- addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
|
|
- ETM_EXLEVEL_NS_HYP);
|
|
-
|
|
- addr_acc |= etm4_get_ns_access_type(config);
|
|
-
|
|
- config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
|
|
- config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
|
|
+ etm4_set_victlr_access(config);
|
|
}
|
|
|
|
static int etm4_online_cpu(unsigned int cpu)
|
|
@@ -1179,7 +1189,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
|
|
state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
|
|
state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
|
|
|
|
- for (i = 0; i < drvdata->nrseqstate; i++)
|
|
+ for (i = 0; i < drvdata->nrseqstate - 1; i++)
|
|
state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
|
|
|
|
state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
|
|
@@ -1223,7 +1233,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
|
|
state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
|
|
|
|
state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
|
|
- state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR1);
|
|
+ state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
|
|
|
|
state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
|
|
|
|
@@ -1284,7 +1294,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
|
|
writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
|
|
writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
|
|
|
|
- for (i = 0; i < drvdata->nrseqstate; i++)
|
|
+ for (i = 0; i < drvdata->nrseqstate - 1; i++)
|
|
writel_relaxed(state->trcseqevr[i],
|
|
drvdata->base + TRCSEQEVRn(i));
|
|
|
|
@@ -1333,7 +1343,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
|
|
writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
|
|
|
|
writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
|
|
- writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR1);
|
|
+ writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
|
|
|
|
writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
|
|
|
|
@@ -1394,28 +1404,25 @@ static struct notifier_block etm4_cpu_pm_nb = {
|
|
.notifier_call = etm4_cpu_pm_notify,
|
|
};
|
|
|
|
-/* Setup PM. Called with cpus locked. Deals with error conditions and counts */
|
|
-static int etm4_pm_setup_cpuslocked(void)
|
|
+/* Setup PM. Deals with error conditions and counts */
|
|
+static int __init etm4_pm_setup(void)
|
|
{
|
|
int ret;
|
|
|
|
- if (etm4_count++)
|
|
- return 0;
|
|
-
|
|
ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
|
|
if (ret)
|
|
- goto reduce_count;
|
|
+ return ret;
|
|
|
|
- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
|
|
- "arm/coresight4:starting",
|
|
- etm4_starting_cpu, etm4_dying_cpu);
|
|
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
|
|
+ "arm/coresight4:starting",
|
|
+ etm4_starting_cpu, etm4_dying_cpu);
|
|
|
|
if (ret)
|
|
goto unregister_notifier;
|
|
|
|
- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
|
|
- "arm/coresight4:online",
|
|
- etm4_online_cpu, NULL);
|
|
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
|
|
+ "arm/coresight4:online",
|
|
+ etm4_online_cpu, NULL);
|
|
|
|
/* HP dyn state ID returned in ret on success */
|
|
if (ret > 0) {
|
|
@@ -1424,21 +1431,15 @@ static int etm4_pm_setup_cpuslocked(void)
|
|
}
|
|
|
|
/* failed dyn state - remove others */
|
|
- cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING);
|
|
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
|
|
|
|
unregister_notifier:
|
|
cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
|
|
-
|
|
-reduce_count:
|
|
- --etm4_count;
|
|
return ret;
|
|
}
|
|
|
|
-static void etm4_pm_clear(void)
|
|
+static void __init etm4_pm_clear(void)
|
|
{
|
|
- if (--etm4_count != 0)
|
|
- return;
|
|
-
|
|
cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
|
|
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
|
|
if (hp_online) {
|
|
@@ -1491,22 +1492,12 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
|
|
if (!desc.name)
|
|
return -ENOMEM;
|
|
|
|
- cpus_read_lock();
|
|
etmdrvdata[drvdata->cpu] = drvdata;
|
|
|
|
if (smp_call_function_single(drvdata->cpu,
|
|
etm4_init_arch_data, drvdata, 1))
|
|
dev_err(dev, "ETM arch init failed\n");
|
|
|
|
- ret = etm4_pm_setup_cpuslocked();
|
|
- cpus_read_unlock();
|
|
-
|
|
- /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */
|
|
- if (ret) {
|
|
- etmdrvdata[drvdata->cpu] = NULL;
|
|
- return ret;
|
|
- }
|
|
-
|
|
if (etm4_arch_supported(drvdata->arch) == false) {
|
|
ret = -EINVAL;
|
|
goto err_arch_supported;
|
|
@@ -1553,7 +1544,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
|
|
|
|
err_arch_supported:
|
|
etmdrvdata[drvdata->cpu] = NULL;
|
|
- etm4_pm_clear();
|
|
return ret;
|
|
}
|
|
|
|
@@ -1591,4 +1581,23 @@ static struct amba_driver etm4x_driver = {
|
|
.probe = etm4_probe,
|
|
.id_table = etm4_ids,
|
|
};
|
|
-builtin_amba_driver(etm4x_driver);
|
|
+
|
|
+static int __init etm4x_init(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = etm4_pm_setup();
|
|
+
|
|
+ /* etm4_pm_setup() does its own cleanup - exit on error */
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = amba_driver_register(&etm4x_driver);
|
|
+ if (ret) {
|
|
+ pr_err("Error registering etm4x driver\n");
|
|
+ etm4_pm_clear();
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+device_initcall(etm4x_init);
|
|
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 47729e04aac72..ab38f9afd821a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -192,6 +192,9 @@
#define ETM_EXLEVEL_NS_HYP BIT(14)
#define ETM_EXLEVEL_NS_NA BIT(15)

+/* access level control in TRCVICTLR - same bits as TRCACATRn but shifted */
+#define ETM_EXLEVEL_LSHIFT_TRCVICTLR 8
+
/* secure / non secure masks - TRCVICTLR, IDR3 */
#define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
/* NS MON (EL3) mode never implemented */
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
|
|
index e4912abda3aa2..85a6c099ddeb1 100644
|
|
--- a/drivers/hwtracing/coresight/coresight-platform.c
|
|
+++ b/drivers/hwtracing/coresight/coresight-platform.c
|
|
@@ -712,11 +712,11 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
|
|
return dir;
|
|
|
|
if (dir == ACPI_CORESIGHT_LINK_MASTER) {
|
|
- if (ptr->outport > pdata->nr_outport)
|
|
- pdata->nr_outport = ptr->outport;
|
|
+ if (ptr->outport >= pdata->nr_outport)
|
|
+ pdata->nr_outport = ptr->outport + 1;
|
|
ptr++;
|
|
} else {
|
|
- WARN_ON(pdata->nr_inport == ptr->child_port);
|
|
+ WARN_ON(pdata->nr_inport == ptr->child_port + 1);
|
|
/*
|
|
* We do not track input port connections for a device.
|
|
* However we need the highest port number described,
|
|
@@ -724,8 +724,8 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
|
|
* record for an output connection. Hence, do not move
|
|
* the ptr for input connections
|
|
*/
|
|
- if (ptr->child_port > pdata->nr_inport)
|
|
- pdata->nr_inport = ptr->child_port;
|
|
+ if (ptr->child_port >= pdata->nr_inport)
|
|
+ pdata->nr_inport = ptr->child_port + 1;
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
|
|
index f3efbb3b2b4d1..cf03af09c6ced 100644
|
|
--- a/drivers/hwtracing/coresight/coresight.c
|
|
+++ b/drivers/hwtracing/coresight/coresight.c
|
|
@@ -1023,7 +1023,6 @@ static void coresight_device_release(struct device *dev)
|
|
{
|
|
struct coresight_device *csdev = to_coresight_device(dev);
|
|
|
|
- cti_remove_assoc_from_csdev(csdev);
|
|
fwnode_handle_put(csdev->dev.fwnode);
|
|
kfree(csdev->refcnt);
|
|
kfree(csdev);
|
|
@@ -1357,6 +1356,7 @@ void coresight_unregister(struct coresight_device *csdev)
|
|
{
|
|
etm_perf_del_symlink_sink(csdev);
|
|
/* Remove references of that device in the topology */
|
|
+ cti_remove_assoc_from_csdev(csdev);
|
|
coresight_remove_conns(csdev);
|
|
coresight_release_platform_data(csdev, csdev->pdata);
|
|
device_unregister(&csdev->dev);
|
|
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 735bf31a3fdff..6546d6cf3c24c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1191,6 +1191,7 @@ config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
depends on ARCH_RENESAS || COMPILE_TEST
select I2C_SLAVE
+ select RESET_CONTROLLER if ARCH_RCAR_GEN3
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
|
|
index 2ade99b105b91..bbf8dd491d245 100644
|
|
--- a/drivers/i2c/i2c-core-acpi.c
|
|
+++ b/drivers/i2c/i2c-core-acpi.c
|
|
@@ -264,6 +264,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
|
|
void i2c_acpi_register_devices(struct i2c_adapter *adap)
|
|
{
|
|
acpi_status status;
|
|
+ acpi_handle handle;
|
|
|
|
if (!has_acpi_companion(&adap->dev))
|
|
return;
|
|
@@ -274,6 +275,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
|
|
adap, NULL);
|
|
if (ACPI_FAILURE(status))
|
|
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
|
|
+
|
|
+ if (!adap->dev.parent)
|
|
+ return;
|
|
+
|
|
+ handle = ACPI_HANDLE(adap->dev.parent);
|
|
+ if (!handle)
|
|
+ return;
|
|
+
|
|
+ acpi_walk_dep_device_list(handle);
|
|
}
|
|
|
|
const struct acpi_device_id *
|
|
@@ -729,7 +739,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- acpi_walk_dep_device_list(handle);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
|
|
index 97f2e29265da7..cc7564446ccd2 100644
|
|
--- a/drivers/i3c/master.c
|
|
+++ b/drivers/i3c/master.c
|
|
@@ -1782,6 +1782,21 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
|
|
i3c_master_detach_free_devs(master);
|
|
}
|
|
|
|
+static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev)
|
|
+{
|
|
+ struct i3c_master_controller *master = i3cdev->common.master;
|
|
+ struct i3c_dev_boardinfo *i3cboardinfo;
|
|
+
|
|
+ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
|
|
+ if (i3cdev->info.pid != i3cboardinfo->pid)
|
|
+ continue;
|
|
+
|
|
+ i3cdev->boardinfo = i3cboardinfo;
|
|
+ i3cdev->info.static_addr = i3cboardinfo->static_addr;
|
|
+ return;
|
|
+ }
|
|
+}
|
|
+
|
|
static struct i3c_dev_desc *
|
|
i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
|
|
{
|
|
@@ -1837,10 +1852,10 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
|
|
if (ret)
|
|
goto err_detach_dev;
|
|
|
|
+ i3c_master_attach_boardinfo(newdev);
|
|
+
|
|
olddev = i3c_master_search_i3c_dev_duplicate(newdev);
|
|
if (olddev) {
|
|
- newdev->boardinfo = olddev->boardinfo;
|
|
- newdev->info.static_addr = olddev->info.static_addr;
|
|
newdev->dev = olddev->dev;
|
|
if (newdev->dev)
|
|
newdev->dev->desc = newdev;
|
|
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 3fee8bd7fe20b..3f2226928fe05 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -1635,8 +1635,10 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
sizeof(*master->ibi.slots),
GFP_KERNEL);
- if (!master->ibi.slots)
+ if (!master->ibi.slots) {
+ ret = -ENOMEM;
goto err_disable_sysclk;
+ }

writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
writel(MST_INT_IBIR_THR, master->regs + MST_IER);
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
|
|
index 0e2068ec068b8..358636954619d 100644
|
|
--- a/drivers/iio/adc/stm32-adc-core.c
|
|
+++ b/drivers/iio/adc/stm32-adc-core.c
|
|
@@ -794,6 +794,13 @@ static int stm32_adc_core_runtime_resume(struct device *dev)
|
|
{
|
|
return stm32_adc_core_hw_start(dev);
|
|
}
|
|
+
|
|
+static int stm32_adc_core_runtime_idle(struct device *dev)
|
|
+{
|
|
+ pm_runtime_mark_last_busy(dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
#endif
|
|
|
|
static const struct dev_pm_ops stm32_adc_core_pm_ops = {
|
|
@@ -801,7 +808,7 @@ static const struct dev_pm_ops stm32_adc_core_pm_ops = {
|
|
pm_runtime_force_resume)
|
|
SET_RUNTIME_PM_OPS(stm32_adc_core_runtime_suspend,
|
|
stm32_adc_core_runtime_resume,
|
|
- NULL)
|
|
+ stm32_adc_core_runtime_idle)
|
|
};
|
|
|
|
static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
|
|
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
|
|
index 26de0dab60bbb..d28c7c6940b00 100644
|
|
--- a/drivers/infiniband/core/cma.c
|
|
+++ b/drivers/infiniband/core/cma.c
|
|
@@ -68,6 +68,9 @@ static const char * const cma_events[] = {
|
|
[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
|
|
};
|
|
|
|
+static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
|
|
+ union ib_gid *mgid);
|
|
+
|
|
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
|
|
{
|
|
size_t index = event;
|
|
@@ -345,13 +348,10 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
|
|
|
|
struct cma_multicast {
|
|
struct rdma_id_private *id_priv;
|
|
- union {
|
|
- struct ib_sa_multicast *ib;
|
|
- } multicast;
|
|
+ struct ib_sa_multicast *sa_mc;
|
|
struct list_head list;
|
|
void *context;
|
|
struct sockaddr_storage addr;
|
|
- struct kref mcref;
|
|
u8 join_state;
|
|
};
|
|
|
|
@@ -363,18 +363,6 @@ struct cma_work {
|
|
struct rdma_cm_event event;
|
|
};
|
|
|
|
-struct cma_ndev_work {
|
|
- struct work_struct work;
|
|
- struct rdma_id_private *id;
|
|
- struct rdma_cm_event event;
|
|
-};
|
|
-
|
|
-struct iboe_mcast_work {
|
|
- struct work_struct work;
|
|
- struct rdma_id_private *id;
|
|
- struct cma_multicast *mc;
|
|
-};
|
|
-
|
|
union cma_ip_addr {
|
|
struct in6_addr ip6;
|
|
struct {
|
|
@@ -483,14 +471,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
|
|
rdma_start_port(cma_dev->device)];
|
|
}
|
|
|
|
-static inline void release_mc(struct kref *kref)
|
|
-{
|
|
- struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
|
|
-
|
|
- kfree(mc->multicast.ib);
|
|
- kfree(mc);
|
|
-}
|
|
-
|
|
static void cma_release_dev(struct rdma_id_private *id_priv)
|
|
{
|
|
mutex_lock(&lock);
|
|
@@ -1783,19 +1763,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
|
|
mutex_unlock(&lock);
|
|
}
|
|
|
|
-static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
|
|
- struct cma_multicast *mc)
|
|
+static void destroy_mc(struct rdma_id_private *id_priv,
|
|
+ struct cma_multicast *mc)
|
|
{
|
|
- struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
|
|
- struct net_device *ndev = NULL;
|
|
+ if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
|
|
+ ib_sa_free_multicast(mc->sa_mc);
|
|
|
|
- if (dev_addr->bound_dev_if)
|
|
- ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
|
|
- if (ndev) {
|
|
- cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
|
|
- dev_put(ndev);
|
|
+ if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
|
|
+ struct rdma_dev_addr *dev_addr =
|
|
+ &id_priv->id.route.addr.dev_addr;
|
|
+ struct net_device *ndev = NULL;
|
|
+
|
|
+ if (dev_addr->bound_dev_if)
|
|
+ ndev = dev_get_by_index(dev_addr->net,
|
|
+ dev_addr->bound_dev_if);
|
|
+ if (ndev) {
|
|
+ union ib_gid mgid;
|
|
+
|
|
+ cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
|
|
+ &mgid);
|
|
+ cma_igmp_send(ndev, &mgid, false);
|
|
+ dev_put(ndev);
|
|
+ }
|
|
}
|
|
- kref_put(&mc->mcref, release_mc);
|
|
+ kfree(mc);
|
|
}
|
|
|
|
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
|
|
@@ -1803,16 +1794,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
|
|
struct cma_multicast *mc;
|
|
|
|
while (!list_empty(&id_priv->mc_list)) {
|
|
- mc = container_of(id_priv->mc_list.next,
|
|
- struct cma_multicast, list);
|
|
+ mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
|
|
+ list);
|
|
list_del(&mc->list);
|
|
- if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
|
|
- id_priv->id.port_num)) {
|
|
- ib_sa_free_multicast(mc->multicast.ib);
|
|
- kfree(mc);
|
|
- } else {
|
|
- cma_leave_roce_mc_group(id_priv, mc);
|
|
- }
|
|
+ destroy_mc(id_priv, mc);
|
|
}
|
|
}
|
|
|
|
@@ -2646,32 +2631,14 @@ static void cma_work_handler(struct work_struct *_work)
|
|
struct rdma_id_private *id_priv = work->id;
|
|
|
|
mutex_lock(&id_priv->handler_mutex);
|
|
- if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
|
|
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
|
|
+ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
|
|
goto out_unlock;
|
|
-
|
|
- if (cma_cm_event_handler(id_priv, &work->event)) {
|
|
- cma_id_put(id_priv);
|
|
- destroy_id_handler_unlock(id_priv);
|
|
- goto out_free;
|
|
+ if (work->old_state != 0 || work->new_state != 0) {
|
|
+ if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
|
|
+ goto out_unlock;
|
|
}
|
|
|
|
-out_unlock:
|
|
- mutex_unlock(&id_priv->handler_mutex);
|
|
- cma_id_put(id_priv);
|
|
-out_free:
|
|
- kfree(work);
|
|
-}
|
|
-
|
|
-static void cma_ndev_work_handler(struct work_struct *_work)
|
|
-{
|
|
- struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
|
|
- struct rdma_id_private *id_priv = work->id;
|
|
-
|
|
- mutex_lock(&id_priv->handler_mutex);
|
|
- if (id_priv->state == RDMA_CM_DESTROYING ||
|
|
- id_priv->state == RDMA_CM_DEVICE_REMOVAL)
|
|
- goto out_unlock;
|
|
-
|
|
if (cma_cm_event_handler(id_priv, &work->event)) {
|
|
cma_id_put(id_priv);
|
|
destroy_id_handler_unlock(id_priv);
|
|
@@ -2682,6 +2649,8 @@ out_unlock:
|
|
mutex_unlock(&id_priv->handler_mutex);
|
|
cma_id_put(id_priv);
|
|
out_free:
|
|
+ if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
|
|
+ rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
|
|
kfree(work);
|
|
}
|
|
|
|
@@ -4295,63 +4264,66 @@ out:
|
|
}
|
|
EXPORT_SYMBOL(rdma_disconnect);
|
|
|
|
-static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
|
|
+static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
|
|
+ struct ib_sa_multicast *multicast,
|
|
+ struct rdma_cm_event *event,
|
|
+ struct cma_multicast *mc)
|
|
{
|
|
- struct rdma_id_private *id_priv;
|
|
- struct cma_multicast *mc = multicast->context;
|
|
- struct rdma_cm_event event = {};
|
|
- int ret = 0;
|
|
-
|
|
- id_priv = mc->id_priv;
|
|
- mutex_lock(&id_priv->handler_mutex);
|
|
- if (id_priv->state != RDMA_CM_ADDR_BOUND &&
|
|
- id_priv->state != RDMA_CM_ADDR_RESOLVED)
|
|
- goto out;
|
|
+ struct rdma_dev_addr *dev_addr;
|
|
+ enum ib_gid_type gid_type;
|
|
+ struct net_device *ndev;
|
|
|
|
if (!status)
|
|
status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
|
|
else
|
|
pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
|
|
status);
|
|
- mutex_lock(&id_priv->qp_mutex);
|
|
- if (!status && id_priv->id.qp) {
|
|
- status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
|
|
- be16_to_cpu(multicast->rec.mlid));
|
|
- if (status)
|
|
- pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
|
|
- status);
|
|
+
|
|
+ event->status = status;
|
|
+ event->param.ud.private_data = mc->context;
|
|
+ if (status) {
|
|
+ event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
|
|
+ return;
|
|
}
|
|
- mutex_unlock(&id_priv->qp_mutex);
|
|
|
|
- event.status = status;
|
|
- event.param.ud.private_data = mc->context;
|
|
- if (!status) {
|
|
- struct rdma_dev_addr *dev_addr =
|
|
- &id_priv->id.route.addr.dev_addr;
|
|
- struct net_device *ndev =
|
|
- dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
|
|
- enum ib_gid_type gid_type =
|
|
- id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
|
|
- rdma_start_port(id_priv->cma_dev->device)];
|
|
-
|
|
- event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
|
|
- ret = ib_init_ah_from_mcmember(id_priv->id.device,
|
|
- id_priv->id.port_num,
|
|
- &multicast->rec,
|
|
- ndev, gid_type,
|
|
- &event.param.ud.ah_attr);
|
|
- if (ret)
|
|
- event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
|
|
+ dev_addr = &id_priv->id.route.addr.dev_addr;
|
|
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
|
|
+ gid_type =
|
|
+ id_priv->cma_dev
|
|
+ ->default_gid_type[id_priv->id.port_num -
|
|
+ rdma_start_port(
|
|
+ id_priv->cma_dev->device)];
|
|
+
|
|
+ event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
|
|
+ if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
|
|
+ &multicast->rec, ndev, gid_type,
|
|
+ &event->param.ud.ah_attr)) {
|
|
+ event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
|
|
+ goto out;
|
|
+ }
|
|
|
|
- event.param.ud.qp_num = 0xFFFFFF;
|
|
- event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
|
|
- if (ndev)
|
|
- dev_put(ndev);
|
|
- } else
|
|
- event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
|
|
+ event->param.ud.qp_num = 0xFFFFFF;
|
|
+ event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
|
|
|
|
- ret = cma_cm_event_handler(id_priv, &event);
|
|
+out:
|
|
+ if (ndev)
|
|
+ dev_put(ndev);
|
|
+}
|
|
|
|
+static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
|
|
+{
|
|
+ struct cma_multicast *mc = multicast->context;
|
|
+ struct rdma_id_private *id_priv = mc->id_priv;
|
|
+ struct rdma_cm_event event = {};
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&id_priv->handler_mutex);
|
|
+ if (id_priv->state != RDMA_CM_ADDR_BOUND &&
|
|
+ id_priv->state != RDMA_CM_ADDR_RESOLVED)
|
|
+ goto out;
|
|
+
|
|
+ cma_make_mc_event(status, id_priv, multicast, &event, mc);
|
|
+ ret = cma_cm_event_handler(id_priv, &event);
|
|
rdma_destroy_ah_attr(&event.param.ud.ah_attr);
|
|
if (ret) {
|
|
destroy_id_handler_unlock(id_priv);
|
|
@@ -4441,23 +4413,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
|
|
IB_SA_MCMEMBER_REC_MTU |
|
|
IB_SA_MCMEMBER_REC_HOP_LIMIT;
|
|
|
|
- mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
|
|
- id_priv->id.port_num, &rec,
|
|
- comp_mask, GFP_KERNEL,
|
|
- cma_ib_mc_handler, mc);
|
|
- return PTR_ERR_OR_ZERO(mc->multicast.ib);
|
|
-}
|
|
-
|
|
-static void iboe_mcast_work_handler(struct work_struct *work)
|
|
-{
|
|
- struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
|
|
- struct cma_multicast *mc = mw->mc;
|
|
- struct ib_sa_multicast *m = mc->multicast.ib;
|
|
-
|
|
- mc->multicast.ib->context = mc;
|
|
- cma_ib_mc_handler(0, m);
|
|
- kref_put(&mc->mcref, release_mc);
|
|
- kfree(mw);
|
|
+ mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
|
|
+ id_priv->id.port_num, &rec, comp_mask,
|
|
+ GFP_KERNEL, cma_ib_mc_handler, mc);
|
|
+ return PTR_ERR_OR_ZERO(mc->sa_mc);
|
|
}
|
|
|
|
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
|
|
@@ -4492,52 +4451,47 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
|
|
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
|
|
struct cma_multicast *mc)
|
|
{
|
|
- struct iboe_mcast_work *work;
|
|
+ struct cma_work *work;
|
|
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
|
|
int err = 0;
|
|
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
|
|
struct net_device *ndev = NULL;
|
|
+ struct ib_sa_multicast ib;
|
|
enum ib_gid_type gid_type;
|
|
bool send_only;
|
|
|
|
send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
|
|
|
|
- if (cma_zero_addr((struct sockaddr *)&mc->addr))
|
|
+ if (cma_zero_addr(addr))
|
|
return -EINVAL;
|
|
|
|
work = kzalloc(sizeof *work, GFP_KERNEL);
|
|
if (!work)
|
|
return -ENOMEM;
|
|
|
|
- mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
|
|
- if (!mc->multicast.ib) {
|
|
- err = -ENOMEM;
|
|
- goto out1;
|
|
- }
|
|
-
|
|
gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
|
|
rdma_start_port(id_priv->cma_dev->device)];
|
|
- cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);
|
|
+ cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
|
|
|
|
- mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
|
|
+ ib.rec.pkey = cpu_to_be16(0xffff);
|
|
if (id_priv->id.ps == RDMA_PS_UDP)
|
|
- mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
|
|
+ ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
|
|
|
|
if (dev_addr->bound_dev_if)
|
|
ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
|
|
if (!ndev) {
|
|
err = -ENODEV;
|
|
- goto out2;
|
|
+ goto err_free;
|
|
}
|
|
- mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
|
|
- mc->multicast.ib->rec.hop_limit = 1;
|
|
- mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
|
|
+ ib.rec.rate = iboe_get_rate(ndev);
|
|
+ ib.rec.hop_limit = 1;
|
|
+ ib.rec.mtu = iboe_get_mtu(ndev->mtu);
|
|
|
|
if (addr->sa_family == AF_INET) {
|
|
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
|
|
- mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
|
|
+ ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
|
|
if (!send_only) {
|
|
- err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
|
|
+ err = cma_igmp_send(ndev, &ib.rec.mgid,
|
|
true);
|
|
}
|
|
}
|
|
@@ -4546,24 +4500,22 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
|
|
err = -ENOTSUPP;
|
|
}
|
|
dev_put(ndev);
|
|
- if (err || !mc->multicast.ib->rec.mtu) {
|
|
+ if (err || !ib.rec.mtu) {
|
|
if (!err)
|
|
err = -EINVAL;
|
|
- goto out2;
|
|
+ goto err_free;
|
|
}
|
|
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
|
|
- &mc->multicast.ib->rec.port_gid);
|
|
+ &ib.rec.port_gid);
|
|
work->id = id_priv;
|
|
- work->mc = mc;
|
|
- INIT_WORK(&work->work, iboe_mcast_work_handler);
|
|
- kref_get(&mc->mcref);
|
|
+ INIT_WORK(&work->work, cma_work_handler);
|
|
+ cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
|
|
+ /* Balances with cma_id_put() in cma_work_handler */
|
|
+ cma_id_get(id_priv);
|
|
queue_work(cma_wq, &work->work);
|
|
-
|
|
return 0;
|
|
|
|
-out2:
|
|
- kfree(mc->multicast.ib);
|
|
-out1:
|
|
+err_free:
|
|
kfree(work);
|
|
return err;
|
|
}
|
|
@@ -4575,6 +4527,10 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
|
|
struct cma_multicast *mc;
|
|
int ret;
|
|
|
|
+ /* Not supported for kernel QPs */
|
|
+ if (WARN_ON(id->qp))
|
|
+ return -EINVAL;
|
|
+
|
|
if (!id->device)
|
|
return -EINVAL;
|
|
|
|
@@ -4583,7 +4539,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
|
|
!cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
|
|
return -EINVAL;
|
|
|
|
- mc = kmalloc(sizeof *mc, GFP_KERNEL);
|
|
+ mc = kzalloc(sizeof(*mc), GFP_KERNEL);
|
|
if (!mc)
|
|
return -ENOMEM;
|
|
|
|
@@ -4593,7 +4549,6 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
|
|
mc->join_state = join_state;
|
|
|
|
if (rdma_protocol_roce(id->device, id->port_num)) {
|
|
- kref_init(&mc->mcref);
|
|
ret = cma_iboe_join_multicast(id_priv, mc);
|
|
if (ret)
|
|
goto out_err;
|
|
@@ -4625,25 +4580,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
|
|
id_priv = container_of(id, struct rdma_id_private, id);
|
|
spin_lock_irq(&id_priv->lock);
|
|
list_for_each_entry(mc, &id_priv->mc_list, list) {
|
|
- if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
|
|
- list_del(&mc->list);
|
|
- spin_unlock_irq(&id_priv->lock);
|
|
-
|
|
- if (id->qp)
|
|
- ib_detach_mcast(id->qp,
|
|
- &mc->multicast.ib->rec.mgid,
|
|
- be16_to_cpu(mc->multicast.ib->rec.mlid));
|
|
-
|
|
- BUG_ON(id_priv->cma_dev->device != id->device);
|
|
-
|
|
- if (rdma_cap_ib_mcast(id->device, id->port_num)) {
|
|
- ib_sa_free_multicast(mc->multicast.ib);
|
|
- kfree(mc);
|
|
- } else if (rdma_protocol_roce(id->device, id->port_num)) {
|
|
- cma_leave_roce_mc_group(id_priv, mc);
|
|
- }
|
|
- return;
|
|
- }
|
|
+ if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
|
|
+ continue;
|
|
+ list_del(&mc->list);
|
|
+ spin_unlock_irq(&id_priv->lock);
|
|
+
|
|
+ WARN_ON(id_priv->cma_dev->device != id->device);
|
|
+ destroy_mc(id_priv, mc);
|
|
+ return;
|
|
}
|
|
spin_unlock_irq(&id_priv->lock);
|
|
}
|
|
@@ -4652,7 +4596,7 @@ EXPORT_SYMBOL(rdma_leave_multicast);
|
|
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
|
|
{
|
|
struct rdma_dev_addr *dev_addr;
|
|
- struct cma_ndev_work *work;
|
|
+ struct cma_work *work;
|
|
|
|
dev_addr = &id_priv->id.route.addr.dev_addr;
|
|
|
|
@@ -4665,7 +4609,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
|
|
if (!work)
|
|
return -ENOMEM;
|
|
|
|
- INIT_WORK(&work->work, cma_ndev_work_handler);
|
|
+ INIT_WORK(&work->work, cma_work_handler);
|
|
work->id = id_priv;
|
|
work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
|
|
cma_id_get(id_priv);
|
|
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
|
|
index a92fc3f90bb5b..19e36e52181be 100644
|
|
--- a/drivers/infiniband/core/cq.c
|
|
+++ b/drivers/infiniband/core/cq.c
|
|
@@ -197,24 +197,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
|
|
}
|
|
|
|
/**
|
|
- * __ib_alloc_cq_user - allocate a completion queue
|
|
+ * __ib_alloc_cq allocate a completion queue
|
|
* @dev: device to allocate the CQ for
|
|
* @private: driver private data, accessible from cq->cq_context
|
|
* @nr_cqe: number of CQEs to allocate
|
|
* @comp_vector: HCA completion vectors for this CQ
|
|
* @poll_ctx: context to poll the CQ from.
|
|
* @caller: module owner name.
|
|
- * @udata: Valid user data or NULL for kernel object
|
|
*
|
|
* This is the proper interface to allocate a CQ for in-kernel users. A
|
|
* CQ allocated with this interface will automatically be polled from the
|
|
* specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
|
|
* to use this CQ abstraction.
|
|
*/
|
|
-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
|
|
- int nr_cqe, int comp_vector,
|
|
- enum ib_poll_context poll_ctx,
|
|
- const char *caller, struct ib_udata *udata)
|
|
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
|
|
+ int comp_vector, enum ib_poll_context poll_ctx,
|
|
+ const char *caller)
|
|
{
|
|
struct ib_cq_init_attr cq_attr = {
|
|
.cqe = nr_cqe,
|
|
@@ -277,7 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
|
|
out_destroy_cq:
|
|
rdma_dim_destroy(cq);
|
|
rdma_restrack_del(&cq->res);
|
|
- cq->device->ops.destroy_cq(cq, udata);
|
|
+ cq->device->ops.destroy_cq(cq, NULL);
|
|
out_free_wc:
|
|
kfree(cq->wc);
|
|
out_free_cq:
|
|
@@ -285,7 +283,7 @@ out_free_cq:
|
|
trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
|
|
return ERR_PTR(ret);
|
|
}
|
|
-EXPORT_SYMBOL(__ib_alloc_cq_user);
|
|
+EXPORT_SYMBOL(__ib_alloc_cq);
|
|
|
|
/**
|
|
* __ib_alloc_cq_any - allocate a completion queue
|
|
@@ -310,18 +308,19 @@ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
|
|
atomic_inc_return(&counter) %
|
|
min_t(int, dev->num_comp_vectors, num_online_cpus());
|
|
|
|
- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
|
|
- caller, NULL);
|
|
+ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
|
|
+ caller);
|
|
}
|
|
EXPORT_SYMBOL(__ib_alloc_cq_any);
|
|
|
|
/**
|
|
- * ib_free_cq_user - free a completion queue
|
|
+ * ib_free_cq - free a completion queue
|
|
* @cq: completion queue to free.
|
|
- * @udata: User data or NULL for kernel object
*/
-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
+void ib_free_cq(struct ib_cq *cq)
{
+ int ret;
+
if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
return;
if (WARN_ON_ONCE(cq->cqe_used))
@@ -343,12 +342,13 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
rdma_dim_destroy(cq);
trace_cq_free(cq);
+ ret = cq->device->ops.destroy_cq(cq, NULL);
+ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
rdma_restrack_del(&cq->res);
- cq->device->ops.destroy_cq(cq, udata);
kfree(cq->wc);
kfree(cq);
}
-EXPORT_SYMBOL(ib_free_cq_user);
+EXPORT_SYMBOL(ib_free_cq);
void ib_cq_pool_init(struct ib_device *dev)
{
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index d03dacaef7880..2643d5dbe1da8 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -586,6 +586,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
list_move_tail(&uevent->list, &list);
}
list_del(&ctx->list);
+ events_reported = ctx->events_reported;
mutex_unlock(&ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &list, list) {
@@ -595,7 +596,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
kfree(uevent);
}
- events_reported = ctx->events_reported;
mutex_destroy(&ctx->mutex);
kfree(ctx);
return events_reported;
@@ -1512,7 +1512,9 @@ static ssize_t ucma_process_join(struct ucma_file *file,
return 0;
err3:
+ mutex_lock(&ctx->mutex);
rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
+ mutex_unlock(&ctx->mutex);
ucma_cleanup_mc_events(mc);
err2:
xa_erase(&multicast_table, mc->id);
@@ -1678,7 +1680,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
cur_file = ctx->file;
if (cur_file == new_file) {
+ mutex_lock(&cur_file->mut);
resp.events_reported = ctx->events_reported;
+ mutex_unlock(&cur_file->mut);
goto response;
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 82455a1392f1d..7e765fe211607 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -151,13 +151,24 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
dma_addr_t mask;
int i;
+ /* rdma_for_each_block() has a bug if the page size is smaller than the
+ * page size used to build the umem. For now prevent smaller page sizes
+ * from being returned.
+ */
+ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
+
/* At minimum, drivers must support PAGE_SIZE or smaller */
if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
return 0;
va = virt;
- /* max page size not to exceed MR length */
- mask = roundup_pow_of_two(umem->length);
+ /* The best result is the smallest page size that results in the minimum
+ * number of required pages. Compute the largest page size that could
+ * work based on VA address bits that don't change.
+ */
+ mask = pgsz_bitmap &
+ GENMASK(BITS_PER_LONG - 1,
+ bits_per((umem->length - 1 + virt) ^ virt));
/* offset into first SGL */
pgoff = umem->address & ~PAGE_MASK;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
|
|
index 1b0ea945756f0..2e397d18dbf44 100644
|
|
--- a/drivers/infiniband/core/verbs.c
|
|
+++ b/drivers/infiniband/core/verbs.c
|
|
@@ -2016,16 +2016,21 @@ EXPORT_SYMBOL(rdma_set_cq_moderation);
|
|
|
|
int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
|
|
{
|
|
+ int ret;
|
|
+
|
|
if (WARN_ON_ONCE(cq->shared))
|
|
return -EOPNOTSUPP;
|
|
|
|
if (atomic_read(&cq->usecnt))
|
|
return -EBUSY;
|
|
|
|
+ ret = cq->device->ops.destroy_cq(cq, udata);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
rdma_restrack_del(&cq->res);
|
|
- cq->device->ops.destroy_cq(cq, udata);
|
|
kfree(cq);
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
EXPORT_SYMBOL(ib_destroy_cq_user);
|
|
|
|
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
|
|
index cb6e873039df5..9f69abf01d331 100644
|
|
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
|
|
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
|
|
@@ -2714,7 +2714,7 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
|
|
}
|
|
|
|
/* Completion Queues */
|
|
-void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
+int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
{
|
|
struct bnxt_re_cq *cq;
|
|
struct bnxt_qplib_nq *nq;
|
|
@@ -2730,6 +2730,7 @@ void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
atomic_dec(&rdev->cq_count);
|
|
nq->budget--;
|
|
kfree(cq->cql);
|
|
+ return 0;
|
|
}
|
|
|
|
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
|
|
index e5fbbeba6d28d..f4a0ded67a8aa 100644
|
|
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
|
|
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
|
|
@@ -193,7 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
|
|
const struct ib_recv_wr **bad_recv_wr);
|
|
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
struct ib_udata *udata);
|
|
-void bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
|
|
+int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
|
|
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
|
|
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
|
|
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
|
|
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
|
|
index b1bb61c65f4f6..7b076fc23cf38 100644
|
|
--- a/drivers/infiniband/hw/cxgb4/cq.c
|
|
+++ b/drivers/infiniband/hw/cxgb4/cq.c
|
|
@@ -967,7 +967,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
|
|
return !err || err == -ENODATA ? npolled : err;
|
|
}
|
|
|
|
-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
{
|
|
struct c4iw_cq *chp;
|
|
struct c4iw_ucontext *ucontext;
|
|
@@ -985,6 +985,7 @@ void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
|
|
chp->destroy_skb, chp->wr_waitp);
|
|
c4iw_put_wr_wait(chp->wr_waitp);
|
|
+ return 0;
|
|
}
|
|
|
|
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
|
|
index e8e11bd95e429..de0f278e31501 100644
|
|
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
|
|
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
|
|
@@ -992,7 +992,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
|
|
struct ib_udata *udata);
|
|
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
|
|
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
|
|
-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
|
|
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
|
|
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
struct ib_udata *udata);
|
|
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
|
|
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
|
|
index 1889dd172a252..05f593940e7b0 100644
|
|
--- a/drivers/infiniband/hw/efa/efa.h
|
|
+++ b/drivers/infiniband/hw/efa/efa.h
|
|
@@ -139,7 +139,7 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
|
|
struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
|
|
struct ib_qp_init_attr *init_attr,
|
|
struct ib_udata *udata);
|
|
-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
|
|
+int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
|
|
int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
struct ib_udata *udata);
|
|
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
|
|
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
|
|
index 7dd082441333c..bd2caa2353c75 100644
|
|
--- a/drivers/infiniband/hw/efa/efa_verbs.c
|
|
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
|
|
@@ -843,7 +843,7 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
|
|
return efa_com_destroy_cq(&dev->edev, ¶ms);
|
|
}
|
|
|
|
-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
+int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
{
|
|
struct efa_dev *dev = to_edev(ibcq->device);
|
|
struct efa_cq *cq = to_ecq(ibcq);
|
|
@@ -856,6 +856,7 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
efa_destroy_cq_idx(dev, cq->cq_idx);
|
|
efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
|
|
DMA_FROM_DEVICE);
|
|
+ return 0;
|
|
}
|
|
|
|
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
|
|
index e87d616f79882..c5acf3332519b 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
|
|
@@ -311,7 +311,7 @@ err_cq_buf:
|
|
return ret;
|
|
}
|
|
|
|
-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
+int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
{
|
|
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
|
|
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
|
|
@@ -322,6 +322,7 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
free_cq_buf(hr_dev, hr_cq);
|
|
free_cq_db(hr_dev, hr_cq, udata);
|
|
free_cqc(hr_dev, hr_cq);
|
|
+ return 0;
|
|
}
|
|
|
|
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
|
|
index c69453a62767c..77ca55b559a0a 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
|
|
@@ -929,7 +929,7 @@ struct hns_roce_hw {
|
|
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
|
|
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
|
|
struct ib_udata *udata);
|
|
- void (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
|
|
+ int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
|
|
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
|
|
int (*init_eq)(struct hns_roce_dev *hr_dev);
|
|
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
|
|
@@ -1246,7 +1246,7 @@ int to_hr_qp_type(int qp_type);
|
|
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
|
|
struct ib_udata *udata);
|
|
|
|
-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
|
|
+int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
|
|
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
|
|
struct ib_udata *udata, unsigned long virt,
|
|
struct hns_roce_db *db);
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
|
|
index cf39f560b8001..5a0c90e0b367b 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
|
|
@@ -271,7 +271,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
|
|
ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
|
|
break;
|
|
case IB_WR_LOCAL_INV:
|
|
- break;
|
|
case IB_WR_ATOMIC_CMP_AND_SWP:
|
|
case IB_WR_ATOMIC_FETCH_AND_ADD:
|
|
case IB_WR_LSO:
|
|
@@ -3573,7 +3572,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
|
|
return 0;
|
|
}
|
|
|
|
-static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
+static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
{
|
|
struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
|
|
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
|
|
@@ -3604,6 +3603,7 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
}
|
|
wait_time++;
|
|
}
|
|
+ return 0;
|
|
}
|
|
|
|
static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
index 38a48ab3e1d02..37809a0b50e25 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
@@ -1770,9 +1770,9 @@ static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
|
|
int *buf_page_size, int *bt_page_size, u32 hem_type)
|
|
{
|
|
u64 obj_per_chunk;
|
|
- int bt_chunk_size = 1 << PAGE_SHIFT;
|
|
- int buf_chunk_size = 1 << PAGE_SHIFT;
|
|
- int obj_per_chunk_default = buf_chunk_size / obj_size;
|
|
+ u64 bt_chunk_size = PAGE_SIZE;
|
|
+ u64 buf_chunk_size = PAGE_SIZE;
|
|
+ u64 obj_per_chunk_default = buf_chunk_size / obj_size;
|
|
|
|
*buf_page_size = 0;
|
|
*bt_page_size = 0;
|
|
@@ -3640,9 +3640,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
|
|
V2_QPC_BYTE_76_SRQ_EN_S, 1);
|
|
}
|
|
|
|
- roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
|
|
- V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
|
|
-
|
|
roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
|
|
|
|
hr_qp->access_flags = attr->qp_access_flags;
|
|
@@ -3983,6 +3980,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
|
|
dma_addr_t trrl_ba;
|
|
dma_addr_t irrl_ba;
|
|
enum ib_mtu mtu;
|
|
+ u8 lp_pktn_ini;
|
|
u8 port_num;
|
|
u64 *mtts;
|
|
u8 *dmac;
|
|
@@ -4090,13 +4088,21 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
|
|
}
|
|
|
|
#define MAX_LP_MSG_LEN 65536
|
|
- /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
|
|
+ /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
|
|
+ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
|
|
+
|
|
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
|
|
- V2_QPC_BYTE_56_LP_PKTN_INI_S,
|
|
- ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
|
|
+ V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
|
|
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
|
|
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
|
|
|
|
+ /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
|
|
+ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
|
|
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
|
|
+ roce_set_field(qpc_mask->byte_172_sq_psn,
|
|
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
|
|
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
|
|
+
|
|
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
|
|
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
|
|
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
|
|
@@ -4287,11 +4293,19 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
|
|
V2_QPC_BYTE_28_FL_S, 0);
|
|
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
|
|
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
|
|
+
|
|
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
|
|
+ if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
|
|
+ ibdev_err(ibdev,
|
|
+ "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
|
|
+ hr_qp->sl, MAX_SERVICE_LEVEL);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
|
|
- V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
|
|
+ V2_QPC_BYTE_28_SL_S, hr_qp->sl);
|
|
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
|
|
V2_QPC_BYTE_28_SL_S, 0);
|
|
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
|
|
|
|
return 0;
|
|
}
|
|
@@ -4787,7 +4801,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
|
|
qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
|
|
V2_QPC_BYTE_212_RETRY_CNT_M,
|
|
V2_QPC_BYTE_212_RETRY_CNT_S);
|
|
- qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
|
|
+ qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
|
|
+ V2_QPC_BYTE_244_RNR_CNT_M,
|
|
+ V2_QPC_BYTE_244_RNR_CNT_S);
|
|
|
|
done:
|
|
qp_attr->cur_qp_state = qp_attr->qp_state;
|
|
@@ -4803,6 +4819,7 @@ done:
|
|
}
|
|
|
|
qp_init_attr->cap = qp_attr->cap;
|
|
+ qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
|
|
|
|
out:
|
|
mutex_unlock(&hr_qp->mutex);
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
|
|
index 4f840997c6c73..c6a280bdbfaaf 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
|
|
@@ -1957,6 +1957,8 @@ struct hns_roce_eq_context {
|
|
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
|
|
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
|
|
|
|
+#define MAX_SERVICE_LEVEL 0x7
|
|
+
|
|
struct hns_roce_wqe_atomic_seg {
|
|
__le64 fetchadd_swap_data;
|
|
__le64 cmp_data;
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
|
|
index 4edea397b6b80..4486c9b7c3e43 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
|
|
@@ -1171,8 +1171,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|
|
|
mutex_lock(&hr_qp->mutex);
|
|
|
|
- cur_state = attr_mask & IB_QP_CUR_STATE ?
|
|
- attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
|
|
+ if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
|
|
+ goto out;
|
|
+
|
|
+ cur_state = hr_qp->state;
|
|
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
|
|
|
|
if (ibqp->uobject &&
|
|
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
|
|
index 49d92638e0dbb..9a2b87cc3d301 100644
|
|
--- a/drivers/infiniband/hw/i40iw/i40iw.h
|
|
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
|
|
@@ -409,8 +409,8 @@ static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
|
|
}
|
|
|
|
/* i40iw.c */
|
|
-void i40iw_add_ref(struct ib_qp *);
|
|
-void i40iw_rem_ref(struct ib_qp *);
|
|
+void i40iw_qp_add_ref(struct ib_qp *ibqp);
|
|
+void i40iw_qp_rem_ref(struct ib_qp *ibqp);
|
|
struct ib_qp *i40iw_get_qp(struct ib_device *, int);
|
|
|
|
void i40iw_flush_wqes(struct i40iw_device *iwdev,
|
|
@@ -554,9 +554,8 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
|
|
bool wait);
|
|
void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
|
|
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
|
|
-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
|
|
- struct i40iw_qp *iwqp,
|
|
- u32 qp_num);
|
|
+void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
|
|
+
|
|
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
|
|
struct i40iw_dma_mem *memptr,
|
|
u32 size, u32 mask);
|
|
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
|
|
index fa7a5ff498c73..56c1e9abc52dc 100644
|
|
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
|
|
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
|
|
@@ -2322,7 +2322,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
|
|
iwqp = cm_node->iwqp;
|
|
if (iwqp) {
|
|
iwqp->cm_node = NULL;
|
|
- i40iw_rem_ref(&iwqp->ibqp);
|
|
+ i40iw_qp_rem_ref(&iwqp->ibqp);
|
|
cm_node->iwqp = NULL;
|
|
} else if (cm_node->qhash_set) {
|
|
i40iw_get_addr_info(cm_node, &nfo);
|
|
@@ -3452,7 +3452,7 @@ void i40iw_cm_disconn(struct i40iw_qp *iwqp)
|
|
kfree(work);
|
|
return;
|
|
}
|
|
- i40iw_add_ref(&iwqp->ibqp);
|
|
+ i40iw_qp_add_ref(&iwqp->ibqp);
|
|
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
|
|
|
|
work->iwqp = iwqp;
|
|
@@ -3623,7 +3623,7 @@ static void i40iw_disconnect_worker(struct work_struct *work)
|
|
|
|
kfree(dwork);
|
|
i40iw_cm_disconn_true(iwqp);
|
|
- i40iw_rem_ref(&iwqp->ibqp);
|
|
+ i40iw_qp_rem_ref(&iwqp->ibqp);
|
|
}
|
|
|
|
/**
|
|
@@ -3745,7 +3745,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
|
cm_node->lsmm_size = accept.size + conn_param->private_data_len;
|
|
i40iw_cm_init_tsa_conn(iwqp, cm_node);
|
|
cm_id->add_ref(cm_id);
|
|
- i40iw_add_ref(&iwqp->ibqp);
|
|
+ i40iw_qp_add_ref(&iwqp->ibqp);
|
|
|
|
attr.qp_state = IB_QPS_RTS;
|
|
cm_node->qhash_set = false;
|
|
@@ -3908,7 +3908,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
|
iwqp->cm_node = cm_node;
|
|
cm_node->iwqp = iwqp;
|
|
iwqp->cm_id = cm_id;
|
|
- i40iw_add_ref(&iwqp->ibqp);
|
|
+ i40iw_qp_add_ref(&iwqp->ibqp);
|
|
|
|
if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
|
|
cm_node->state = I40IW_CM_STATE_SYN_SENT;
|
|
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
|
|
index ae8b97c306657..a7512508f7e60 100644
|
|
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
|
|
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
|
|
@@ -313,7 +313,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
|
|
__func__, info->qp_cq_id);
|
|
continue;
|
|
}
|
|
- i40iw_add_ref(&iwqp->ibqp);
|
|
+ i40iw_qp_add_ref(&iwqp->ibqp);
|
|
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
|
|
qp = &iwqp->sc_qp;
|
|
spin_lock_irqsave(&iwqp->lock, flags);
|
|
@@ -427,7 +427,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
|
|
break;
|
|
}
|
|
if (info->qp)
|
|
- i40iw_rem_ref(&iwqp->ibqp);
|
|
+ i40iw_qp_rem_ref(&iwqp->ibqp);
|
|
} while (1);
|
|
|
|
if (aeqcnt)
|
|
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
|
|
index 016524683e17e..72db7c1dc2998 100644
|
|
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
|
|
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
|
|
@@ -479,25 +479,6 @@ void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
|
|
}
|
|
}
|
|
|
|
-/**
|
|
- * i40iw_free_qp - callback after destroy cqp completes
|
|
- * @cqp_request: cqp request for destroy qp
|
|
- * @num: not used
|
|
- */
|
|
-static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
|
|
-{
|
|
- struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
|
|
- struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
|
|
- struct i40iw_device *iwdev;
|
|
- u32 qp_num = iwqp->ibqp.qp_num;
|
|
-
|
|
- iwdev = iwqp->iwdev;
|
|
-
|
|
- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
|
|
- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
|
|
- i40iw_rem_devusecount(iwdev);
|
|
-}
|
|
-
|
|
/**
|
|
* i40iw_wait_event - wait for completion
|
|
* @iwdev: iwarp device
|
|
@@ -618,26 +599,23 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
|
|
}
|
|
|
|
/**
|
|
- * i40iw_add_ref - add refcount for qp
|
|
+ * i40iw_qp_add_ref - add refcount for qp
|
|
* @ibqp: iqarp qp
|
|
*/
|
|
-void i40iw_add_ref(struct ib_qp *ibqp)
|
|
+void i40iw_qp_add_ref(struct ib_qp *ibqp)
|
|
{
|
|
struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
|
|
|
|
- atomic_inc(&iwqp->refcount);
|
|
+ refcount_inc(&iwqp->refcount);
|
|
}
|
|
|
|
/**
|
|
- * i40iw_rem_ref - rem refcount for qp and free if 0
|
|
+ * i40iw_qp_rem_ref - rem refcount for qp and free if 0
|
|
* @ibqp: iqarp qp
|
|
*/
|
|
-void i40iw_rem_ref(struct ib_qp *ibqp)
|
|
+void i40iw_qp_rem_ref(struct ib_qp *ibqp)
|
|
{
|
|
struct i40iw_qp *iwqp;
|
|
- enum i40iw_status_code status;
|
|
- struct i40iw_cqp_request *cqp_request;
|
|
- struct cqp_commands_info *cqp_info;
|
|
struct i40iw_device *iwdev;
|
|
u32 qp_num;
|
|
unsigned long flags;
|
|
@@ -645,7 +623,7 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
|
|
iwqp = to_iwqp(ibqp);
|
|
iwdev = iwqp->iwdev;
|
|
spin_lock_irqsave(&iwdev->qptable_lock, flags);
|
|
- if (!atomic_dec_and_test(&iwqp->refcount)) {
|
|
+ if (!refcount_dec_and_test(&iwqp->refcount)) {
|
|
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
|
|
return;
|
|
}
|
|
@@ -653,25 +631,8 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
|
|
qp_num = iwqp->ibqp.qp_num;
|
|
iwdev->qp_table[qp_num] = NULL;
|
|
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
|
|
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
|
|
- if (!cqp_request)
|
|
- return;
|
|
-
|
|
- cqp_request->callback_fcn = i40iw_free_qp;
|
|
- cqp_request->param = (void *)&iwqp->sc_qp;
|
|
- cqp_info = &cqp_request->info;
|
|
- cqp_info->cqp_cmd = OP_QP_DESTROY;
|
|
- cqp_info->post_sq = 1;
|
|
- cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
|
|
- cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
|
|
- cqp_info->in.u.qp_destroy.remove_hash_idx = true;
|
|
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
|
|
- if (!status)
|
|
- return;
|
|
+ complete(&iwqp->free_qp);
|
|
|
|
- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
|
|
- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
|
|
- i40iw_rem_devusecount(iwdev);
|
|
}
|
|
|
|
/**
|
|
@@ -938,7 +899,7 @@ static void i40iw_terminate_timeout(struct timer_list *t)
|
|
struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
|
|
|
|
i40iw_terminate_done(qp, 1);
|
|
- i40iw_rem_ref(&iwqp->ibqp);
|
|
+ i40iw_qp_rem_ref(&iwqp->ibqp);
|
|
}
|
|
|
|
/**
|
|
@@ -950,7 +911,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
|
|
struct i40iw_qp *iwqp;
|
|
|
|
iwqp = (struct i40iw_qp *)qp->back_qp;
|
|
- i40iw_add_ref(&iwqp->ibqp);
|
|
+ i40iw_qp_add_ref(&iwqp->ibqp);
|
|
timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
|
|
iwqp->terminate_timer.expires = jiffies + HZ;
|
|
add_timer(&iwqp->terminate_timer);
|
|
@@ -966,7 +927,7 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
|
|
|
|
iwqp = (struct i40iw_qp *)qp->back_qp;
|
|
if (del_timer(&iwqp->terminate_timer))
|
|
- i40iw_rem_ref(&iwqp->ibqp);
|
|
+ i40iw_qp_rem_ref(&iwqp->ibqp);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
|
|
index 19af29a48c559..2419de36e943d 100644
|
|
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
|
|
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
|
|
@@ -364,11 +364,11 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
|
|
* @iwqp: qp ptr (user or kernel)
|
|
* @qp_num: qp number assigned
|
|
*/
|
|
-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
|
|
- struct i40iw_qp *iwqp,
|
|
- u32 qp_num)
|
|
+void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
|
|
{
|
|
struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
|
|
+ struct i40iw_device *iwdev = iwqp->iwdev;
|
|
+ u32 qp_num = iwqp->ibqp.qp_num;
|
|
|
|
i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
|
|
i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
|
|
@@ -402,6 +402,10 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
|
|
static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
|
|
{
|
|
struct i40iw_qp *iwqp = to_iwqp(ibqp);
|
|
+ struct ib_qp_attr attr;
|
|
+ struct i40iw_device *iwdev = iwqp->iwdev;
|
|
+
|
|
+ memset(&attr, 0, sizeof(attr));
|
|
|
|
iwqp->destroyed = 1;
|
|
|
|
@@ -416,7 +420,15 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
|
|
}
|
|
}
|
|
|
|
- i40iw_rem_ref(&iwqp->ibqp);
|
|
+ attr.qp_state = IB_QPS_ERR;
|
|
+ i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
|
|
+ i40iw_qp_rem_ref(&iwqp->ibqp);
|
|
+ wait_for_completion(&iwqp->free_qp);
|
|
+ i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
|
|
+ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
|
|
+ i40iw_free_qp_resources(iwqp);
|
|
+ i40iw_rem_devusecount(iwdev);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -577,6 +589,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
|
|
qp->back_qp = (void *)iwqp;
|
|
qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
|
|
|
|
+ iwqp->iwdev = iwdev;
|
|
iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
|
|
|
|
if (i40iw_allocate_dma_mem(dev->hw,
|
|
@@ -601,7 +614,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
|
|
goto error;
|
|
}
|
|
|
|
- iwqp->iwdev = iwdev;
|
|
iwqp->iwpd = iwpd;
|
|
iwqp->ibqp.qp_num = qp_num;
|
|
qp = &iwqp->sc_qp;
|
|
@@ -715,7 +727,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
|
|
goto error;
|
|
}
|
|
|
|
- i40iw_add_ref(&iwqp->ibqp);
|
|
+ refcount_set(&iwqp->refcount, 1);
|
|
spin_lock_init(&iwqp->lock);
|
|
iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
|
|
iwdev->qp_table[qp_num] = iwqp;
|
|
@@ -737,10 +749,11 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
|
|
}
|
|
init_completion(&iwqp->sq_drained);
|
|
init_completion(&iwqp->rq_drained);
|
|
+ init_completion(&iwqp->free_qp);
|
|
|
|
return &iwqp->ibqp;
|
|
error:
|
|
- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
|
|
+ i40iw_free_qp_resources(iwqp);
|
|
return ERR_PTR(err_code);
|
|
}
|
|
|
|
@@ -1053,7 +1066,7 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
|
|
* @ib_cq: cq pointer
|
|
* @udata: user data or NULL for kernel object
|
|
*/
|
|
-static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
+static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
{
|
|
struct i40iw_cq *iwcq;
|
|
struct i40iw_device *iwdev;
|
|
@@ -1065,6 +1078,7 @@ static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|
i40iw_cq_wq_destroy(iwdev, cq);
|
|
cq_free_resources(iwdev, iwcq);
|
|
i40iw_rem_devusecount(iwdev);
|
|
+ return 0;
|
|
}
|
|
|
|
/**
|
|
@@ -2656,13 +2670,13 @@ static const struct ib_device_ops i40iw_dev_ops = {
|
|
.get_hw_stats = i40iw_get_hw_stats,
|
|
.get_port_immutable = i40iw_port_immutable,
|
|
.iw_accept = i40iw_accept,
|
|
- .iw_add_ref = i40iw_add_ref,
|
|
+ .iw_add_ref = i40iw_qp_add_ref,
|
|
.iw_connect = i40iw_connect,
|
|
.iw_create_listen = i40iw_create_listen,
|
|
.iw_destroy_listen = i40iw_destroy_listen,
|
|
.iw_get_qp = i40iw_get_qp,
|
|
.iw_reject = i40iw_reject,
|
|
- .iw_rem_ref = i40iw_rem_ref,
|
|
+ .iw_rem_ref = i40iw_qp_rem_ref,
|
|
.map_mr_sg = i40iw_map_mr_sg,
|
|
.mmap = i40iw_mmap,
|
|
.modify_qp = i40iw_modify_qp,
|
|
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
|
|
index 331bc21cbcc73..bab71f3e56374 100644
|
|
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
|
|
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
|
|
@@ -139,7 +139,7 @@ struct i40iw_qp {
|
|
struct i40iw_qp_host_ctx_info ctx_info;
|
|
struct i40iwarp_offload_info iwarp_info;
|
|
void *allocated_buffer;
|
|
- atomic_t refcount;
|
|
+ refcount_t refcount;
|
|
struct iw_cm_id *cm_id;
|
|
void *cm_node;
|
|
struct ib_mr *lsmm_mr;
|
|
@@ -174,5 +174,6 @@ struct i40iw_qp {
|
|
struct i40iw_dma_mem ietf_mem;
|
|
struct completion sq_drained;
|
|
struct completion rq_drained;
|
|
+ struct completion free_qp;
|
|
};
|
|
#endif
|
|
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
|
|
index b591861934b3c..81d6a3460b55d 100644
|
|
--- a/drivers/infiniband/hw/mlx4/cm.c
|
|
+++ b/drivers/infiniband/hw/mlx4/cm.c
|
|
@@ -280,6 +280,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
|
|
if (!sriov->is_going_down && !id->scheduled_delete) {
|
|
id->scheduled_delete = 1;
|
|
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
|
|
+ } else if (id->scheduled_delete) {
|
|
+ /* Adjust timeout if already scheduled */
|
|
+ mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
|
|
}
|
|
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
|
|
spin_unlock(&sriov->id_map_lock);
|
|
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
|
|
index f8b936b76dcdf..3851316407ceb 100644
|
|
--- a/drivers/infiniband/hw/mlx4/cq.c
|
|
+++ b/drivers/infiniband/hw/mlx4/cq.c
|
|
@@ -475,7 +475,7 @@ out:
|
|
return err;
|
|
}
|
|
|
|
-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
|
|
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
|
|
{
|
|
struct mlx4_ib_dev *dev = to_mdev(cq->device);
|
|
struct mlx4_ib_cq *mcq = to_mcq(cq);
|
|
@@ -495,6 +495,7 @@ void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
|
|
mlx4_db_free(dev->dev, &mcq->db);
|
|
}
|
|
ib_umem_release(mcq->umem);
|
|
+ return 0;
|
|
}
|
|
|
|
static void dump_cqe(void *cqe)
|
|
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
|
|
index abe68708d6d6e..2cbdba4da9dfe 100644
|
|
--- a/drivers/infiniband/hw/mlx4/mad.c
|
|
+++ b/drivers/infiniband/hw/mlx4/mad.c
|
|
@@ -1299,6 +1299,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
|
|
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
|
|
}
|
|
|
|
+static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
|
|
+ struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
|
|
+
|
|
+ spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
|
|
+ if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
|
|
+ queue_work(ctx->wi_wq, &ctx->work);
|
|
+ spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
|
|
+}
|
|
+
|
|
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
|
|
struct mlx4_ib_demux_pv_qp *tun_qp,
|
|
int index)
|
|
@@ -2001,7 +2013,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
|
|
cq_size *= 2;
|
|
|
|
cq_attr.cqe = cq_size;
|
|
- ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
|
|
+ ctx->cq = ib_create_cq(ctx->ib_dev,
|
|
+ create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
|
|
NULL, ctx, &cq_attr);
|
|
if (IS_ERR(ctx->cq)) {
|
|
ret = PTR_ERR(ctx->cq);
|
|
@@ -2038,6 +2051,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
|
|
INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
|
|
|
|
ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
|
|
+ ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
|
|
|
|
ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
|
|
if (ret) {
|
|
@@ -2181,7 +2195,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
|
|
goto err_mcg;
|
|
}
|
|
|
|
- snprintf(name, sizeof name, "mlx4_ibt%d", port);
|
|
+ snprintf(name, sizeof(name), "mlx4_ibt%d", port);
|
|
ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
|
|
if (!ctx->wq) {
|
|
pr_err("Failed to create tunnelling WQ for port %d\n", port);
|
|
@@ -2189,7 +2203,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
|
|
goto err_wq;
|
|
}
|
|
|
|
- snprintf(name, sizeof name, "mlx4_ibud%d", port);
|
|
+ snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
|
|
+ ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
|
|
+ if (!ctx->wi_wq) {
|
|
+ pr_err("Failed to create wire WQ for port %d\n", port);
|
|
+ ret = -ENOMEM;
|
|
+ goto err_wiwq;
|
|
+ }
|
|
+
|
|
+ snprintf(name, sizeof(name), "mlx4_ibud%d", port);
|
|
ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
|
|
if (!ctx->ud_wq) {
|
|
pr_err("Failed to create up/down WQ for port %d\n", port);
|
|
@@ -2200,6 +2222,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
|
|
return 0;
|
|
|
|
err_udwq:
|
|
+ destroy_workqueue(ctx->wi_wq);
|
|
+ ctx->wi_wq = NULL;
|
|
+
|
|
+err_wiwq:
|
|
destroy_workqueue(ctx->wq);
|
|
ctx->wq = NULL;
|
|
|
|
@@ -2247,12 +2273,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
|
|
ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
|
|
}
|
|
flush_workqueue(ctx->wq);
|
|
+ flush_workqueue(ctx->wi_wq);
|
|
for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
|
|
destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
|
|
free_pv_object(dev, i, ctx->port);
|
|
}
|
|
kfree(ctx->tun);
|
|
destroy_workqueue(ctx->ud_wq);
|
|
+ destroy_workqueue(ctx->wi_wq);
|
|
destroy_workqueue(ctx->wq);
|
|
}
|
|
}
|
|
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
|
|
index 6f4ea1067095e..bac526a703173 100644
|
|
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
|
|
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
|
|
@@ -454,6 +454,7 @@ struct mlx4_ib_demux_pv_ctx {
|
|
struct ib_pd *pd;
|
|
struct work_struct work;
|
|
struct workqueue_struct *wq;
|
|
+ struct workqueue_struct *wi_wq;
|
|
struct mlx4_ib_demux_pv_qp qp[2];
|
|
};
|
|
|
|
@@ -461,6 +462,7 @@ struct mlx4_ib_demux_ctx {
|
|
struct ib_device *ib_dev;
|
|
int port;
|
|
struct workqueue_struct *wq;
|
|
+ struct workqueue_struct *wi_wq;
|
|
struct workqueue_struct *ud_wq;
|
|
spinlock_t ud_lock;
|
|
atomic64_t subnet_prefix;
|
|
@@ -736,7 +738,7 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
|
|
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
|
|
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
struct ib_udata *udata);
|
|
-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
|
|
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
|
|
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
|
|
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
|
|
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
|
|
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
|
|
index 0c18cb6a2f148..ec634085e1d9a 100644
|
|
--- a/drivers/infiniband/hw/mlx5/cq.c
|
|
+++ b/drivers/infiniband/hw/mlx5/cq.c
|
|
@@ -168,7 +168,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
|
|
{
|
|
enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
|
|
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
|
|
- struct mlx5_ib_srq *srq;
|
|
+ struct mlx5_ib_srq *srq = NULL;
|
|
struct mlx5_ib_wq *wq;
|
|
u16 wqe_ctr;
|
|
u8 roce_packet_type;
|
|
@@ -180,7 +180,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
|
|
|
|
if (qp->ibqp.xrcd) {
|
|
msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
|
|
- srq = to_mibsrq(msrq);
|
|
+ if (msrq)
|
|
+ srq = to_mibsrq(msrq);
|
|
} else {
|
|
srq = to_msrq(qp->ibqp.srq);
|
|
}
|
|
@@ -1023,16 +1024,21 @@ err_cqb:
|
|
return err;
|
|
}
|
|
|
|
-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
|
|
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
|
|
{
|
|
struct mlx5_ib_dev *dev = to_mdev(cq->device);
|
|
struct mlx5_ib_cq *mcq = to_mcq(cq);
|
|
+ int ret;
|
|
+
|
|
+ ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
- mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
|
|
if (udata)
|
|
destroy_cq_user(mcq, udata);
|
|
else
|
|
destroy_cq_kernel(dev, mcq);
|
|
+ return 0;
|
|
}
|
|
|
|
static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
|
|
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
|
|
index 6f99ed03d88e7..1f4aa2647a6f3 100644
|
|
--- a/drivers/infiniband/hw/mlx5/main.c
|
|
+++ b/drivers/infiniband/hw/mlx5/main.c
|
|
@@ -867,7 +867,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
|
|
/* We support 'Gappy' memory registration too */
|
|
props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
|
|
}
|
|
- props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
|
|
+ /* IB_WR_REG_MR always requires changing the entity size with UMR */
|
|
+ if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
|
|
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
|
|
if (MLX5_CAP_GEN(mdev, sho)) {
|
|
props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
|
|
/* At this stage no support for signature handover */
|
|
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
|
|
index 5dbe3eb0d9cb9..3825cdec6ac68 100644
|
|
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
|
|
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
|
|
@@ -1180,7 +1180,7 @@ int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
|
|
size_t buflen, size_t *bc);
|
|
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
struct ib_udata *udata);
|
|
-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
|
|
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
|
|
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
|
|
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
|
|
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
|
|
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
|
|
index 44683073be0c4..85c9a1ffdbb64 100644
|
|
--- a/drivers/infiniband/hw/mlx5/mr.c
|
|
+++ b/drivers/infiniband/hw/mlx5/mr.c
|
|
@@ -50,6 +50,29 @@ enum {
|
|
static void
|
|
create_mkey_callback(int status, struct mlx5_async_work *context);
|
|
|
|
+static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
|
|
+ struct ib_pd *pd)
|
|
+{
|
|
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
|
+
|
|
+ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
|
|
+ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
|
|
+ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
|
|
+ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
|
|
+ MLX5_SET(mkc, mkc, lr, 1);
|
|
+
|
|
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
|
|
+ MLX5_SET(mkc, mkc, relaxed_ordering_write,
|
|
+ !!(acc & IB_ACCESS_RELAXED_ORDERING));
|
|
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
|
|
+ MLX5_SET(mkc, mkc, relaxed_ordering_read,
|
|
+ !!(acc & IB_ACCESS_RELAXED_ORDERING));
|
|
+
|
|
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
|
|
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
|
|
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
|
|
+}
|
|
+
|
|
static void
|
|
assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
|
|
u32 *in)
|
|
@@ -152,12 +175,12 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
|
|
mr->cache_ent = ent;
|
|
mr->dev = ent->dev;
|
|
|
|
+ set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
|
|
MLX5_SET(mkc, mkc, free, 1);
|
|
MLX5_SET(mkc, mkc, umr_en, 1);
|
|
MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
|
|
MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
|
|
|
|
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
|
|
MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
|
|
MLX5_SET(mkc, mkc, log_page_size, ent->page);
|
|
return mr;
|
|
@@ -774,29 +797,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
|
|
return 0;
|
|
}
|
|
|
|
-static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
|
|
- struct ib_pd *pd)
|
|
-{
|
|
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
|
-
|
|
- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
|
|
- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
|
|
- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
|
|
- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
|
|
- MLX5_SET(mkc, mkc, lr, 1);
|
|
-
|
|
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
|
|
- MLX5_SET(mkc, mkc, relaxed_ordering_write,
|
|
- !!(acc & IB_ACCESS_RELAXED_ORDERING));
|
|
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
|
|
- MLX5_SET(mkc, mkc, relaxed_ordering_read,
|
|
- !!(acc & IB_ACCESS_RELAXED_ORDERING));
|
|
-
|
|
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
|
|
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
|
|
- MLX5_SET64(mkc, mkc, start_addr, start_addr);
|
|
-}
|
|
-
|
|
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
|
|
{
|
|
struct mlx5_ib_dev *dev = to_mdev(pd->device);
|
|
@@ -1190,29 +1190,17 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
|
|
MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
|
|
|
|
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
|
|
+ set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
|
|
+ populate ? pd : dev->umrc.pd);
|
|
MLX5_SET(mkc, mkc, free, !populate);
|
|
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
|
|
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
|
|
- MLX5_SET(mkc, mkc, relaxed_ordering_write,
|
|
- !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
|
|
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
|
|
- MLX5_SET(mkc, mkc, relaxed_ordering_read,
|
|
- !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
|
|
- MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
|
|
- MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
|
|
- MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
|
|
- MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
|
|
- MLX5_SET(mkc, mkc, lr, 1);
|
|
MLX5_SET(mkc, mkc, umr_en, 1);
|
|
|
|
- MLX5_SET64(mkc, mkc, start_addr, virt_addr);
|
|
MLX5_SET64(mkc, mkc, len, length);
|
|
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
|
|
MLX5_SET(mkc, mkc, bsf_octword_size, 0);
|
|
MLX5_SET(mkc, mkc, translations_octword_size,
|
|
get_octo_len(virt_addr, length, page_shift));
|
|
MLX5_SET(mkc, mkc, log_page_size, page_shift);
|
|
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
|
|
if (populate) {
|
|
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
|
|
get_octo_len(virt_addr, length, page_shift));
|
|
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
|
|
index 9fa2f9164a47b..2ad15adf304e5 100644
|
|
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
|
|
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
|
|
@@ -789,7 +789,7 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
-static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
|
|
+static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
|
|
{
|
|
if (udata) {
|
|
struct mthca_ucontext *context =
|
|
@@ -808,6 +808,7 @@ static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
|
|
to_mcq(cq)->set_ci_db_index);
|
|
}
|
|
mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
|
|
+ return 0;
|
|
}
|
|
|
|
static inline u32 convert_access(int acc)
|
|
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
|
|
index d11c74390a124..927c70d1ffbc3 100644
|
|
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
|
|
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
|
|
@@ -1056,7 +1056,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
|
|
spin_unlock_irqrestore(&cq->cq_lock, flags);
|
|
}
|
|
|
|
-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
{
|
|
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
|
|
struct ocrdma_eq *eq = NULL;
|
|
@@ -1081,6 +1081,7 @@ void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
ocrdma_get_db_addr(dev, pdid),
|
|
dev->nic_info.db_page_size);
|
|
}
|
|
+ return 0;
|
|
}
|
|
|
|
static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
|
|
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
|
|
index 3a5010881be5b..c46412dff924a 100644
|
|
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
|
|
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
|
|
@@ -72,7 +72,7 @@ void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
|
|
int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
struct ib_udata *udata);
|
|
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
|
|
-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
|
|
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
|
|
|
|
struct ib_qp *ocrdma_create_qp(struct ib_pd *,
|
|
struct ib_qp_init_attr *attrs,
|
|
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
|
|
index ccaedfd53e49e..679766abb436e 100644
|
|
--- a/drivers/infiniband/hw/qedr/main.c
|
|
+++ b/drivers/infiniband/hw/qedr/main.c
|
|
@@ -601,7 +601,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
|
|
qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
|
|
|
|
/* Part 2 - check capabilities */
|
|
- page_size = ~dev->attr.page_size_caps + 1;
|
|
+ page_size = ~qed_attr->page_size_caps + 1;
|
|
if (page_size > PAGE_SIZE) {
|
|
DP_ERR(dev,
|
|
"Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
|
|
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
|
|
index 97fc7dd353b04..c7169d2c69e5b 100644
|
|
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
|
|
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
|
|
@@ -736,7 +736,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
|
struct qedr_dev *dev = ep->dev;
|
|
struct qedr_qp *qp;
|
|
struct qed_iwarp_accept_in params;
|
|
- int rc = 0;
|
|
+ int rc;
|
|
|
|
DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
|
|
|
|
@@ -759,8 +759,10 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
|
params.ord = conn_param->ord;
|
|
|
|
if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
|
|
- &qp->iwarp_cm_flags))
|
|
+ &qp->iwarp_cm_flags)) {
|
|
+ rc = -EINVAL;
|
|
goto err; /* QP already destroyed */
|
|
+ }
|
|
|
|
rc = dev->ops->iwarp_accept(dev->rdma_ctx, ¶ms);
|
|
if (rc) {
|
|
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
|
|
index 1a7f1f805be3e..41813e9d771ff 100644
|
|
--- a/drivers/infiniband/hw/qedr/verbs.c
|
|
+++ b/drivers/infiniband/hw/qedr/verbs.c
|
|
@@ -998,7 +998,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
/* Generate doorbell address. */
|
|
cq->db.data.icid = cq->icid;
|
|
cq->db_addr = dev->db_addr + db_offset;
|
|
- cq->db.data.params = DB_AGG_CMD_SET <<
|
|
+ cq->db.data.params = DB_AGG_CMD_MAX <<
|
|
RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
|
|
|
|
/* point to the very last element, passing it we will toggle */
|
|
@@ -1050,7 +1050,7 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
|
|
#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
|
|
#define QEDR_DESTROY_CQ_ITER_DURATION (10)
|
|
|
|
-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
{
|
|
struct qedr_dev *dev = get_qedr_dev(ibcq->device);
|
|
struct qed_rdma_destroy_cq_out_params oparams;
|
|
@@ -1065,7 +1065,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
/* GSIs CQs are handled by driver, so they don't exist in the FW */
|
|
if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
|
|
qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
|
|
- return;
|
|
+ return 0;
|
|
}
|
|
|
|
iparams.icid = cq->icid;
|
|
@@ -1113,6 +1113,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
|
|
* Since the destroy CQ ramrod has also been received on the EQ we can
|
|
* be certain that there's no event handler in process.
|
|
*/
|
|
+ return 0;
|
|
}
|
|
|
|
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
|
|
@@ -2112,6 +2113,28 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
|
|
return rc;
|
|
}
|
|
|
|
+static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
|
|
+ struct ib_udata *udata)
|
|
+{
|
|
+ struct qedr_ucontext *ctx =
|
|
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
|
|
+ ibucontext);
|
|
+ int rc;
|
|
+
|
|
+ if (qp->qp_type != IB_QPT_GSI) {
|
|
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
|
|
+ if (rc)
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ if (qp->create_type == QEDR_QP_CREATE_USER)
|
|
+ qedr_cleanup_user(dev, ctx, qp);
|
|
+ else
|
|
+ qedr_cleanup_kernel(dev, qp);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
|
|
struct ib_qp_init_attr *attrs,
|
|
struct ib_udata *udata)
|
|
@@ -2158,19 +2181,21 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
|
|
rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
|
|
|
|
if (rc)
|
|
- goto err;
|
|
+ goto out_free_qp;
|
|
|
|
qp->ibqp.qp_num = qp->qp_id;
|
|
|
|
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
|
|
rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
|
|
if (rc)
|
|
- goto err;
|
|
+ goto out_free_qp_resources;
|
|
}
|
|
|
|
return &qp->ibqp;
|
|
|
|
-err:
|
|
+out_free_qp_resources:
|
|
+ qedr_free_qp_resources(dev, qp, udata);
|
|
+out_free_qp:
|
|
kfree(qp);
|
|
|
|
return ERR_PTR(-EFAULT);
|
|
@@ -2636,7 +2661,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
|
|
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
|
|
qp_attr->cap.max_send_sge = qp->sq.max_sges;
|
|
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
|
|
- qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
|
|
+ qp_attr->cap.max_inline_data = dev->attr.max_inline;
|
|
qp_init_attr->cap = qp_attr->cap;
|
|
|
|
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
|
|
@@ -2671,28 +2696,6 @@ err:
|
|
return rc;
|
|
}
|
|
|
|
-static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
|
|
- struct ib_udata *udata)
|
|
-{
|
|
- struct qedr_ucontext *ctx =
|
|
- rdma_udata_to_drv_context(udata, struct qedr_ucontext,
|
|
- ibucontext);
|
|
- int rc;
|
|
-
|
|
- if (qp->qp_type != IB_QPT_GSI) {
|
|
- rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
|
|
- if (rc)
|
|
- return rc;
|
|
- }
|
|
-
|
|
- if (qp->create_type == QEDR_QP_CREATE_USER)
|
|
- qedr_cleanup_user(dev, ctx, qp);
|
|
- else
|
|
- qedr_cleanup_kernel(dev, qp);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
|
|
{
|
|
struct qedr_qp *qp = get_qedr_qp(ibqp);
|
|
@@ -2752,6 +2755,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
|
|
|
|
if (rdma_protocol_iwarp(&dev->ibdev, 1))
|
|
qedr_iw_qp_rem_ref(&qp->ibqp);
|
|
+ else
|
|
+ kfree(qp);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
|
|
index 5e02387e068d1..e0db3bc1653e2 100644
|
|
--- a/drivers/infiniband/hw/qedr/verbs.h
|
|
+++ b/drivers/infiniband/hw/qedr/verbs.h
|
|
@@ -52,7 +52,7 @@ void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
|
|
int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
struct ib_udata *udata);
|
|
int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
|
|
-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
|
|
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
|
|
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
|
|
struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
|
|
struct ib_udata *);
|
|
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
|
|
index b8a77ce115908..586ff16be1bb3 100644
|
|
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
|
|
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
|
|
@@ -596,9 +596,9 @@ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
return 0;
|
|
}
|
|
|
|
-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
|
|
+int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
|
|
{
|
|
- return;
|
|
+ return 0;
|
|
}
|
|
|
|
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
|
|
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
|
|
index 2aedf78c13cf2..f13b08c59b9a3 100644
|
|
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
|
|
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
|
|
@@ -60,7 +60,7 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
|
int attr_mask, struct ib_udata *udata);
|
|
int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|
struct ib_udata *udata);
|
|
-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
|
|
+int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
|
|
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
|
|
u64 virt_addr, int access_flags,
|
|
struct ib_udata *udata);
|
|
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 4f6cc0de7ef95..6d3e6389e47da 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -235,7 +235,7 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
  * @cq: the completion queue to destroy.
  * @udata: user data or null for kernel object
  */
-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	struct pvrdma_cq *vcq = to_vcq(cq);
 	union pvrdma_cmd_req req;
@@ -261,6 +261,7 @@ void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 
 	pvrdma_free_cq(dev, vcq);
 	atomic_dec(&dev->num_cqs);
+	return 0;
 }
 
 static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 267702226f108..af36e9f767eed 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -411,7 +411,7 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		     int sg_nents, unsigned int *sg_offset);
 int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		     struct ib_udata *udata);
-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 04d2e72017fed..19248be140933 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -315,7 +315,7 @@ bail_wc:
  *
  * Called by ib_destroy_cq() in the generic verbs code.
  */
-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
 	struct rvt_dev_info *rdi = cq->rdi;
@@ -328,6 +328,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 		kref_put(&cq->ip->ref, rvt_release_mmap_info);
 	else
 		vfree(cq->kqueue);
+	return 0;
 }
 
 /**
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
index 5e26a2eb19a4c..feb01e7ee0044 100644
--- a/drivers/infiniband/sw/rdmavt/cq.h
+++ b/drivers/infiniband/sw/rdmavt/cq.h
@@ -53,7 +53,7 @@
 
 int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		  struct ib_udata *udata);
-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
 int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index f904bb34477ae..2d534c450f3c8 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -95,9 +95,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
 	if (!rdi)
 		return rdi;
 
-	rdi->ports = kcalloc(nports,
-			     sizeof(struct rvt_ibport **),
-			     GFP_KERNEL);
+	rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
 	if (!rdi->ports)
 		ib_dealloc_device(&rdi->ibdev);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 46e111c218fd4..9bfb98056fc2a 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -281,6 +281,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 	struct rxe_mc_elem *mce;
 	struct rxe_qp *qp;
 	union ib_gid dgid;
+	struct sk_buff *per_qp_skb;
+	struct rxe_pkt_info *per_qp_pkt;
 	int err;
 
 	if (skb->protocol == htons(ETH_P_IP))
@@ -309,21 +311,29 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 		if (err)
 			continue;
 
-		/* if *not* the last qp in the list
-		 * increase the users of the skb then post to the next qp
+		/* for all but the last qp create a new clone of the
+		 * skb and pass to the qp.
 		 */
 		if (mce->qp_list.next != &mcg->qp_list)
-			skb_get(skb);
+			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
+		else
+			per_qp_skb = skb;
+
+		if (unlikely(!per_qp_skb))
+			continue;
 
-		pkt->qp = qp;
+		per_qp_pkt = SKB_TO_PKT(per_qp_skb);
+		per_qp_pkt->qp = qp;
 		rxe_add_ref(qp);
-		rxe_rcv_pkt(pkt, skb);
+		rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
 	}
 
 	spin_unlock_bh(&mcg->mcg_lock);
 
 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
 
+	return;
+
 err1:
 	kfree_skb(skb);
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 00ba6fb1e6763..452748b3854b5 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -816,13 +816,14 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
 }
 
-static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
 
 	rxe_cq_disable(cq);
 
 	rxe_drop_ref(cq);
+	return 0;
 }
 
 static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 987e2ba05dbc0..7e657f90ca4f4 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -1064,7 +1064,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
 	return rv > 0 ? 0 : rv;
 }
 
-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
+int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
 {
 	struct siw_cq *cq = to_siw_cq(base_cq);
 	struct siw_device *sdev = to_siw_dev(base_cq->device);
@@ -1082,6 +1082,7 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
 	atomic_dec(&sdev->num_cq);
 
 	vfree(cq->queue);
+	return 0;
 }
 
 /*
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 1a731989fad60..b0b7488869104 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -63,7 +63,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 		  const struct ib_send_wr **bad_wr);
 int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
 		     const struct ib_recv_wr **bad_wr);
-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
+int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
 int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc);
 int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags);
 struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ef60e8e4ae67b..7c0bb2642d232 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2470,6 +2470,8 @@ static struct net_device *ipoib_add_port(const char *format,
 	/* call event handler to ensure pkey in sync */
 	queue_work(ipoib_workqueue, &priv->flush_heavy);
 
+	ndev->rtnl_link_ops = ipoib_get_link_ops();
+
 	result = register_netdev(ndev);
 	if (result) {
 		pr_warn("%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 38c984d16996d..d5a90a66b45cf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -144,6 +144,16 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
 	return 0;
 }
 
+static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
+{
+	struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+	if (!priv->parent)
+		return;
+
+	unregister_netdevice_queue(dev, head);
+}
+
 static size_t ipoib_get_size(const struct net_device *dev)
 {
 	return nla_total_size(2) +	/* IFLA_IPOIB_PKEY */
@@ -158,6 +168,7 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
 	.priv_size	= sizeof(struct ipoib_dev_priv),
 	.setup		= ipoib_setup_common,
 	.newlink	= ipoib_new_child_link,
+	.dellink	= ipoib_del_child_link,
 	.changelink	= ipoib_changelink,
 	.get_size	= ipoib_get_size,
 	.fill_info	= ipoib_fill_info,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 30865605e0980..4c50a87ed7cc2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -195,6 +195,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	}
 	priv = ipoib_priv(ndev);
 
+	ndev->rtnl_link_ops = ipoib_get_link_ops();
+
 	result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
 
 	if (result && ndev->reg_state == NETREG_UNINITIALIZED)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 28f6414dfa3dc..d6f93601712e4 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -16,6 +16,7 @@
 #include "rtrs-srv.h"
 #include "rtrs-log.h"
 #include <rdma/ib_cm.h>
+#include <rdma/ib_verbs.h>
 
 MODULE_DESCRIPTION("RDMA Transport Server");
 MODULE_LICENSE("GPL");
@@ -31,6 +32,7 @@ MODULE_LICENSE("GPL");
 static struct rtrs_rdma_dev_pd dev_pd;
 static mempool_t *chunk_pool;
 struct class *rtrs_dev_class;
+static struct rtrs_srv_ib_ctx ib_ctx;
 
 static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
 static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
@@ -2042,6 +2044,70 @@ static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
 	kfree(ctx);
 }
 
+static int rtrs_srv_add_one(struct ib_device *device)
+{
+	struct rtrs_srv_ctx *ctx;
+	int ret = 0;
+
+	mutex_lock(&ib_ctx.ib_dev_mutex);
+	if (ib_ctx.ib_dev_count)
+		goto out;
+
+	/*
+	 * Since our CM IDs are NOT bound to any ib device we will create them
+	 * only once
+	 */
+	ctx = ib_ctx.srv_ctx;
+	ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
+	if (ret) {
+		/*
+		 * We errored out here.
+		 * According to the ib code, if we encounter an error here then the
+		 * error code is ignored, and no more calls to our ops are made.
+		 */
+		pr_err("Failed to initialize RDMA connection");
+		goto err_out;
+	}
+
+out:
+	/*
+	 * Keep a track on the number of ib devices added
+	 */
+	ib_ctx.ib_dev_count++;
+
+err_out:
+	mutex_unlock(&ib_ctx.ib_dev_mutex);
+	return ret;
+}
+
+static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
+{
+	struct rtrs_srv_ctx *ctx;
+
+	mutex_lock(&ib_ctx.ib_dev_mutex);
+	ib_ctx.ib_dev_count--;
+
+	if (ib_ctx.ib_dev_count)
+		goto out;
+
+	/*
+	 * Since our CM IDs are NOT bound to any ib device we will remove them
+	 * only once, when the last device is removed
+	 */
+	ctx = ib_ctx.srv_ctx;
+	rdma_destroy_id(ctx->cm_id_ip);
+	rdma_destroy_id(ctx->cm_id_ib);
+
+out:
+	mutex_unlock(&ib_ctx.ib_dev_mutex);
+}
+
+static struct ib_client rtrs_srv_client = {
+	.name	= "rtrs_server",
+	.add	= rtrs_srv_add_one,
+	.remove	= rtrs_srv_remove_one
+};
+
 /**
  * rtrs_srv_open() - open RTRS server context
  * @ops:		callback functions
@@ -2060,7 +2126,11 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
 	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
-	err = rtrs_srv_rdma_init(ctx, port);
+	mutex_init(&ib_ctx.ib_dev_mutex);
+	ib_ctx.srv_ctx = ctx;
+	ib_ctx.port = port;
+
+	err = ib_register_client(&rtrs_srv_client);
 	if (err) {
 		free_srv_ctx(ctx);
 		return ERR_PTR(err);
@@ -2099,8 +2169,8 @@ static void close_ctx(struct rtrs_srv_ctx *ctx)
  */
 void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
 {
-	rdma_destroy_id(ctx->cm_id_ip);
-	rdma_destroy_id(ctx->cm_id_ib);
+	ib_unregister_client(&rtrs_srv_client);
+	mutex_destroy(&ib_ctx.ib_dev_mutex);
 	close_ctx(ctx);
 	free_srv_ctx(ctx);
 }
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index dc95b0932f0df..08b0b8a6eebe6 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -118,6 +118,13 @@ struct rtrs_srv_ctx {
 	struct list_head srv_list;
 };
 
+struct rtrs_srv_ib_ctx {
+	struct rtrs_srv_ctx	*srv_ctx;
+	u16			port;
+	struct mutex		ib_dev_mutex;
+	int			ib_dev_count;
+};
+
 extern struct class *rtrs_dev_class;
 
 void close_sess(struct rtrs_srv_sess *sess);
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 7c70492d9d6b5..f831f01501d58 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -250,8 +250,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
 	}
 
 	keypad->irq = platform_get_irq(pdev, 0);
-	if (!keypad->irq) {
-		err = -ENXIO;
+	if (keypad->irq < 0) {
+		err = keypad->irq;
 		goto failed_free;
 	}
 
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 94c94d7f5155f..d6c924032aaa8 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -240,10 +240,8 @@ static int omap4_keypad_probe(struct platform_device *pdev)
 	}
 
 	irq = platform_get_irq(pdev, 0);
-	if (!irq) {
-		dev_err(&pdev->dev, "no keyboard irq assigned\n");
-		return -EINVAL;
-	}
+	if (irq < 0)
+		return irq;
 
 	keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
 	if (!keypad_data) {
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index af3a6824f1a4d..77e0743a3cf85 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -50,7 +50,7 @@ struct twl4030_keypad {
 	bool		autorepeat;
 	unsigned int	n_rows;
 	unsigned int	n_cols;
-	unsigned int	irq;
+	int		irq;
 
 	struct device *dbg_dev;
 	struct input_dev *input;
@@ -376,10 +376,8 @@ static int twl4030_kp_probe(struct platform_device *pdev)
 	}
 
 	kp->irq = platform_get_irq(pdev, 0);
-	if (!kp->irq) {
-		dev_err(&pdev->dev, "no keyboard irq assigned\n");
-		return -EINVAL;
-	}
+	if (kp->irq < 0)
+		return kp->irq;
 
 	error = matrix_keypad_build_keymap(keymap_data, NULL,
 					   TWL4030_MAX_ROWS,
diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
index a681a2c04e399..f15ed3dcdb9b2 100644
--- a/drivers/input/serio/sun4i-ps2.c
+++ b/drivers/input/serio/sun4i-ps2.c
@@ -211,7 +211,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
 	struct sun4i_ps2data *drvdata;
 	struct serio *serio;
 	struct device *dev = &pdev->dev;
-	unsigned int irq;
 	int error;
 
 	drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL);
@@ -264,14 +263,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
 	writel(0, drvdata->reg_base + PS2_REG_GCTL);
 
 	/* Get IRQ for the device */
-	irq = platform_get_irq(pdev, 0);
-	if (!irq) {
-		dev_err(dev, "no IRQ found\n");
-		error = -ENXIO;
+	drvdata->irq = platform_get_irq(pdev, 0);
+	if (drvdata->irq < 0) {
+		error = drvdata->irq;
 		goto err_disable_clk;
 	}
 
-	drvdata->irq = irq;
 	drvdata->serio = serio;
 	drvdata->dev = dev;
 
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 5477a5718202a..db7f27d4734a9 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -90,7 +90,7 @@
 /* FW read command, 0x53 0x?? 0x0, 0x01 */
 #define E_ELAN_INFO_FW_VER	0x00
 #define E_ELAN_INFO_BC_VER	0x10
-#define E_ELAN_INFO_REK		0xE0
+#define E_ELAN_INFO_REK		0xD0
 #define E_ELAN_INFO_TEST_VER	0xE0
 #define E_ELAN_INFO_FW_ID	0xF0
 #define E_INFO_OSR		0xD6
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index 9ed258854349b..5e6ba5c4eca2a 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -530,20 +530,25 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
 
 	mutex_lock(&input_dev->mutex);
 
-	if (input_dev->users) {
-		retval = clk_prepare_enable(tsc->adc_clk);
-		if (retval)
-			goto out;
-
-		retval = clk_prepare_enable(tsc->tsc_clk);
-		if (retval) {
-			clk_disable_unprepare(tsc->adc_clk);
-			goto out;
-		}
+	if (!input_dev->users)
+		goto out;
 
-		retval = imx6ul_tsc_init(tsc);
+	retval = clk_prepare_enable(tsc->adc_clk);
+	if (retval)
+		goto out;
+
+	retval = clk_prepare_enable(tsc->tsc_clk);
+	if (retval) {
+		clk_disable_unprepare(tsc->adc_clk);
+		goto out;
 	}
 
+	retval = imx6ul_tsc_init(tsc);
+	if (retval) {
+		clk_disable_unprepare(tsc->tsc_clk);
+		clk_disable_unprepare(tsc->adc_clk);
+		goto out;
+	}
 out:
 	mutex_unlock(&input_dev->mutex);
 	return retval;
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index b54cc64e4ea64..389356332c54a 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -479,7 +479,7 @@ static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev,
 
 	mutex_lock(&sdata->mutex);
 
-	if (value & sdata->hover_enabled)
+	if (value && sdata->hover_enabled)
 		goto out;
 
 	if (sdata->running)
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index d176df569af8f..78d813bd0dcc8 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -578,8 +578,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	 * index into qcom_iommu->ctxs:
 	 */
 	if (WARN_ON(asid < 1) ||
-	    WARN_ON(asid > qcom_iommu->num_ctxs))
+	    WARN_ON(asid > qcom_iommu->num_ctxs)) {
+		put_device(&iommu_pdev->dev);
 		return -EINVAL;
+	}
 
 	if (!dev_iommu_priv_get(dev)) {
 		dev_iommu_priv_set(dev, qcom_iommu);
@@ -588,8 +590,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 		 * multiple different iommu devices.  Multiple context
 		 * banks are ok, but multiple devices are not:
 		 */
-		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
+		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
+			put_device(&iommu_pdev->dev);
 			return -EINVAL;
+		}
 	}
 
 	return iommu_fwspec_add_ids(dev, &asid, 1);
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index db38a68abb6c0..a6f4ca438bca1 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -1315,8 +1315,9 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
 		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
 		i++;
 
-		if (i > 31) {
-			pr_err("max 31 devices can be reported.\n");
+		if (i >= ARRAY_SIZE(devices->info)) {
+			pr_err("max %zd devices can be reported.\n",
+			       ARRAY_SIZE(devices->info));
 			break;
 		}
 	}
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 0b821a5b2db84..3e7d4b20ab34f 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -82,10 +82,12 @@ exit:
 	spin_unlock_irqrestore(&chan->lock, flags);
 
+	/* kick start the timer immediately to avoid delays */
 	if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
 		if (!timekeeping_suspended) {
-			/* kick start the timer immediately to avoid delays */
-			hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+			/* but only if not already active */
+			if (!hrtimer_active(&chan->mbox->poll_hrt))
+				hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
 		}
 	}
 }
 
@@ -122,11 +125,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
 		struct mbox_chan *chan = &mbox->chans[i];
 
 		if (chan->active_req && chan->cl) {
+			resched = true;
 			txdone = chan->mbox->ops->last_tx_done(chan);
 			if (txdone)
 				tx_tick(chan, 0);
-			else
-				resched = true;
 		}
 	}
 
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index b24822ad8409c..9963bb9cd74fa 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -69,7 +69,7 @@ struct cmdq_task {
 struct cmdq {
 	struct mbox_controller	mbox;
 	void __iomem		*base;
-	u32			irq;
+	int			irq;
 	u32			thread_nr;
 	u32			irq_mask;
 	struct cmdq_thread	*thread;
@@ -466,10 +466,8 @@ static int cmdq_probe(struct platform_device *pdev)
 	}
 
 	cmdq->irq = platform_get_irq(pdev, 0);
-	if (!cmdq->irq) {
-		dev_err(dev, "failed to get irq\n");
-		return -EINVAL;
-	}
+	if (cmdq->irq < 0)
+		return cmdq->irq;
 
 	cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
 	cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 95a5f3757fa30..19b2601be3c5e 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1949,6 +1949,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(md_bitmap_load);
 
+/* caller need to free returned bitmap with md_bitmap_free() */
 struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
 {
 	int rv = 0;
@@ -2012,6 +2013,7 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
 	md_bitmap_unplug(mddev->bitmap);
 	*low = lo;
 	*high = hi;
+	md_bitmap_free(bitmap);
 
 	return rv;
 }
@@ -2615,4 +2617,3 @@ struct attribute_group md_bitmap_group = {
 	.name = "bitmap",
 	.attrs = md_bitmap_attrs,
 };
-
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index d50737ec40394..afbbc552c3275 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1166,6 +1166,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
 			 * can't resize bitmap
 			 */
 			goto out;
+		md_bitmap_free(bitmap);
 	}
 
 	return 0;
diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c
index 3f1ca40b9b987..8a8585261bb80 100644
--- a/drivers/media/firewire/firedtv-fw.c
+++ b/drivers/media/firewire/firedtv-fw.c
@@ -272,8 +272,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
 
 	name_len = fw_csr_string(unit->directory, CSR_MODEL,
 				 name, sizeof(name));
-	if (name_len < 0)
-		return name_len;
+	if (name_len < 0) {
+		err = name_len;
+		goto fail_free;
+	}
 	for (i = ARRAY_SIZE(model_names); --i; )
 		if (strlen(model_names[i]) <= name_len &&
 		    strncmp(name, model_names[i], name_len) == 0)
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index de295114ca482..21666d705e372 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -764,7 +764,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
 
 	ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
 	if (ret) {
-		info->set_power(&client->dev, 0);
+		if (info->set_power)
+			info->set_power(&client->dev, 0);
 		return ret;
 	}
 
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
|
|
index 2fe4a7ac05929..3a4268aa5f023 100644
|
|
--- a/drivers/media/i2c/ov5640.c
|
|
+++ b/drivers/media/i2c/ov5640.c
|
|
@@ -34,6 +34,8 @@
|
|
#define OV5640_REG_SYS_RESET02 0x3002
|
|
#define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006
|
|
#define OV5640_REG_SYS_CTRL0 0x3008
|
|
+#define OV5640_REG_SYS_CTRL0_SW_PWDN 0x42
|
|
+#define OV5640_REG_SYS_CTRL0_SW_PWUP 0x02
|
|
#define OV5640_REG_CHIP_ID 0x300a
|
|
#define OV5640_REG_IO_MIPI_CTRL00 0x300e
|
|
#define OV5640_REG_PAD_OUTPUT_ENABLE01 0x3017
|
|
@@ -274,8 +276,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
|
|
/* YUV422 UYVY VGA@30fps */
|
|
static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
|
|
{0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
|
|
- {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
|
|
- {0x3630, 0x36, 0, 0},
|
|
+ {0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
|
|
{0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
|
|
{0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
|
|
{0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
|
|
@@ -751,7 +752,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
|
|
* +->| PLL Root Div | - reg 0x3037, bit 4
|
|
* +-+------------+
|
|
* | +---------+
|
|
- * +->| Bit Div | - reg 0x3035, bits 0-3
|
|
+ * +->| Bit Div | - reg 0x3034, bits 0-3
|
|
* +-+-------+
|
|
* | +-------------+
|
|
* +->| SCLK Div | - reg 0x3108, bits 0-1
|
|
@@ -1120,6 +1121,12 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
|
|
val = regs->val;
|
|
mask = regs->mask;
|
|
|
|
+ /* remain in power down mode for DVP */
|
|
+ if (regs->reg_addr == OV5640_REG_SYS_CTRL0 &&
|
|
+ val == OV5640_REG_SYS_CTRL0_SW_PWUP &&
|
|
+ sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
|
|
+ continue;
|
|
+
|
|
if (mask)
|
|
ret = ov5640_mod_reg(sensor, reg_addr, mask, val);
|
|
else
|
|
@@ -1275,31 +1282,9 @@ static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
|
|
if (ret)
|
|
return ret;
|
|
|
|
- /*
|
|
- * enable VSYNC/HREF/PCLK DVP control lines
|
|
- * & D[9:6] DVP data lines
|
|
- *
|
|
- * PAD OUTPUT ENABLE 01
|
|
- * - 6: VSYNC output enable
|
|
- * - 5: HREF output enable
|
|
- * - 4: PCLK output enable
|
|
- * - [3:0]: D[9:6] output enable
|
|
- */
|
|
- ret = ov5640_write_reg(sensor,
|
|
- OV5640_REG_PAD_OUTPUT_ENABLE01,
|
|
- on ? 0x7f : 0);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- /*
|
|
- * enable D[5:0] DVP data lines
|
|
- *
|
|
- * PAD OUTPUT ENABLE 02
|
|
- * - [7:2]: D[5:0] output enable
|
|
- */
|
|
- return ov5640_write_reg(sensor,
|
|
- OV5640_REG_PAD_OUTPUT_ENABLE02,
|
|
- on ? 0xfc : 0);
|
|
+ return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ?
|
|
+ OV5640_REG_SYS_CTRL0_SW_PWUP :
|
|
+ OV5640_REG_SYS_CTRL0_SW_PWDN);
|
|
}
|
|
|
|
static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on)
|
|
@@ -2001,6 +1986,95 @@ static void ov5640_set_power_off(struct ov5640_dev *sensor)
|
|
clk_disable_unprepare(sensor->xclk);
|
|
}
|
|
|
|
+static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (!on) {
|
|
+ /* Reset MIPI bus settings to their default values. */
|
|
+ ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
|
|
+ ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x04);
|
|
+ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x00);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
|
|
+ *
|
|
+ * 0x300e = 0x40
|
|
+ * [7:5] = 010 : 2 data lanes mode (see FIXME note in
|
|
+ * "ov5640_set_stream_mipi()")
|
|
+ * [4] = 0 : Power up MIPI HS Tx
|
|
+ * [3] = 0 : Power up MIPI LS Rx
|
|
+ * [2] = 0 : MIPI interface disabled
|
|
+ */
|
|
+ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /*
|
|
+ * Gate clock and set LP11 in 'no packets mode' (idle)
|
|
+ *
|
|
+ * 0x4800 = 0x24
|
|
+ * [5] = 1 : Gate clock when 'no packets'
|
|
+ * [2] = 1 : MIPI bus in LP11 when 'no packets'
|
|
+ */
|
|
+ ret = ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x24);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /*
|
|
+ * Set data lanes and clock in LP11 when 'sleeping'
|
|
+ *
|
|
+ * 0x3019 = 0x70
|
|
+ * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
|
|
+ * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
|
|
+ * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
|
|
+ */
|
|
+ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x70);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /* Give lanes some time to coax into LP11 state. */
|
|
+ usleep_range(500, 1000);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (!on) {
|
|
+ /* Reset settings to their default values. */
|
|
+ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00);
|
|
+ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * enable VSYNC/HREF/PCLK DVP control lines
|
|
+ * & D[9:6] DVP data lines
|
|
+ *
|
|
+ * PAD OUTPUT ENABLE 01
|
|
+ * - 6: VSYNC output enable
|
|
+ * - 5: HREF output enable
|
|
+ * - 4: PCLK output enable
|
|
+ * - [3:0]: D[9:6] output enable
|
|
+ */
|
|
+ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x7f);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /*
|
|
+ * enable D[5:0] DVP data lines
|
|
+ *
|
|
+ * PAD OUTPUT ENABLE 02
|
|
+ * - [7:2]: D[5:0] output enable
|
|
+ */
|
|
+ return ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0xfc);
|
|
+}
|
|
+
|
|
static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
|
|
{
|
|
int ret = 0;
|
|
@@ -2013,67 +2087,17 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
|
|
ret = ov5640_restore_mode(sensor);
|
|
if (ret)
|
|
goto power_off;
|
|
+ }
|
|
|
|
- /* We're done here for DVP bus, while CSI-2 needs setup. */
|
|
- if (sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
|
|
- return 0;
|
|
-
|
|
- /*
|
|
- * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
|
|
- *
|
|
- * 0x300e = 0x40
|
|
- * [7:5] = 010 : 2 data lanes mode (see FIXME note in
|
|
- * "ov5640_set_stream_mipi()")
|
|
- * [4] = 0 : Power up MIPI HS Tx
|
|
- * [3] = 0 : Power up MIPI LS Rx
|
|
- * [2] = 0 : MIPI interface disabled
|
|
- */
|
|
- ret = ov5640_write_reg(sensor,
|
|
- OV5640_REG_IO_MIPI_CTRL00, 0x40);
|
|
- if (ret)
|
|
- goto power_off;
|
|
-
|
|
- /*
|
|
- * Gate clock and set LP11 in 'no packets mode' (idle)
|
|
- *
|
|
- * 0x4800 = 0x24
|
|
- * [5] = 1 : Gate clock when 'no packets'
|
|
- * [2] = 1 : MIPI bus in LP11 when 'no packets'
|
|
- */
|
|
- ret = ov5640_write_reg(sensor,
|
|
- OV5640_REG_MIPI_CTRL00, 0x24);
|
|
- if (ret)
|
|
- goto power_off;
|
|
-
|
|
- /*
|
|
- * Set data lanes and clock in LP11 when 'sleeping'
|
|
- *
|
|
- * 0x3019 = 0x70
|
|
- * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
|
|
- * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
|
|
- * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
|
|
- */
|
|
- ret = ov5640_write_reg(sensor,
|
|
- OV5640_REG_PAD_OUTPUT00, 0x70);
|
|
- if (ret)
|
|
- goto power_off;
|
|
-
|
|
- /* Give lanes some time to coax into LP11 state. */
|
|
- usleep_range(500, 1000);
|
|
-
|
|
- } else {
|
|
- if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
|
|
- /* Reset MIPI bus settings to their default values. */
|
|
- ov5640_write_reg(sensor,
|
|
- OV5640_REG_IO_MIPI_CTRL00, 0x58);
|
|
- ov5640_write_reg(sensor,
|
|
- OV5640_REG_MIPI_CTRL00, 0x04);
|
|
- ov5640_write_reg(sensor,
|
|
- OV5640_REG_PAD_OUTPUT00, 0x00);
|
|
- }
|
|
+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
|
|
+ ret = ov5640_set_power_mipi(sensor, on);
|
|
+ else
|
|
+ ret = ov5640_set_power_dvp(sensor, on);
|
|
+ if (ret)
|
|
+ goto power_off;
|
|
|
|
+ if (!on)
|
|
ov5640_set_power_off(sensor);
|
|
- }
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
|
|
index dbbab75f135ec..cff99cf61ed4d 100644
|
|
--- a/drivers/media/i2c/tc358743.c
|
|
+++ b/drivers/media/i2c/tc358743.c
|
|
@@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = {
|
|
.adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
|
|
};
|
|
|
|
-static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
|
|
- bool *handled)
|
|
+static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus,
|
|
+ bool *handled)
|
|
{
|
|
struct tc358743_state *state = to_state(sd);
|
|
unsigned int cec_rxint, cec_txint;
|
|
@@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
|
|
cec_transmit_attempt_done(state->cec_adap,
|
|
CEC_TX_STATUS_ERROR);
|
|
}
|
|
- *handled = true;
|
|
+ if (handled)
|
|
+ *handled = true;
|
|
}
|
|
if ((intstatus & MASK_CEC_RINT) &&
|
|
(cec_rxint & MASK_CECRIEND)) {
|
|
@@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
|
|
msg.msg[i] = v & 0xff;
|
|
}
|
|
cec_received_msg(state->cec_adap, &msg);
|
|
- *handled = true;
|
|
+ if (handled)
|
|
+ *handled = true;
|
|
}
|
|
i2c_wr16(sd, INTSTATUS,
|
|
intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
|
|
@@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
|
|
|
|
#ifdef CONFIG_VIDEO_TC358743_CEC
|
|
if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
|
|
- tc358743_cec_isr(sd, intstatus, handled);
|
|
+ tc358743_cec_handler(sd, intstatus, handled);
|
|
i2c_wr16(sd, INTSTATUS,
|
|
intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
|
|
intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
|
|
@@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
|
|
static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
|
|
{
|
|
struct tc358743_state *state = dev_id;
|
|
- bool handled;
|
|
+ bool handled = false;
|
|
|
|
tc358743_isr(&state->sd, 0, &handled);
|
|
|
|
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
|
|
index 9144f795fb933..b721720f9845a 100644
|
|
--- a/drivers/media/pci/bt8xx/bttv-driver.c
|
|
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
|
|
@@ -4013,11 +4013,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
|
|
btv->id = dev->device;
|
|
if (pci_enable_device(dev)) {
|
|
pr_warn("%d: Can't enable device\n", btv->c.nr);
|
|
- return -EIO;
|
|
+ result = -EIO;
|
|
+ goto free_mem;
|
|
}
|
|
if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
|
|
pr_warn("%d: No suitable DMA available\n", btv->c.nr);
|
|
- return -EIO;
|
|
+ result = -EIO;
|
|
+ goto free_mem;
|
|
}
|
|
if (!request_mem_region(pci_resource_start(dev,0),
|
|
pci_resource_len(dev,0),
|
|
@@ -4025,7 +4027,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
|
|
pr_warn("%d: can't request iomem (0x%llx)\n",
|
|
btv->c.nr,
|
|
(unsigned long long)pci_resource_start(dev, 0));
|
|
- return -EBUSY;
|
|
+ result = -EBUSY;
|
|
+ goto free_mem;
|
|
}
|
|
pci_set_master(dev);
|
|
pci_set_command(dev);
|
|
@@ -4211,6 +4214,10 @@ fail0:
|
|
release_mem_region(pci_resource_start(btv->c.pci,0),
|
|
pci_resource_len(btv->c.pci,0));
|
|
pci_disable_device(btv->c.pci);
|
|
+
|
|
+free_mem:
|
|
+ bttvs[btv->c.nr] = NULL;
|
|
+ kfree(btv);
|
|
return result;
|
|
}
|
|
|
|
diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
|
|
index 79e1afb710758..5cc4ef21f9d37 100644
|
|
--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
|
|
+++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
|
|
@@ -683,7 +683,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value)
|
|
{
|
|
int err;
|
|
|
|
- audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value);
|
|
+ audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n",
|
|
+ (reg << 2) & 0xffffffff, value);
|
|
err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR);
|
|
if (err < 0)
|
|
return err;
|
|
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
|
|
index cde0d254ec1c4..a77c49b185115 100644
|
|
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
|
|
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
|
|
@@ -305,8 +305,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
|
|
|
|
if (on) {
|
|
ret = pm_runtime_get_sync(&is->pdev->dev);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put(&is->pdev->dev);
|
|
return ret;
|
|
+ }
|
|
set_bit(IS_ST_PWR_ON, &is->state);
|
|
|
|
ret = fimc_is_start_firmware(is);
|
|
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
|
|
index 394e0818f2d5c..92130d7791378 100644
|
|
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
|
|
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
|
|
@@ -470,7 +470,7 @@ static int fimc_lite_open(struct file *file)
|
|
set_bit(ST_FLITE_IN_USE, &fimc->state);
|
|
ret = pm_runtime_get_sync(&fimc->pdev->dev);
|
|
if (ret < 0)
|
|
- goto unlock;
|
|
+ goto err_pm;
|
|
|
|
ret = v4l2_fh_open(file);
|
|
if (ret < 0)
|
|
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
|
|
index 9c31d950cddf7..a07d796f63df0 100644
|
|
--- a/drivers/media/platform/exynos4-is/media-dev.c
|
|
+++ b/drivers/media/platform/exynos4-is/media-dev.c
|
|
@@ -484,8 +484,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
|
|
return -ENXIO;
|
|
|
|
ret = pm_runtime_get_sync(fmd->pmf);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put(fmd->pmf);
|
|
return ret;
|
|
+ }
|
|
|
|
fmd->num_sensors = 0;
|
|
|
|
@@ -1268,11 +1270,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
|
|
if (IS_ERR(pctl->state_default))
|
|
return PTR_ERR(pctl->state_default);
|
|
|
|
+ /* PINCTRL_STATE_IDLE is optional */
|
|
pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
|
|
PINCTRL_STATE_IDLE);
|
|
- if (IS_ERR(pctl->state_idle))
|
|
- return PTR_ERR(pctl->state_idle);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
|
|
index 540151bbf58f2..1aac167abb175 100644
|
|
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
|
|
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
|
|
@@ -510,8 +510,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
|
|
if (enable) {
|
|
s5pcsis_clear_counters(state);
|
|
ret = pm_runtime_get_sync(&state->pdev->dev);
|
|
- if (ret && ret != 1)
|
|
+ if (ret && ret != 1) {
|
|
+ pm_runtime_put_noidle(&state->pdev->dev);
|
|
return ret;
|
|
+ }
|
|
}
|
|
|
|
mutex_lock(&state->lock);
|
|
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
|
|
index df78df59da456..08a5473b56104 100644
|
|
--- a/drivers/media/platform/mx2_emmaprp.c
|
|
+++ b/drivers/media/platform/mx2_emmaprp.c
|
|
@@ -852,8 +852,11 @@ static int emmaprp_probe(struct platform_device *pdev)
|
|
platform_set_drvdata(pdev, pcdev);
|
|
|
|
irq = platform_get_irq(pdev, 0);
|
|
- if (irq < 0)
|
|
- return irq;
|
|
+ if (irq < 0) {
|
|
+ ret = irq;
|
|
+ goto rel_vdev;
|
|
+ }
|
|
+
|
|
ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
|
|
dev_name(&pdev->dev), pcdev);
|
|
if (ret)
|
|
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
|
|
index b91e472ee764e..de066757726de 100644
|
|
--- a/drivers/media/platform/omap3isp/isp.c
|
|
+++ b/drivers/media/platform/omap3isp/isp.c
|
|
@@ -2328,8 +2328,10 @@ static int isp_probe(struct platform_device *pdev)
|
|
mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
|
|
isp->mmio_base[map_idx] =
|
|
devm_ioremap_resource(isp->dev, mem);
|
|
- if (IS_ERR(isp->mmio_base[map_idx]))
|
|
- return PTR_ERR(isp->mmio_base[map_idx]);
|
|
+ if (IS_ERR(isp->mmio_base[map_idx])) {
|
|
+ ret = PTR_ERR(isp->mmio_base[map_idx]);
|
|
+ goto error;
|
|
+ }
|
|
}
|
|
|
|
ret = isp_get_clocks(isp);
|
|
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
|
|
index 008afb85023be..3c5b9082ad723 100644
|
|
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
|
|
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
|
|
@@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
|
|
int ret;
|
|
|
|
ret = pm_runtime_get_sync(dev);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put_sync(dev);
|
|
return ret;
|
|
+ }
|
|
|
|
ret = csiphy_set_clock_rates(csiphy);
|
|
if (ret < 0) {
|
|
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
|
|
index 203c6538044fb..321ad77cb6cf4 100644
|
|
--- a/drivers/media/platform/qcom/venus/core.c
|
|
+++ b/drivers/media/platform/qcom/venus/core.c
|
|
@@ -224,13 +224,15 @@ static int venus_probe(struct platform_device *pdev)
|
|
|
|
ret = dma_set_mask_and_coherent(dev, core->res->dma_mask);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err_core_put;
|
|
|
|
if (!dev->dma_parms) {
|
|
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
|
|
GFP_KERNEL);
|
|
- if (!dev->dma_parms)
|
|
- return -ENOMEM;
|
|
+ if (!dev->dma_parms) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_core_put;
|
|
+ }
|
|
}
|
|
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
|
|
|
|
@@ -242,11 +244,11 @@ static int venus_probe(struct platform_device *pdev)
|
|
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
|
|
"venus", core);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err_core_put;
|
|
|
|
ret = hfi_create(core, &venus_core_ops);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err_core_put;
|
|
|
|
pm_runtime_enable(dev);
|
|
|
|
@@ -287,8 +289,10 @@ static int venus_probe(struct platform_device *pdev)
|
|
goto err_core_deinit;
|
|
|
|
ret = pm_runtime_put_sync(dev);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ pm_runtime_get_noresume(dev);
|
|
goto err_dev_unregister;
|
|
+ }
|
|
|
|
return 0;
|
|
|
|
@@ -299,9 +303,13 @@ err_core_deinit:
|
|
err_venus_shutdown:
|
|
venus_shutdown(core);
|
|
err_runtime_disable:
|
|
+ pm_runtime_put_noidle(dev);
|
|
pm_runtime_set_suspended(dev);
|
|
pm_runtime_disable(dev);
|
|
hfi_destroy(core);
|
|
+err_core_put:
|
|
+ if (core->pm_ops->core_put)
|
|
+ core->pm_ops->core_put(dev);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
|
|
index 7c4c483d54389..76be14efbfb09 100644
|
|
--- a/drivers/media/platform/qcom/venus/vdec.c
|
|
+++ b/drivers/media/platform/qcom/venus/vdec.c
|
|
@@ -1088,8 +1088,6 @@ static int vdec_stop_capture(struct venus_inst *inst)
|
|
break;
|
|
}
|
|
|
|
- INIT_LIST_HEAD(&inst->registeredbufs);
|
|
-
|
|
return ret;
|
|
}
|
|
|
|
@@ -1189,6 +1187,14 @@ static int vdec_buf_init(struct vb2_buffer *vb)
|
|
static void vdec_buf_cleanup(struct vb2_buffer *vb)
|
|
{
|
|
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
|
|
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
|
|
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
|
|
+
|
|
+ mutex_lock(&inst->lock);
|
|
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
|
|
+ if (!list_empty(&inst->registeredbufs))
|
|
+ list_del_init(&buf->reg_list);
|
|
+ mutex_unlock(&inst->lock);
|
|
|
|
inst->buf_count--;
|
|
if (!inst->buf_count)
|
|
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
|
|
index 5c6b00737fe75..05c712e00a2a7 100644
|
|
--- a/drivers/media/platform/rcar-fcp.c
|
|
+++ b/drivers/media/platform/rcar-fcp.c
|
|
@@ -103,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp)
|
|
return 0;
|
|
|
|
ret = pm_runtime_get_sync(fcp->dev);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put_noidle(fcp->dev);
|
|
return ret;
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
|
|
index 151e6a90c5fbc..d9bc8cef7db58 100644
|
|
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
|
|
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
|
|
@@ -361,7 +361,6 @@ struct rcar_csi2 {
|
|
struct media_pad pads[NR_OF_RCAR_CSI2_PAD];
|
|
|
|
struct v4l2_async_notifier notifier;
|
|
- struct v4l2_async_subdev asd;
|
|
struct v4l2_subdev *remote;
|
|
|
|
struct v4l2_mbus_framefmt mf;
|
|
@@ -810,6 +809,8 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
|
|
|
|
static int rcsi2_parse_dt(struct rcar_csi2 *priv)
|
|
{
|
|
+ struct v4l2_async_subdev *asd;
|
|
+ struct fwnode_handle *fwnode;
|
|
struct device_node *ep;
|
|
struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
|
|
int ret;
|
|
@@ -833,24 +834,19 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
|
|
return ret;
|
|
}
|
|
|
|
- priv->asd.match.fwnode =
|
|
- fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
|
|
- priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
|
|
-
|
|
+ fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
|
|
of_node_put(ep);
|
|
|
|
- v4l2_async_notifier_init(&priv->notifier);
|
|
-
|
|
- ret = v4l2_async_notifier_add_subdev(&priv->notifier, &priv->asd);
|
|
- if (ret) {
|
|
- fwnode_handle_put(priv->asd.match.fwnode);
|
|
- return ret;
|
|
- }
|
|
+ dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
|
|
|
|
+ v4l2_async_notifier_init(&priv->notifier);
|
|
priv->notifier.ops = &rcar_csi2_notify_ops;
|
|
|
|
- dev_dbg(priv->dev, "Found '%pOF'\n",
|
|
- to_of_node(priv->asd.match.fwnode));
|
|
+ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
|
|
+ sizeof(*asd));
|
|
+ fwnode_handle_put(fwnode);
|
|
+ if (IS_ERR(asd))
|
|
+ return PTR_ERR(asd);
|
|
|
|
ret = v4l2_async_subdev_notifier_register(&priv->subdev,
|
|
&priv->notifier);
|
|
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
|
|
index 1a30cd0363711..95bc9e0e87926 100644
|
|
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
|
|
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
|
|
@@ -1392,8 +1392,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
|
|
int ret;
|
|
|
|
ret = pm_runtime_get_sync(vin->dev);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put_noidle(vin->dev);
|
|
return ret;
|
|
+ }
|
|
|
|
/* Make register writes take effect immediately. */
|
|
vnmc = rvin_read(vin, VNMC_REG);
|
|
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
|
|
index 3d2451ac347d7..f318cd4b8086f 100644
|
|
--- a/drivers/media/platform/rcar_drif.c
|
|
+++ b/drivers/media/platform/rcar_drif.c
|
|
@@ -185,7 +185,6 @@ struct rcar_drif_frame_buf {
|
|
/* OF graph endpoint's V4L2 async data */
|
|
struct rcar_drif_graph_ep {
|
|
struct v4l2_subdev *subdev; /* Async matched subdev */
|
|
- struct v4l2_async_subdev asd; /* Async sub-device descriptor */
|
|
};
|
|
|
|
/* DMA buffer */
|
|
@@ -1109,12 +1108,6 @@ static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
|
|
struct rcar_drif_sdr *sdr =
|
|
container_of(notifier, struct rcar_drif_sdr, notifier);
|
|
|
|
- if (sdr->ep.asd.match.fwnode !=
|
|
- of_fwnode_handle(subdev->dev->of_node)) {
|
|
- rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
v4l2_set_subdev_hostdata(subdev, sdr);
|
|
sdr->ep.subdev = subdev;
|
|
rdrif_dbg(sdr, "bound asd %s\n", subdev->name);
|
|
@@ -1218,7 +1211,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
|
|
{
|
|
struct v4l2_async_notifier *notifier = &sdr->notifier;
|
|
struct fwnode_handle *fwnode, *ep;
|
|
- int ret;
|
|
+ struct v4l2_async_subdev *asd;
|
|
|
|
v4l2_async_notifier_init(notifier);
|
|
|
|
@@ -1227,26 +1220,21 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
|
|
if (!ep)
|
|
return 0;
|
|
|
|
+ /* Get the endpoint properties */
|
|
+ rcar_drif_get_ep_properties(sdr, ep);
|
|
+
|
|
fwnode = fwnode_graph_get_remote_port_parent(ep);
|
|
+ fwnode_handle_put(ep);
|
|
if (!fwnode) {
|
|
dev_warn(sdr->dev, "bad remote port parent\n");
|
|
- fwnode_handle_put(ep);
|
|
return -EINVAL;
|
|
}
|
|
|
|
- sdr->ep.asd.match.fwnode = fwnode;
|
|
- sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
|
|
- ret = v4l2_async_notifier_add_subdev(notifier, &sdr->ep.asd);
|
|
- if (ret) {
|
|
- fwnode_handle_put(fwnode);
|
|
- return ret;
|
|
- }
|
|
-
|
|
- /* Get the endpoint properties */
|
|
- rcar_drif_get_ep_properties(sdr, ep);
|
|
-
|
|
+ asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
|
|
+ sizeof(*asd));
|
|
fwnode_handle_put(fwnode);
|
|
- fwnode_handle_put(ep);
|
|
+ if (IS_ERR(asd))
|
|
+ return PTR_ERR(asd);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
|
|
index 36b821ccc1dba..bf9a75b75083b 100644
|
|
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
|
|
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
|
|
@@ -81,6 +81,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
|
|
|
|
ret = pm_runtime_get_sync(rga->dev);
|
|
if (ret < 0) {
|
|
+ pm_runtime_put_noidle(rga->dev);
|
|
rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
|
|
index c6fbcd7036d6d..ee624804862e2 100644
|
|
--- a/drivers/media/platform/s3c-camif/camif-core.c
|
|
+++ b/drivers/media/platform/s3c-camif/camif-core.c
|
|
@@ -464,7 +464,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
|
|
|
|
ret = camif_media_dev_init(camif);
|
|
if (ret < 0)
|
|
- goto err_alloc;
|
|
+ goto err_pm;
|
|
|
|
ret = camif_register_sensor(camif);
|
|
if (ret < 0)
|
|
@@ -498,10 +498,9 @@ err_sens:
|
|
media_device_unregister(&camif->media_dev);
|
|
media_device_cleanup(&camif->media_dev);
|
|
camif_unregister_media_entities(camif);
|
|
-err_alloc:
|
|
+err_pm:
|
|
pm_runtime_put(dev);
|
|
pm_runtime_disable(dev);
|
|
-err_pm:
|
|
camif_clk_put(camif);
|
|
err_clk:
|
|
s3c_camif_unregister_subdev(camif);
|
|
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
|
|
index 7d52431c2c837..62d2320a72186 100644
|
|
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
|
|
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
|
|
@@ -79,8 +79,10 @@ int s5p_mfc_power_on(void)
|
|
int i, ret = 0;
|
|
|
|
ret = pm_runtime_get_sync(pm->device);
|
|
- if (ret < 0)
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put_noidle(pm->device);
|
|
return ret;
|
|
+ }
|
|
|
|
/* clock control */
|
|
for (i = 0; i < pm->num_clocks; i++) {
|
|
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
|
|
index af2d5eb782cee..e1d150584bdc2 100644
|
|
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
|
|
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
|
|
@@ -1371,7 +1371,7 @@ static int bdisp_probe(struct platform_device *pdev)
|
|
ret = pm_runtime_get_sync(dev);
|
|
if (ret < 0) {
|
|
dev_err(dev, "failed to set PM\n");
|
|
- goto err_dbg;
|
|
+ goto err_pm;
|
|
}
|
|
|
|
/* Filters */
|
|
@@ -1399,7 +1399,6 @@ err_filter:
|
|
bdisp_hw_free_filters(bdisp->dev);
|
|
err_pm:
|
|
pm_runtime_put(dev);
|
|
-err_dbg:
|
|
bdisp_debugfs_remove(bdisp);
|
|
err_v4l2:
|
|
v4l2_device_unregister(&bdisp->v4l2_dev);
|
|
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
|
|
index 2503224eeee51..c691b3d81549d 100644
|
|
--- a/drivers/media/platform/sti/delta/delta-v4l2.c
|
|
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
|
|
@@ -954,8 +954,10 @@ static void delta_run_work(struct work_struct *work)
|
|
/* enable the hardware */
|
|
if (!dec->pm) {
|
|
ret = delta_get_sync(ctx);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ delta_put_autosuspend(ctx);
|
|
goto err;
|
|
+ }
|
|
}
|
|
|
|
/* decode this access unit */
|
|
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
|
|
index 401aaafa17109..43f279e2a6a38 100644
|
|
--- a/drivers/media/platform/sti/hva/hva-hw.c
|
|
+++ b/drivers/media/platform/sti/hva/hva-hw.c
|
|
@@ -272,6 +272,7 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
|
|
|
|
if (pm_runtime_get_sync(dev) < 0) {
|
|
dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
|
|
+ pm_runtime_put_noidle(dev);
|
|
mutex_unlock(&hva->protect_mutex);
|
|
return -EFAULT;
|
|
}
|
|
@@ -388,7 +389,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
|
|
ret = pm_runtime_get_sync(dev);
|
|
if (ret < 0) {
|
|
dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
|
|
- goto err_clk;
|
|
+ goto err_pm;
|
|
}
|
|
|
|
/* check IP hardware version */
|
|
@@ -553,6 +554,7 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
|
|
|
|
if (pm_runtime_get_sync(dev) < 0) {
|
|
seq_puts(s, "Cannot wake up IP\n");
|
|
+ pm_runtime_put_noidle(dev);
|
|
mutex_unlock(&hva->protect_mutex);
|
|
return;
|
|
}
|
|
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
|
|
index b8931490b83b7..fd1c41cba52fc 100644
|
|
--- a/drivers/media/platform/stm32/stm32-dcmi.c
|
|
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
|
|
@@ -733,7 +733,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
|
|
if (ret < 0) {
|
|
dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
|
|
__func__, ret);
|
|
- goto err_release_buffers;
|
|
+ goto err_pm_put;
|
|
}
|
|
|
|
ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
|
|
@@ -837,8 +837,6 @@ err_media_pipeline_stop:
|
|
|
|
err_pm_put:
|
|
pm_runtime_put(dcmi->dev);
|
|
-
|
|
-err_release_buffers:
|
|
spin_lock_irq(&dcmi->irqlock);
|
|
/*
|
|
* Return all buffers to vb2 in QUEUED state.
|
|
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
|
|
index cff2fcd6d812a..82d3ee45e2e90 100644
|
|
--- a/drivers/media/platform/ti-vpe/vpe.c
|
|
+++ b/drivers/media/platform/ti-vpe/vpe.c
|
|
@@ -2475,6 +2475,8 @@ static int vpe_runtime_get(struct platform_device *pdev)
|
|
|
|
r = pm_runtime_get_sync(&pdev->dev);
|
|
WARN_ON(r < 0);
|
|
+ if (r)
|
|
+ pm_runtime_put_noidle(&pdev->dev);
|
|
return r < 0 ? r : 0;
|
|
}
|
|
|
|
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
|
|
index c650e45bb0ad1..dc62533cf32ce 100644
|
|
--- a/drivers/media/platform/vsp1/vsp1_drv.c
|
|
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
|
|
@@ -562,7 +562,12 @@ int vsp1_device_get(struct vsp1_device *vsp1)
|
|
int ret;
|
|
|
|
ret = pm_runtime_get_sync(vsp1->dev);
|
|
- return ret < 0 ? ret : 0;
|
|
+ if (ret < 0) {
|
|
+ pm_runtime_put_noidle(vsp1->dev);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
/*
|
|
@@ -845,12 +850,12 @@ static int vsp1_probe(struct platform_device *pdev)
|
|
/* Configure device parameters based on the version register. */
|
|
pm_runtime_enable(&pdev->dev);
|
|
|
|
- ret = pm_runtime_get_sync(&pdev->dev);
|
|
+ ret = vsp1_device_get(vsp1);
|
|
if (ret < 0)
|
|
goto done;
|
|
|
|
vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
|
|
- pm_runtime_put_sync(&pdev->dev);
|
|
+ vsp1_device_put(vsp1);
|
|
|
|
for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
|
|
if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
|
|
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
|
|
index 9cdef17b4793f..c12dda73cdd53 100644
|
|
--- a/drivers/media/rc/ati_remote.c
|
|
+++ b/drivers/media/rc/ati_remote.c
|
|
@@ -835,6 +835,10 @@ static int ati_remote_probe(struct usb_interface *interface,
|
|
err("%s: endpoint_in message size==0? \n", __func__);
|
|
return -ENODEV;
|
|
}
|
|
+ if (!usb_endpoint_is_int_out(endpoint_out)) {
|
|
+ err("%s: Unexpected endpoint_out\n", __func__);
|
|
+ return -ENODEV;
|
|
+ }
|
|
|
|
ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
|
|
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
|
|
diff --git a/drivers/media/test-drivers/vivid/vivid-meta-out.c b/drivers/media/test-drivers/vivid/vivid-meta-out.c
|
|
index ff8a039aba72e..95835b52b58fc 100644
|
|
--- a/drivers/media/test-drivers/vivid/vivid-meta-out.c
|
|
+++ b/drivers/media/test-drivers/vivid/vivid-meta-out.c
|
|
@@ -164,10 +164,11 @@ void vivid_meta_out_process(struct vivid_dev *dev,
|
|
{
|
|
struct vivid_meta_out_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
|
|
|
|
- tpg_s_brightness(&dev->tpg, meta->brightness);
|
|
- tpg_s_contrast(&dev->tpg, meta->contrast);
|
|
- tpg_s_saturation(&dev->tpg, meta->saturation);
|
|
- tpg_s_hue(&dev->tpg, meta->hue);
|
|
+ v4l2_ctrl_s_ctrl(dev->brightness, meta->brightness);
|
|
+ v4l2_ctrl_s_ctrl(dev->contrast, meta->contrast);
|
|
+ v4l2_ctrl_s_ctrl(dev->saturation, meta->saturation);
|
|
+ v4l2_ctrl_s_ctrl(dev->hue, meta->hue);
|
|
+
|
|
dprintk(dev, 2, " %s brightness %u contrast %u saturation %u hue %d\n",
|
|
__func__, meta->brightness, meta->contrast,
|
|
meta->saturation, meta->hue);
|
|
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
|
|
index b6e70fada3fb2..8fb186b25d6af 100644
|
|
--- a/drivers/media/tuners/tuner-simple.c
|
|
+++ b/drivers/media/tuners/tuner-simple.c
|
|
@@ -500,7 +500,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
|
|
case TUNER_TENA_9533_DI:
|
|
case TUNER_YMEC_TVF_5533MF:
|
|
tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
|
|
- return 0;
|
|
+ return -EINVAL;
|
|
case TUNER_PHILIPS_FM1216ME_MK3:
|
|
case TUNER_PHILIPS_FM1236_MK3:
|
|
case TUNER_PHILIPS_FMD1216ME_MK3:
|
|
@@ -702,7 +702,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
|
|
TUNER_RATIO_SELECT_50; /* 50 kHz step */
|
|
|
|
/* Bandswitch byte */
|
|
- simple_radio_bandswitch(fe, &buffer[0]);
|
|
+ if (simple_radio_bandswitch(fe, &buffer[0]))
|
|
+ return 0;
|
|
|
|
/* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
|
|
freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =
|
|
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
|
|
index e399b9fad7574..a30a8a731eda8 100644
|
|
--- a/drivers/media/usb/uvc/uvc_ctrl.c
|
|
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
|
|
@@ -773,12 +773,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
|
|
offset &= 7;
|
|
mask = ((1LL << bits) - 1) << offset;
|
|
|
|
- for (; bits > 0; data++) {
|
|
+ while (1) {
|
|
u8 byte = *data & mask;
|
|
value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
|
|
bits -= 8 - (offset > 0 ? offset : 0);
|
|
+ if (bits <= 0)
|
|
+ break;
|
|
+
|
|
offset -= 8;
|
|
mask = (1 << bits) - 1;
|
|
+ data++;
|
|
}
|
|
|
|
/* Sign-extend the value if needed. */
|
|
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
|
|
index b4499cddeffe5..ca3a9c2eec271 100644
|
|
--- a/drivers/media/usb/uvc/uvc_entity.c
|
|
+++ b/drivers/media/usb/uvc/uvc_entity.c
|
|
@@ -73,10 +73,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
|
|
int ret;
|
|
|
|
if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
|
|
+ u32 function;
|
|
+
|
|
v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
|
|
strscpy(entity->subdev.name, entity->name,
|
|
sizeof(entity->subdev.name));
|
|
|
|
+ switch (UVC_ENTITY_TYPE(entity)) {
|
|
+ case UVC_VC_SELECTOR_UNIT:
|
|
+ function = MEDIA_ENT_F_VID_MUX;
|
|
+ break;
|
|
+ case UVC_VC_PROCESSING_UNIT:
|
|
+ case UVC_VC_EXTENSION_UNIT:
|
|
+ /* For lack of a better option. */
|
|
+ function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
|
|
+ break;
|
|
+ case UVC_COMPOSITE_CONNECTOR:
|
|
+ case UVC_COMPONENT_CONNECTOR:
|
|
+ function = MEDIA_ENT_F_CONN_COMPOSITE;
|
|
+ break;
|
|
+ case UVC_SVIDEO_CONNECTOR:
|
|
+ function = MEDIA_ENT_F_CONN_SVIDEO;
|
|
+ break;
|
|
+ case UVC_ITT_CAMERA:
|
|
+ function = MEDIA_ENT_F_CAM_SENSOR;
|
|
+ break;
|
|
+ case UVC_TT_VENDOR_SPECIFIC:
|
|
+ case UVC_ITT_VENDOR_SPECIFIC:
|
|
+ case UVC_ITT_MEDIA_TRANSPORT_INPUT:
|
|
+ case UVC_OTT_VENDOR_SPECIFIC:
|
|
+ case UVC_OTT_DISPLAY:
|
|
+ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
|
|
+ case UVC_EXTERNAL_VENDOR_SPECIFIC:
|
|
+ default:
|
|
+ function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ entity->subdev.entity.function = function;
|
|
+
|
|
ret = media_entity_pads_init(&entity->subdev.entity,
|
|
entity->num_pads, entity->pads);
|
|
|
|
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
|
|
index 0335e69b70abe..5e6f3153b5ff8 100644
|
|
--- a/drivers/media/usb/uvc/uvc_v4l2.c
|
|
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
|
|
@@ -247,11 +247,41 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
|
|
if (ret < 0)
|
|
goto done;
|
|
|
|
+ /* After the probe, update fmt with the values returned from
|
|
+ * negotiation with the device.
|
|
+ */
|
|
+ for (i = 0; i < stream->nformats; ++i) {
|
|
+ if (probe->bFormatIndex == stream->format[i].index) {
|
|
+ format = &stream->format[i];
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (i == stream->nformats) {
|
|
+ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
|
|
+ probe->bFormatIndex);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < format->nframes; ++i) {
|
|
+ if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
|
|
+ frame = &format->frame[i];
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (i == format->nframes) {
|
|
+ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
|
|
+ probe->bFrameIndex);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
fmt->fmt.pix.width = frame->wWidth;
|
|
fmt->fmt.pix.height = frame->wHeight;
|
|
fmt->fmt.pix.field = V4L2_FIELD_NONE;
|
|
fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
|
|
fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
|
|
+ fmt->fmt.pix.pixelformat = format->fcc;
|
|
fmt->fmt.pix.colorspace = format->colorspace;
|
|
|
|
if (uvc_format != NULL)
|
|
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
|
|
index 0b0ed72016da8..0309bd5a18008 100644
|
|
--- a/drivers/memory/fsl-corenet-cf.c
|
|
+++ b/drivers/memory/fsl-corenet-cf.c
|
|
@@ -211,10 +211,8 @@ static int ccf_probe(struct platform_device *pdev)
|
|
dev_set_drvdata(&pdev->dev, ccf);
|
|
|
|
irq = platform_get_irq(pdev, 0);
|
|
- if (!irq) {
|
|
- dev_err(&pdev->dev, "%s: no irq\n", __func__);
|
|
- return -ENXIO;
|
|
- }
|
|
+ if (irq < 0)
|
|
+ return irq;
|
|
|
|
ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf);
|
|
if (ret) {
|
|
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
|
|
index eff26c1b13940..27bc417029e11 100644
|
|
--- a/drivers/memory/omap-gpmc.c
|
|
+++ b/drivers/memory/omap-gpmc.c
|
|
@@ -949,7 +949,7 @@ static int gpmc_cs_remap(int cs, u32 base)
|
|
int ret;
|
|
u32 old_base, size;
|
|
|
|
- if (cs > gpmc_cs_num) {
|
|
+ if (cs >= gpmc_cs_num) {
|
|
pr_err("%s: requested chip-select is disabled\n", __func__);
|
|
return -ENODEV;
|
|
}
|
|
@@ -984,7 +984,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
|
|
struct resource *res = &gpmc->mem;
|
|
int r = -1;
|
|
|
|
- if (cs > gpmc_cs_num) {
|
|
+ if (cs >= gpmc_cs_num) {
|
|
pr_err("%s: requested chip-select is disabled\n", __func__);
|
|
return -ENODEV;
|
|
}
|
|
@@ -2274,6 +2274,10 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
|
|
}
|
|
}
|
|
#else
|
|
+void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
|
|
+{
|
|
+ memset(p, 0, sizeof(*p));
|
|
+}
|
|
static int gpmc_probe_dt(struct platform_device *pdev)
|
|
{
|
|
return 0;
|
|
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
|
|
index ccd62b9639528..6d2f4a0a901dc 100644
|
|
--- a/drivers/mfd/sm501.c
|
|
+++ b/drivers/mfd/sm501.c
|
|
@@ -1415,8 +1415,14 @@ static int sm501_plat_probe(struct platform_device *dev)
|
|
goto err_claim;
|
|
}
|
|
|
|
- return sm501_init_dev(sm);
|
|
+ ret = sm501_init_dev(sm);
|
|
+ if (ret)
|
|
+ goto err_unmap;
|
|
+
|
|
+ return 0;
|
|
|
|
+ err_unmap:
|
|
+ iounmap(sm->regs);
|
|
err_claim:
|
|
release_mem_region(sm->io_res->start, 0x100);
|
|
err_res:
|
|
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
|
|
index 0d5928bc1b6d7..82246f7aec6fb 100644
|
|
--- a/drivers/misc/cardreader/rtsx_pcr.c
|
|
+++ b/drivers/misc/cardreader/rtsx_pcr.c
|
|
@@ -1536,12 +1536,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
|
|
ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
|
|
ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
|
|
if (ret < 0)
|
|
- goto disable_irq;
|
|
+ goto free_slots;
|
|
|
|
schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
|
|
|
|
return 0;
|
|
|
|
+free_slots:
|
|
+ kfree(pcr->slots);
|
|
disable_irq:
|
|
free_irq(pcr->irq, (void *)pcr);
|
|
disable_msi:
|
|
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
|
|
index cde9a2fc13250..490ff49d11ede 100644
|
|
--- a/drivers/misc/eeprom/at25.c
|
|
+++ b/drivers/misc/eeprom/at25.c
|
|
@@ -358,7 +358,7 @@ static int at25_probe(struct spi_device *spi)
|
|
at25->nvmem_config.reg_read = at25_ee_read;
|
|
at25->nvmem_config.reg_write = at25_ee_write;
|
|
at25->nvmem_config.priv = at25;
|
|
- at25->nvmem_config.stride = 4;
|
|
+ at25->nvmem_config.stride = 1;
|
|
at25->nvmem_config.word_size = 1;
|
|
at25->nvmem_config.size = chip.byte_len;
|
|
|
|
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
|
|
index ca183733847b6..bcc45bf7af2c8 100644
|
|
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
|
|
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
|
|
@@ -6285,7 +6285,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
|
|
is_idle &= is_eng_idle;
|
|
|
|
if (mask)
|
|
- *mask |= !is_eng_idle <<
|
|
+ *mask |= ((u64) !is_eng_idle) <<
|
|
(GAUDI_ENGINE_ID_DMA_0 + dma_id);
|
|
if (s)
|
|
seq_printf(s, fmt, dma_id,
|
|
@@ -6308,7 +6308,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
|
|
is_idle &= is_eng_idle;
|
|
|
|
if (mask)
|
|
- *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_TPC_0 + i);
|
|
+ *mask |= ((u64) !is_eng_idle) <<
|
|
+ (GAUDI_ENGINE_ID_TPC_0 + i);
|
|
if (s)
|
|
seq_printf(s, fmt, i,
|
|
is_eng_idle ? "Y" : "N",
|
|
@@ -6336,7 +6337,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
|
|
is_idle &= is_eng_idle;
|
|
|
|
if (mask)
|
|
- *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_MME_0 + i);
|
|
+ *mask |= ((u64) !is_eng_idle) <<
|
|
+ (GAUDI_ENGINE_ID_MME_0 + i);
|
|
if (s) {
|
|
if (!is_slave)
|
|
seq_printf(s, fmt, i,
|
|
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
|
|
index c179085ced7b8..a8041a39fae31 100644
|
|
--- a/drivers/misc/habanalabs/goya/goya.c
|
|
+++ b/drivers/misc/habanalabs/goya/goya.c
|
|
@@ -5098,7 +5098,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
|
|
is_idle &= is_eng_idle;
|
|
|
|
if (mask)
|
|
- *mask |= !is_eng_idle << (GOYA_ENGINE_ID_DMA_0 + i);
|
|
+ *mask |= ((u64) !is_eng_idle) <<
|
|
+ (GOYA_ENGINE_ID_DMA_0 + i);
|
|
if (s)
|
|
seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
|
|
qm_glbl_sts0, dma_core_sts0);
|
|
@@ -5121,7 +5122,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
|
|
is_idle &= is_eng_idle;
|
|
|
|
if (mask)
|
|
- *mask |= !is_eng_idle << (GOYA_ENGINE_ID_TPC_0 + i);
|
|
+ *mask |= ((u64) !is_eng_idle) <<
|
|
+ (GOYA_ENGINE_ID_TPC_0 + i);
|
|
if (s)
|
|
seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
|
|
qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
|
|
@@ -5141,7 +5143,7 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
|
|
is_idle &= is_eng_idle;
|
|
|
|
if (mask)
|
|
- *mask |= !is_eng_idle << GOYA_ENGINE_ID_MME_0;
|
|
+ *mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0;
|
|
if (s) {
|
|
seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
|
|
cmdq_glbl_sts0, mme_arch_sts);
|
|
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
|
|
index 406cd5abfa726..56c784699eb8e 100644
|
|
--- a/drivers/misc/mic/scif/scif_rma.c
|
|
+++ b/drivers/misc/mic/scif/scif_rma.c
|
|
@@ -1384,6 +1384,8 @@ retry:
|
|
(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
|
|
pinned_pages->pages);
|
|
if (nr_pages != pinned_pages->nr_pages) {
|
|
+ if (pinned_pages->nr_pages < 0)
|
|
+ pinned_pages->nr_pages = 0;
|
|
if (try_upgrade) {
|
|
if (ulimit)
|
|
__scif_dec_pinned_vm_lock(mm, nr_pages);
|
|
@@ -1400,7 +1402,6 @@ retry:
|
|
|
|
if (pinned_pages->nr_pages < nr_pages) {
|
|
err = -EFAULT;
|
|
- pinned_pages->nr_pages = nr_pages;
|
|
goto dec_pinned;
|
|
}
|
|
|
|
@@ -1413,7 +1414,6 @@ dec_pinned:
|
|
__scif_dec_pinned_vm_lock(mm, nr_pages);
|
|
/* Something went wrong! Rollback */
|
|
error_unmap:
|
|
- pinned_pages->nr_pages = nr_pages;
|
|
scif_destroy_pinned_pages(pinned_pages);
|
|
*pages = NULL;
|
|
dev_dbg(scif_info.mdev.this_device,
|
|
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
|
|
index 85942f6717c57..8aadc6055df17 100644
|
|
--- a/drivers/misc/mic/vop/vop_main.c
|
|
+++ b/drivers/misc/mic/vop/vop_main.c
|
|
@@ -320,7 +320,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
|
|
/* First assign the vring's allocated in host memory */
|
|
vqconfig = _vop_vq_config(vdev->desc) + index;
|
|
memcpy_fromio(&config, vqconfig, sizeof(config));
|
|
- _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
|
|
+ _vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4);
|
|
vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
|
|
va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
|
|
if (!va)
|
|
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
|
|
index 30eac172f0170..7014ffe88632e 100644
|
|
--- a/drivers/misc/mic/vop/vop_vringh.c
|
|
+++ b/drivers/misc/mic/vop/vop_vringh.c
|
|
@@ -296,7 +296,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
|
|
|
|
num = le16_to_cpu(vqconfig[i].num);
|
|
mutex_init(&vvr->vr_mutex);
|
|
- vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
|
|
+ vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) +
|
|
sizeof(struct _mic_vring_info));
|
|
vr->va = (void *)
|
|
__get_free_pages(GFP_KERNEL | __GFP_ZERO,
|
|
@@ -308,7 +308,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
|
|
goto err;
|
|
}
|
|
vr->len = vr_size;
|
|
- vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
|
|
+ vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4);
|
|
vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
|
|
vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
|
|
DMA_BIDIRECTIONAL);
|
|
@@ -602,6 +602,7 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
|
|
size_t partlen;
|
|
bool dma = VOP_USE_DMA && vi->dma_ch;
|
|
int err = 0;
|
|
+ size_t offset = 0;
|
|
|
|
if (dma) {
|
|
dma_alignment = 1 << vi->dma_ch->device->copy_align;
|
|
@@ -655,13 +656,20 @@ memcpy:
|
|
* We are copying to IO below and should ideally use something
|
|
* like copy_from_user_toio(..) if it existed.
|
|
*/
|
|
- if (copy_from_user((void __force *)dbuf, ubuf, len)) {
|
|
- err = -EFAULT;
|
|
- dev_err(vop_dev(vdev), "%s %d err %d\n",
|
|
- __func__, __LINE__, err);
|
|
- goto err;
|
|
+ while (len) {
|
|
+ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
|
|
+
|
|
+ if (copy_from_user(vvr->buf, ubuf + offset, partlen)) {
|
|
+ err = -EFAULT;
|
|
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
|
|
+ __func__, __LINE__, err);
|
|
+ goto err;
|
|
+ }
|
|
+ memcpy_toio(dbuf + offset, vvr->buf, partlen);
|
|
+ offset += partlen;
|
|
+ vdev->out_bytes += partlen;
|
|
+ len -= partlen;
|
|
}
|
|
- vdev->out_bytes += len;
|
|
err = 0;
|
|
err:
|
|
vpdev->hw_ops->unmap(vpdev, dbuf);
|
|
diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig
|
|
index 2d2266c1439ef..51b51f3774701 100644
|
|
--- a/drivers/misc/ocxl/Kconfig
|
|
+++ b/drivers/misc/ocxl/Kconfig
|
|
@@ -9,9 +9,8 @@ config OCXL_BASE
|
|
|
|
config OCXL
|
|
tristate "OpenCAPI coherent accelerator support"
|
|
- depends on PPC_POWERNV && PCI && EEH
|
|
+ depends on PPC_POWERNV && PCI && EEH && HOTPLUG_PCI_POWERNV
|
|
select OCXL_BASE
|
|
- select HOTPLUG_PCI_POWERNV
|
|
default m
|
|
help
|
|
Select this option to enable the ocxl driver for Open
|
|
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
|
|
index 8531ae7811956..c49065887e8f5 100644
|
|
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
|
|
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
|
|
@@ -657,8 +657,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
|
|
if (retval < (int)produce_q->kernel_if->num_pages) {
|
|
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
|
|
retval);
|
|
- qp_release_pages(produce_q->kernel_if->u.h.header_page,
|
|
- retval, false);
|
|
+ if (retval > 0)
|
|
+ qp_release_pages(produce_q->kernel_if->u.h.header_page,
|
|
+ retval, false);
|
|
err = VMCI_ERROR_NO_MEM;
|
|
goto out;
|
|
}
|
|
@@ -670,8 +671,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
|
|
if (retval < (int)consume_q->kernel_if->num_pages) {
|
|
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
|
|
retval);
|
|
- qp_release_pages(consume_q->kernel_if->u.h.header_page,
|
|
- retval, false);
|
|
+ if (retval > 0)
|
|
+ qp_release_pages(consume_q->kernel_if->u.h.header_page,
|
|
+ retval, false);
|
|
qp_release_pages(produce_q->kernel_if->u.h.header_page,
|
|
produce_q->kernel_if->num_pages, false);
|
|
err = VMCI_ERROR_NO_MEM;
|
|
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
|
|
index e0655278c5c32..3efaa9534a777 100644
|
|
--- a/drivers/mmc/core/sdio_cis.c
|
|
+++ b/drivers/mmc/core/sdio_cis.c
|
|
@@ -26,6 +26,9 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
|
|
unsigned i, nr_strings;
|
|
char **buffer, *string;
|
|
|
|
+ if (size < 2)
|
|
+ return 0;
|
|
+
|
|
/* Find all null-terminated (including zero length) strings in
|
|
the TPLLV1_INFO field. Trailing garbage is ignored. */
|
|
buf += 2;
|
|
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
|
|
index f350a0809f880..a808fa28cd9a1 100644
|
|
--- a/drivers/mtd/hyperbus/hbmc-am654.c
|
|
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
|
|
@@ -70,7 +70,8 @@ static int am654_hbmc_probe(struct platform_device *pdev)
|
|
|
|
platform_set_drvdata(pdev, priv);
|
|
|
|
- ret = of_address_to_resource(np, 0, &res);
|
|
+ priv->hbdev.np = of_get_next_child(np, NULL);
|
|
+ ret = of_address_to_resource(priv->hbdev.np, 0, &res);
|
|
if (ret)
|
|
return ret;
|
|
|
|
@@ -103,7 +104,6 @@ static int am654_hbmc_probe(struct platform_device *pdev)
|
|
priv->ctlr.dev = dev;
|
|
priv->ctlr.ops = &am654_hbmc_ops;
|
|
priv->hbdev.ctlr = &priv->ctlr;
|
|
- priv->hbdev.np = of_get_next_child(dev->of_node, NULL);
|
|
ret = hyperbus_register_device(&priv->hbdev);
|
|
if (ret) {
|
|
dev_err(dev, "failed to register controller\n");
|
|
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
|
|
index 0f1547f09d08b..72f5c7b300790 100644
|
|
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
|
|
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
|
|
@@ -393,6 +393,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
|
|
return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
|
|
}
|
|
|
|
+static const struct mtd_info lpddr2_nvm_mtd_info = {
|
|
+ .type = MTD_RAM,
|
|
+ .writesize = 1,
|
|
+ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
|
|
+ ._read = lpddr2_nvm_read,
|
|
+ ._write = lpddr2_nvm_write,
|
|
+ ._erase = lpddr2_nvm_erase,
|
|
+ ._unlock = lpddr2_nvm_unlock,
|
|
+ ._lock = lpddr2_nvm_lock,
|
|
+};
|
|
+
|
|
/*
|
|
* lpddr2_nvm driver probe method
|
|
*/
|
|
@@ -433,6 +444,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
|
|
.pfow_base = OW_BASE_ADDRESS,
|
|
.fldrv_priv = pcm_data,
|
|
};
|
|
+
|
|
if (IS_ERR(map->virt))
|
|
return PTR_ERR(map->virt);
|
|
|
|
@@ -444,22 +456,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
|
|
return PTR_ERR(pcm_data->ctl_regs);
|
|
|
|
/* Populate mtd_info data structure */
|
|
- *mtd = (struct mtd_info) {
|
|
- .dev = { .parent = &pdev->dev },
|
|
- .name = pdev->dev.init_name,
|
|
- .type = MTD_RAM,
|
|
- .priv = map,
|
|
- .size = resource_size(add_range),
|
|
- .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
|
|
- .writesize = 1,
|
|
- .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
|
|
- .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
|
|
- ._read = lpddr2_nvm_read,
|
|
- ._write = lpddr2_nvm_write,
|
|
- ._erase = lpddr2_nvm_erase,
|
|
- ._unlock = lpddr2_nvm_unlock,
|
|
- ._lock = lpddr2_nvm_lock,
|
|
- };
|
|
+ *mtd = lpddr2_nvm_mtd_info;
|
|
+ mtd->dev.parent = &pdev->dev;
|
|
+ mtd->name = pdev->dev.init_name;
|
|
+ mtd->priv = map;
|
|
+ mtd->size = resource_size(add_range);
|
|
+ mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width;
|
|
+ mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width;
|
|
|
|
/* Verify the presence of the device looking for PFOW string */
|
|
if (!lpddr2_nvm_pfow_present(map)) {
|
|
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
|
|
index 4ced68be7ed7e..774970bfcf859 100644
|
|
--- a/drivers/mtd/mtdoops.c
|
|
+++ b/drivers/mtd/mtdoops.c
|
|
@@ -279,12 +279,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
|
|
kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
|
|
record_size - MTDOOPS_HEADER_SIZE, NULL);
|
|
|
|
- /* Panics must be written immediately */
|
|
- if (reason != KMSG_DUMP_OOPS)
|
|
+ if (reason != KMSG_DUMP_OOPS) {
|
|
+ /* Panics must be written immediately */
|
|
mtdoops_write(cxt, 1);
|
|
-
|
|
- /* For other cases, schedule work to write it "nicely" */
|
|
- schedule_work(&cxt->work_write);
|
|
+ } else {
|
|
+ /* For other cases, schedule work to write it "nicely" */
|
|
+ schedule_work(&cxt->work_write);
|
|
+ }
|
|
}
|
|
|
|
static void mtdoops_notify_add(struct mtd_info *mtd)
|
|
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
|
|
index 3711e7a0436cd..b3390028c6bfb 100644
|
|
--- a/drivers/mtd/nand/raw/ams-delta.c
|
|
+++ b/drivers/mtd/nand/raw/ams-delta.c
|
|
@@ -400,12 +400,14 @@ static int gpio_nand_remove(struct platform_device *pdev)
|
|
return 0;
|
|
}
|
|
|
|
+#ifdef CONFIG_OF
|
|
static const struct of_device_id gpio_nand_of_id_table[] = {
|
|
{
|
|
/* sentinel */
|
|
},
|
|
};
|
|
MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
|
|
+#endif
|
|
|
|
static const struct platform_device_id gpio_nand_plat_id_table[] = {
|
|
{
|
|
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
|
|
index 65c9d17b25a3c..dce6d7a10a364 100644
|
|
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
|
|
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
|
|
@@ -1791,7 +1791,7 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
|
|
return ret;
|
|
}
|
|
|
|
- if (cs > FMC2_MAX_CE) {
|
|
+ if (cs >= FMC2_MAX_CE) {
|
|
dev_err(nfc->dev, "invalid reg value: %d\n", cs);
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
|
|
index 7248c59011836..fcca45e2abe20 100644
|
|
--- a/drivers/mtd/nand/raw/vf610_nfc.c
|
|
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
|
|
@@ -852,8 +852,10 @@ static int vf610_nfc_probe(struct platform_device *pdev)
|
|
}
|
|
|
|
of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
|
|
- if (!of_id)
|
|
- return -ENODEV;
|
|
+ if (!of_id) {
|
|
+ err = -ENODEV;
|
|
+ goto err_disable_clk;
|
|
+ }
|
|
|
|
nfc->variant = (enum vf610_nfc_variant)of_id->data;
|
|
|
|
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
|
|
index d219c970042a2..0b7667e60780f 100644
|
|
--- a/drivers/mtd/nand/spi/gigadevice.c
|
|
+++ b/drivers/mtd/nand/spi/gigadevice.c
|
|
@@ -21,7 +21,7 @@
|
|
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
|
|
|
|
static SPINAND_OP_VARIANTS(read_cache_variants,
|
|
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
|
|
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
|
|
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
|
|
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
|
|
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
|
|
@@ -29,7 +29,7 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
|
|
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
|
|
|
|
static SPINAND_OP_VARIANTS(read_cache_variants_f,
|
|
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
|
|
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
|
|
SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
|
|
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
|
|
SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
|
|
@@ -202,7 +202,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
|
|
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
|
&write_cache_variants,
|
|
&update_cache_variants),
|
|
- 0,
|
|
+ SPINAND_HAS_QE_BIT,
|
|
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
|
|
gd5fxgq4xa_ecc_get_status)),
|
|
SPINAND_INFO("GD5F2GQ4xA",
|
|
@@ -212,7 +212,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
|
|
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
|
&write_cache_variants,
|
|
&update_cache_variants),
|
|
- 0,
|
|
+ SPINAND_HAS_QE_BIT,
|
|
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
|
|
gd5fxgq4xa_ecc_get_status)),
|
|
SPINAND_INFO("GD5F4GQ4xA",
|
|
@@ -222,7 +222,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
|
|
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
|
&write_cache_variants,
|
|
&update_cache_variants),
|
|
- 0,
|
|
+ SPINAND_HAS_QE_BIT,
|
|
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
|
|
gd5fxgq4xa_ecc_get_status)),
|
|
SPINAND_INFO("GD5F1GQ4UExxG",
|
|
@@ -232,7 +232,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
|
|
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
|
|
&write_cache_variants,
|
|
&update_cache_variants),
|
|
- 0,
|
|
+ SPINAND_HAS_QE_BIT,
|
|
SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
|
|
gd5fxgq4uexxg_ecc_get_status)),
|
|
SPINAND_INFO("GD5F1GQ4UFxxG",
|
|
@@ -242,7 +242,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
|
|
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
|
|
&write_cache_variants,
|
|
&update_cache_variants),
|
|
- 0,
|
|
+ SPINAND_HAS_QE_BIT,
|
|
SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
|
|
gd5fxgq4ufxxg_ecc_get_status)),
|
|
};
|
|
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
|
|
index 94d10ec954a05..2ac7a667bde35 100644
|
|
--- a/drivers/net/can/flexcan.c
|
|
+++ b/drivers/net/can/flexcan.c
|
|
@@ -1260,18 +1260,23 @@ static int flexcan_chip_start(struct net_device *dev)
|
|
return err;
|
|
}
|
|
|
|
-/* flexcan_chip_stop
|
|
+/* __flexcan_chip_stop
|
|
*
|
|
- * this functions is entered with clocks enabled
|
|
+ * this function is entered with clocks enabled
|
|
*/
|
|
-static void flexcan_chip_stop(struct net_device *dev)
|
|
+static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
|
|
{
|
|
struct flexcan_priv *priv = netdev_priv(dev);
|
|
struct flexcan_regs __iomem *regs = priv->regs;
|
|
+ int err;
|
|
|
|
/* freeze + disable module */
|
|
- flexcan_chip_freeze(priv);
|
|
- flexcan_chip_disable(priv);
|
|
+ err = flexcan_chip_freeze(priv);
|
|
+ if (err && !disable_on_error)
|
|
+ return err;
|
|
+ err = flexcan_chip_disable(priv);
|
|
+ if (err && !disable_on_error)
|
|
+ goto out_chip_unfreeze;
|
|
|
|
/* Disable all interrupts */
|
|
priv->write(0, &regs->imask2);
|
|
@@ -1281,6 +1286,23 @@ static void flexcan_chip_stop(struct net_device *dev)
|
|
|
|
flexcan_transceiver_disable(priv);
|
|
priv->can.state = CAN_STATE_STOPPED;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+ out_chip_unfreeze:
|
|
+ flexcan_chip_unfreeze(priv);
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev)
|
|
+{
|
|
+ return __flexcan_chip_stop(dev, true);
|
|
+}
|
|
+
|
|
+static inline int flexcan_chip_stop(struct net_device *dev)
|
|
+{
|
|
+ return __flexcan_chip_stop(dev, false);
|
|
}
|
|
|
|
static int flexcan_open(struct net_device *dev)
|
|
@@ -1362,7 +1384,7 @@ static int flexcan_close(struct net_device *dev)
|
|
|
|
netif_stop_queue(dev);
|
|
can_rx_offload_disable(&priv->offload);
|
|
- flexcan_chip_stop(dev);
|
|
+ flexcan_chip_stop_disable_on_error(dev);
|
|
|
|
can_rx_offload_del(&priv->offload);
|
|
free_irq(dev->irq, dev);
|
|
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
|
|
index 38ea5e600fb84..e6d0cb9ee02f0 100644
|
|
--- a/drivers/net/can/m_can/m_can_platform.c
|
|
+++ b/drivers/net/can/m_can/m_can_platform.c
|
|
@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev)
|
|
struct net_device *ndev = dev_get_drvdata(dev);
|
|
struct m_can_classdev *mcan_class = netdev_priv(ndev);
|
|
|
|
- m_can_class_suspend(dev);
|
|
-
|
|
clk_disable_unprepare(mcan_class->cclk);
|
|
clk_disable_unprepare(mcan_class->hclk);
|
|
|
|
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
|
|
index 7b6c0dce75360..ee433abc2d4b5 100644
|
|
--- a/drivers/net/dsa/microchip/ksz_common.c
|
|
+++ b/drivers/net/dsa/microchip/ksz_common.c
|
|
@@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_device *dev)
|
|
|
|
INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
|
|
|
|
- /* Read MIB counters every 30 seconds to avoid overflow. */
|
|
- dev->mib_read_interval = msecs_to_jiffies(30000);
|
|
-
|
|
for (i = 0; i < dev->mib_port_cnt; i++)
|
|
dev->dev_ops->port_init_cnt(dev, i);
|
|
-
|
|
- /* Start the timer 2 seconds later. */
|
|
- schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
|
|
}
|
|
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
|
|
|
|
@@ -144,7 +138,9 @@ void ksz_adjust_link(struct dsa_switch *ds, int port,
|
|
/* Read all MIB counters when the link is going down. */
|
|
if (!phydev->link) {
|
|
p->read = true;
|
|
- schedule_delayed_work(&dev->mib_read, 0);
|
|
+ /* timer started */
|
|
+ if (dev->mib_read_interval)
|
|
+ schedule_delayed_work(&dev->mib_read, 0);
|
|
}
|
|
mutex_lock(&dev->dev_mutex);
|
|
if (!phydev->link)
|
|
@@ -460,6 +456,12 @@ int ksz_switch_register(struct ksz_device *dev,
|
|
return ret;
|
|
}
|
|
|
|
+ /* Read MIB counters every 30 seconds to avoid overflow. */
|
|
+ dev->mib_read_interval = msecs_to_jiffies(30000);
|
|
+
|
|
+ /* Start the MIB timer. */
|
|
+ schedule_delayed_work(&dev->mib_read, 0);
|
|
+
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL(ksz_switch_register);
|
|
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
|
|
index 9a63b51e1d82f..6f2dab7e33d65 100644
|
|
--- a/drivers/net/dsa/realtek-smi-core.h
|
|
+++ b/drivers/net/dsa/realtek-smi-core.h
|
|
@@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
|
|
const char *name;
|
|
};
|
|
|
|
+/**
|
|
+ * struct rtl8366_vlan_mc - Virtual LAN member configuration
|
|
+ */
|
|
struct rtl8366_vlan_mc {
|
|
u16 vid;
|
|
u16 untag;
|
|
@@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
|
|
int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
|
|
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
|
|
u32 untag, u32 fid);
|
|
-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
|
|
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
|
|
unsigned int vid);
|
|
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
|
|
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
|
|
index 99cdb2f18fa2f..49c626a336803 100644
|
|
--- a/drivers/net/dsa/rtl8366.c
|
|
+++ b/drivers/net/dsa/rtl8366.c
|
|
@@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
|
|
}
|
|
EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
|
|
|
|
+/**
|
|
+ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
|
|
+ * @smi: the Realtek SMI device instance
|
|
+ * @vid: the VLAN ID to look up or allocate
|
|
+ * @vlanmc: the pointer will be assigned to a pointer to a valid member config
|
|
+ * if successful
|
|
+ * @return: index of a new member config or negative error number
|
|
+ */
|
|
+static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
|
|
+ struct rtl8366_vlan_mc *vlanmc)
|
|
+{
|
|
+ struct rtl8366_vlan_4k vlan4k;
|
|
+ int ret;
|
|
+ int i;
|
|
+
|
|
+ /* Try to find an existing member config entry for this VID */
|
|
+ for (i = 0; i < smi->num_vlan_mc; i++) {
|
|
+ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
|
|
+ if (ret) {
|
|
+ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
|
|
+ i, vid);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (vid == vlanmc->vid)
|
|
+ return i;
|
|
+ }
|
|
+
|
|
+ /* We have no MC entry for this VID, try to find an empty one */
|
|
+ for (i = 0; i < smi->num_vlan_mc; i++) {
|
|
+ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
|
|
+ if (ret) {
|
|
+ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
|
|
+ i, vid);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (vlanmc->vid == 0 && vlanmc->member == 0) {
|
|
+ /* Update the entry from the 4K table */
|
|
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
|
|
+ if (ret) {
|
|
+ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
|
|
+ i, vid);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ vlanmc->vid = vid;
|
|
+ vlanmc->member = vlan4k.member;
|
|
+ vlanmc->untag = vlan4k.untag;
|
|
+ vlanmc->fid = vlan4k.fid;
|
|
+ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
|
|
+ if (ret) {
|
|
+ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
|
|
+ i, vid);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
|
|
+ i, vid);
|
|
+ return i;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* MC table is full, try to find an unused entry and replace it */
|
|
+ for (i = 0; i < smi->num_vlan_mc; i++) {
|
|
+ int used;
|
|
+
|
|
+ ret = rtl8366_mc_is_used(smi, i, &used);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (!used) {
|
|
+ /* Update the entry from the 4K table */
|
|
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ vlanmc->vid = vid;
|
|
+ vlanmc->member = vlan4k.member;
|
|
+ vlanmc->untag = vlan4k.untag;
|
|
+ vlanmc->fid = vlan4k.fid;
|
|
+ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
|
|
+ if (ret) {
|
|
+ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
|
|
+ i, vid);
|
|
+ return ret;
|
|
+ }
|
|
+ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
|
|
+ i, vid);
|
|
+ return i;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ dev_err(smi->dev, "all VLAN member configurations are in use\n");
|
|
+ return -ENOSPC;
|
|
+}
|
|
+
|
|
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
|
|
u32 untag, u32 fid)
|
|
{
|
|
+ struct rtl8366_vlan_mc vlanmc;
|
|
struct rtl8366_vlan_4k vlan4k;
|
|
+ int mc;
|
|
int ret;
|
|
- int i;
|
|
+
|
|
+ if (!smi->ops->is_vlan_valid(smi, vid))
|
|
+ return -EINVAL;
|
|
|
|
dev_dbg(smi->dev,
|
|
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
|
|
@@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
|
|
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
|
|
vid, vlan4k.member, vlan4k.untag);
|
|
|
|
- /* Try to find an existing MC entry for this VID */
|
|
- for (i = 0; i < smi->num_vlan_mc; i++) {
|
|
- struct rtl8366_vlan_mc vlanmc;
|
|
-
|
|
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- if (vid == vlanmc.vid) {
|
|
- /* update the MC entry */
|
|
- vlanmc.member |= member;
|
|
- vlanmc.untag |= untag;
|
|
- vlanmc.fid = fid;
|
|
-
|
|
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
|
|
+ /* Find or allocate a member config for this VID */
|
|
+ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ mc = ret;
|
|
|
|
- dev_dbg(smi->dev,
|
|
- "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
|
|
- vid, vlanmc.member, vlanmc.untag);
|
|
+ /* Update the MC entry */
|
|
+ vlanmc.member |= member;
|
|
+ vlanmc.untag |= untag;
|
|
+ vlanmc.fid = fid;
|
|
|
|
- break;
|
|
- }
|
|
- }
|
|
+ /* Commit updates to the MC entry */
|
|
+ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
|
|
+ if (ret)
|
|
+ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
|
|
+ mc, vid);
|
|
+ else
|
|
+ dev_dbg(smi->dev,
|
|
+ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
|
|
+ vid, vlanmc.member, vlanmc.untag);
|
|
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
|
|
|
|
-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
|
|
-{
|
|
- struct rtl8366_vlan_mc vlanmc;
|
|
- int ret;
|
|
- int index;
|
|
-
|
|
- ret = smi->ops->get_mc_index(smi, port, &index);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- *val = vlanmc.vid;
|
|
- return 0;
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
|
|
-
|
|
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
|
|
unsigned int vid)
|
|
{
|
|
struct rtl8366_vlan_mc vlanmc;
|
|
- struct rtl8366_vlan_4k vlan4k;
|
|
+ int mc;
|
|
int ret;
|
|
- int i;
|
|
-
|
|
- /* Try to find an existing MC entry for this VID */
|
|
- for (i = 0; i < smi->num_vlan_mc; i++) {
|
|
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- if (vid == vlanmc.vid) {
|
|
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- ret = smi->ops->set_mc_index(smi, port, i);
|
|
- return ret;
|
|
- }
|
|
- }
|
|
-
|
|
- /* We have no MC entry for this VID, try to find an empty one */
|
|
- for (i = 0; i < smi->num_vlan_mc; i++) {
|
|
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- if (vlanmc.vid == 0 && vlanmc.member == 0) {
|
|
- /* Update the entry from the 4K table */
|
|
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
|
|
- if (ret)
|
|
- return ret;
|
|
|
|
- vlanmc.vid = vid;
|
|
- vlanmc.member = vlan4k.member;
|
|
- vlanmc.untag = vlan4k.untag;
|
|
- vlanmc.fid = vlan4k.fid;
|
|
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- ret = smi->ops->set_mc_index(smi, port, i);
|
|
- return ret;
|
|
- }
|
|
- }
|
|
-
|
|
- /* MC table is full, try to find an unused entry and replace it */
|
|
- for (i = 0; i < smi->num_vlan_mc; i++) {
|
|
- int used;
|
|
-
|
|
- ret = rtl8366_mc_is_used(smi, i, &used);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- if (!used) {
|
|
- /* Update the entry from the 4K table */
|
|
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
|
|
- if (ret)
|
|
- return ret;
|
|
+ if (!smi->ops->is_vlan_valid(smi, vid))
|
|
+ return -EINVAL;
|
|
|
|
- vlanmc.vid = vid;
|
|
- vlanmc.member = vlan4k.member;
|
|
- vlanmc.untag = vlan4k.untag;
|
|
- vlanmc.fid = vlan4k.fid;
|
|
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
|
|
- if (ret)
|
|
- return ret;
|
|
+ /* Find or allocate a member config for this VID */
|
|
+ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ mc = ret;
|
|
|
|
- ret = smi->ops->set_mc_index(smi, port, i);
|
|
- return ret;
|
|
- }
|
|
+ ret = smi->ops->set_mc_index(smi, port, mc);
|
|
+ if (ret) {
|
|
+ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
|
|
+ mc, port);
|
|
+ return ret;
|
|
}
|
|
|
|
- dev_err(smi->dev,
|
|
- "all VLAN member configurations are in use\n");
|
|
+ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
|
|
+ port, vid, mc);
|
|
|
|
- return -ENOSPC;
|
|
+ return 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
|
|
|
|
@@ -389,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
|
|
if (!smi->ops->is_vlan_valid(smi, vid))
|
|
return;
|
|
|
|
- dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
|
|
+ dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
|
|
+ vlan->vid_begin,
|
|
port,
|
|
untagged ? "untagged" : "tagged",
|
|
pvid ? " PVID" : "no PVID");
|
|
@@ -398,34 +425,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
|
|
dev_err(smi->dev, "port is DSA or CPU port\n");
|
|
|
|
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
|
|
- int pvid_val = 0;
|
|
-
|
|
- dev_info(smi->dev, "add VLAN %04x\n", vid);
|
|
member |= BIT(port);
|
|
|
|
if (untagged)
|
|
untag |= BIT(port);
|
|
|
|
- /* To ensure that we have a valid MC entry for this VLAN,
|
|
- * initialize the port VLAN ID here.
|
|
- */
|
|
- ret = rtl8366_get_pvid(smi, port, &pvid_val);
|
|
- if (ret < 0) {
|
|
- dev_err(smi->dev, "could not lookup PVID for port %d\n",
|
|
- port);
|
|
- return;
|
|
- }
|
|
- if (pvid_val == 0) {
|
|
- ret = rtl8366_set_pvid(smi, port, vid);
|
|
- if (ret < 0)
|
|
- return;
|
|
- }
|
|
-
|
|
ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
|
|
if (ret)
|
|
dev_err(smi->dev,
|
|
"failed to set up VLAN %04x",
|
|
vid);
|
|
+
|
|
+ if (!pvid)
|
|
+ continue;
|
|
+
|
|
+ ret = rtl8366_set_pvid(smi, port, vid);
|
|
+ if (ret)
|
|
+ dev_err(smi->dev,
|
|
+ "failed to set PVID on port %d to VLAN %04x",
|
|
+ port, vid);
|
|
+
|
|
+ if (!ret)
|
|
+ dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
|
|
+ vid, port);
|
|
}
|
|
}
|
|
EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
|
|
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
|
|
index fd1977590cb4b..c83b332656a4b 100644
|
|
--- a/drivers/net/dsa/rtl8366rb.c
|
|
+++ b/drivers/net/dsa/rtl8366rb.c
|
|
@@ -1270,7 +1270,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
|
|
if (smi->vlan4k_enabled)
|
|
max = RTL8366RB_NUM_VIDS - 1;
|
|
|
|
- if (vlan == 0 || vlan >= max)
|
|
+ if (vlan == 0 || vlan > max)
|
|
return false;
|
|
|
|
return true;
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
|
|
index 59b65d4db086e..dff564e1cfc7f 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
|
|
@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
|
|
PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
|
|
};
|
|
|
|
+static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
|
|
+ /* Default supported NAT modes */
|
|
+ {
|
|
+ .chip = CHELSIO_T5,
|
|
+ .flags = CXGB4_ACTION_NATMODE_NONE,
|
|
+ .natmode = NAT_MODE_NONE,
|
|
+ },
|
|
+ {
|
|
+ .chip = CHELSIO_T5,
|
|
+ .flags = CXGB4_ACTION_NATMODE_DIP,
|
|
+ .natmode = NAT_MODE_DIP,
|
|
+ },
|
|
+ {
|
|
+ .chip = CHELSIO_T5,
|
|
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
|
|
+ .natmode = NAT_MODE_DIP_DP,
|
|
+ },
|
|
+ {
|
|
+ .chip = CHELSIO_T5,
|
|
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
|
|
+ CXGB4_ACTION_NATMODE_SIP,
|
|
+ .natmode = NAT_MODE_DIP_DP_SIP,
|
|
+ },
|
|
+ {
|
|
+ .chip = CHELSIO_T5,
|
|
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
|
|
+ CXGB4_ACTION_NATMODE_SPORT,
|
|
+ .natmode = NAT_MODE_DIP_DP_SP,
|
|
+ },
|
|
+ {
|
|
+ .chip = CHELSIO_T5,
|
|
+ .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
|
|
+ .natmode = NAT_MODE_SIP_SP,
|
|
+ },
|
|
+ {
|
|
+ .chip = CHELSIO_T5,
|
|
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
|
|
+ CXGB4_ACTION_NATMODE_SPORT,
|
|
+ .natmode = NAT_MODE_DIP_SIP_SP,
|
|
+ },
|
|
+ {
|
|
+ .chip = CHELSIO_T5,
|
|
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
|
|
+ CXGB4_ACTION_NATMODE_DPORT |
|
|
+ CXGB4_ACTION_NATMODE_SPORT,
|
|
+ .natmode = NAT_MODE_ALL,
|
|
+ },
|
|
+ /* T6+ can ignore L4 ports when they're disabled. */
|
|
+ {
|
|
+ .chip = CHELSIO_T6,
|
|
+ .flags = CXGB4_ACTION_NATMODE_SIP,
|
|
+ .natmode = NAT_MODE_SIP_SP,
|
|
+ },
|
|
+ {
|
|
+ .chip = CHELSIO_T6,
|
|
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
|
|
+ .natmode = NAT_MODE_DIP_DP_SP,
|
|
+ },
|
|
+ {
|
|
+ .chip = CHELSIO_T6,
|
|
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
|
|
+ .natmode = NAT_MODE_ALL,
|
|
+ },
|
|
+};
|
|
+
|
|
+static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
|
|
+ u8 natmode_flags)
|
|
+{
|
|
+ u8 i = 0;
|
|
+
|
|
+ /* Translate the enabled NAT 4-tuple fields to one of the
|
|
+ * hardware supported NAT mode configurations. This ensures
|
|
+ * that we pick a valid combination, where the disabled fields
|
|
+ * do not get overwritten to 0.
|
|
+ */
|
|
+ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
|
|
+ if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
|
|
+ fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
static struct ch_tc_flower_entry *allocate_flower_entry(void)
|
|
{
|
|
struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
|
|
@@ -287,7 +370,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
|
|
}
|
|
|
|
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
|
|
- u32 mask, u32 offset, u8 htype)
|
|
+ u32 mask, u32 offset, u8 htype,
|
|
+ u8 *natmode_flags)
|
|
{
|
|
switch (htype) {
|
|
case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
|
|
@@ -312,60 +396,94 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
|
|
switch (offset) {
|
|
case PEDIT_IP4_SRC:
|
|
offload_pedit(fs, val, mask, IP4_SRC);
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
|
|
break;
|
|
case PEDIT_IP4_DST:
|
|
offload_pedit(fs, val, mask, IP4_DST);
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
|
|
}
|
|
- fs->nat_mode = NAT_MODE_ALL;
|
|
break;
|
|
case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
|
|
switch (offset) {
|
|
case PEDIT_IP6_SRC_31_0:
|
|
offload_pedit(fs, val, mask, IP6_SRC_31_0);
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
|
|
break;
|
|
case PEDIT_IP6_SRC_63_32:
|
|
offload_pedit(fs, val, mask, IP6_SRC_63_32);
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
|
|
break;
|
|
case PEDIT_IP6_SRC_95_64:
|
|
offload_pedit(fs, val, mask, IP6_SRC_95_64);
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
|
|
break;
|
|
case PEDIT_IP6_SRC_127_96:
|
|
offload_pedit(fs, val, mask, IP6_SRC_127_96);
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
|
|
break;
|
|
case PEDIT_IP6_DST_31_0:
|
|
offload_pedit(fs, val, mask, IP6_DST_31_0);
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
|
|
break;
|
|
case PEDIT_IP6_DST_63_32:
|
|
offload_pedit(fs, val, mask, IP6_DST_63_32);
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
|
|
break;
|
|
case PEDIT_IP6_DST_95_64:
|
|
offload_pedit(fs, val, mask, IP6_DST_95_64);
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
|
|
break;
|
|
case PEDIT_IP6_DST_127_96:
|
|
offload_pedit(fs, val, mask, IP6_DST_127_96);
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
|
|
}
|
|
- fs->nat_mode = NAT_MODE_ALL;
|
|
break;
|
|
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
|
|
switch (offset) {
|
|
case PEDIT_TCP_SPORT_DPORT:
|
|
- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
|
|
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
|
|
fs->nat_fport = val;
|
|
- else
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
|
|
+ } else {
|
|
fs->nat_lport = val >> 16;
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
|
|
+ }
|
|
}
|
|
- fs->nat_mode = NAT_MODE_ALL;
|
|
break;
|
|
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
|
|
switch (offset) {
|
|
case PEDIT_UDP_SPORT_DPORT:
|
|
- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
|
|
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
|
|
fs->nat_fport = val;
|
|
- else
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
|
|
+ } else {
|
|
fs->nat_lport = val >> 16;
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
|
|
+ }
|
|
}
|
|
- fs->nat_mode = NAT_MODE_ALL;
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
|
|
+ struct netlink_ext_ack *extack)
|
|
+{
|
|
+ u8 i = 0;
|
|
+
|
|
+ /* Extract the NAT mode to enable based on what 4-tuple fields
|
|
+ * are enabled to be overwritten. This ensures that the
|
|
+ * disabled fields don't get overwritten to 0.
|
|
+ */
|
|
+ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
|
|
+ const struct cxgb4_natmode_config *c;
|
|
+
|
|
+ c = &cxgb4_natmode_config_array[i];
|
|
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
|
|
+ natmode_flags == c->flags)
|
|
+ return 0;
|
|
}
|
|
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
|
|
+ return -EOPNOTSUPP;
|
|
}
|
|
|
|
void cxgb4_process_flow_actions(struct net_device *in,
|
|
@@ -373,6 +491,7 @@ void cxgb4_process_flow_actions(struct net_device *in,
|
|
struct ch_filter_specification *fs)
|
|
{
|
|
struct flow_action_entry *act;
|
|
+ u8 natmode_flags = 0;
|
|
int i;
|
|
|
|
flow_action_for_each(i, act, actions) {
|
|
@@ -423,13 +542,17 @@ void cxgb4_process_flow_actions(struct net_device *in,
|
|
val = act->mangle.val;
|
|
offset = act->mangle.offset;
|
|
|
|
- process_pedit_field(fs, val, mask, offset, htype);
|
|
+ process_pedit_field(fs, val, mask, offset, htype,
|
|
+ &natmode_flags);
|
|
}
|
|
break;
|
|
default:
|
|
break;
|
|
}
|
|
}
|
|
+ if (natmode_flags)
|
|
+ cxgb4_action_natmode_tweak(fs, natmode_flags);
|
|
+
|
|
}
|
|
|
|
static bool valid_l4_mask(u32 mask)
|
|
@@ -446,7 +569,8 @@ static bool valid_l4_mask(u32 mask)
|
|
}
|
|
|
|
static bool valid_pedit_action(struct net_device *dev,
|
|
- const struct flow_action_entry *act)
|
|
+ const struct flow_action_entry *act,
|
|
+ u8 *natmode_flags)
|
|
{
|
|
u32 mask, offset;
|
|
u8 htype;
|
|
@@ -471,7 +595,10 @@ static bool valid_pedit_action(struct net_device *dev,
|
|
case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
|
|
switch (offset) {
|
|
case PEDIT_IP4_SRC:
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
|
|
+ break;
|
|
case PEDIT_IP4_DST:
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
|
|
break;
|
|
default:
|
|
netdev_err(dev, "%s: Unsupported pedit field\n",
|
|
@@ -485,10 +612,13 @@ static bool valid_pedit_action(struct net_device *dev,
|
|
case PEDIT_IP6_SRC_63_32:
|
|
case PEDIT_IP6_SRC_95_64:
|
|
case PEDIT_IP6_SRC_127_96:
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
|
|
+ break;
|
|
case PEDIT_IP6_DST_31_0:
|
|
case PEDIT_IP6_DST_63_32:
|
|
case PEDIT_IP6_DST_95_64:
|
|
case PEDIT_IP6_DST_127_96:
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
|
|
break;
|
|
default:
|
|
netdev_err(dev, "%s: Unsupported pedit field\n",
|
|
@@ -504,6 +634,10 @@ static bool valid_pedit_action(struct net_device *dev,
|
|
__func__);
|
|
return false;
|
|
}
|
|
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
|
|
+ else
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
|
|
break;
|
|
default:
|
|
netdev_err(dev, "%s: Unsupported pedit field\n",
|
|
@@ -519,6 +653,10 @@ static bool valid_pedit_action(struct net_device *dev,
|
|
__func__);
|
|
return false;
|
|
}
|
|
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
|
|
+ else
|
|
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
|
|
break;
|
|
default:
|
|
netdev_err(dev, "%s: Unsupported pedit field\n",
|
|
@@ -537,10 +675,12 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
|
|
struct flow_action *actions,
|
|
struct netlink_ext_ack *extack)
|
|
{
|
|
+ struct adapter *adap = netdev2adap(dev);
|
|
struct flow_action_entry *act;
|
|
bool act_redir = false;
|
|
bool act_pedit = false;
|
|
bool act_vlan = false;
|
|
+ u8 natmode_flags = 0;
|
|
int i;
|
|
|
|
if (!flow_action_basic_hw_stats_check(actions, extack))
|
|
@@ -553,7 +693,6 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
|
|
/* Do nothing */
|
|
break;
|
|
case FLOW_ACTION_REDIRECT: {
|
|
- struct adapter *adap = netdev2adap(dev);
|
|
struct net_device *n_dev, *target_dev;
|
|
unsigned int i;
|
|
bool found = false;
|
|
@@ -603,7 +742,8 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
|
|
}
|
|
break;
|
|
case FLOW_ACTION_MANGLE: {
|
|
- bool pedit_valid = valid_pedit_action(dev, act);
|
|
+ bool pedit_valid = valid_pedit_action(dev, act,
|
|
+ &natmode_flags);
|
|
|
|
if (!pedit_valid)
|
|
return -EOPNOTSUPP;
|
|
@@ -622,6 +762,15 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (act_pedit) {
|
|
+ int ret;
|
|
+
|
|
+ ret = cxgb4_action_natmode_validate(adap, natmode_flags,
|
|
+ extack);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
|
|
index 0a30c96b81ffa..95142b1a88af6 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
|
|
@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
|
|
#define PEDIT_TCP_SPORT_DPORT 0x0
|
|
#define PEDIT_UDP_SPORT_DPORT 0x0
|
|
|
|
+enum cxgb4_action_natmode_flags {
|
|
+ CXGB4_ACTION_NATMODE_NONE = 0,
|
|
+ CXGB4_ACTION_NATMODE_DIP = (1 << 0),
|
|
+ CXGB4_ACTION_NATMODE_SIP = (1 << 1),
|
|
+ CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
|
|
+ CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
|
|
+};
|
|
+
|
|
+/* TC PEDIT action to NATMODE translation entry */
|
|
+struct cxgb4_natmode_config {
|
|
+ enum chip_type chip;
|
|
+ u8 flags;
|
|
+ u8 natmode;
|
|
+};
|
|
+
|
|
void cxgb4_process_flow_actions(struct net_device *in,
|
|
struct flow_action *actions,
|
|
struct ch_filter_specification *fs);
|
|
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
|
|
index 18f3aeb88f22a..c67a16a48d624 100644
|
|
--- a/drivers/net/ethernet/cisco/enic/enic.h
|
|
+++ b/drivers/net/ethernet/cisco/enic/enic.h
|
|
@@ -169,6 +169,7 @@ struct enic {
|
|
u16 num_vfs;
|
|
#endif
|
|
spinlock_t enic_api_lock;
|
|
+ bool enic_api_busy;
|
|
struct enic_port_profile *pp;
|
|
|
|
/* work queue cache line section */
|
|
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
|
|
index b161f24522b87..b028ea2dec2b9 100644
|
|
--- a/drivers/net/ethernet/cisco/enic/enic_api.c
|
|
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
|
|
@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
|
|
struct vnic_dev *vdev = enic->vdev;
|
|
|
|
spin_lock(&enic->enic_api_lock);
|
|
+ while (enic->enic_api_busy) {
|
|
+ spin_unlock(&enic->enic_api_lock);
|
|
+ cpu_relax();
|
|
+ spin_lock(&enic->enic_api_lock);
|
|
+ }
|
|
+
|
|
spin_lock_bh(&enic->devcmd_lock);
|
|
|
|
vnic_dev_cmd_proxy_by_index_start(vdev, vf);
|
|
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
|
|
index cd5fe4f6b54ce..21093f33d2d73 100644
|
|
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
|
|
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
|
|
@@ -2140,8 +2140,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
|
|
int done;
|
|
int err;
|
|
|
|
- BUG_ON(in_interrupt());
|
|
-
|
|
err = start(vdev, arg);
|
|
if (err)
|
|
return err;
|
|
@@ -2329,6 +2327,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
|
|
rss_hash_bits, rss_base_cpu, rss_enable);
|
|
}
|
|
|
|
+static void enic_set_api_busy(struct enic *enic, bool busy)
|
|
+{
|
|
+ spin_lock(&enic->enic_api_lock);
|
|
+ enic->enic_api_busy = busy;
|
|
+ spin_unlock(&enic->enic_api_lock);
|
|
+}
|
|
+
|
|
static void enic_reset(struct work_struct *work)
|
|
{
|
|
struct enic *enic = container_of(work, struct enic, reset);
|
|
@@ -2338,7 +2343,9 @@ static void enic_reset(struct work_struct *work)
|
|
|
|
rtnl_lock();
|
|
|
|
- spin_lock(&enic->enic_api_lock);
|
|
+ /* Stop any activity from infiniband */
|
|
+ enic_set_api_busy(enic, true);
|
|
+
|
|
enic_stop(enic->netdev);
|
|
enic_dev_soft_reset(enic);
|
|
enic_reset_addr_lists(enic);
|
|
@@ -2346,7 +2353,10 @@ static void enic_reset(struct work_struct *work)
|
|
enic_set_rss_nic_cfg(enic);
|
|
enic_dev_set_ig_vlan_rewrite_mode(enic);
|
|
enic_open(enic->netdev);
|
|
- spin_unlock(&enic->enic_api_lock);
|
|
+
|
|
+ /* Allow infiniband to fiddle with the device again */
|
|
+ enic_set_api_busy(enic, false);
|
|
+
|
|
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
|
|
|
|
rtnl_unlock();
|
|
@@ -2358,7 +2368,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
|
|
|
|
rtnl_lock();
|
|
|
|
- spin_lock(&enic->enic_api_lock);
|
|
+ /* Stop any activity from infiniband */
|
|
+ enic_set_api_busy(enic, true);
|
|
+
|
|
enic_dev_hang_notify(enic);
|
|
enic_stop(enic->netdev);
|
|
enic_dev_hang_reset(enic);
|
|
@@ -2367,7 +2379,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
|
|
enic_set_rss_nic_cfg(enic);
|
|
enic_dev_set_ig_vlan_rewrite_mode(enic);
|
|
enic_open(enic->netdev);
|
|
- spin_unlock(&enic->enic_api_lock);
|
|
+
|
|
+ /* Allow infiniband to fiddle with the device again */
|
|
+ enic_set_api_busy(enic, false);
|
|
+
|
|
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
|
|
|
|
rtnl_unlock();
|
|
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 87236206366fd..00024dd411471 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1817,6 +1817,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
+ /* Disable ast2600 problematic HW arbitration */
+ if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
+ }
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index e5876a3fda91d..63b3e02fab162 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -169,6 +169,14 @@
#define FTGMAC100_MACCR_FAST_MODE (1 << 19)
#define FTGMAC100_MACCR_SW_RST (1 << 31)

+/*
+ * test mode control register
+ */
+#define FTGMAC100_TM_RQ_TX_VALID_DIS (1 << 28)
+#define FTGMAC100_TM_RQ_RR_IDLE_PREV (1 << 27)
+#define FTGMAC100_TM_DEFAULT \
+ (FTGMAC100_TM_RQ_TX_VALID_DIS | FTGMAC100_TM_RQ_RR_IDLE_PREV)
+
/*
* PHY control register
*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 534fcc71a2a53..e1cd795556294 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1913,6 +1913,27 @@ out:
return ret;
}

+static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
+
+ if (phy_dev) {
+ phy_reset_after_clk_enable(phy_dev);
+ } else if (fep->phy_node) {
+ /*
+ * If the PHY still is not bound to the MAC, but there is
+ * OF PHY node and a matching PHY device instance already,
+ * use the OF PHY node to obtain the PHY device instance,
+ * and then use that PHY device instance when triggering
+ * the PHY reset.
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+ put_device(&phy_dev->mdio.dev);
+ }
+}
+
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1939,7 +1960,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
if (ret)
goto failed_clk_ref;

- phy_reset_after_clk_enable(ndev->phydev);
+ fec_enet_phy_reset_after_clk_enable(ndev);
} else {
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
@@ -2985,16 +3006,16 @@ fec_enet_open(struct net_device *ndev)
/* Init MAC prior to mii bus probe */
fec_restart(ndev);

- /* Probe and connect to PHY when open the interface */
- ret = fec_enet_mii_probe(ndev);
- if (ret)
- goto err_enet_mii_probe;
-
/* Call phy_reset_after_clk_enable() again if it failed during
* phy_reset_after_clk_enable() before because the PHY wasn't probed.
*/
if (reset_again)
- phy_reset_after_clk_enable(ndev->phydev);
+ fec_enet_phy_reset_after_clk_enable(ndev);
+
+ /* Probe and connect to PHY when open the interface */
+ ret = fec_enet_mii_probe(ndev);
+ if (ret)
+ goto err_enet_mii_probe;

if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c5c732601e35e..7ef3369953b6a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1349,6 +1349,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
int lrg_pkt = ibmveth_rxq_large_packet(adapter);
+ __sum16 iph_check = 0;

skb = ibmveth_rxq_get_buffer(adapter);

@@ -1385,16 +1386,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);

- if (csum_good) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- ibmveth_rx_csum_helper(skb, adapter);
+ /* PHYP without PLSO support places a -1 in the ip
+ * checksum for large send frames.
+ */
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ iph_check = iph->check;
}

- if (length > netdev->mtu + ETH_HLEN) {
+ if ((length > netdev->mtu + ETH_HLEN) ||
+ lrg_pkt || iph_check == 0xffff) {
ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
adapter->rx_large_packets++;
}

+ if (csum_good) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ ibmveth_rx_csum_helper(skb, adapter);
+ }
+
napi_gro_receive(napi, skb); /* send it up */

netdev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1b702a43a5d01..3e0aab04d86fb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -4194,8 +4194,13 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
goto out;
}
+ /* crq->change_mac_addr.mac_addr is the requested one
+ * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
+ */
ether_addr_copy(netdev->dev_addr,
&crq->change_mac_addr_rsp.mac_addr[0]);
+ ether_addr_copy(adapter->mac_addr,
+ &crq->change_mac_addr_rsp.mac_addr[0]);
out:
complete(&adapter->fw_done);
return rc;
@@ -4605,7 +4610,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
case IBMVNIC_1GBPS:
adapter->speed = SPEED_1000;
break;
- case IBMVNIC_10GBP:
+ case IBMVNIC_10GBPS:
adapter->speed = SPEED_10000;
break;
case IBMVNIC_25GBPS:
@@ -4620,6 +4625,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
case IBMVNIC_100GBPS:
adapter->speed = SPEED_100000;
break;
+ case IBMVNIC_200GBPS:
+ adapter->speed = SPEED_200000;
+ break;
default:
if (netif_carrier_ok(netdev))
netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f8416e1d4cf09..43feb96b0a68a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -373,7 +373,7 @@ struct ibmvnic_phys_parms {
#define IBMVNIC_10MBPS 0x40000000
#define IBMVNIC_100MBPS 0x20000000
#define IBMVNIC_1GBPS 0x10000000
-#define IBMVNIC_10GBP 0x08000000
+#define IBMVNIC_10GBPS 0x08000000
#define IBMVNIC_40GBPS 0x04000000
#define IBMVNIC_100GBPS 0x02000000
#define IBMVNIC_25GBPS 0x01000000
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 03e034918d147..bf48f0ded9c7d 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1113,7 +1113,7 @@ out:
return rc;

probe_err_register:
- kfree(lp->td_ring);
+ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
probe_err_td_ring:
iounmap(lp->tx_dma_regs);
probe_err_dma_tx:
@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
iounmap(lp->eth_regs);
iounmap(lp->rx_dma_regs);
iounmap(lp->tx_dma_regs);
+ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));

unregister_netdev(bif->dev);
free_netdev(bif->dev);
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 62a820b1eb163..3362b148de23c 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
config NET_MEDIATEK_STAR_EMAC
tristate "MediaTek STAR Ethernet MAC support"
select PHYLIB
+ select REGMAP_MMIO
help
This driver supports the ethernet MAC IP first used on
MediaTek MT85** SoCs.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8a10285b0e10c..89edcb5fca4fb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -943,6 +943,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
bool clean_complete = true;
int done;

+ if (!budget)
+ return 0;
+
if (priv->tx_ring_num[TX_XDP]) {
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9dff7b086c9fb..1f11379ad5b64 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
.dma = tx_info->map0_dma,
};

- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
dma_unmap_page(priv->ddev, tx_info->map0_dma,
PAGE_SIZE, priv->dma_dir);
put_page(tx_info->page);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 7283443868f3c..13c87ab50b267 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -212,8 +212,8 @@ static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,

{
u32 data_size;
+ int err = 0;
u32 offset;
- int err;

for (offset = 0; offset < value_len; offset += data_size) {
data_size = value_len - offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 2d55b7c22c034..4e7cfa22b3d2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -550,8 +550,9 @@ static int mlx5_pps_event(struct notifier_block *nb,
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
- ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
- be64_to_cpu(eqe->data.pps.time_stamp));
+ ptp_event.timestamp =
+ mlx5_timecounter_cyc2time(clock,
+ be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
ptp_event.pps_times.ts_real =
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index fe173ea894e2c..b1feef473b746 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4675,7 +4675,7 @@ static int rtl8169_close(struct net_device *dev)

phy_disconnect(tp->phydev);

- pci_free_irq(pdev, 0, tp);
+ free_irq(pci_irq_vector(pdev, 0), tp);

dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
@@ -4726,8 +4726,8 @@ static int rtl_open(struct net_device *dev)

rtl_request_firmware(tp);

- retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
- dev->name);
+ retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
+ IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;

@@ -4759,7 +4759,7 @@ out:
return retval;

err_free_irq:
- pci_free_irq(pdev, 0, tp);
+ free_irq(pci_irq_vector(pdev, 0), tp);
err_release_fw_2:
rtl_release_firmware(tp);
rtl8169_rx_clear(tp);
@@ -4871,6 +4871,10 @@ static int __maybe_unused rtl8169_resume(struct device *device)
if (netif_running(tp->dev))
__rtl8169_resume(tp);

+ /* Reportedly at least Asus X453MA truncates packets otherwise */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_37)
+ rtl_init_rxcfg(tp);
+
return 0;
}

diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
|
|
index 0f366cc50b74c..7f8be61a37089 100644
|
|
--- a/drivers/net/ethernet/socionext/netsec.c
|
|
+++ b/drivers/net/ethernet/socionext/netsec.c
|
|
@@ -6,6 +6,7 @@
|
|
#include <linux/pm_runtime.h>
|
|
#include <linux/acpi.h>
|
|
#include <linux/of_mdio.h>
|
|
+#include <linux/of_net.h>
|
|
#include <linux/etherdevice.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/io.h>
|
|
@@ -1836,6 +1837,14 @@ static const struct net_device_ops netsec_netdev_ops = {
|
|
static int netsec_of_probe(struct platform_device *pdev,
|
|
struct netsec_priv *priv, u32 *phy_addr)
|
|
{
|
|
+ int err;
|
|
+
|
|
+ err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
|
|
+ if (err) {
|
|
+ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
|
|
+ return err;
|
|
+ }
|
|
+
|
|
priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
|
|
if (!priv->phy_np) {
|
|
dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
|
|
@@ -1862,6 +1871,14 @@ static int netsec_acpi_probe(struct platform_device *pdev,
|
|
if (!IS_ENABLED(CONFIG_ACPI))
|
|
return -ENODEV;
|
|
|
|
+ /* ACPI systems are assumed to configure the PHY in firmware, so
|
|
+ * there is really no need to discover the PHY mode from the DSDT.
|
|
+ * Since firmware is known to exist in the field that configures the
|
|
+ * PHY correctly but passes the wrong mode string in the phy-mode
|
|
+ * device property, we have no choice but to ignore it.
|
|
+ */
|
|
+ priv->phy_interface = PHY_INTERFACE_MODE_NA;
|
|
+
|
|
ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
|
|
if (ret) {
|
|
dev_err(&pdev->dev,
|
|
@@ -1998,13 +2015,6 @@ static int netsec_probe(struct platform_device *pdev)
|
|
priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
|
|
NETIF_MSG_LINK | NETIF_MSG_PROBE;
|
|
|
|
- priv->phy_interface = device_get_phy_mode(&pdev->dev);
|
|
- if ((int)priv->phy_interface < 0) {
|
|
- dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
|
|
- ret = -ENODEV;
|
|
- goto free_ndev;
|
|
- }
|
|
-
|
|
priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
|
|
resource_size(mmio_res));
|
|
if (!priv->ioaddr) {
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
index 73465e5f5a417..d4be2559bb73d 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
|
|
@@ -176,32 +176,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
|
|
}
|
|
}
|
|
|
|
-/**
|
|
- * stmmac_stop_all_queues - Stop all queues
|
|
- * @priv: driver private structure
|
|
- */
|
|
-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
|
|
-{
|
|
- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
|
|
- u32 queue;
|
|
-
|
|
- for (queue = 0; queue < tx_queues_cnt; queue++)
|
|
- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
|
|
-}
|
|
-
|
|
-/**
|
|
- * stmmac_start_all_queues - Start all queues
|
|
- * @priv: driver private structure
|
|
- */
|
|
-static void stmmac_start_all_queues(struct stmmac_priv *priv)
|
|
-{
|
|
- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
|
|
- u32 queue;
|
|
-
|
|
- for (queue = 0; queue < tx_queues_cnt; queue++)
|
|
- netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
|
|
-}
|
|
-
|
|
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
|
|
{
|
|
if (!test_bit(STMMAC_DOWN, &priv->state) &&
|
|
@@ -2736,6 +2710,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
|
|
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
|
|
}
|
|
|
|
+ /* Configure real RX and TX queues */
|
|
+ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
|
|
+ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
|
|
+
|
|
/* Start the ball rolling... */
|
|
stmmac_start_all_dma(priv);
|
|
|
|
@@ -2862,7 +2840,7 @@ static int stmmac_open(struct net_device *dev)
|
|
}
|
|
|
|
stmmac_enable_all_queues(priv);
|
|
- stmmac_start_all_queues(priv);
|
|
+ netif_tx_start_all_queues(priv->dev);
|
|
|
|
return 0;
|
|
|
|
@@ -2903,8 +2881,6 @@ static int stmmac_release(struct net_device *dev)
|
|
phylink_stop(priv->phylink);
|
|
phylink_disconnect_phy(priv->phylink);
|
|
|
|
- stmmac_stop_all_queues(priv);
|
|
-
|
|
stmmac_disable_all_queues(priv);
|
|
|
|
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
|
|
@@ -4819,10 +4795,6 @@ int stmmac_dvr_probe(struct device *device,
|
|
|
|
stmmac_check_ether_addr(priv);
|
|
|
|
- /* Configure real RX and TX queues */
|
|
- netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
|
|
- netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
|
|
-
|
|
ndev->netdev_ops = &stmmac_netdev_ops;
|
|
|
|
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
|
@@ -5078,7 +5050,6 @@ int stmmac_suspend(struct device *dev)
|
|
mutex_lock(&priv->lock);
|
|
|
|
netif_device_detach(ndev);
|
|
- stmmac_stop_all_queues(priv);
|
|
|
|
stmmac_disable_all_queues(priv);
|
|
|
|
@@ -5203,8 +5174,6 @@ int stmmac_resume(struct device *dev)
|
|
|
|
stmmac_enable_all_queues(priv);
|
|
|
|
- stmmac_start_all_queues(priv);
|
|
-
|
|
mutex_unlock(&priv->lock);
|
|
|
|
if (!device_may_wakeup(priv->device)) {
|
|
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
|
|
index 9e58e495d3731..bb46741fbe47e 100644
|
|
--- a/drivers/net/ipa/ipa_endpoint.c
|
|
+++ b/drivers/net/ipa/ipa_endpoint.c
|
|
@@ -1447,6 +1447,9 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
|
|
|
|
void ipa_endpoint_suspend(struct ipa *ipa)
|
|
{
|
|
+ if (!ipa->setup_complete)
|
|
+ return;
|
|
+
|
|
if (ipa->modem_netdev)
|
|
ipa_modem_suspend(ipa->modem_netdev);
|
|
|
|
@@ -1458,6 +1461,9 @@ void ipa_endpoint_suspend(struct ipa *ipa)
|
|
|
|
void ipa_endpoint_resume(struct ipa *ipa)
|
|
{
|
|
+ if (!ipa->setup_complete)
|
|
+ return;
|
|
+
|
|
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
|
|
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
|
|
|
|
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
|
|
index 07c42c0719f5b..5ca1356b8656f 100644
|
|
--- a/drivers/net/usb/qmi_wwan.c
|
|
+++ b/drivers/net/usb/qmi_wwan.c
|
|
@@ -1375,6 +1375,7 @@ static const struct usb_device_id products[] = {
|
|
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
|
|
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
|
|
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
|
|
+ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
|
|
|
|
/* 4. Gobi 1000 devices */
|
|
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
|
|
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
|
|
index 9b00708676cf7..1bdd3df0867a5 100644
|
|
--- a/drivers/net/wan/hdlc.c
|
|
+++ b/drivers/net/wan/hdlc.c
|
|
@@ -46,7 +46,15 @@ static struct hdlc_proto *first_proto;
|
|
static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
|
|
struct packet_type *p, struct net_device *orig_dev)
|
|
{
|
|
- struct hdlc_device *hdlc = dev_to_hdlc(dev);
|
|
+ struct hdlc_device *hdlc;
|
|
+
|
|
+ /* First make sure "dev" is an HDLC device */
|
|
+ if (!(dev->priv_flags & IFF_WAN_HDLC)) {
|
|
+ kfree_skb(skb);
|
|
+ return NET_RX_SUCCESS;
|
|
+ }
|
|
+
|
|
+ hdlc = dev_to_hdlc(dev);
|
|
|
|
if (!net_eq(dev_net(dev), &init_net)) {
|
|
kfree_skb(skb);
|
|
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
|
|
index 08e0a46501dec..c70a518b8b478 100644
|
|
--- a/drivers/net/wan/hdlc_raw_eth.c
|
|
+++ b/drivers/net/wan/hdlc_raw_eth.c
|
|
@@ -99,6 +99,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
|
|
old_qlen = dev->tx_queue_len;
|
|
ether_setup(dev);
|
|
dev->tx_queue_len = old_qlen;
|
|
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
|
|
eth_hw_addr_random(dev);
|
|
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
|
|
netif_dormant_off(dev);
|
|
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
|
|
index 294fbc1e89ab8..e6e0284e47837 100644
|
|
--- a/drivers/net/wireless/ath/ath10k/ce.c
|
|
+++ b/drivers/net/wireless/ath/ath10k/ce.c
|
|
@@ -1555,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
|
|
ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
|
|
if (ret) {
|
|
dma_free_coherent(ar->dev,
|
|
- (nentries * sizeof(struct ce_desc_64) +
|
|
+ (nentries * sizeof(struct ce_desc) +
|
|
CE_DESC_RING_ALIGN),
|
|
src_ring->base_addr_owner_space_unaligned,
|
|
base_addr);
|
|
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
|
|
index d787cbead56ab..215ade6faf328 100644
|
|
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
|
|
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
|
|
@@ -142,6 +142,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
|
|
BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
|
|
|
|
idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
|
|
+
|
|
+ if (idx < 0 || idx >= htt->rx_ring.size) {
|
|
+ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
|
|
+ idx &= htt->rx_ring.size_mask;
|
|
+ ret = -ENOMEM;
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
while (num > 0) {
|
|
skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
|
|
if (!skb) {
|
|
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
|
|
index 919d15584d4a2..77daca67a8e14 100644
|
|
--- a/drivers/net/wireless/ath/ath10k/mac.c
|
|
+++ b/drivers/net/wireless/ath/ath10k/mac.c
|
|
@@ -7283,7 +7283,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
|
|
struct ieee80211_channel *channel)
|
|
{
|
|
int ret;
|
|
- enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
|
|
+ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
|
|
|
|
lockdep_assert_held(&ar->conf_mutex);
|
|
|
|
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
|
|
index 30092841ac464..a0314c1c84653 100644
|
|
--- a/drivers/net/wireless/ath/ath11k/ahb.c
|
|
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
|
|
@@ -981,12 +981,16 @@ err_core_free:
|
|
static int ath11k_ahb_remove(struct platform_device *pdev)
|
|
{
|
|
struct ath11k_base *ab = platform_get_drvdata(pdev);
|
|
+ unsigned long left;
|
|
|
|
reinit_completion(&ab->driver_recovery);
|
|
|
|
- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags))
|
|
- wait_for_completion_timeout(&ab->driver_recovery,
|
|
- ATH11K_AHB_RECOVERY_TIMEOUT);
|
|
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
|
|
+ left = wait_for_completion_timeout(&ab->driver_recovery,
|
|
+ ATH11K_AHB_RECOVERY_TIMEOUT);
|
|
+ if (!left)
|
|
+ ath11k_warn(ab, "failed to receive recovery response completion\n");
|
|
+ }
|
|
|
|
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
|
|
cancel_work_sync(&ab->restart_work);
|
|
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
|
|
index 2836a0f197ab0..fc5be7e8c043e 100644
|
|
--- a/drivers/net/wireless/ath/ath11k/mac.c
|
|
+++ b/drivers/net/wireless/ath/ath11k/mac.c
|
|
@@ -5824,7 +5824,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
|
|
ret = ath11k_mac_setup_channels_rates(ar,
|
|
cap->supported_bands);
|
|
if (ret)
|
|
- goto err_free;
|
|
+ goto err;
|
|
|
|
ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
|
|
ath11k_mac_setup_he_cap(ar, cap);
|
|
@@ -5938,7 +5938,9 @@ static int __ath11k_mac_register(struct ath11k *ar)
|
|
err_free:
|
|
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
|
|
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
|
|
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
|
|
|
|
+err:
|
|
SET_IEEE80211_DEV(ar->hw, NULL);
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
|
|
index c00a99ad8dbc1..497cff7e64cc5 100644
|
|
--- a/drivers/net/wireless/ath/ath11k/qmi.c
|
|
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
|
|
@@ -2419,6 +2419,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
|
|
ATH11K_QMI_WLFW_SERVICE_INS_ID_V01);
|
|
if (ret < 0) {
|
|
ath11k_warn(ab, "failed to add qmi lookup\n");
|
|
+ destroy_workqueue(ab->qmi.event_wq);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
|
|
index 5e7ea838a9218..814131a0680a4 100644
|
|
--- a/drivers/net/wireless/ath/ath6kl/main.c
|
|
+++ b/drivers/net/wireless/ath/ath6kl/main.c
|
|
@@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
|
|
|
|
ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
|
|
|
|
+ if (aid < 1 || aid > AP_MAX_NUM_STA)
|
|
+ return;
|
|
+
|
|
if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
|
|
struct ieee80211_mgmt *mgmt =
|
|
(struct ieee80211_mgmt *) assoc_info;
|
|
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
|
|
index 6885d2ded53a8..3d5db84d64650 100644
|
|
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
|
|
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
|
|
@@ -2645,6 +2645,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (tsid >= 16) {
|
|
+ ath6kl_err("invalid tsid: %d\n", tsid);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
|
|
if (!skb)
|
|
return -ENOMEM;
|
|
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
|
|
index 3f563e02d17da..2ed98aaed6fb5 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
|
|
@@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle)
|
|
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
|
|
|
|
/* The pending URBs have to be canceled. */
|
|
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
|
|
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
|
|
&hif_dev->tx.tx_pending, list) {
|
|
+ usb_get_urb(tx_buf->urb);
|
|
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
|
|
usb_kill_urb(tx_buf->urb);
|
|
+ list_del(&tx_buf->list);
|
|
+ usb_free_urb(tx_buf->urb);
|
|
+ kfree(tx_buf->buf);
|
|
+ kfree(tx_buf);
|
|
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
|
|
}
|
|
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
|
|
|
|
usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
|
|
}
|
|
@@ -762,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
|
|
struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
|
|
unsigned long flags;
|
|
|
|
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
|
|
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
|
|
&hif_dev->tx.tx_buf, list) {
|
|
+ usb_get_urb(tx_buf->urb);
|
|
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
|
|
usb_kill_urb(tx_buf->urb);
|
|
list_del(&tx_buf->list);
|
|
usb_free_urb(tx_buf->urb);
|
|
kfree(tx_buf->buf);
|
|
kfree(tx_buf);
|
|
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
|
|
}
|
|
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
|
|
|
|
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
|
|
hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
|
|
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
|
|
|
|
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
|
|
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
|
|
&hif_dev->tx.tx_pending, list) {
|
|
+ usb_get_urb(tx_buf->urb);
|
|
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
|
|
usb_kill_urb(tx_buf->urb);
|
|
list_del(&tx_buf->list);
|
|
usb_free_urb(tx_buf->urb);
|
|
kfree(tx_buf->buf);
|
|
kfree(tx_buf);
|
|
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
|
|
}
|
|
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
|
|
|
|
usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
|
|
}
|
|
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
|
|
index d2e062eaf5614..510e61e97dbcb 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
|
|
@@ -339,6 +339,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
|
|
|
|
if (skb) {
|
|
htc_hdr = (struct htc_frame_hdr *) skb->data;
|
|
+ if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
|
|
+ goto ret;
|
|
endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
|
|
skb_pull(skb, sizeof(struct htc_frame_hdr));
|
|
|
|
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
|
|
index 702b689c06df3..f3ea629764fa8 100644
|
|
--- a/drivers/net/wireless/ath/wcn36xx/main.c
|
|
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
|
|
@@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
|
|
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
|
|
.mcs = {
|
|
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
|
|
- .rx_highest = cpu_to_le16(72),
|
|
+ .rx_highest = cpu_to_le16(150),
|
|
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
|
|
}
|
|
}
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
|
|
index c88655acc78c7..76b478f70b4bb 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
|
|
@@ -483,7 +483,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
|
|
ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
|
|
|
|
if (ret || !(*ifp) || !(*ifp)->ndev) {
|
|
- if (ret != -ENODATA && *ifp)
|
|
+ if (ret != -ENODATA && *ifp && (*ifp)->ndev)
|
|
(*ifp)->ndev->stats.rx_errors++;
|
|
brcmu_pkt_buf_free_skb(skb);
|
|
return -ENODATA;
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
|
|
index 8bb4f1fa790e7..1bb270e782ff2 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
|
|
@@ -1619,6 +1619,8 @@ fail:
|
|
BRCMF_TX_IOCTL_MAX_MSG_SIZE,
|
|
msgbuf->ioctbuf,
|
|
msgbuf->ioctbuf_handle);
|
|
+ if (msgbuf->txflow_wq)
|
|
+ destroy_workqueue(msgbuf->txflow_wq);
|
|
kfree(msgbuf);
|
|
}
|
|
return -ENOMEM;
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
|
|
index 7ef36234a25dc..66797dc5e90d5 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
|
|
@@ -5065,8 +5065,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
|
|
pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
|
|
pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
|
|
|
|
- if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
|
|
+ if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
|
|
+ kfree(pi->u.pi_lcnphy);
|
|
return false;
|
|
+ }
|
|
|
|
if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
|
|
if (pi_lcn->lcnphy_tempsense_option == 3) {
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
|
|
index 27116c7d3f4f8..48269a4cf8964 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
|
|
@@ -947,9 +947,8 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
|
|
struct iwl_rx_packet *pkt = tp_data->fw_pkt;
|
|
struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
|
|
|
|
- if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
|
|
- (pkt->hdr.cmd == wanted_hdr->cmd &&
|
|
- pkt->hdr.group_id == wanted_hdr->group_id))) {
|
|
+ if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
|
|
+ pkt->hdr.group_id == wanted_hdr->group_id)) {
|
|
struct iwl_rx_packet *fw_pkt =
|
|
kmemdup(pkt,
|
|
sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
|
|
@@ -1012,6 +1011,9 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
|
|
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
|
|
int ret, i;
|
|
|
|
+ if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
|
|
+ return;
|
|
+
|
|
IWL_DEBUG_FW(fwrt,
|
|
"WRT: Generating active triggers list, domain 0x%x\n",
|
|
fwrt->trans->dbg.domains_bitmap);
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
index 77916231ff7d3..03b73003b0095 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
@@ -3685,9 +3685,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
|
|
tail->apply_time_max_delay = cpu_to_le32(delay);
|
|
|
|
IWL_DEBUG_TE(mvm,
|
|
- "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
|
|
- channel->hw_value, req_dur, duration, delay,
|
|
- dtim_interval);
|
|
+ "ROC: Requesting to remain on channel %u for %ums\n",
|
|
+ channel->hw_value, req_dur);
|
|
+ IWL_DEBUG_TE(mvm,
|
|
+ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
|
|
+ duration, delay, dtim_interval);
|
|
+
|
|
/* Set the node address */
|
|
memcpy(tail->node_addr, vif->addr, ETH_ALEN);
|
|
|
|
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
|
|
index ff932627a46c1..2fb69a590bd8e 100644
|
|
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
|
|
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
|
|
@@ -1889,7 +1889,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
|
|
chan, CFG80211_BSS_FTYPE_UNKNOWN,
|
|
bssid, timestamp,
|
|
cap_info_bitmap, beacon_period,
|
|
- ie_buf, ie_len, rssi, GFP_KERNEL);
|
|
+ ie_buf, ie_len, rssi, GFP_ATOMIC);
|
|
if (bss) {
|
|
bss_priv = (struct mwifiex_bss_priv *)bss->priv;
|
|
bss_priv->band = band;
|
|
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
|
|
index a042965962a2d..1b6bee5465288 100644
|
|
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
|
|
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
|
|
@@ -1976,6 +1976,8 @@ error:
|
|
kfree(card->mpa_rx.buf);
|
|
card->mpa_tx.buf_size = 0;
|
|
card->mpa_rx.buf_size = 0;
|
|
+ card->mpa_tx.buf = NULL;
|
|
+ card->mpa_rx.buf = NULL;
|
|
}
|
|
|
|
return ret;
|
|
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
|
|
index 6f3cfde4654cc..426e39d4ccf0f 100644
|
|
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
|
|
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
|
|
@@ -1353,7 +1353,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
|
|
skb_dequeue(&port->tx_aggr.aggr_list)))
|
|
mwifiex_write_data_complete(adapter, skb_tmp,
|
|
0, -1);
|
|
- del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
|
|
+ if (port->tx_aggr.timer_cnxt.hold_timer.function)
|
|
+ del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
|
|
port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
|
|
port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
|
|
}
|
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
|
|
index 8fb8255650a7e..6969579e6b1dd 100644
|
|
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
|
|
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
|
|
@@ -2267,14 +2267,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
|
|
struct bss_info_bcn *bcn;
|
|
int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
|
|
|
|
- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
|
|
- if (IS_ERR(rskb))
|
|
- return PTR_ERR(rskb);
|
|
-
|
|
- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
|
|
- bcn = (struct bss_info_bcn *)tlv;
|
|
- bcn->enable = en;
|
|
-
|
|
skb = ieee80211_beacon_get_template(hw, vif, &offs);
|
|
if (!skb)
|
|
return -EINVAL;
|
|
@@ -2285,6 +2277,16 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
|
|
+ if (IS_ERR(rskb)) {
|
|
+ dev_kfree_skb(skb);
|
|
+ return PTR_ERR(rskb);
|
|
+ }
|
|
+
|
|
+ tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
|
|
+ bcn = (struct bss_info_bcn *)tlv;
|
|
+ bcn->enable = en;
|
|
+
|
|
if (mvif->band_idx) {
|
|
info = IEEE80211_SKB_CB(skb);
|
|
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
|
|
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
|
|
index f40d8c3c3d9e5..f3ccbd2b10847 100644
|
|
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
|
|
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
|
|
@@ -869,6 +869,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
|
|
default:
|
|
pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
|
|
vif->vifid, vif->wdev.iftype);
|
|
+ dev_kfree_skb(cmd_skb);
|
|
ret = -EINVAL;
|
|
goto out;
|
|
}
|
|
@@ -1924,6 +1925,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
|
|
break;
|
|
default:
|
|
pr_err("unsupported iftype %d\n", vif->wdev.iftype);
|
|
+ dev_kfree_skb(cmd_skb);
|
|
ret = -EINVAL;
|
|
goto out;
|
|
}
|
|
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
|
|
index 19efae462a242..5cd7ef3625c5e 100644
|
|
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
|
|
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
|
|
@@ -5795,7 +5795,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
|
|
ret = usb_submit_urb(urb, GFP_KERNEL);
|
|
if (ret) {
|
|
usb_unanchor_urb(urb);
|
|
- usb_free_urb(urb);
|
|
goto error;
|
|
}
|
|
|
|
@@ -5804,6 +5803,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
|
|
rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
|
|
|
|
error:
|
|
+ usb_free_urb(urb);
|
|
return ret;
|
|
}
|
|
|
|
@@ -6318,6 +6318,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
|
|
struct rtl8xxxu_priv *priv = hw->priv;
|
|
struct rtl8xxxu_rx_urb *rx_urb;
|
|
struct rtl8xxxu_tx_urb *tx_urb;
|
|
+ struct sk_buff *skb;
|
|
unsigned long flags;
|
|
int ret, i;
|
|
|
|
@@ -6368,6 +6369,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
|
|
rx_urb->hw = hw;
|
|
|
|
ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
|
|
+ if (ret) {
|
|
+ if (ret != -ENOMEM) {
|
|
+ skb = (struct sk_buff *)rx_urb->urb.context;
|
|
+ dev_kfree_skb(skb);
|
|
+ }
|
|
+ rtl8xxxu_queue_rx_urb(priv, rx_urb);
|
|
+ }
|
|
}
|
|
|
|
schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
|
|
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
|
|
index 665d4bbdee6a0..6a881d0be9bf0 100644
|
|
--- a/drivers/net/wireless/realtek/rtw88/main.c
|
|
+++ b/drivers/net/wireless/realtek/rtw88/main.c
|
|
@@ -1465,6 +1465,9 @@ int rtw_core_init(struct rtw_dev *rtwdev)
|
|
ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
|
|
if (ret) {
|
|
rtw_warn(rtwdev, "no wow firmware loaded\n");
|
|
+ wait_for_completion(&rtwdev->fw.completion);
|
|
+ if (rtwdev->fw.firmware)
|
|
+ release_firmware(rtwdev->fw.firmware);
|
|
return ret;
|
|
}
|
|
}
|
|
@@ -1479,6 +1482,8 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
|
|
struct rtw_rsvd_page *rsvd_pkt, *tmp;
|
|
unsigned long flags;
|
|
|
|
+ rtw_wait_firmware_completion(rtwdev);
|
|
+
|
|
if (fw->firmware)
|
|
release_firmware(fw->firmware);
|
|
|
|
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
|
|
index 3413973bc4750..7f1f5073b9f4d 100644
|
|
--- a/drivers/net/wireless/realtek/rtw88/pci.c
|
|
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
|
|
@@ -1599,6 +1599,8 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
|
|
|
|
if (chip->ops->shutdown)
|
|
chip->ops->shutdown(rtwdev);
|
|
+
|
|
+ pci_set_power_state(pdev, PCI_D3hot);
|
|
}
|
|
EXPORT_SYMBOL(rtw_pci_shutdown);
|
|
|
|
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
|
|
index 024c2bc275cbe..ca17aa9cf7dc7 100644
|
|
--- a/drivers/net/wireless/realtek/rtw88/pci.h
|
|
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
|
|
@@ -9,8 +9,8 @@
|
|
#define RTK_BEQ_TX_DESC_NUM 256
|
|
|
|
#define RTK_MAX_RX_DESC_NUM 512
|
|
-/* 8K + rx desc size */
|
|
-#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
|
|
+/* 11K + rx desc size */
|
|
+#define RTK_PCI_RX_BUF_SIZE (11454 + 24)
|
|
|
|
#define RTK_PCI_CTRL 0x300
|
|
#define BIT_RST_TRXDMA_INTF BIT(20)
|
|
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
|
|
index 8d93f31597469..9687b376d221b 100644
|
|
--- a/drivers/net/wireless/realtek/rtw88/phy.c
|
|
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
|
|
@@ -147,12 +147,13 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
|
|
{
|
|
struct rtw_chip_info *chip = rtwdev->chip;
|
|
struct rtw_hal *hal = &rtwdev->hal;
|
|
- const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
|
|
u32 addr, mask;
|
|
u8 path;
|
|
|
|
- if (dig_cck)
|
|
+ if (chip->dig_cck) {
|
|
+ const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
|
|
rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
|
|
+ }
|
|
|
|
for (path = 0; path < hal->rf_path_num; path++) {
|
|
addr = chip->dig[path].addr;
|
|
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
|
|
index 88e1db65be02c..71428d8cbcfc5 100644
|
|
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
|
|
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
|
|
@@ -1203,6 +1203,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
|
|
|
|
err_dma_mask:
|
|
pci_clear_master(pdev);
|
|
+ pci_release_regions(pdev);
|
|
err_pci_regions:
|
|
pci_disable_device(pdev);
|
|
err_pci_enable:
|
|
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
|
|
index 423f9b8fbbcf5..fa561d455f7c8 100644
|
|
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
|
|
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
|
|
@@ -1893,7 +1893,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
|
|
goto err_init_dev;
|
|
} else {
|
|
rc = -EINVAL;
|
|
- goto err_ndev;
|
|
+ goto err_init_pci;
|
|
}
|
|
|
|
ndev_reset_unsafe_flags(ndev);
|
|
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
|
|
index 58b035cc67a01..75ed95a250fb5 100644
|
|
--- a/drivers/nvme/target/core.c
|
|
+++ b/drivers/nvme/target/core.c
|
|
@@ -1142,7 +1142,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
|
|
* in case a host died before it enabled the controller. Hence, simply
|
|
* reset the keep alive timer when the controller is enabled.
|
|
*/
|
|
- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
|
|
+ if (ctrl->kato)
|
|
+ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
|
|
}
|
|
|
|
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
|
|
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
|
|
index 927eb5f6003f0..4aca5b4a87d75 100644
|
|
--- a/drivers/nvmem/core.c
|
|
+++ b/drivers/nvmem/core.c
|
|
@@ -355,16 +355,14 @@ static void nvmem_cell_add(struct nvmem_cell *cell)
|
|
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
|
|
}
|
|
|
|
-static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
|
|
- const struct nvmem_cell_info *info,
|
|
- struct nvmem_cell *cell)
|
|
+static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
|
|
+ const struct nvmem_cell_info *info,
|
|
+ struct nvmem_cell *cell)
|
|
{
|
|
cell->nvmem = nvmem;
|
|
cell->offset = info->offset;
|
|
cell->bytes = info->bytes;
|
|
- cell->name = kstrdup_const(info->name, GFP_KERNEL);
|
|
- if (!cell->name)
|
|
- return -ENOMEM;
|
|
+ cell->name = info->name;
|
|
|
|
cell->bit_offset = info->bit_offset;
|
|
cell->nbits = info->nbits;
|
|
@@ -376,13 +374,30 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
|
|
if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
|
|
dev_err(&nvmem->dev,
|
|
"cell %s unaligned to nvmem stride %d\n",
|
|
- cell->name, nvmem->stride);
|
|
+ cell->name ?: "<unknown>", nvmem->stride);
|
|
return -EINVAL;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
+static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
|
|
+ const struct nvmem_cell_info *info,
|
|
+ struct nvmem_cell *cell)
|
|
+{
|
|
+ int err;
|
|
+
|
|
+ err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ cell->name = kstrdup_const(info->name, GFP_KERNEL);
|
|
+ if (!cell->name)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/**
|
|
* nvmem_add_cells() - Add cell information to an nvmem device
|
|
*
|
|
@@ -823,6 +838,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
|
|
{
|
|
|
|
struct device_node *nvmem_np;
|
|
+ struct nvmem_device *nvmem;
|
|
int index = 0;
|
|
|
|
if (id)
|
|
@@ -832,7 +848,9 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
|
|
if (!nvmem_np)
|
|
return ERR_PTR(-ENOENT);
|
|
|
|
- return __nvmem_device_get(nvmem_np, device_match_of_node);
|
|
+ nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
|
|
+ of_node_put(nvmem_np);
|
|
+ return nvmem;
|
|
}
|
|
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
|
|
#endif
|
|
@@ -1433,7 +1451,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
|
|
if (!nvmem)
|
|
return -EINVAL;
|
|
|
|
- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
|
|
+ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
|
|
if (rc)
|
|
return rc;
|
|
|
|
@@ -1463,7 +1481,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
|
|
if (!nvmem)
|
|
return -EINVAL;
|
|
|
|
- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
|
|
+ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
|
|
if (rc)
|
|
return rc;
|
|
|
|
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
|
|
index 91dcad982d362..11d192fb2e813 100644
|
|
--- a/drivers/opp/core.c
|
|
+++ b/drivers/opp/core.c
|
|
@@ -1918,6 +1918,9 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
|
|
{
|
|
int index;
|
|
|
|
+ if (!opp_table->genpd_virt_devs)
|
|
+ return;
|
|
+
|
|
for (index = 0; index < opp_table->required_opp_count; index++) {
|
|
if (!opp_table->genpd_virt_devs[index])
|
|
continue;
|
|
@@ -1964,6 +1967,9 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
|
|
if (!opp_table)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
+ if (opp_table->genpd_virt_devs)
|
|
+ return opp_table;
|
|
+
|
|
/*
|
|
* If the genpd's OPP table isn't already initialized, parsing of the
|
|
* required-opps fail for dev. We should retry this after genpd's OPP
|
|
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
|
|
index 5e5b8821bed8c..ce1c00ea5fdca 100644
|
|
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
|
|
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
|
|
@@ -505,7 +505,8 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
|
|
u32 reg;
|
|
int i;
|
|
|
|
- hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
|
|
+ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
|
|
+ PCI_HEADER_TYPE_MASK;
|
|
if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
|
|
dev_err(pci->dev,
|
|
"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
|
|
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
|
|
index 90ff291c24f09..d5f58684d962c 100644
|
|
--- a/drivers/pci/controller/pci-aardvark.c
|
|
+++ b/drivers/pci/controller/pci-aardvark.c
|
|
@@ -9,7 +9,7 @@
|
|
*/
|
|
|
|
#include <linux/delay.h>
|
|
-#include <linux/gpio.h>
|
|
+#include <linux/gpio/consumer.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/irq.h>
|
|
#include <linux/irqdomain.h>
|
|
@@ -608,7 +608,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
|
|
* Initialize the configuration space of the PCI-to-PCI bridge
|
|
* associated with the given PCIe interface.
|
|
*/
|
|
-static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
|
|
+static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
|
|
{
|
|
struct pci_bridge_emul *bridge = &pcie->bridge;
|
|
|
|
@@ -634,8 +634,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
|
|
bridge->data = pcie;
|
|
bridge->ops = &advk_pci_bridge_emul_ops;
|
|
|
|
- pci_bridge_emul_init(bridge, 0);
|
|
-
|
|
+ return pci_bridge_emul_init(bridge, 0);
|
|
}
|
|
|
|
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
|
|
@@ -1169,7 +1168,11 @@ static int advk_pcie_probe(struct platform_device *pdev)
|
|
|
|
advk_pcie_setup_hw(pcie);
|
|
|
|
- advk_sw_pci_bridge_init(pcie);
|
|
+ ret = advk_sw_pci_bridge_init(pcie);
|
|
+ if (ret) {
|
|
+ dev_err(dev, "Failed to register emulated root PCI bridge\n");
|
|
+ return ret;
|
|
+ }
|
|
|
|
ret = advk_pcie_init_irq_domain(pcie);
|
|
if (ret) {
|
|
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
|
|
index bf40ff09c99d6..95c04b0ffeb16 100644
|
|
--- a/drivers/pci/controller/pci-hyperv.c
|
|
+++ b/drivers/pci/controller/pci-hyperv.c
|
|
@@ -1275,11 +1275,25 @@ static void hv_irq_unmask(struct irq_data *data)
|
|
exit_unlock:
|
|
spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
|
|
|
|
- if (res) {
|
|
+ /*
|
|
+ * During hibernation, when a CPU is offlined, the kernel tries
|
|
+ * to move the interrupt to the remaining CPUs that haven't
|
|
+ * been offlined yet. In this case, the below hv_do_hypercall()
|
|
+ * always fails since the vmbus channel has been closed:
|
|
+ * refer to cpu_disable_common() -> fixup_irqs() ->
|
|
+ * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
|
|
+ *
|
|
+ * Suppress the error message for hibernation because the failure
|
|
+ * during hibernation does not matter (at this time all the devices
|
|
+ * have been frozen). Note: the correct affinity info is still updated
|
|
+ * into the irqdata data structure in migrate_one_irq() ->
|
|
+ * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
|
|
+ * resumes, hv_pci_restore_msi_state() is able to correctly restore
|
|
+ * the interrupt with the correct affinity.
|
|
+ */
|
|
+ if (res && hbus->state != hv_pcibus_removing)
|
|
dev_err(&hbus->hdev->device,
|
|
"%s() failed: %#llx", __func__, res);
|
|
- return;
|
|
- }
|
|
|
|
pci_msi_unmask_irq(data);
|
|
}
|
|
@@ -3368,6 +3382,34 @@ static int hv_pci_suspend(struct hv_device *hdev)
|
|
return 0;
|
|
}
|
|
|
|
+static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
|
|
+{
|
|
+ struct msi_desc *entry;
|
|
+ struct irq_data *irq_data;
|
|
+
|
|
+ for_each_pci_msi_entry(entry, pdev) {
|
|
+ irq_data = irq_get_irq_data(entry->irq);
|
|
+ if (WARN_ON_ONCE(!irq_data))
|
|
+ return -EINVAL;
|
|
+
|
|
+ hv_compose_msi_msg(irq_data, &entry->msg);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
|
|
+ * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
|
|
+ * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
|
|
+ * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
|
|
+ * Table entries.
|
|
+ */
|
|
+static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
|
|
+{
|
|
+ pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
|
|
+}
|
|
+
|
|
static int hv_pci_resume(struct hv_device *hdev)
|
|
{
|
|
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
|
|
@@ -3401,6 +3443,8 @@ static int hv_pci_resume(struct hv_device *hdev)
|
|
|
|
prepopulate_bars(hbus);
|
|
|
|
+ hv_pci_restore_msi_state(hbus);
|
|
+
|
|
hbus->state = hv_pcibus_installed;
|
|
return 0;
|
|
out:
|
|
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
|
|
index 3176ad3ab0e52..908475d27e0e7 100644
|
|
--- a/drivers/pci/controller/pcie-iproc-msi.c
|
|
+++ b/drivers/pci/controller/pcie-iproc-msi.c
|
|
@@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,
|
|
struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
|
|
int target_cpu = cpumask_first(mask);
|
|
int curr_cpu;
|
|
+ int ret;
|
|
|
|
curr_cpu = hwirq_to_cpu(msi, data->hwirq);
|
|
if (curr_cpu == target_cpu)
|
|
- return IRQ_SET_MASK_OK_DONE;
|
|
+ ret = IRQ_SET_MASK_OK_DONE;
|
|
+ else {
|
|
+ /* steer MSI to the target CPU */
|
|
+ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
|
|
+ ret = IRQ_SET_MASK_OK;
|
|
+ }
|
|
|
|
- /* steer MSI to the target CPU */
|
|
- data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
|
|
+ irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
|
|
|
|
- return IRQ_SET_MASK_OK;
|
|
+ return ret;
|
|
}
|
|
|
|
static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
|
|
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
|
|
index b37e08c4f9d1a..4afd4ee4f7f04 100644
|
|
--- a/drivers/pci/iov.c
|
|
+++ b/drivers/pci/iov.c
|
|
@@ -180,6 +180,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
|
|
virtfn->device = iov->vf_device;
|
|
virtfn->is_virtfn = 1;
|
|
virtfn->physfn = pci_dev_get(dev);
|
|
+ virtfn->no_command_memory = 1;
|
|
|
|
if (id == 0)
|
|
pci_read_vf_config_common(virtfn);
|
|
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
|
|
index aac9823b0c6bb..e116815fa8092 100644
|
|
--- a/drivers/perf/thunderx2_pmu.c
|
|
+++ b/drivers/perf/thunderx2_pmu.c
|
|
@@ -805,14 +805,17 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
|
|
list_for_each_entry(rentry, &list, node) {
|
|
if (resource_type(rentry->res) == IORESOURCE_MEM) {
|
|
res = *rentry->res;
|
|
+ rentry = NULL;
|
|
break;
|
|
}
|
|
}
|
|
+ acpi_dev_free_resource_list(&list);
|
|
|
|
- if (!rentry->res)
|
|
+ if (rentry) {
|
|
+ dev_err(dev, "PMU type %d: Fail to find resource\n", type);
|
|
return NULL;
|
|
+ }
|
|
|
|
- acpi_dev_free_resource_list(&list);
|
|
base = devm_ioremap_resource(dev, &res);
|
|
if (IS_ERR(base)) {
|
|
dev_err(dev, "PMU type %d: Fail to map resource\n", type);
|
|
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
|
|
index edac28cd25ddc..633cf07ba6723 100644
|
|
--- a/drivers/perf/xgene_pmu.c
|
|
+++ b/drivers/perf/xgene_pmu.c
|
|
@@ -1453,17 +1453,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
|
|
}
|
|
|
|
#if defined(CONFIG_ACPI)
|
|
-static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
|
|
-{
|
|
- struct resource *res = data;
|
|
-
|
|
- if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
|
|
- acpi_dev_resource_memory(ares, res);
|
|
-
|
|
- /* Always tell the ACPI core to skip this resource */
|
|
- return 1;
|
|
-}
|
|
-
|
|
static struct
|
|
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
|
|
struct acpi_device *adev, u32 type)
|
|
@@ -1475,6 +1464,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
|
|
struct hw_pmu_info *inf;
|
|
void __iomem *dev_csr;
|
|
struct resource res;
|
|
+ struct resource_entry *rentry;
|
|
int enable_bit;
|
|
int rc;
|
|
|
|
@@ -1483,11 +1473,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
|
|
return NULL;
|
|
|
|
INIT_LIST_HEAD(&resource_list);
|
|
- rc = acpi_dev_get_resources(adev, &resource_list,
|
|
- acpi_pmu_dev_add_resource, &res);
|
|
+ rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
|
|
+ if (rc <= 0) {
|
|
+ dev_err(dev, "PMU type %d: No resources found\n", type);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ list_for_each_entry(rentry, &resource_list, node) {
|
|
+ if (resource_type(rentry->res) == IORESOURCE_MEM) {
|
|
+ res = *rentry->res;
|
|
+ rentry = NULL;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
acpi_dev_free_resource_list(&resource_list);
|
|
- if (rc < 0) {
|
|
- dev_err(dev, "PMU type %d: No resource address found\n", type);
|
|
+
|
|
+ if (rentry) {
|
|
+ dev_err(dev, "PMU type %d: No memory resource found\n", type);
|
|
return NULL;
|
|
}
|
|
|
|
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
|
|
index b625a657171e6..11e27136032b9 100644
|
|
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
|
|
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
|
|
@@ -515,7 +515,7 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
|
|
val = pmap->val << __ffs(pconf->mask);
|
|
|
|
rc = regmap_update_bits(pdata->scu, pconf->reg,
|
|
- pmap->mask, val);
|
|
+ pconf->mask, val);
|
|
|
|
if (rc < 0)
|
|
return rc;
|
|
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index dcf7df797af75..0ed14de0134cf 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -23,6 +23,7 @@ config PINCTRL_BCM2835
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select GPIOLIB_IRQCHIP
default ARCH_BCM2835 || ARCH_BRCMSTB
help
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index c6fe7d64c9137..c7448be64d073 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -129,9 +129,8 @@ static int dt_to_map_one_config(struct pinctrl *p,
if (!np_pctldev || of_node_is_root(np_pctldev)) {
of_node_put(np_pctldev);
ret = driver_deferred_probe_check_state(p->dev);
- /* keep deferring if modules are enabled unless we've timed out */
- if (IS_ENABLED(CONFIG_MODULES) && !allow_default &&
- (ret == -ENODEV))
+ /* keep deferring if modules are enabled */
+ if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret < 0)
ret = -EPROBE_DEFER;
return ret;
}
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 151931b593f6e..235a141182bf6 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -87,7 +87,7 @@ const struct regmap_config mcp23x08_regmap = {
};
EXPORT_SYMBOL_GPL(mcp23x08_regmap);

-static const struct reg_default mcp23x16_defaults[] = {
+static const struct reg_default mcp23x17_defaults[] = {
{.reg = MCP_IODIR << 1, .def = 0xffff},
{.reg = MCP_IPOL << 1, .def = 0x0000},
{.reg = MCP_GPINTEN << 1, .def = 0x0000},
@@ -98,23 +98,23 @@ static const struct reg_default mcp23x16_defaults[] = {
{.reg = MCP_OLAT << 1, .def = 0x0000},
};

-static const struct regmap_range mcp23x16_volatile_range = {
+static const struct regmap_range mcp23x17_volatile_range = {
.range_min = MCP_INTF << 1,
.range_max = MCP_GPIO << 1,
};

-static const struct regmap_access_table mcp23x16_volatile_table = {
- .yes_ranges = &mcp23x16_volatile_range,
+static const struct regmap_access_table mcp23x17_volatile_table = {
+ .yes_ranges = &mcp23x17_volatile_range,
.n_yes_ranges = 1,
};

-static const struct regmap_range mcp23x16_precious_range = {
- .range_min = MCP_GPIO << 1,
+static const struct regmap_range mcp23x17_precious_range = {
+ .range_min = MCP_INTCAP << 1,
.range_max = MCP_GPIO << 1,
};

-static const struct regmap_access_table mcp23x16_precious_table = {
- .yes_ranges = &mcp23x16_precious_range,
+static const struct regmap_access_table mcp23x17_precious_table = {
+ .yes_ranges = &mcp23x17_precious_range,
.n_yes_ranges = 1,
};

@@ -124,10 +124,10 @@ const struct regmap_config mcp23x17_regmap = {

.reg_stride = 2,
.max_register = MCP_OLAT << 1,
- .volatile_table = &mcp23x16_volatile_table,
- .precious_table = &mcp23x16_precious_table,
- .reg_defaults = mcp23x16_defaults,
- .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults),
+ .volatile_table = &mcp23x17_volatile_table,
+ .precious_table = &mcp23x17_precious_table,
+ .reg_defaults = mcp23x17_defaults,
+ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
.cache_type = REGCACHE_FLAT,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index c322f30a20648..22283ba797cd0 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1060,12 +1060,10 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
* when TLMM is powered on. To allow that, enable the GPIO
* summary line to be wakeup capable at GIC.
*/
- if (d->parent_data)
- irq_chip_set_wake_parent(d, on);
-
- irq_set_irq_wake(pctrl->irq, on);
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return irq_chip_set_wake_parent(d, on);

- return 0;
+ return irq_set_irq_wake(pctrl->irq, on);
}

static int msm_gpio_irq_reqres(struct irq_data *d)
@@ -1226,6 +1224,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
+ pctrl->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SET_TYPE_MASKED;

np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
if (np) {
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index b59180bff5a3e..ef61298c30bdd 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -116,6 +116,8 @@ static int get_lightbar_version(struct cros_ec_dev *ec,

param = (struct ec_params_lightbar *)msg->data;
param->cmd = LIGHTBAR_CMD_VERSION;
+ msg->outsize = sizeof(param->cmd);
+ msg->result = sizeof(resp->version);
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
ret = 0;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index c27548fd386ac..0d2ed6d1f9c79 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -319,15 +319,6 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
},
};

-static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = {
- {
- I2C_BOARD_INFO("24c32", 0x51),
- },
- {
- I2C_BOARD_INFO("24c32", 0x50),
- },
-};
-
static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
{
I2C_BOARD_INFO("dps460", 0x59),
@@ -752,15 +743,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
.label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0],
- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1],
- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};

diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 599a0f66a3845..a34d95ed70b20 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -277,6 +277,8 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(pwm->pwm_clk);
}

+ platform_set_drvdata(pdev, pwm);
+
pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -313,7 +315,6 @@ static int img_pwm_probe(struct platform_device *pdev)
goto err_suspend;
}

- platform_set_drvdata(pdev, pwm);
return 0;

err_suspend:
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 9d965ffe66d1e..da9bc3d10104a 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -93,10 +93,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
* The equation is:
* base_unit = round(base_unit_range * freq / c)
*/
- base_unit_range = BIT(lpwm->info->base_unit_bits) - 1;
+ base_unit_range = BIT(lpwm->info->base_unit_bits);
freq *= base_unit_range;

base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
+ /* base_unit must not be 0 and we also want to avoid overflowing it */
+ base_unit = clamp_val(base_unit, 1, base_unit_range - 1);

on_time_div = 255ULL * duty_ns;
do_div(on_time_div, period_ns);
@@ -104,8 +106,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,

orig_ctrl = ctrl = pwm_lpss_read(pwm);
ctrl &= ~PWM_ON_TIME_DIV_MASK;
- ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
- base_unit &= base_unit_range;
+ ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
ctrl |= on_time_div;

diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index eb8c9cb645a6c..098e94335cb5b 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -288,6 +288,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
const struct of_device_id *id;
struct rockchip_pwm_chip *pc;
struct resource *r;
+ u32 enable_conf, ctrl;
int ret, count;

id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
@@ -362,7 +363,9 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
}

/* Keep the PWM clk enabled if the PWM appears to be up and running. */
- if (!pwm_is_enabled(pc->chip.pwms))
+ enable_conf = pc->data->enable_conf;
+ ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
+ if ((ctrl & enable_conf) != enable_conf)
clk_disable(pc->clk);

return 0;
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 451608e960a18..152946e033d17 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -871,15 +871,16 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
rmcd_error("pin_user_pages_fast err=%ld",
pinned);
nr_pages = 0;
- } else
+ } else {
rmcd_error("pinned %ld out of %ld pages",
pinned, nr_pages);
+ /*
+ * Set nr_pages up to mean "how many pages to unpin, in
+ * the error handler:
+ */
+ nr_pages = pinned;
+ }
ret = -EFAULT;
- /*
- * Set nr_pages up to mean "how many pages to unpin, in
- * the error handler:
- */
- nr_pages = pinned;
goto err_pg;
}

@@ -1679,6 +1680,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
struct rio_dev *rdev;
struct rio_switch *rswitch = NULL;
struct rio_mport *mport;
+ struct device *dev;
size_t size;
u32 rval;
u32 swpinfo = 0;
@@ -1693,8 +1695,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
dev_info.comptag, dev_info.destid, dev_info.hopcount);

- if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
+ dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
+ if (dev) {
rmcd_debug(RDEV, "device %s already exists", dev_info.name);
+ put_device(dev);
return -EEXIST;
}

diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 569d9ad2c5942..6939aa5b3dc7f 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -553,20 +553,20 @@ static struct notifier_block cec_nb = {
.priority = MCE_PRIO_CEC,
};

-static void __init cec_init(void)
+static int __init cec_init(void)
{
if (ce_arr.disabled)
- return;
+ return -ENODEV;

ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL);
if (!ce_arr.array) {
pr_err("Error allocating CE array page!\n");
- return;
+ return -ENOMEM;
}

if (create_debugfs_nodes()) {
free_page((unsigned long)ce_arr.array);
- return;
+ return -ENOMEM;
}

INIT_DELAYED_WORK(&cec_work, cec_work_fn);
@@ -575,6 +575,7 @@ static void __init cec_init(void)
mce_register_decode_chain(&cec_nb);

pr_info("Correctable Errors collector initialized.\n");
+ return 0;
}
late_initcall(cec_init);

diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
|
|
index be8c709a74883..25e601bf9383e 100644
|
|
--- a/drivers/regulator/core.c
|
|
+++ b/drivers/regulator/core.c
|
|
@@ -5187,15 +5187,20 @@ regulator_register(const struct regulator_desc *regulator_desc,
|
|
else if (regulator_desc->supply_name)
|
|
rdev->supply_name = regulator_desc->supply_name;
|
|
|
|
- /*
|
|
- * Attempt to resolve the regulator supply, if specified,
|
|
- * but don't return an error if we fail because we will try
|
|
- * to resolve it again later as more regulators are added.
|
|
- */
|
|
- if (regulator_resolve_supply(rdev))
|
|
- rdev_dbg(rdev, "unable to resolve supply\n");
|
|
-
|
|
ret = set_machine_constraints(rdev, constraints);
|
|
+ if (ret == -EPROBE_DEFER) {
|
|
+ /* Regulator might be in bypass mode and so needs its supply
|
|
+ * to set the constraints */
|
|
+ /* FIXME: this currently triggers a chicken-and-egg problem
|
|
+ * when creating -SUPPLY symlink in sysfs to a regulator
|
|
+ * that is just being created */
|
|
+ ret = regulator_resolve_supply(rdev);
|
|
+ if (!ret)
|
|
+ ret = set_machine_constraints(rdev, constraints);
|
|
+ else
|
|
+ rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
|
|
+ ERR_PTR(ret));
|
|
+ }
|
|
if (ret < 0)
|
|
goto wash;
|
|
|
|
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
|
|
index 3d3d87210ef2c..58d1d7e571d66 100644
|
|
--- a/drivers/remoteproc/mtk_scp_ipi.c
|
|
+++ b/drivers/remoteproc/mtk_scp_ipi.c
|
|
@@ -30,10 +30,8 @@ int scp_ipi_register(struct mtk_scp *scp,
|
|
scp_ipi_handler_t handler,
|
|
void *priv)
|
|
{
|
|
- if (!scp) {
|
|
- dev_err(scp->dev, "scp device is not ready\n");
|
|
+ if (!scp)
|
|
return -EPROBE_DEFER;
|
|
- }
|
|
|
|
if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
|
|
return -EINVAL;
|
|
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
|
|
index 83f2b8804ee98..96a17ec291401 100644
|
|
--- a/drivers/rpmsg/mtk_rpmsg.c
|
|
+++ b/drivers/rpmsg/mtk_rpmsg.c
|
|
@@ -200,7 +200,6 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
|
|
struct rpmsg_device *rpdev;
|
|
struct mtk_rpmsg_device *mdev;
|
|
struct platform_device *pdev = mtk_subdev->pdev;
|
|
- int ret;
|
|
|
|
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
|
|
if (!mdev)
|
|
@@ -219,13 +218,7 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
|
|
rpdev->dev.parent = &pdev->dev;
|
|
rpdev->dev.release = mtk_rpmsg_release_device;
|
|
|
|
- ret = rpmsg_register_device(rpdev);
|
|
- if (ret) {
|
|
- kfree(mdev);
|
|
- return ret;
|
|
- }
|
|
-
|
|
- return 0;
|
|
+ return rpmsg_register_device(rpdev);
|
|
}
|
|
|
|
static void mtk_register_device_work_function(struct work_struct *register_work)
|
|
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
|
|
index 4abbeea782fa4..19903de6268db 100644
|
|
--- a/drivers/rpmsg/qcom_smd.c
|
|
+++ b/drivers/rpmsg/qcom_smd.c
|
|
@@ -1338,7 +1338,7 @@ static int qcom_smd_parse_edge(struct device *dev,
|
|
ret = of_property_read_u32(node, key, &edge->edge_id);
|
|
if (ret) {
|
|
dev_err(dev, "edge missing %s property\n", key);
|
|
- return -EINVAL;
|
|
+ goto put_node;
|
|
}
|
|
|
|
edge->remote_pid = QCOM_SMEM_HOST_ANY;
|
|
@@ -1349,32 +1349,37 @@ static int qcom_smd_parse_edge(struct device *dev,
|
|
edge->mbox_client.knows_txdone = true;
|
|
edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
|
|
if (IS_ERR(edge->mbox_chan)) {
|
|
- if (PTR_ERR(edge->mbox_chan) != -ENODEV)
|
|
- return PTR_ERR(edge->mbox_chan);
|
|
+ if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
|
|
+ ret = PTR_ERR(edge->mbox_chan);
|
|
+ goto put_node;
|
|
+ }
|
|
|
|
edge->mbox_chan = NULL;
|
|
|
|
syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
|
|
if (!syscon_np) {
|
|
dev_err(dev, "no qcom,ipc node\n");
|
|
- return -ENODEV;
|
|
+ ret = -ENODEV;
|
|
+ goto put_node;
|
|
}
|
|
|
|
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
|
|
- if (IS_ERR(edge->ipc_regmap))
|
|
- return PTR_ERR(edge->ipc_regmap);
|
|
+ if (IS_ERR(edge->ipc_regmap)) {
|
|
+ ret = PTR_ERR(edge->ipc_regmap);
|
|
+ goto put_node;
|
|
+ }
|
|
|
|
key = "qcom,ipc";
|
|
ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
|
|
if (ret < 0) {
|
|
dev_err(dev, "no offset in %s\n", key);
|
|
- return -EINVAL;
|
|
+ goto put_node;
|
|
}
|
|
|
|
ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
|
|
if (ret < 0) {
|
|
dev_err(dev, "no bit in %s\n", key);
|
|
- return -EINVAL;
|
|
+ goto put_node;
|
|
}
|
|
}
|
|
|
|
@@ -1385,7 +1390,8 @@ static int qcom_smd_parse_edge(struct device *dev,
|
|
irq = irq_of_parse_and_map(node, 0);
|
|
if (irq < 0) {
|
|
dev_err(dev, "required smd interrupt missing\n");
|
|
- return -EINVAL;
|
|
+ ret = irq;
|
|
+ goto put_node;
|
|
}
|
|
|
|
ret = devm_request_irq(dev, irq,
|
|
@@ -1393,12 +1399,18 @@ static int qcom_smd_parse_edge(struct device *dev,
|
|
node->name, edge);
|
|
if (ret) {
|
|
dev_err(dev, "failed to request smd irq\n");
|
|
- return ret;
|
|
+ goto put_node;
|
|
}
|
|
|
|
edge->irq = irq;
|
|
|
|
return 0;
|
|
+
|
|
+put_node:
|
|
+ of_node_put(node);
|
|
+ edge->of_node = NULL;
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
|
|
index 49702942bb086..70b198423deba 100644
|
|
--- a/drivers/rtc/rtc-ds1307.c
|
|
+++ b/drivers/rtc/rtc-ds1307.c
|
|
@@ -352,6 +352,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
|
|
regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG,
|
|
DS1340_BIT_OSF, 0);
|
|
break;
|
|
+ case ds_1388:
|
|
+ regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
|
|
+ DS1388_BIT_OSF, 0);
|
|
+ break;
|
|
case mcp794xx:
|
|
/*
|
|
* these bits were cleared when preparing the date/time
|
|
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
|
|
index 51ea56b73a97d..4e30047d76c46 100644
|
|
--- a/drivers/s390/net/qeth_core.h
|
|
+++ b/drivers/s390/net/qeth_core.h
|
|
@@ -680,6 +680,11 @@ struct qeth_card_blkt {
|
|
int inter_packet_jumbo;
|
|
};
|
|
|
|
+enum qeth_pnso_mode {
|
|
+ QETH_PNSO_NONE,
|
|
+ QETH_PNSO_BRIDGEPORT,
|
|
+};
|
|
+
|
|
#define QETH_BROADCAST_WITH_ECHO 0x01
|
|
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
|
|
struct qeth_card_info {
|
|
@@ -696,6 +701,7 @@ struct qeth_card_info {
|
|
/* no bitfield, we take a pointer on these two: */
|
|
u8 has_lp2lp_cso_v6;
|
|
u8 has_lp2lp_cso_v4;
|
|
+ enum qeth_pnso_mode pnso_mode;
|
|
enum qeth_card_types type;
|
|
enum qeth_link_types link_type;
|
|
int broadcast_capable;
|
|
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
|
|
index b4e06aeb6dc1c..7c6f6a09b99e4 100644
|
|
--- a/drivers/s390/net/qeth_l2_main.c
|
|
+++ b/drivers/s390/net/qeth_l2_main.c
|
|
@@ -273,6 +273,17 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
|
|
return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
|
|
}
|
|
|
|
+static void qeth_l2_set_pnso_mode(struct qeth_card *card,
|
|
+ enum qeth_pnso_mode mode)
|
|
+{
|
|
+ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
|
|
+ WRITE_ONCE(card->info.pnso_mode, mode);
|
|
+ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
|
|
+
|
|
+ if (mode == QETH_PNSO_NONE)
|
|
+ drain_workqueue(card->event_wq);
|
|
+}
|
|
+
|
|
static void qeth_l2_stop_card(struct qeth_card *card)
|
|
{
|
|
QETH_CARD_TEXT(card, 2, "stopcard");
|
|
@@ -291,7 +302,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
|
|
|
|
qeth_qdio_clear_card(card, 0);
|
|
qeth_clear_working_pool_list(card);
|
|
- flush_workqueue(card->event_wq);
|
|
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
|
|
qeth_flush_local_addrs(card);
|
|
card->info.promisc_mode = 0;
|
|
}
|
|
@@ -1111,12 +1122,6 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
|
|
NULL
|
|
};
|
|
|
|
- /* Role should not change by itself, but if it did, */
|
|
- /* information from the hardware is authoritative. */
|
|
- mutex_lock(&data->card->sbp_lock);
|
|
- data->card->options.sbp.role = entry->role;
|
|
- mutex_unlock(&data->card->sbp_lock);
|
|
-
|
|
snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
|
|
snprintf(env_role, sizeof(env_role), "ROLE=%s",
|
|
(entry->role == QETH_SBP_ROLE_NONE) ? "none" :
|
|
@@ -1165,19 +1170,34 @@ static void qeth_bridge_state_change(struct qeth_card *card,
|
|
}
|
|
|
|
struct qeth_addr_change_data {
|
|
- struct work_struct worker;
|
|
+ struct delayed_work dwork;
|
|
struct qeth_card *card;
|
|
struct qeth_ipacmd_addr_change ac_event;
|
|
};
|
|
|
|
static void qeth_addr_change_event_worker(struct work_struct *work)
|
|
{
|
|
- struct qeth_addr_change_data *data =
|
|
- container_of(work, struct qeth_addr_change_data, worker);
|
|
+ struct delayed_work *dwork = to_delayed_work(work);
|
|
+ struct qeth_addr_change_data *data;
|
|
+ struct qeth_card *card;
|
|
int i;
|
|
|
|
+ data = container_of(dwork, struct qeth_addr_change_data, dwork);
|
|
+ card = data->card;
|
|
+
|
|
QETH_CARD_TEXT(data->card, 4, "adrchgew");
|
|
+
|
|
+ if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
|
|
+ goto free;
|
|
+
|
|
if (data->ac_event.lost_event_mask) {
|
|
+ /* Potential re-config in progress, try again later: */
|
|
+ if (!mutex_trylock(&card->sbp_lock)) {
|
|
+ queue_delayed_work(card->event_wq, dwork,
|
|
+ msecs_to_jiffies(100));
|
|
+ return;
|
|
+ }
|
|
+
|
|
dev_info(&data->card->gdev->dev,
|
|
"Address change notification stopped on %s (%s)\n",
|
|
data->card->dev->name,
|
|
@@ -1186,8 +1206,9 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
|
|
: (data->ac_event.lost_event_mask == 0x02)
|
|
? "Bridge port state change"
|
|
: "Unknown reason");
|
|
- mutex_lock(&data->card->sbp_lock);
|
|
+
|
|
data->card->options.sbp.hostnotification = 0;
|
|
+ card->info.pnso_mode = QETH_PNSO_NONE;
|
|
mutex_unlock(&data->card->sbp_lock);
|
|
qeth_bridge_emit_host_event(data->card, anev_abort,
|
|
0, NULL, NULL);
|
|
@@ -1201,6 +1222,8 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
|
|
&entry->token,
|
|
&entry->addr_lnid);
|
|
}
|
|
+
|
|
+free:
|
|
kfree(data);
|
|
}
|
|
|
|
@@ -1212,6 +1235,9 @@ static void qeth_addr_change_event(struct qeth_card *card,
|
|
struct qeth_addr_change_data *data;
|
|
int extrasize;
|
|
|
|
+ if (card->info.pnso_mode == QETH_PNSO_NONE)
|
|
+ return;
|
|
+
|
|
QETH_CARD_TEXT(card, 4, "adrchgev");
|
|
if (cmd->hdr.return_code != 0x0000) {
|
|
if (cmd->hdr.return_code == 0x0010) {
|
|
@@ -1231,11 +1257,11 @@ static void qeth_addr_change_event(struct qeth_card *card,
|
|
QETH_CARD_TEXT(card, 2, "ACNalloc");
|
|
return;
|
|
}
|
|
- INIT_WORK(&data->worker, qeth_addr_change_event_worker);
|
|
+ INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
|
|
data->card = card;
|
|
memcpy(&data->ac_event, hostevs,
|
|
sizeof(struct qeth_ipacmd_addr_change) + extrasize);
|
|
- queue_work(card->event_wq, &data->worker);
|
|
+ queue_delayed_work(card->event_wq, &data->dwork, 0);
|
|
}
|
|
|
|
/* SETBRIDGEPORT support; sending commands */
|
|
@@ -1556,9 +1582,14 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
|
|
|
|
if (enable) {
|
|
qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
|
|
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
|
|
rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
|
|
- } else
|
|
+ if (rc)
|
|
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
|
|
+ } else {
|
|
rc = qeth_l2_pnso(card, 0, NULL, NULL);
|
|
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
|
|
+ }
|
|
return rc;
|
|
}
|
|
|
|
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
|
|
index 86bcae992f725..4695d25e54f24 100644
|
|
--- a/drivers/s390/net/qeth_l2_sys.c
|
|
+++ b/drivers/s390/net/qeth_l2_sys.c
|
|
@@ -157,6 +157,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
|
|
rc = -EBUSY;
|
|
else if (qeth_card_hw_is_reachable(card)) {
|
|
rc = qeth_bridgeport_an_set(card, enable);
|
|
+ /* sbp_lock ensures ordering vs notifications-stopped events */
|
|
if (!rc)
|
|
card->options.sbp.hostnotification = enable;
|
|
} else
|
|
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
|
|
index 9b81cfbbc5c53..239e04c03cf90 100644
|
|
--- a/drivers/scsi/be2iscsi/be_main.c
|
|
+++ b/drivers/scsi/be2iscsi/be_main.c
|
|
@@ -3020,6 +3020,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
|
|
goto create_eq_error;
|
|
}
|
|
|
|
+ mem->dma = paddr;
|
|
mem->va = eq_vaddress;
|
|
ret = be_fill_queue(eq, phba->params.num_eq_entries,
|
|
sizeof(struct be_eq_entry), eq_vaddress);
|
|
@@ -3029,7 +3030,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
|
|
goto create_eq_error;
|
|
}
|
|
|
|
- mem->dma = paddr;
|
|
ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
|
|
BEISCSI_EQ_DELAY_DEF);
|
|
if (ret) {
|
|
@@ -3086,6 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
|
|
goto create_cq_error;
|
|
}
|
|
|
|
+ mem->dma = paddr;
|
|
ret = be_fill_queue(cq, phba->params.num_cq_entries,
|
|
sizeof(struct sol_cqe), cq_vaddress);
|
|
if (ret) {
|
|
@@ -3095,7 +3096,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
|
|
goto create_cq_error;
|
|
}
|
|
|
|
- mem->dma = paddr;
|
|
ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
|
|
false, 0);
|
|
if (ret) {
|
|
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
|
|
index bc5d84f87d8fc..440ef32be048f 100644
|
|
--- a/drivers/scsi/bfa/bfad.c
|
|
+++ b/drivers/scsi/bfa/bfad.c
|
|
@@ -749,6 +749,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
|
|
|
|
if (bfad->pci_bar0_kva == NULL) {
|
|
printk(KERN_ERR "Fail to map bar0\n");
|
|
+ rc = -ENODEV;
|
|
goto out_release_region;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
|
|
index 950f9cdf0577f..5d0f42031d121 100644
|
|
--- a/drivers/scsi/csiostor/csio_hw.c
|
|
+++ b/drivers/scsi/csiostor/csio_hw.c
|
|
@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
|
|
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
|
|
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
|
|
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
|
|
- ret = EINVAL;
|
|
+ ret = -EINVAL;
|
|
goto bye;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
|
|
index 635f6f9cffc40..ef91f3d01f989 100644
|
|
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
|
|
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
|
|
@@ -4928,6 +4928,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
|
|
if (IS_ERR(vhost->work_thread)) {
|
|
dev_err(dev, "Couldn't create kernel thread: %ld\n",
|
|
PTR_ERR(vhost->work_thread));
|
|
+ rc = PTR_ERR(vhost->work_thread);
|
|
goto free_host_mem;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
index a85c9672c6ea3..a67749c8f4ab3 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
|
|
@@ -1808,18 +1808,22 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
|
|
/* TMs are on msix_index == 0 */
|
|
if (reply_q->msix_index == 0)
|
|
continue;
|
|
+ synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
|
|
if (reply_q->irq_poll_scheduled) {
|
|
/* Calling irq_poll_disable will wait for any pending
|
|
* callbacks to have completed.
|
|
*/
|
|
irq_poll_disable(&reply_q->irqpoll);
|
|
irq_poll_enable(&reply_q->irqpoll);
|
|
- reply_q->irq_poll_scheduled = false;
|
|
- reply_q->irq_line_enable = true;
|
|
- enable_irq(reply_q->os_irq);
|
|
- continue;
|
|
+ /* check how the scheduled poll has ended,
|
|
+ * clean up only if necessary
|
|
+ */
|
|
+ if (reply_q->irq_poll_scheduled) {
|
|
+ reply_q->irq_poll_scheduled = false;
|
|
+ reply_q->irq_line_enable = true;
|
|
+ enable_irq(reply_q->os_irq);
|
|
+ }
|
|
}
|
|
- synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
|
|
index 8906aceda4c43..0354898d7cac1 100644
|
|
--- a/drivers/scsi/mvumi.c
|
|
+++ b/drivers/scsi/mvumi.c
|
|
@@ -2425,6 +2425,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
|
|
if (IS_ERR(mhba->dm_thread)) {
|
|
dev_err(&mhba->pdev->dev,
|
|
"failed to create device scan thread\n");
|
|
+ ret = PTR_ERR(mhba->dm_thread);
|
|
mutex_unlock(&mhba->sas_discovery_mutex);
|
|
goto fail_create_thread;
|
|
}
|
|
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
|
|
index 51cfab9d1afdc..ed3054fffa344 100644
|
|
--- a/drivers/scsi/qedf/qedf_main.c
|
|
+++ b/drivers/scsi/qedf/qedf_main.c
|
|
@@ -704,7 +704,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
|
|
rdata = fcport->rdata;
|
|
if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
|
|
QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
|
|
- rc = 1;
|
|
+ rc = SUCCESS;
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
|
|
index 946cebc4c9322..90aa64604ad78 100644
|
|
--- a/drivers/scsi/qedi/qedi_fw.c
|
|
+++ b/drivers/scsi/qedi/qedi_fw.c
|
|
@@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
|
|
"Freeing tid=0x%x for cid=0x%x\n",
|
|
cmd->task_id, qedi_conn->iscsi_conn_id);
|
|
|
|
+ spin_lock(&qedi_conn->list_lock);
|
|
if (likely(cmd->io_cmd_in_list)) {
|
|
cmd->io_cmd_in_list = false;
|
|
list_del_init(&cmd->io_cmd);
|
|
@@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
|
|
cmd->task_id, qedi_conn->iscsi_conn_id,
|
|
&cmd->io_cmd);
|
|
}
|
|
+ spin_unlock(&qedi_conn->list_lock);
|
|
|
|
cmd->state = RESPONSE_RECEIVED;
|
|
qedi_clear_task_idx(qedi, cmd->task_id);
|
|
@@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
|
|
"Freeing tid=0x%x for cid=0x%x\n",
|
|
cmd->task_id, qedi_conn->iscsi_conn_id);
|
|
|
|
+ spin_lock(&qedi_conn->list_lock);
|
|
if (likely(cmd->io_cmd_in_list)) {
|
|
cmd->io_cmd_in_list = false;
|
|
list_del_init(&cmd->io_cmd);
|
|
@@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
|
|
cmd->task_id, qedi_conn->iscsi_conn_id,
|
|
&cmd->io_cmd);
|
|
}
|
|
+ spin_unlock(&qedi_conn->list_lock);
|
|
|
|
cmd->state = RESPONSE_RECEIVED;
|
|
qedi_clear_task_idx(qedi, cmd->task_id);
|
|
@@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
|
|
|
|
tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
|
|
|
|
+ spin_lock(&qedi_conn->list_lock);
|
|
if (likely(qedi_cmd->io_cmd_in_list)) {
|
|
qedi_cmd->io_cmd_in_list = false;
|
|
list_del_init(&qedi_cmd->io_cmd);
|
|
qedi_conn->active_cmd_count--;
|
|
}
|
|
+ spin_unlock(&qedi_conn->list_lock);
|
|
|
|
if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
|
|
ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
|
|
@@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
|
|
ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
|
|
qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
|
|
|
|
+ spin_lock(&qedi_conn->list_lock);
|
|
if (likely(cmd->io_cmd_in_list)) {
|
|
cmd->io_cmd_in_list = false;
|
|
list_del_init(&cmd->io_cmd);
|
|
qedi_conn->active_cmd_count--;
|
|
}
|
|
+ spin_unlock(&qedi_conn->list_lock);
|
|
|
|
memset(task_ctx, '\0', sizeof(*task_ctx));
|
|
|
|
@@ -817,8 +825,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
|
|
qedi_clear_task_idx(qedi_conn->qedi, rtid);
|
|
|
|
spin_lock(&qedi_conn->list_lock);
|
|
- list_del_init(&dbg_cmd->io_cmd);
|
|
- qedi_conn->active_cmd_count--;
|
|
+ if (likely(dbg_cmd->io_cmd_in_list)) {
|
|
+ dbg_cmd->io_cmd_in_list = false;
|
|
+ list_del_init(&dbg_cmd->io_cmd);
|
|
+ qedi_conn->active_cmd_count--;
|
|
+ }
|
|
spin_unlock(&qedi_conn->list_lock);
|
|
qedi_cmd->state = CLEANUP_RECV;
|
|
wake_up_interruptible(&qedi_conn->wait_queue);
|
|
@@ -1236,6 +1247,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
|
|
qedi_conn->cmd_cleanup_req++;
|
|
qedi_iscsi_cleanup_task(ctask, true);
|
|
|
|
+ cmd->io_cmd_in_list = false;
|
|
list_del_init(&cmd->io_cmd);
|
|
qedi_conn->active_cmd_count--;
|
|
QEDI_WARN(&qedi->dbg_ctx,
|
|
@@ -1447,8 +1459,11 @@ ldel_exit:
|
|
spin_unlock_bh(&qedi_conn->tmf_work_lock);
|
|
|
|
spin_lock(&qedi_conn->list_lock);
|
|
- list_del_init(&cmd->io_cmd);
|
|
- qedi_conn->active_cmd_count--;
|
|
+ if (likely(cmd->io_cmd_in_list)) {
|
|
+ cmd->io_cmd_in_list = false;
|
|
+ list_del_init(&cmd->io_cmd);
|
|
+ qedi_conn->active_cmd_count--;
|
|
+ }
|
|
spin_unlock(&qedi_conn->list_lock);
|
|
|
|
clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
|
|
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
|
|
index 425e665ec08b2..6e92625df4b7c 100644
|
|
--- a/drivers/scsi/qedi/qedi_iscsi.c
|
|
+++ b/drivers/scsi/qedi/qedi_iscsi.c
|
|
@@ -975,11 +975,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
|
|
{
|
|
struct qedi_cmd *cmd, *cmd_tmp;
|
|
|
|
+ spin_lock(&qedi_conn->list_lock);
|
|
list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
|
|
io_cmd) {
|
|
list_del_init(&cmd->io_cmd);
|
|
qedi_conn->active_cmd_count--;
|
|
}
|
|
+ spin_unlock(&qedi_conn->list_lock);
|
|
}
|
|
|
|
static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
|
|
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
|
|
index 81a307695cc91..569fa4b28e4e2 100644
|
|
--- a/drivers/scsi/qedi/qedi_main.c
|
|
+++ b/drivers/scsi/qedi/qedi_main.c
|
|
@@ -1127,6 +1127,15 @@ static void qedi_schedule_recovery_handler(void *dev)
|
|
schedule_delayed_work(&qedi->recovery_work, 0);
|
|
}
|
|
|
|
+static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session)
|
|
+{
|
|
+ struct iscsi_session *session = cls_session->dd_data;
|
|
+ struct iscsi_conn *conn = session->leadconn;
|
|
+ struct qedi_conn *qedi_conn = conn->dd_data;
|
|
+
|
|
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
|
|
+}
|
|
+
|
|
static void qedi_link_update(void *dev, struct qed_link_output *link)
|
|
{
|
|
struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
|
|
@@ -1138,6 +1147,7 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
|
|
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
|
|
"Link Down event.\n");
|
|
atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
|
|
+ iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
|
|
index 2861c636dd651..f17ab22ad0e4a 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_init.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_init.c
|
|
@@ -63,6 +63,16 @@ void qla2x00_sp_free(srb_t *sp)
|
|
qla2x00_rel_sp(sp);
|
|
}
|
|
|
|
+void qla2xxx_rel_done_warning(srb_t *sp, int res)
|
|
+{
|
|
+ WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
|
|
+}
|
|
+
|
|
+void qla2xxx_rel_free_warning(srb_t *sp)
|
|
+{
|
|
+ WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
|
|
+}
|
|
+
|
|
/* Asynchronous Login/Logout Routines -------------------------------------- */
|
|
|
|
unsigned long
|
|
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
|
|
index 1fb6ccac07ccd..26d9c78d4c52c 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_inline.h
|
|
+++ b/drivers/scsi/qla2xxx/qla_inline.h
|
|
@@ -207,10 +207,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
|
|
return sp;
|
|
}
|
|
|
|
+void qla2xxx_rel_done_warning(srb_t *sp, int res);
|
|
+void qla2xxx_rel_free_warning(srb_t *sp);
|
|
+
|
|
static inline void
|
|
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
|
|
{
|
|
sp->qpair = NULL;
|
|
+ sp->done = qla2xxx_rel_done_warning;
|
|
+ sp->free = qla2xxx_rel_free_warning;
|
|
mempool_free(sp, qpair->srb_mempool);
|
|
QLA_QPAIR_MARK_NOT_BUSY(qpair);
|
|
}
|
|
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
|
|
index fdb2ce7acb912..9f5d3aa1d8745 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_mbx.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
|
|
@@ -4908,7 +4908,7 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
|
|
"Done %s.\n", __func__);
|
|
}
|
|
|
|
- dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
|
|
+ dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
|
|
els_cmd_map, els_cmd_map_dma);
|
|
|
|
return rval;
|
|
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
|
|
index 262dfd7635a48..7b14fd1cb0309 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_nvme.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
|
|
@@ -683,7 +683,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
|
|
struct nvme_fc_port_template *tmpl;
|
|
struct qla_hw_data *ha;
|
|
struct nvme_fc_port_info pinfo;
|
|
- int ret = EINVAL;
|
|
+ int ret = -EINVAL;
|
|
|
|
if (!IS_ENABLED(CONFIG_NVME_FC))
|
|
return ret;
|
|
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
|
|
index 90289162dbd4c..a034e9caa2997 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_target.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_target.c
|
|
@@ -5668,7 +5668,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
|
|
/* found existing exchange */
|
|
qpair->retry_term_cnt++;
|
|
if (qpair->retry_term_cnt >= 5) {
|
|
- rc = EIO;
|
|
+ rc = -EIO;
|
|
qpair->retry_term_cnt = 0;
|
|
ql_log(ql_log_warn, vha, 0xffff,
|
|
"Unable to send ABTS Respond. Dumping firmware.\n");
|
|
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
|
|
index 5dc697ce8b5dd..4a6b15dc36aaf 100644
|
|
--- a/drivers/scsi/qla4xxx/ql4_os.c
|
|
+++ b/drivers/scsi/qla4xxx/ql4_os.c
|
|
@@ -1220,7 +1220,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
|
|
le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
|
|
exit_host_stats:
|
|
if (ql_iscsi_stats)
|
|
- dma_free_coherent(&ha->pdev->dev, host_stats_size,
|
|
+ dma_free_coherent(&ha->pdev->dev, stats_size,
|
|
ql_iscsi_stats, iscsi_stats_dma);
|
|
|
|
ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
|
|
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
|
|
index 1129fe7a27edd..ee069a8b442a7 100644
|
|
--- a/drivers/scsi/smartpqi/smartpqi.h
|
|
+++ b/drivers/scsi/smartpqi/smartpqi.h
|
|
@@ -359,7 +359,7 @@ struct pqi_event_response {
|
|
struct pqi_iu_header header;
|
|
u8 event_type;
|
|
u8 reserved2 : 7;
|
|
- u8 request_acknowlege : 1;
|
|
+ u8 request_acknowledge : 1;
|
|
__le16 event_id;
|
|
__le32 additional_event_id;
|
|
union {
|
|
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
|
|
index cd157f11eb222..10afbaaa4a82f 100644
|
|
--- a/drivers/scsi/smartpqi/smartpqi_init.c
|
|
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
|
|
@@ -542,8 +542,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
|
|
put_unaligned_be16(cdb_length, &cdb[7]);
|
|
break;
|
|
default:
|
|
- dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
|
|
- cmd);
|
|
+ dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
|
|
break;
|
|
}
|
|
|
|
@@ -2462,7 +2461,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
|
|
offload_to_mirror =
|
|
(offload_to_mirror >= layout_map_count - 1) ?
|
|
0 : offload_to_mirror + 1;
|
|
- WARN_ON(offload_to_mirror >= layout_map_count);
|
|
device->offload_to_mirror = offload_to_mirror;
|
|
/*
|
|
* Avoid direct use of device->offload_to_mirror within this
|
|
@@ -2915,10 +2913,14 @@ static int pqi_interpret_task_management_response(
|
|
return rc;
|
|
}
|
|
|
|
-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
|
|
- struct pqi_queue_group *queue_group)
|
|
+static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
|
|
+{
|
|
+ pqi_take_ctrl_offline(ctrl_info);
|
|
+}
|
|
+
|
|
+static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
|
|
{
|
|
- unsigned int num_responses;
|
|
+ int num_responses;
|
|
pqi_index_t oq_pi;
|
|
pqi_index_t oq_ci;
|
|
struct pqi_io_request *io_request;
|
|
@@ -2930,6 +2932,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
|
|
|
|
while (1) {
|
|
oq_pi = readl(queue_group->oq_pi);
|
|
+ if (oq_pi >= ctrl_info->num_elements_per_oq) {
|
|
+ pqi_invalid_response(ctrl_info);
|
|
+ dev_err(&ctrl_info->pci_dev->dev,
|
|
+ "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
|
|
+ oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
|
|
+ return -1;
|
|
+ }
|
|
if (oq_pi == oq_ci)
|
|
break;
|
|
|
|
@@ -2938,10 +2947,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
|
|
(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
|
|
|
|
request_id = get_unaligned_le16(&response->request_id);
|
|
- WARN_ON(request_id >= ctrl_info->max_io_slots);
|
|
+ if (request_id >= ctrl_info->max_io_slots) {
|
|
+ pqi_invalid_response(ctrl_info);
|
|
+ dev_err(&ctrl_info->pci_dev->dev,
|
|
+ "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
|
|
+ request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
|
|
+ return -1;
|
|
+ }
|
|
|
|
io_request = &ctrl_info->io_request_pool[request_id];
|
|
- WARN_ON(atomic_read(&io_request->refcount) == 0);
|
|
+ if (atomic_read(&io_request->refcount) == 0) {
|
|
+ pqi_invalid_response(ctrl_info);
|
|
+ dev_err(&ctrl_info->pci_dev->dev,
|
|
+ "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
|
|
+ request_id, oq_pi, oq_ci);
|
|
+ return -1;
|
|
+ }
|
|
|
|
switch (response->header.iu_type) {
|
|
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
|
|
@@ -2971,24 +2992,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
|
|
io_request->error_info = ctrl_info->error_buffer +
|
|
(get_unaligned_le16(&response->error_index) *
|
|
PQI_ERROR_BUFFER_ELEMENT_LENGTH);
|
|
- pqi_process_io_error(response->header.iu_type,
|
|
- io_request);
|
|
+ pqi_process_io_error(response->header.iu_type, io_request);
|
|
break;
|
|
default:
|
|
+ pqi_invalid_response(ctrl_info);
|
|
dev_err(&ctrl_info->pci_dev->dev,
|
|
- "unexpected IU type: 0x%x\n",
|
|
- response->header.iu_type);
|
|
- break;
|
|
+ "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
|
|
+ response->header.iu_type, oq_pi, oq_ci);
|
|
+ return -1;
|
|
}
|
|
|
|
- io_request->io_complete_callback(io_request,
|
|
- io_request->context);
|
|
+ io_request->io_complete_callback(io_request, io_request->context);
|
|
|
|
/*
|
|
* Note that the I/O request structure CANNOT BE TOUCHED after
|
|
* returning from the I/O completion callback!
|
|
*/
|
|
-
|
|
oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
|
|
}
|
|
|
|
@@ -3301,9 +3320,9 @@ static void pqi_ofa_capture_event_payload(struct pqi_event *event,
|
|
}
|
|
}
|
|
|
|
-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
|
|
+static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
|
|
{
|
|
- unsigned int num_events;
|
|
+ int num_events;
|
|
pqi_index_t oq_pi;
|
|
pqi_index_t oq_ci;
|
|
struct pqi_event_queue *event_queue;
|
|
@@ -3317,26 +3336,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
|
|
|
|
while (1) {
|
|
oq_pi = readl(event_queue->oq_pi);
|
|
+ if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
|
|
+ pqi_invalid_response(ctrl_info);
|
|
+ dev_err(&ctrl_info->pci_dev->dev,
|
|
+ "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
|
|
+ oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
if (oq_pi == oq_ci)
|
|
break;
|
|
|
|
num_events++;
|
|
- response = event_queue->oq_element_array +
|
|
- (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
|
|
+ response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
|
|
|
|
event_index =
|
|
pqi_event_type_to_event_index(response->event_type);
|
|
|
|
- if (event_index >= 0) {
|
|
- if (response->request_acknowlege) {
|
|
- event = &ctrl_info->events[event_index];
|
|
- event->pending = true;
|
|
- event->event_type = response->event_type;
|
|
- event->event_id = response->event_id;
|
|
- event->additional_event_id =
|
|
- response->additional_event_id;
|
|
+ if (event_index >= 0 && response->request_acknowledge) {
|
|
+ event = &ctrl_info->events[event_index];
|
|
+ event->pending = true;
|
|
+ event->event_type = response->event_type;
|
|
+ event->event_id = response->event_id;
|
|
+ event->additional_event_id = response->additional_event_id;
|
|
+ if (event->event_type == PQI_EVENT_TYPE_OFA)
|
|
pqi_ofa_capture_event_payload(event, response);
|
|
- }
|
|
}
|
|
|
|
oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
|
|
@@ -3451,7 +3475,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
|
|
{
|
|
struct pqi_ctrl_info *ctrl_info;
|
|
struct pqi_queue_group *queue_group;
|
|
- unsigned int num_responses_handled;
|
|
+ int num_io_responses_handled;
|
|
+ int num_events_handled;
|
|
|
|
queue_group = data;
|
|
ctrl_info = queue_group->ctrl_info;
|
|
@@ -3459,17 +3484,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
|
|
if (!pqi_is_valid_irq(ctrl_info))
|
|
return IRQ_NONE;
|
|
|
|
- num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
|
|
+ num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
|
|
+ if (num_io_responses_handled < 0)
|
|
+ goto out;
|
|
|
|
- if (irq == ctrl_info->event_irq)
|
|
- num_responses_handled += pqi_process_event_intr(ctrl_info);
|
|
+ if (irq == ctrl_info->event_irq) {
|
|
+ num_events_handled = pqi_process_event_intr(ctrl_info);
|
|
+ if (num_events_handled < 0)
|
|
+ goto out;
|
|
+ } else {
|
|
+ num_events_handled = 0;
|
|
+ }
|
|
|
|
- if (num_responses_handled)
|
|
+ if (num_io_responses_handled + num_events_handled > 0)
|
|
atomic_inc(&ctrl_info->num_interrupts);
|
|
|
|
pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
|
|
pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
|
|
|
|
+out:
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
|
|
index d56ce8d97d4e8..7ad127f213977 100644
|
|
--- a/drivers/scsi/ufs/ufs-mediatek.c
|
|
+++ b/drivers/scsi/ufs/ufs-mediatek.c
|
|
@@ -585,13 +585,7 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
|
|
|
|
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
|
|
{
|
|
- struct ufs_dev_info *dev_info = &hba->dev_info;
|
|
- u16 mid = dev_info->wmanufacturerid;
|
|
-
|
|
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
|
|
-
|
|
- if (mid == UFS_VENDOR_SAMSUNG)
|
|
- hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
|
|
index 2e6ddb5cdfc23..7da27eed1fe7b 100644
|
|
--- a/drivers/scsi/ufs/ufs-qcom.c
|
|
+++ b/drivers/scsi/ufs/ufs-qcom.c
|
|
@@ -1604,9 +1604,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
|
|
*/
|
|
}
|
|
mask <<= offset;
|
|
-
|
|
- pm_runtime_get_sync(host->hba->dev);
|
|
- ufshcd_hold(host->hba, false);
|
|
ufshcd_rmwl(host->hba, TEST_BUS_SEL,
|
|
(u32)host->testbus.select_major << 19,
|
|
REG_UFS_CFG1);
|
|
@@ -1619,8 +1616,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
|
|
* committed before returning.
|
|
*/
|
|
mb();
|
|
- ufshcd_release(host->hba);
|
|
- pm_runtime_put_sync(host->hba->dev);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
|
|
index 8bc8e4e62c045..e5f75b2e07e2c 100644
|
|
--- a/drivers/scsi/ufs/ufshcd.c
|
|
+++ b/drivers/scsi/ufs/ufshcd.c
|
|
@@ -484,6 +484,9 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
|
|
|
|
prdt_length = le16_to_cpu(
|
|
lrbp->utr_descriptor_ptr->prd_table_length);
|
|
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
|
|
+ prdt_length /= sizeof(struct ufshcd_sg_entry);
|
|
+
|
|
dev_err(hba->dev,
|
|
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
|
|
tag, prdt_length,
|
|
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
|
|
index ae1e248a8fb8a..1d2bc181da050 100644
|
|
--- a/drivers/slimbus/core.c
|
|
+++ b/drivers/slimbus/core.c
|
|
@@ -301,8 +301,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
|
|
{
|
|
/* Remove all clients */
|
|
device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
|
|
- /* Enter Clock Pause */
|
|
- slim_ctrl_clk_pause(ctrl, false, 0);
|
|
ida_simple_remove(&ctrl_ida, ctrl->id);
|
|
|
|
return 0;
|
|
@@ -326,8 +324,8 @@ void slim_report_absent(struct slim_device *sbdev)
|
|
mutex_lock(&ctrl->lock);
|
|
sbdev->is_laddr_valid = false;
|
|
mutex_unlock(&ctrl->lock);
|
|
-
|
|
- ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
|
|
+ if (!ctrl->get_laddr)
|
|
+ ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
|
|
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
|
|
}
|
|
EXPORT_SYMBOL_GPL(slim_report_absent);
|
|
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
|
|
index 743ee7b4e63f2..218aefc3531cd 100644
|
|
--- a/drivers/slimbus/qcom-ngd-ctrl.c
|
|
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
|
|
@@ -1277,9 +1277,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
|
|
{
|
|
struct qcom_slim_ngd_qmi *qmi =
|
|
container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
|
|
+ struct qcom_slim_ngd_ctrl *ctrl =
|
|
+ container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
|
|
|
|
qmi->svc_info.sq_node = 0;
|
|
qmi->svc_info.sq_port = 0;
|
|
+
|
|
+ qcom_slim_ngd_enable(ctrl, false);
|
|
}
|
|
|
|
static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
|
|
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
|
|
index f4fb527d83018..c5dd026fe889f 100644
|
|
--- a/drivers/soc/fsl/qbman/bman.c
|
|
+++ b/drivers/soc/fsl/qbman/bman.c
|
|
@@ -660,7 +660,7 @@ int bm_shutdown_pool(u32 bpid)
|
|
}
|
|
done:
|
|
put_affine_portal();
|
|
- return 0;
|
|
+ return err;
|
|
}
|
|
|
|
struct gen_pool *bm_bpalloc;
|
|
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
|
|
index 87ee9f767b7af..d8ace96832bac 100644
|
|
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
|
|
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
|
|
@@ -213,15 +213,16 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
|
|
}
|
|
EXPORT_SYMBOL(cmdq_pkt_write_mask);
|
|
|
|
-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
|
|
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
|
|
{
|
|
struct cmdq_instruction inst = { {0} };
|
|
+ u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;
|
|
|
|
if (event >= CMDQ_MAX_EVENT)
|
|
return -EINVAL;
|
|
|
|
inst.op = CMDQ_CODE_WFE;
|
|
- inst.value = CMDQ_WFE_OPTION;
|
|
+ inst.value = CMDQ_WFE_OPTION | clear_option;
|
|
inst.event = event;
|
|
|
|
return cmdq_pkt_append_command(pkt, inst);
|
|
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
|
|
index 1f35b097c6356..7abfc8c4fdc72 100644
|
|
--- a/drivers/soc/qcom/apr.c
|
|
+++ b/drivers/soc/qcom/apr.c
|
|
@@ -328,7 +328,7 @@ static int of_apr_add_pd_lookups(struct device *dev)
|
|
|
|
pds = pdr_add_lookup(apr->pdr, service_name, service_path);
|
|
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
|
|
- dev_err(dev, "pdr add lookup failed: %d\n", ret);
|
|
+ dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
|
|
return PTR_ERR(pds);
|
|
}
|
|
}
|
|
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
|
|
index 15b5002e4127b..ab9ae8cdfa54c 100644
|
|
--- a/drivers/soc/qcom/pdr_internal.h
|
|
+++ b/drivers/soc/qcom/pdr_internal.h
|
|
@@ -185,7 +185,7 @@ struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
|
|
.data_type = QMI_STRUCT,
|
|
.elem_len = SERVREG_DOMAIN_LIST_LENGTH,
|
|
.elem_size = sizeof(struct servreg_location_entry),
|
|
- .array_type = NO_ARRAY,
|
|
+ .array_type = VAR_LEN_ARRAY,
|
|
.tlv_type = 0x12,
|
|
.offset = offsetof(struct servreg_get_domain_list_resp,
|
|
domain_list),
|
|
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
|
|
index 31ff49fcd078b..c556623dae024 100644
|
|
--- a/drivers/soc/xilinx/zynqmp_power.c
|
|
+++ b/drivers/soc/xilinx/zynqmp_power.c
|
|
@@ -205,7 +205,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
|
|
rx_chan = mbox_request_channel_byname(client, "rx");
|
|
if (IS_ERR(rx_chan)) {
|
|
dev_err(&pdev->dev, "Failed to request rx channel\n");
|
|
- return IS_ERR(rx_chan);
|
|
+ return PTR_ERR(rx_chan);
|
|
}
|
|
} else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
|
|
irq = platform_get_irq(pdev, 0);
|
|
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
|
|
index 2ea73809ca345..271839a8add0e 100644
|
|
--- a/drivers/spi/spi-dw-pci.c
|
|
+++ b/drivers/spi/spi-dw-pci.c
|
|
@@ -127,18 +127,16 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
if (desc->setup) {
|
|
ret = desc->setup(dws);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err_free_irq_vectors;
|
|
}
|
|
} else {
|
|
- pci_free_irq_vectors(pdev);
|
|
- return -ENODEV;
|
|
+ ret = -ENODEV;
|
|
+ goto err_free_irq_vectors;
|
|
}
|
|
|
|
ret = dw_spi_add_host(&pdev->dev, dws);
|
|
- if (ret) {
|
|
- pci_free_irq_vectors(pdev);
|
|
- return ret;
|
|
- }
|
|
+ if (ret)
|
|
+ goto err_free_irq_vectors;
|
|
|
|
/* PCI hook and SPI hook use the same drv data */
|
|
pci_set_drvdata(pdev, dws);
|
|
@@ -152,6 +150,10 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
pm_runtime_allow(&pdev->dev);
|
|
|
|
return 0;
|
|
+
|
|
+err_free_irq_vectors:
|
|
+ pci_free_irq_vectors(pdev);
|
|
+ return ret;
|
|
}
|
|
|
|
static void spi_pci_remove(struct pci_dev *pdev)
|
|
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
|
|
index 37a3e0f8e7526..a702e9d7d68c0 100644
|
|
--- a/drivers/spi/spi-fsi.c
|
|
+++ b/drivers/spi/spi-fsi.c
|
|
@@ -24,11 +24,16 @@
|
|
|
|
#define SPI_FSI_BASE 0x70000
|
|
#define SPI_FSI_INIT_TIMEOUT_MS 1000
|
|
-#define SPI_FSI_MAX_TRANSFER_SIZE 2048
|
|
+#define SPI_FSI_MAX_XFR_SIZE 2048
|
|
+#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED 32
|
|
|
|
#define SPI_FSI_ERROR 0x0
|
|
#define SPI_FSI_COUNTER_CFG 0x1
|
|
#define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
|
|
+#define SPI_FSI_COUNTER_CFG_N2_RX BIT_ULL(8)
|
|
+#define SPI_FSI_COUNTER_CFG_N2_TX BIT_ULL(9)
|
|
+#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
|
|
+#define SPI_FSI_COUNTER_CFG_N2_RELOAD BIT_ULL(11)
|
|
#define SPI_FSI_CFG1 0x2
|
|
#define SPI_FSI_CLOCK_CFG 0x3
|
|
#define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
|
|
@@ -61,7 +66,7 @@
|
|
#define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62)
|
|
#define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63)
|
|
#define SPI_FSI_STATUS_ANY_ERROR \
|
|
- (SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \
|
|
+ (SPI_FSI_STATUS_ERROR | \
|
|
SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
|
|
SPI_FSI_STATUS_RDR_OVERRUN)
|
|
#define SPI_FSI_PORT_CTRL 0x9
|
|
@@ -70,6 +75,8 @@ struct fsi_spi {
|
|
struct device *dev; /* SPI controller device */
|
|
struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
|
|
u32 base;
|
|
+ size_t max_xfr_size;
|
|
+ bool restricted;
|
|
};
|
|
|
|
struct fsi_spi_sequence {
|
|
@@ -205,8 +212,12 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
|
|
if (rc)
|
|
return rc;
|
|
|
|
- return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
|
|
- SPI_FSI_CLOCK_CFG_RESET2);
|
|
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
|
|
+ SPI_FSI_CLOCK_CFG_RESET2);
|
|
+ if (rc)
|
|
+ return rc;
|
|
+
|
|
+ return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
|
|
}
|
|
|
|
static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
|
|
@@ -214,8 +225,8 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
|
|
/*
|
|
* Add the next byte of instruction to the 8-byte sequence register.
|
|
* Then decrement the counter so that the next instruction will go in
|
|
- * the right place. Return the number of "slots" left in the sequence
|
|
- * register.
|
|
+ * the right place. Return the index of the slot we just filled in the
|
|
+ * sequence register.
|
|
*/
|
|
seq->data |= (u64)val << seq->bit;
|
|
seq->bit -= 8;
|
|
@@ -233,40 +244,71 @@ static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
|
|
struct fsi_spi_sequence *seq,
|
|
struct spi_transfer *transfer)
|
|
{
|
|
+ bool docfg = false;
|
|
int loops;
|
|
int idx;
|
|
int rc;
|
|
+ u8 val = 0;
|
|
u8 len = min(transfer->len, 8U);
|
|
u8 rem = transfer->len % len;
|
|
+ u64 cfg = 0ULL;
|
|
|
|
loops = transfer->len / len;
|
|
|
|
if (transfer->tx_buf) {
|
|
- idx = fsi_spi_sequence_add(seq,
|
|
- SPI_FSI_SEQUENCE_SHIFT_OUT(len));
|
|
+ val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
|
|
+ idx = fsi_spi_sequence_add(seq, val);
|
|
+
|
|
if (rem)
|
|
rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
|
|
} else if (transfer->rx_buf) {
|
|
- idx = fsi_spi_sequence_add(seq,
|
|
- SPI_FSI_SEQUENCE_SHIFT_IN(len));
|
|
+ val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
|
|
+ idx = fsi_spi_sequence_add(seq, val);
|
|
+
|
|
if (rem)
|
|
rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
|
|
} else {
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (ctx->restricted) {
|
|
+ const int eidx = rem ? 5 : 6;
|
|
+
|
|
+ while (loops > 1 && idx <= eidx) {
+ idx = fsi_spi_sequence_add(seq, val);
+ loops--;
+ docfg = true;
+ }
+
+ if (loops > 1) {
+ dev_warn(ctx->dev, "No sequencer slots; aborting.\n");
+ return -EINVAL;
+ }
+ }
+
if (loops > 1) {
fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
+ docfg = true;
+ }
- if (rem)
- fsi_spi_sequence_add(seq, rem);
+ if (docfg) {
+ cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
+ if (transfer->rx_buf)
+ cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
+ SPI_FSI_COUNTER_CFG_N2_TX |
+ SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
+ SPI_FSI_COUNTER_CFG_N2_RELOAD;
- rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG,
- SPI_FSI_COUNTER_CFG_LOOPS(loops - 1));
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
if (rc)
return rc;
+ } else {
+ fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
}
+ if (rem)
+ fsi_spi_sequence_add(seq, rem);
+
return 0;
}
@@ -275,6 +317,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
{
int rc = 0;
u64 status = 0ULL;
+ u64 cfg = 0ULL;
if (transfer->tx_buf) {
int nb;
@@ -312,6 +355,16 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
u64 in = 0ULL;
u8 *rx = transfer->rx_buf;
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
+ if (rc)
+ return rc;
+
+ if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
+ if (rc)
+ return rc;
+ }
+
while (transfer->len > recv) {
do {
rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
@@ -350,7 +403,7 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
u64 status = 0ULL;
u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
- FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4);
+ FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);
end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
do {
@@ -407,7 +460,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
/* Sequencer must do shift out (tx) first. */
if (!transfer->tx_buf ||
- transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) {
+ transfer->len > (ctx->max_xfr_size + 8)) {
rc = -EINVAL;
goto error;
}
@@ -431,7 +484,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
/* Sequencer can only do shift in (rx) after tx. */
if (next->rx_buf) {
- if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) {
+ if (next->len > ctx->max_xfr_size) {
rc = -EINVAL;
goto error;
}
@@ -476,7 +529,9 @@ error:
static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
{
- return SPI_FSI_MAX_TRANSFER_SIZE;
+ struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
+
+ return ctx->max_xfr_size;
}
static int fsi_spi_probe(struct device *dev)
@@ -524,6 +579,14 @@ static int fsi_spi_probe(struct device *dev)
ctx->fsi = fsi;
ctx->base = base + SPI_FSI_BASE;
+ if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
+ ctx->restricted = true;
+ ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
+ } else {
+ ctx->restricted = false;
+ ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
+ }
+
rc = devm_spi_register_controller(dev, ctlr);
if (rc)
spi_controller_put(ctlr);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index e9e256718ef4a..10d8a722b0833 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -24,7 +24,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gcd.h>
-#include <linux/iopoll.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
@@ -349,9 +348,19 @@ disable_fifo:
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
- u32 val;
-
- return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC);
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(readl_relaxed(reg) & bit)) {
+ if (time_after(jiffies, timeout)) {
+ if (!(readl_relaxed(reg) & bit))
+ return -ETIMEDOUT;
+ else
+ return 0;
+ }
+ cpu_relax();
+ }
+ return 0;
}
static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
|
|
index cf67ea60dc0ed..6587a7dc3f5ba 100644
|
|
--- a/drivers/spi/spi-s3c64xx.c
|
|
+++ b/drivers/spi/spi-s3c64xx.c
|
|
@@ -122,6 +122,7 @@
|
|
|
|
struct s3c64xx_spi_dma_data {
|
|
struct dma_chan *ch;
|
|
+ dma_cookie_t cookie;
|
|
enum dma_transfer_direction direction;
|
|
};
|
|
|
|
@@ -264,12 +265,13 @@ static void s3c64xx_spi_dmacb(void *data)
|
|
spin_unlock_irqrestore(&sdd->lock, flags);
|
|
}
|
|
|
|
-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
|
|
+static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
|
|
struct sg_table *sgt)
|
|
{
|
|
struct s3c64xx_spi_driver_data *sdd;
|
|
struct dma_slave_config config;
|
|
struct dma_async_tx_descriptor *desc;
|
|
+ int ret;
|
|
|
|
memset(&config, 0, sizeof(config));
|
|
|
|
@@ -293,12 +295,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
|
|
|
|
desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
|
|
dma->direction, DMA_PREP_INTERRUPT);
|
|
+ if (!desc) {
|
|
+ dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
|
|
+ dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
|
|
desc->callback = s3c64xx_spi_dmacb;
|
|
desc->callback_param = dma;
|
|
|
|
- dmaengine_submit(desc);
|
|
+ dma->cookie = dmaengine_submit(desc);
|
|
+ ret = dma_submit_error(dma->cookie);
|
|
+ if (ret) {
|
|
+ dev_err(&sdd->pdev->dev, "DMA submission failed");
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
dma_async_issue_pending(dma->ch);
|
|
+ return 0;
|
|
}
|
|
|
|
static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
|
|
@@ -348,11 +362,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
|
|
return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
|
|
}
|
|
|
|
-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
|
|
+static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
|
|
struct spi_transfer *xfer, int dma_mode)
|
|
{
|
|
void __iomem *regs = sdd->regs;
|
|
u32 modecfg, chcfg;
|
|
+ int ret = 0;
|
|
|
|
modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
|
|
modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
|
|
@@ -378,7 +393,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
|
|
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
|
|
if (dma_mode) {
|
|
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
|
|
- prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
|
|
+ ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
|
|
} else {
|
|
switch (sdd->cur_bpw) {
|
|
case 32:
|
|
@@ -410,12 +425,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
|
|
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
|
|
| S3C64XX_SPI_PACKET_CNT_EN,
|
|
regs + S3C64XX_SPI_PACKET_CNT);
|
|
- prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
|
|
+ ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
|
|
}
|
|
}
|
|
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
|
|
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
|
|
@@ -548,9 +568,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
|
|
return 0;
|
|
}
|
|
|
|
-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
|
|
+static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
|
|
{
|
|
void __iomem *regs = sdd->regs;
|
|
+ int ret;
|
|
u32 val;
|
|
|
|
/* Disable Clock */
|
|
@@ -598,7 +619,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
|
|
|
|
if (sdd->port_conf->clk_from_cmu) {
|
|
/* The src_clk clock is divided internally by 2 */
|
|
- clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
|
|
+ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
|
|
+ if (ret)
|
|
+ return ret;
|
|
} else {
|
|
/* Configure Clock */
|
|
val = readl(regs + S3C64XX_SPI_CLK_CFG);
|
|
@@ -612,6 +635,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
|
|
val |= S3C64XX_SPI_ENCLK_ENABLE;
|
|
writel(val, regs + S3C64XX_SPI_CLK_CFG);
|
|
}
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
|
|
@@ -654,7 +679,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
|
|
sdd->cur_bpw = bpw;
|
|
sdd->cur_speed = speed;
|
|
sdd->cur_mode = spi->mode;
|
|
- s3c64xx_spi_config(sdd);
|
|
+ status = s3c64xx_spi_config(sdd);
|
|
+ if (status)
|
|
+ return status;
|
|
}
|
|
|
|
if (!is_polling(sdd) && (xfer->len > fifo_len) &&
|
|
@@ -678,13 +705,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
|
|
sdd->state &= ~RXBUSY;
|
|
sdd->state &= ~TXBUSY;
|
|
|
|
- s3c64xx_enable_datapath(sdd, xfer, use_dma);
|
|
-
|
|
/* Start the signals */
|
|
s3c64xx_spi_set_cs(spi, true);
|
|
|
|
+ status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
|
|
+
|
|
spin_unlock_irqrestore(&sdd->lock, flags);
|
|
|
|
+ if (status) {
|
|
+ dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
|
|
+ break;
|
|
+ }
|
|
+
|
|
if (use_dma)
|
|
status = s3c64xx_wait_for_dma(sdd, xfer);
|
|
else
|
|
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
|
|
index 03929b9d3a8bc..d0725bc8b48a4 100644
|
|
--- a/drivers/staging/emxx_udc/emxx_udc.c
|
|
+++ b/drivers/staging/emxx_udc/emxx_udc.c
|
|
@@ -2593,7 +2593,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
|
|
|
|
if (req->unaligned) {
|
|
if (!ep->virt_buf)
|
|
- ep->virt_buf = dma_alloc_coherent(NULL, PAGE_SIZE,
|
|
+ ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
|
|
&ep->phys_buf,
|
|
GFP_ATOMIC | GFP_DMA);
|
|
if (ep->epnum > 0) {
|
|
@@ -3148,7 +3148,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
|
|
for (i = 0; i < NUM_ENDPOINTS; i++) {
|
|
ep = &udc->ep[i];
|
|
if (ep->virt_buf)
|
|
- dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
|
|
+ dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf,
|
|
ep->phys_buf);
|
|
}
|
|
|
|
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
|
|
index 54434c2dbaf90..8473e14370747 100644
|
|
--- a/drivers/staging/media/atomisp/pci/sh_css.c
|
|
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
|
|
@@ -9521,7 +9521,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
|
|
if (err)
|
|
{
|
|
IA_CSS_LEAVE_ERR(err);
|
|
- return err;
|
|
+ goto ERR;
|
|
}
|
|
#endif
|
|
for (i = 0; i < num_pipes; i++)
|
|
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
|
|
index d561f125085a7..d72ebbd17a692 100644
|
|
--- a/drivers/staging/media/hantro/hantro_h264.c
|
|
+++ b/drivers/staging/media/hantro/hantro_h264.c
|
|
@@ -327,7 +327,7 @@ dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
|
|
*/
|
|
dst_buf = hantro_get_dst_buf(ctx);
|
|
buf = &dst_buf->vb2_buf;
|
|
- dma_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
|
|
+ dma_addr = hantro_get_dec_buf_addr(ctx, buf);
|
|
}
|
|
|
|
return dma_addr;
|
|
diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
|
|
index 44062ffceaea7..6d2a8f2a8f0bb 100644
|
|
--- a/drivers/staging/media/hantro/hantro_postproc.c
|
|
+++ b/drivers/staging/media/hantro/hantro_postproc.c
|
|
@@ -118,7 +118,9 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
|
|
unsigned int num_buffers = cap_queue->num_buffers;
|
|
unsigned int i, buf_size;
|
|
|
|
- buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage;
|
|
+ buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage +
|
|
+ hantro_h264_mv_size(ctx->dst_fmt.width,
|
|
+ ctx->dst_fmt.height);
|
|
|
|
for (i = 0; i < num_buffers; ++i) {
|
|
struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
|
|
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
|
|
index fbd53d7c097cd..e9d6bd9e9332a 100644
|
|
--- a/drivers/staging/media/ipu3/ipu3-css-params.c
|
|
+++ b/drivers/staging/media/ipu3/ipu3-css-params.c
|
|
@@ -159,7 +159,7 @@ imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
|
|
|
|
memset(&cfg->scaler_coeffs_chroma, 0,
|
|
sizeof(cfg->scaler_coeffs_chroma));
|
|
- memset(&cfg->scaler_coeffs_luma, 0, sizeof(*cfg->scaler_coeffs_luma));
|
|
+ memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma));
|
|
do {
|
|
phase_step_correction++;
|
|
|
|
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
|
|
index 7c4df6d48c43d..4df9476ef2a9b 100644
|
|
--- a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
|
|
+++ b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
|
|
@@ -16,6 +16,7 @@
|
|
*/
|
|
|
|
#include <linux/clk.h>
|
|
+#include <linux/delay.h>
|
|
#include <linux/io.h>
|
|
#include <linux/mfd/syscon.h>
|
|
#include <linux/module.h>
|
|
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
|
|
index 195d963c4fbb4..b6fee7230ce05 100644
|
|
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
|
|
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
|
|
@@ -597,7 +597,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
|
|
|
|
prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
|
|
sizeof(struct ieee80211_rxb *),
|
|
- GFP_KERNEL);
|
|
+ GFP_ATOMIC);
|
|
if (!prxbIndicateArray)
|
|
return;
|
|
|
|
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
|
|
index 0e959ebc38b56..a9fb5165b33d9 100644
|
|
--- a/drivers/staging/wfx/data_rx.c
|
|
+++ b/drivers/staging/wfx/data_rx.c
|
|
@@ -80,7 +80,7 @@ void wfx_rx_cb(struct wfx_vif *wvif,
|
|
goto drop;
|
|
|
|
if (arg->status == HIF_STATUS_RX_FAIL_MIC)
|
|
- hdr->flag |= RX_FLAG_MMIC_ERROR;
|
|
+ hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED;
|
|
else if (arg->status)
|
|
goto drop;
|
|
|
|
diff --git a/drivers/staging/wilc1000/mon.c b/drivers/staging/wilc1000/mon.c
|
|
index 60331417bd983..66f1c870f4f69 100644
|
|
--- a/drivers/staging/wilc1000/mon.c
|
|
+++ b/drivers/staging/wilc1000/mon.c
|
|
@@ -236,11 +236,10 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
|
|
|
|
if (register_netdevice(wl->monitor_dev)) {
|
|
netdev_err(real_dev, "register_netdevice failed\n");
|
|
+ free_netdev(wl->monitor_dev);
|
|
return NULL;
|
|
}
|
|
priv = netdev_priv(wl->monitor_dev);
|
|
- if (!priv)
|
|
- return NULL;
|
|
|
|
priv->real_ndev = real_dev;
|
|
|
|
diff --git a/drivers/staging/wilc1000/sdio.c b/drivers/staging/wilc1000/sdio.c
|
|
index 36eb589263bfd..b14e4ed6134fc 100644
|
|
--- a/drivers/staging/wilc1000/sdio.c
|
|
+++ b/drivers/staging/wilc1000/sdio.c
|
|
@@ -151,9 +151,10 @@ static int wilc_sdio_probe(struct sdio_func *func,
|
|
wilc->dev = &func->dev;
|
|
|
|
wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc");
|
|
- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
|
|
+ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
|
|
+ kfree(sdio_priv);
|
|
return -EPROBE_DEFER;
|
|
- else if (!IS_ERR(wilc->rtc_clk))
|
|
+ } else if (!IS_ERR(wilc->rtc_clk))
|
|
clk_prepare_enable(wilc->rtc_clk);
|
|
|
|
dev_info(&func->dev, "Driver Initializing success\n");
|
|
diff --git a/drivers/staging/wilc1000/spi.c b/drivers/staging/wilc1000/spi.c
|
|
index 3f19e3f38a397..a18dac0aa6b67 100644
|
|
--- a/drivers/staging/wilc1000/spi.c
|
|
+++ b/drivers/staging/wilc1000/spi.c
|
|
@@ -112,9 +112,10 @@ static int wilc_bus_probe(struct spi_device *spi)
|
|
wilc->dev_irq_num = spi->irq;
|
|
|
|
wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
|
|
- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
|
|
+ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
|
|
+ kfree(spi_priv);
|
|
return -EPROBE_DEFER;
|
|
- else if (!IS_ERR(wilc->rtc_clk))
|
|
+ } else if (!IS_ERR(wilc->rtc_clk))
|
|
clk_prepare_enable(wilc->rtc_clk);
|
|
|
|
return 0;
|
|
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
|
|
index 0209bc23e631e..13a280c780c39 100644
|
|
--- a/drivers/target/target_core_user.c
|
|
+++ b/drivers/target/target_core_user.c
|
|
@@ -669,7 +669,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
|
|
void *from, *to = NULL;
|
|
size_t copy_bytes, to_offset, offset;
|
|
struct scatterlist *sg;
|
|
- struct page *page;
|
|
+ struct page *page = NULL;
|
|
|
|
for_each_sg(data_sg, sg, data_nents, i) {
|
|
int sg_remaining = sg->length;
|
|
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
|
|
index d1b27b0522a3c..8d60e0ff67b4d 100644
|
|
--- a/drivers/tty/hvc/Kconfig
|
|
+++ b/drivers/tty/hvc/Kconfig
|
|
@@ -81,6 +81,7 @@ config HVC_DCC
|
|
bool "ARM JTAG DCC console"
|
|
depends on ARM || ARM64
|
|
select HVC_DRIVER
|
|
+ select SERIAL_CORE_CONSOLE
|
|
help
|
|
This console uses the JTAG DCC on ARM to create a console under the HVC
|
|
driver. This console is used through a JTAG only on ARM. If you don't have
|
|
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
|
|
index 55105ac38f89b..509d1042825a1 100644
|
|
--- a/drivers/tty/hvc/hvcs.c
|
|
+++ b/drivers/tty/hvc/hvcs.c
|
|
@@ -1216,13 +1216,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
|
|
|
|
tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
|
|
|
|
- /*
|
|
- * This line is important because it tells hvcs_open that this
|
|
- * device needs to be re-configured the next time hvcs_open is
|
|
- * called.
|
|
- */
|
|
- tty->driver_data = NULL;
|
|
-
|
|
free_irq(irq, hvcsd);
|
|
return;
|
|
} else if (hvcsd->port.count < 0) {
|
|
@@ -1237,6 +1230,13 @@ static void hvcs_cleanup(struct tty_struct * tty)
|
|
{
|
|
struct hvcs_struct *hvcsd = tty->driver_data;
|
|
|
|
+ /*
|
|
+ * This line is important because it tells hvcs_open that this
|
|
+ * device needs to be re-configured the next time hvcs_open is
|
|
+ * called.
|
|
+ */
|
|
+ tty->driver_data = NULL;
|
|
+
|
|
tty_port_put(&hvcsd->port);
|
|
}
|
|
|
|
diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
|
|
index cf20616340a1a..fe569f6294a24 100644
|
|
--- a/drivers/tty/ipwireless/network.c
|
|
+++ b/drivers/tty/ipwireless/network.c
|
|
@@ -117,7 +117,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
|
|
skb->len,
|
|
notify_packet_sent,
|
|
network);
|
|
- if (ret == -1) {
|
|
+ if (ret < 0) {
|
|
skb_pull(skb, 2);
|
|
return 0;
|
|
}
|
|
@@ -134,7 +134,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
|
|
notify_packet_sent,
|
|
network);
|
|
kfree(buf);
|
|
- if (ret == -1)
|
|
+ if (ret < 0)
|
|
return 0;
|
|
}
|
|
kfree_skb(skb);
|
|
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
|
|
index fad3401e604d9..23584769fc292 100644
|
|
--- a/drivers/tty/ipwireless/tty.c
|
|
+++ b/drivers/tty/ipwireless/tty.c
|
|
@@ -218,7 +218,7 @@ static int ipw_write(struct tty_struct *linux_tty,
|
|
ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
|
|
buf, count,
|
|
ipw_write_packet_sent_callback, tty);
|
|
- if (ret == -1) {
|
|
+ if (ret < 0) {
|
|
mutex_unlock(&tty->ipw_tty_mutex);
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
|
|
index 00099a8439d21..c6a1d8c4e6894 100644
|
|
--- a/drivers/tty/pty.c
|
|
+++ b/drivers/tty/pty.c
|
|
@@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
|
|
spin_lock_irqsave(&to->port->lock, flags);
|
|
/* Stuff the data into the input queue of the other end */
|
|
c = tty_insert_flip_string(to->port, buf, c);
|
|
+ spin_unlock_irqrestore(&to->port->lock, flags);
|
|
/* And shovel */
|
|
if (c)
|
|
tty_flip_buffer_push(to->port);
|
|
- spin_unlock_irqrestore(&to->port->lock, flags);
|
|
}
|
|
return c;
|
|
}
|
|
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
|
|
index 780908d435577..896b9c77117d3 100644
|
|
--- a/drivers/tty/serial/Kconfig
|
|
+++ b/drivers/tty/serial/Kconfig
|
|
@@ -8,6 +8,7 @@ menu "Serial drivers"
|
|
|
|
config SERIAL_EARLYCON
|
|
bool
|
|
+ depends on SERIAL_CORE
|
|
help
|
|
Support for early consoles with the earlycon parameter. This enables
|
|
the console before standard serial driver is probed. The console is
|
|
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
|
|
index 90298c4030421..f8ba7690efe31 100644
|
|
--- a/drivers/tty/serial/fsl_lpuart.c
|
|
+++ b/drivers/tty/serial/fsl_lpuart.c
|
|
@@ -649,26 +649,24 @@ static int lpuart32_poll_init(struct uart_port *port)
|
|
spin_lock_irqsave(&sport->port.lock, flags);
|
|
|
|
/* Disable Rx & Tx */
|
|
- lpuart32_write(&sport->port, UARTCTRL, 0);
|
|
+ lpuart32_write(&sport->port, 0, UARTCTRL);
|
|
|
|
temp = lpuart32_read(&sport->port, UARTFIFO);
|
|
|
|
/* Enable Rx and Tx FIFO */
|
|
- lpuart32_write(&sport->port, UARTFIFO,
|
|
- temp | UARTFIFO_RXFE | UARTFIFO_TXFE);
|
|
+ lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
|
|
|
|
/* flush Tx and Rx FIFO */
|
|
- lpuart32_write(&sport->port, UARTFIFO,
|
|
- UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH);
|
|
+ lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
|
|
|
|
/* explicitly clear RDRF */
|
|
if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
|
|
lpuart32_read(&sport->port, UARTDATA);
|
|
- lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF);
|
|
+ lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
|
|
}
|
|
|
|
/* Enable Rx and Tx */
|
|
- lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE);
|
|
+ lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
|
|
spin_unlock_irqrestore(&sport->port.lock, flags);
|
|
|
|
return 0;
|
|
@@ -677,12 +675,12 @@ static int lpuart32_poll_init(struct uart_port *port)
|
|
static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
|
|
{
|
|
lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
|
|
- lpuart32_write(port, UARTDATA, c);
|
|
+ lpuart32_write(port, c, UARTDATA);
|
|
}
|
|
|
|
static int lpuart32_poll_get_char(struct uart_port *port)
|
|
{
|
|
- if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF))
|
|
+ if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF))
|
|
return NO_POLL_CHAR;
|
|
|
|
return lpuart32_read(port, UARTDATA);
|
|
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
|
|
index 37ae7fc5f8dd8..7bac485b49ba9 100644
|
|
--- a/drivers/usb/cdns3/gadget.c
|
|
+++ b/drivers/usb/cdns3/gadget.c
|
|
@@ -2988,12 +2988,12 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
|
|
|
|
priv_dev = cdns->gadget_dev;
|
|
|
|
- devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
|
|
|
|
pm_runtime_mark_last_busy(cdns->dev);
|
|
pm_runtime_put_autosuspend(cdns->dev);
|
|
|
|
usb_del_gadget_udc(&priv_dev->gadget);
|
|
+ devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
|
|
|
|
cdns3_free_all_eps(priv_dev);
|
|
|
|
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
|
|
index 7499ba118665a..808722b8294a4 100644
|
|
--- a/drivers/usb/class/cdc-acm.c
|
|
+++ b/drivers/usb/class/cdc-acm.c
|
|
@@ -1243,9 +1243,21 @@ static int acm_probe(struct usb_interface *intf,
|
|
}
|
|
}
|
|
} else {
|
|
+ int class = -1;
|
|
+
|
|
data_intf_num = union_header->bSlaveInterface0;
|
|
control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
|
|
data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
|
|
+
|
|
+ if (control_interface)
|
|
+ class = control_interface->cur_altsetting->desc.bInterfaceClass;
|
|
+
|
|
+ if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) {
|
|
+ dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n");
|
|
+ combined_interfaces = 1;
|
|
+ control_interface = data_interface = intf;
|
|
+ goto look_for_collapsed_interface;
|
|
+ }
|
|
}
|
|
|
|
if (!control_interface || !data_interface) {
|
|
@@ -1900,6 +1912,17 @@ static const struct usb_device_id acm_ids[] = {
|
|
.driver_info = IGNORE_DEVICE,
|
|
},
|
|
|
|
+ /* Exclude ETAS ES58x */
|
|
+ { USB_DEVICE(0x108c, 0x0159), /* ES581.4 */
|
|
+ .driver_info = IGNORE_DEVICE,
|
|
+ },
|
|
+ { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */
|
|
+ .driver_info = IGNORE_DEVICE,
|
|
+ },
|
|
+ { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */
|
|
+ .driver_info = IGNORE_DEVICE,
|
|
+ },
|
|
+
|
|
{ USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
|
|
.driver_info = SEND_ZERO_PACKET,
|
|
},
|
|
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
|
|
index e3db6fbeadef8..0c7a0adfd1e1f 100644
|
|
--- a/drivers/usb/class/cdc-wdm.c
|
|
+++ b/drivers/usb/class/cdc-wdm.c
|
|
@@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
|
|
|
|
#define WDM_MAX 16
|
|
|
|
+/* we cannot wait forever at flush() */
|
|
+#define WDM_FLUSH_TIMEOUT (30 * HZ)
|
|
+
|
|
/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
|
|
#define WDM_DEFAULT_BUFSIZE 256
|
|
|
|
@@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb)
|
|
kfree(desc->outbuf);
|
|
desc->outbuf = NULL;
|
|
clear_bit(WDM_IN_USE, &desc->flags);
|
|
- wake_up(&desc->wait);
|
|
+ wake_up_all(&desc->wait);
|
|
}
|
|
|
|
static void wdm_in_callback(struct urb *urb)
|
|
@@ -393,6 +396,9 @@ static ssize_t wdm_write
|
|
if (test_bit(WDM_RESETTING, &desc->flags))
|
|
r = -EIO;
|
|
|
|
+ if (test_bit(WDM_DISCONNECTING, &desc->flags))
|
|
+ r = -ENODEV;
|
|
+
|
|
if (r < 0) {
|
|
rv = r;
|
|
goto out_free_mem_pm;
|
|
@@ -424,6 +430,7 @@ static ssize_t wdm_write
|
|
if (rv < 0) {
|
|
desc->outbuf = NULL;
|
|
clear_bit(WDM_IN_USE, &desc->flags);
|
|
+ wake_up_all(&desc->wait); /* for wdm_wait_for_response() */
|
|
dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
|
|
rv = usb_translate_errors(rv);
|
|
goto out_free_mem_pm;
|
|
@@ -583,28 +590,58 @@ err:
|
|
return rv;
|
|
}
|
|
|
|
-static int wdm_flush(struct file *file, fl_owner_t id)
|
|
+static int wdm_wait_for_response(struct file *file, long timeout)
|
|
{
|
|
struct wdm_device *desc = file->private_data;
|
|
+ long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */
|
|
+
|
|
+ /*
|
|
+ * Needs both flags. We cannot do with one because resetting it would
|
|
+ * cause a race with write() yet we need to signal a disconnect.
|
|
+ */
|
|
+ rv = wait_event_interruptible_timeout(desc->wait,
|
|
+ !test_bit(WDM_IN_USE, &desc->flags) ||
|
|
+ test_bit(WDM_DISCONNECTING, &desc->flags),
|
|
+ timeout);
|
|
|
|
- wait_event(desc->wait,
|
|
- /*
|
|
- * needs both flags. We cannot do with one
|
|
- * because resetting it would cause a race
|
|
- * with write() yet we need to signal
|
|
- * a disconnect
|
|
- */
|
|
- !test_bit(WDM_IN_USE, &desc->flags) ||
|
|
- test_bit(WDM_DISCONNECTING, &desc->flags));
|
|
-
|
|
- /* cannot dereference desc->intf if WDM_DISCONNECTING */
|
|
+ /*
|
|
+ * To report the correct error. This is best effort.
|
|
+ * We are inevitably racing with the hardware.
|
|
+ */
|
|
if (test_bit(WDM_DISCONNECTING, &desc->flags))
|
|
return -ENODEV;
|
|
- if (desc->werr < 0)
|
|
- dev_err(&desc->intf->dev, "Error in flush path: %d\n",
|
|
- desc->werr);
|
|
+ if (!rv)
|
|
+ return -EIO;
|
|
+ if (rv < 0)
|
|
+ return -EINTR;
|
|
+
|
|
+ spin_lock_irq(&desc->iuspin);
|
|
+ rv = desc->werr;
|
|
+ desc->werr = 0;
|
|
+ spin_unlock_irq(&desc->iuspin);
|
|
+
|
|
+ return usb_translate_errors(rv);
|
|
+
|
|
+}
|
|
+
|
|
+/*
|
|
+ * You need to send a signal when you react to malicious or defective hardware.
|
|
+ * Also, don't abort when fsync() returned -EINVAL, for older kernels which do
|
|
+ * not implement wdm_flush() will return -EINVAL.
|
|
+ */
|
|
+static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
|
+{
|
|
+ return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT);
|
|
+}
|
|
|
|
- return usb_translate_errors(desc->werr);
|
|
+/*
|
|
+ * Same with wdm_fsync(), except it uses finite timeout in order to react to
|
|
+ * malicious or defective hardware which ceased communication after close() was
|
|
+ * implicitly called due to process termination.
|
|
+ */
|
|
+static int wdm_flush(struct file *file, fl_owner_t id)
|
|
+{
|
|
+ return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT);
|
|
}
|
|
|
|
static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
|
|
@@ -729,6 +766,7 @@ static const struct file_operations wdm_fops = {
|
|
.owner = THIS_MODULE,
|
|
.read = wdm_read,
|
|
.write = wdm_write,
|
|
+ .fsync = wdm_fsync,
|
|
.open = wdm_open,
|
|
.flush = wdm_flush,
|
|
.release = wdm_release,
|
|
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
|
|
index da923ec176122..31ca5abb4c12a 100644
|
|
--- a/drivers/usb/core/urb.c
|
|
+++ b/drivers/usb/core/urb.c
|
|
@@ -772,11 +772,12 @@ void usb_block_urb(struct urb *urb)
|
|
EXPORT_SYMBOL_GPL(usb_block_urb);
|
|
|
|
/**
|
|
- * usb_kill_anchored_urbs - cancel transfer requests en masse
|
|
+ * usb_kill_anchored_urbs - kill all URBs associated with an anchor
|
|
* @anchor: anchor the requests are bound to
|
|
*
|
|
- * this allows all outstanding URBs to be killed starting
|
|
- * from the back of the queue
|
|
+ * This kills all outstanding URBs starting from the back of the queue,
|
|
+ * with guarantee that no completer callbacks will take place from the
|
|
+ * anchor after this function returns.
|
|
*
|
|
* This routine should not be called by a driver after its disconnect
|
|
* method has returned.
|
|
@@ -784,20 +785,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb);
|
|
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
|
|
{
|
|
struct urb *victim;
|
|
+ int surely_empty;
|
|
|
|
- spin_lock_irq(&anchor->lock);
|
|
- while (!list_empty(&anchor->urb_list)) {
|
|
- victim = list_entry(anchor->urb_list.prev, struct urb,
|
|
- anchor_list);
|
|
- /* we must make sure the URB isn't freed before we kill it*/
|
|
- usb_get_urb(victim);
|
|
- spin_unlock_irq(&anchor->lock);
|
|
- /* this will unanchor the URB */
|
|
- usb_kill_urb(victim);
|
|
- usb_put_urb(victim);
|
|
+ do {
|
|
spin_lock_irq(&anchor->lock);
|
|
- }
|
|
- spin_unlock_irq(&anchor->lock);
|
|
+ while (!list_empty(&anchor->urb_list)) {
|
|
+ victim = list_entry(anchor->urb_list.prev,
|
|
+ struct urb, anchor_list);
|
|
+ /* make sure the URB isn't freed before we kill it */
|
|
+ usb_get_urb(victim);
|
|
+ spin_unlock_irq(&anchor->lock);
|
|
+ /* this will unanchor the URB */
|
|
+ usb_kill_urb(victim);
|
|
+ usb_put_urb(victim);
|
|
+ spin_lock_irq(&anchor->lock);
|
|
+ }
|
|
+ surely_empty = usb_anchor_check_wakeup(anchor);
|
|
+
|
|
+ spin_unlock_irq(&anchor->lock);
|
|
+ cpu_relax();
|
|
+ } while (!surely_empty);
|
|
}
|
|
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
|
|
|
|
@@ -816,21 +823,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
|
|
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
|
|
{
|
|
struct urb *victim;
|
|
+ int surely_empty;
|
|
|
|
- spin_lock_irq(&anchor->lock);
|
|
- anchor->poisoned = 1;
|
|
- while (!list_empty(&anchor->urb_list)) {
|
|
- victim = list_entry(anchor->urb_list.prev, struct urb,
|
|
- anchor_list);
|
|
- /* we must make sure the URB isn't freed before we kill it*/
|
|
- usb_get_urb(victim);
|
|
- spin_unlock_irq(&anchor->lock);
|
|
- /* this will unanchor the URB */
|
|
- usb_poison_urb(victim);
|
|
- usb_put_urb(victim);
|
|
+ do {
|
|
spin_lock_irq(&anchor->lock);
|
|
- }
|
|
- spin_unlock_irq(&anchor->lock);
|
|
+ anchor->poisoned = 1;
|
|
+ while (!list_empty(&anchor->urb_list)) {
|
|
+ victim = list_entry(anchor->urb_list.prev,
|
|
+ struct urb, anchor_list);
|
|
+ /* make sure the URB isn't freed before we kill it */
|
|
+ usb_get_urb(victim);
|
|
+ spin_unlock_irq(&anchor->lock);
|
|
+ /* this will unanchor the URB */
|
|
+ usb_poison_urb(victim);
|
|
+ usb_put_urb(victim);
|
|
+ spin_lock_irq(&anchor->lock);
|
|
+ }
|
|
+ surely_empty = usb_anchor_check_wakeup(anchor);
|
|
+
|
|
+ spin_unlock_irq(&anchor->lock);
|
|
+ cpu_relax();
|
|
+ } while (!surely_empty);
|
|
}
|
|
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
|
|
|
|
@@ -970,14 +983,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
|
|
{
|
|
struct urb *victim;
|
|
unsigned long flags;
|
|
+ int surely_empty;
|
|
+
|
|
+ do {
|
|
+ spin_lock_irqsave(&anchor->lock, flags);
|
|
+ while (!list_empty(&anchor->urb_list)) {
|
|
+ victim = list_entry(anchor->urb_list.prev,
|
|
+ struct urb, anchor_list);
|
|
+ __usb_unanchor_urb(victim, anchor);
|
|
+ }
|
|
+ surely_empty = usb_anchor_check_wakeup(anchor);
|
|
|
|
- spin_lock_irqsave(&anchor->lock, flags);
|
|
- while (!list_empty(&anchor->urb_list)) {
|
|
- victim = list_entry(anchor->urb_list.prev, struct urb,
|
|
- anchor_list);
|
|
- __usb_unanchor_urb(victim, anchor);
|
|
- }
|
|
- spin_unlock_irqrestore(&anchor->lock, flags);
|
|
+ spin_unlock_irqrestore(&anchor->lock, flags);
|
|
+ cpu_relax();
|
|
+ } while (!surely_empty);
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
|
|
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
|
|
index 7faf5f8c056d4..642926f9670e6 100644
|
|
--- a/drivers/usb/dwc2/gadget.c
|
|
+++ b/drivers/usb/dwc2/gadget.c
|
|
@@ -712,8 +712,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
|
|
*/
|
|
static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
|
|
{
|
|
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
|
|
int is_isoc = hs_ep->isochronous;
|
|
unsigned int maxsize;
|
|
+ u32 mps = hs_ep->ep.maxpacket;
|
|
+ int dir_in = hs_ep->dir_in;
|
|
|
|
if (is_isoc)
|
|
maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
|
|
@@ -722,6 +725,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
|
|
else
|
|
maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
|
|
|
|
+ /* Interrupt OUT EP with mps not multiple of 4 */
|
|
+ if (hs_ep->index)
|
|
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
|
|
+ maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
|
|
+
|
|
return maxsize;
|
|
}
|
|
|
|
@@ -737,11 +745,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
|
|
* Isochronous - descriptor rx/tx bytes bitfield limit,
|
|
* Control In/Bulk/Interrupt - multiple of mps. This will allow to not
|
|
* have concatenations from various descriptors within one packet.
|
|
+ * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
|
|
+ * to a single descriptor.
|
|
*
|
|
* Selects corresponding mask for RX/TX bytes as well.
|
|
*/
|
|
static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
|
|
{
|
|
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
|
|
u32 mps = hs_ep->ep.maxpacket;
|
|
int dir_in = hs_ep->dir_in;
|
|
u32 desc_size = 0;
|
|
@@ -765,6 +776,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
|
|
desc_size -= desc_size % mps;
|
|
}
|
|
|
|
+ /* Interrupt OUT EP with mps not multiple of 4 */
|
|
+ if (hs_ep->index)
|
|
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
|
|
+ desc_size = mps;
|
|
+ *mask = DEV_DMA_NBYTES_MASK;
|
|
+ }
|
|
+
|
|
return desc_size;
|
|
}
|
|
|
|
@@ -1123,13 +1141,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
|
|
length += (mps - (length % mps));
|
|
}
|
|
|
|
- /*
|
|
- * If more data to send, adjust DMA for EP0 out data stage.
|
|
- * ureq->dma stays unchanged, hence increment it by already
|
|
- * passed passed data count before starting new transaction.
|
|
- */
|
|
- if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
|
|
- continuing)
|
|
+ if (continuing)
|
|
offset = ureq->actual;
|
|
|
|
/* Fill DDMA chain entries */
|
|
@@ -2320,22 +2332,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
|
|
*/
|
|
static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
|
|
{
|
|
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
|
|
struct dwc2_hsotg *hsotg = hs_ep->parent;
|
|
unsigned int bytes_rem = 0;
|
|
+ unsigned int bytes_rem_correction = 0;
|
|
struct dwc2_dma_desc *desc = hs_ep->desc_list;
|
|
int i;
|
|
u32 status;
|
|
+ u32 mps = hs_ep->ep.maxpacket;
|
|
+ int dir_in = hs_ep->dir_in;
|
|
|
|
if (!desc)
|
|
return -EINVAL;
|
|
|
|
+ /* Interrupt OUT EP with mps not multiple of 4 */
|
|
+ if (hs_ep->index)
|
|
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
|
|
+ bytes_rem_correction = 4 - (mps % 4);
|
|
+
|
|
for (i = 0; i < hs_ep->desc_count; ++i) {
|
|
status = desc->status;
|
|
bytes_rem += status & DEV_DMA_NBYTES_MASK;
|
|
+ bytes_rem -= bytes_rem_correction;
|
|
|
|
if (status & DEV_DMA_STS_MASK)
|
|
dev_err(hsotg->dev, "descriptor %d closed with %x\n",
|
|
i, status & DEV_DMA_STS_MASK);
|
|
+
|
|
+ if (status & DEV_DMA_L)
|
|
+ break;
|
|
+
|
|
desc++;
|
|
}
|
|
|
|
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
|
|
index ce736d67c7c34..fd73ddd8eb753 100644
|
|
--- a/drivers/usb/dwc2/params.c
|
|
+++ b/drivers/usb/dwc2/params.c
|
|
@@ -860,7 +860,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
|
|
int dwc2_init_params(struct dwc2_hsotg *hsotg)
|
|
{
|
|
const struct of_device_id *match;
|
|
- void (*set_params)(void *data);
|
|
+ void (*set_params)(struct dwc2_hsotg *data);
|
|
|
|
dwc2_set_default_params(hsotg);
|
|
dwc2_get_device_properties(hsotg);
|
|
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
|
|
index db9fd4bd1a38c..b28e90e0b685d 100644
|
|
--- a/drivers/usb/dwc2/platform.c
|
|
+++ b/drivers/usb/dwc2/platform.c
|
|
@@ -584,12 +584,16 @@ static int dwc2_driver_probe(struct platform_device *dev)
|
|
if (retval) {
|
|
hsotg->gadget.udc = NULL;
|
|
dwc2_hsotg_remove(hsotg);
|
|
- goto error_init;
|
|
+ goto error_debugfs;
|
|
}
|
|
}
|
|
#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
|
|
return 0;
|
|
|
|
+error_debugfs:
|
|
+ dwc2_debugfs_exit(hsotg);
|
|
+ if (hsotg->hcd_enabled)
|
|
+ dwc2_hcd_remove(hsotg);
|
|
error_init:
|
|
if (hsotg->params.activate_stm_id_vb_detection)
|
|
regulator_disable(hsotg->usb33d);
|
|
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
|
|
index 25c686a752b0f..928a85b0d1cdd 100644
|
|
--- a/drivers/usb/dwc3/core.c
|
|
+++ b/drivers/usb/dwc3/core.c
|
|
@@ -119,6 +119,7 @@ static void __dwc3_set_mode(struct work_struct *work)
|
|
struct dwc3 *dwc = work_to_dwc(work);
|
|
unsigned long flags;
|
|
int ret;
|
|
+ u32 reg;
|
|
|
|
if (dwc->dr_mode != USB_DR_MODE_OTG)
|
|
return;
|
|
@@ -172,6 +173,11 @@ static void __dwc3_set_mode(struct work_struct *work)
|
|
otg_set_vbus(dwc->usb2_phy->otg, true);
|
|
phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
|
|
phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
|
|
+ if (dwc->dis_split_quirk) {
|
|
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
|
|
+ reg |= DWC3_GUCTL3_SPLITDISABLE;
|
|
+ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
|
|
+ }
|
|
}
|
|
break;
|
|
case DWC3_GCTL_PRTCAP_DEVICE:
|
|
@@ -930,13 +936,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
|
|
*/
|
|
dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
|
|
|
|
- /* Handle USB2.0-only core configuration */
|
|
- if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
|
|
- DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
|
|
- if (dwc->maximum_speed == USB_SPEED_SUPER)
|
|
- dwc->maximum_speed = USB_SPEED_HIGH;
|
|
- }
|
|
-
|
|
ret = dwc3_phy_setup(dwc);
|
|
if (ret)
|
|
goto err0;
|
|
@@ -1357,6 +1356,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
|
|
dwc->dis_metastability_quirk = device_property_read_bool(dev,
|
|
"snps,dis_metastability_quirk");
|
|
|
|
+ dwc->dis_split_quirk = device_property_read_bool(dev,
|
|
+ "snps,dis-split-quirk");
|
|
+
|
|
dwc->lpm_nyet_threshold = lpm_nyet_threshold;
|
|
dwc->tx_de_emphasis = tx_de_emphasis;
|
|
|
|
@@ -1382,6 +1384,8 @@ bool dwc3_has_imod(struct dwc3 *dwc)
|
|
static void dwc3_check_params(struct dwc3 *dwc)
|
|
{
|
|
struct device *dev = dwc->dev;
|
|
+ unsigned int hwparam_gen =
|
|
+ DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
|
|
|
|
/* Check for proper value of imod_interval */
|
|
if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
|
|
@@ -1413,17 +1417,23 @@ static void dwc3_check_params(struct dwc3 *dwc)
|
|
dwc->maximum_speed);
|
|
/* fall through */
|
|
case USB_SPEED_UNKNOWN:
|
|
- /* default to superspeed */
|
|
- dwc->maximum_speed = USB_SPEED_SUPER;
|
|
-
|
|
- /*
|
|
- * default to superspeed plus if we are capable.
|
|
- */
|
|
- if ((DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) &&
|
|
- (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
|
|
- DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
|
|
+ switch (hwparam_gen) {
|
|
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
|
|
dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
|
|
-
|
|
+ break;
|
|
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
|
|
+ if (DWC3_IP_IS(DWC32))
|
|
+ dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
|
|
+ else
|
|
+ dwc->maximum_speed = USB_SPEED_SUPER;
|
|
+ break;
|
|
+ case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
|
|
+ dwc->maximum_speed = USB_SPEED_HIGH;
|
|
+ break;
|
|
+ default:
|
|
+ dwc->maximum_speed = USB_SPEED_SUPER;
|
|
+ break;
|
|
+ }
|
|
break;
|
|
}
|
|
}
|
|
@@ -1866,10 +1876,26 @@ static int dwc3_resume(struct device *dev)
|
|
|
|
return 0;
|
|
}
|
|
+
|
|
+static void dwc3_complete(struct device *dev)
|
|
+{
|
|
+ struct dwc3 *dwc = dev_get_drvdata(dev);
|
|
+ u32 reg;
|
|
+
|
|
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
|
|
+ dwc->dis_split_quirk) {
|
|
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
|
|
+ reg |= DWC3_GUCTL3_SPLITDISABLE;
|
|
+ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
|
|
+ }
|
|
+}
|
|
+#else
|
|
+#define dwc3_complete NULL
|
|
#endif /* CONFIG_PM_SLEEP */
|
|
|
|
static const struct dev_pm_ops dwc3_dev_pm_ops = {
|
|
SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
|
|
+ .complete = dwc3_complete,
|
|
SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
|
|
dwc3_runtime_idle)
|
|
};
|
|
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
|
|
index 013f42a2b5dcc..af5533b097133 100644
|
|
--- a/drivers/usb/dwc3/core.h
|
|
+++ b/drivers/usb/dwc3/core.h
|
|
@@ -138,6 +138,7 @@
|
|
#define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10))
|
|
|
|
#define DWC3_GHWPARAMS8 0xc600
|
|
+#define DWC3_GUCTL3 0xc60c
|
|
#define DWC3_GFLADJ 0xc630
|
|
|
|
/* Device Registers */
|
|
@@ -380,6 +381,9 @@
|
|
/* Global User Control Register 2 */
|
|
#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
|
|
|
|
+/* Global User Control Register 3 */
|
|
+#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
|
|
+
|
|
/* Device Configuration Register */
|
|
#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
|
|
#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
|
|
@@ -1052,6 +1056,7 @@ struct dwc3_scratchpad_array {
|
|
* 2 - No de-emphasis
|
|
* 3 - Reserved
|
|
* @dis_metastability_quirk: set to disable metastability quirk.
|
|
+ * @dis_split_quirk: set to disable split boundary.
|
|
* @imod_interval: set the interrupt moderation interval in 250ns
|
|
* increments or 0 to disable.
|
|
*/
|
|
@@ -1245,6 +1250,8 @@ struct dwc3 {
|
|
|
|
unsigned dis_metastability_quirk:1;
|
|
|
|
+ unsigned dis_split_quirk:1;
|
|
+
|
|
u16 imod_interval;
|
|
};
|
|
|
|
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
|
|
index 8852fbfdead4e..336253ff55749 100644
|
|
--- a/drivers/usb/dwc3/dwc3-of-simple.c
|
|
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
|
|
@@ -176,6 +176,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
|
|
{ .compatible = "cavium,octeon-7130-usb-uctl" },
|
|
{ .compatible = "sprd,sc9860-dwc3" },
|
|
{ .compatible = "allwinner,sun50i-h6-dwc3" },
|
|
+ { .compatible = "hisilicon,hi3670-dwc3" },
|
|
{ /* Sentinel */ }
|
|
};
|
|
MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
|
|
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
|
|
index 1f638759a9533..92a7c3a839454 100644
|
|
--- a/drivers/usb/gadget/function/f_ncm.c
|
|
+++ b/drivers/usb/gadget/function/f_ncm.c
|
|
@@ -85,8 +85,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
|
|
/* peak (theoretical) bulk transfer rate in bits-per-second */
|
|
static inline unsigned ncm_bitrate(struct usb_gadget *g)
|
|
{
|
|
- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
|
|
- return 13 * 1024 * 8 * 1000 * 8;
|
|
+ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
|
|
+ return 4250000000U;
|
|
+ else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
|
|
+ return 3750000000U;
|
|
else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
|
|
return 13 * 512 * 8 * 1000 * 8;
|
|
else
|
|
@@ -1534,7 +1536,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
|
|
fs_ncm_notify_desc.bEndpointAddress;
|
|
|
|
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
|
|
- ncm_ss_function, NULL);
|
|
+ ncm_ss_function, ncm_ss_function);
|
|
if (status)
|
|
goto fail;
|
|
|
|
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
|
|
index 9c7ed2539ff77..8ed1295d7e350 100644
|
|
--- a/drivers/usb/gadget/function/f_printer.c
|
|
+++ b/drivers/usb/gadget/function/f_printer.c
|
|
@@ -31,6 +31,7 @@
|
|
#include <linux/types.h>
|
|
#include <linux/ctype.h>
|
|
#include <linux/cdev.h>
|
|
+#include <linux/kref.h>
|
|
|
|
#include <asm/byteorder.h>
|
|
#include <linux/io.h>
|
|
@@ -64,7 +65,7 @@ struct printer_dev {
|
|
struct usb_gadget *gadget;
|
|
s8 interface;
|
|
struct usb_ep *in_ep, *out_ep;
|
|
-
|
|
+ struct kref kref;
|
|
struct list_head rx_reqs; /* List of free RX structs */
|
|
struct list_head rx_reqs_active; /* List of Active RX xfers */
|
|
struct list_head rx_buffers; /* List of completed xfers */
|
|
@@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
+static void printer_dev_free(struct kref *kref)
|
|
+{
|
|
+ struct printer_dev *dev = container_of(kref, struct printer_dev, kref);
|
|
+
|
|
+ kfree(dev);
|
|
+}
|
|
+
|
|
static struct usb_request *
|
|
printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
|
|
{
|
|
@@ -348,6 +356,7 @@ printer_open(struct inode *inode, struct file *fd)
|
|
|
|
spin_unlock_irqrestore(&dev->lock, flags);
|
|
|
|
+ kref_get(&dev->kref);
|
|
DBG(dev, "printer_open returned %x\n", ret);
|
|
return ret;
|
|
}
|
|
@@ -365,6 +374,7 @@ printer_close(struct inode *inode, struct file *fd)
|
|
dev->printer_status &= ~PRINTER_SELECTED;
|
|
spin_unlock_irqrestore(&dev->lock, flags);
|
|
|
|
+ kref_put(&dev->kref, printer_dev_free);
|
|
DBG(dev, "printer_close\n");
|
|
|
|
return 0;
|
|
@@ -1350,7 +1360,8 @@ static void gprinter_free(struct usb_function *f)
|
|
struct f_printer_opts *opts;
|
|
|
|
opts = container_of(f->fi, struct f_printer_opts, func_inst);
|
|
- kfree(dev);
|
|
+
|
|
+ kref_put(&dev->kref, printer_dev_free);
|
|
mutex_lock(&opts->lock);
|
|
--opts->refcnt;
|
|
mutex_unlock(&opts->lock);
|
|
@@ -1419,6 +1430,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
|
|
return ERR_PTR(-ENOMEM);
|
|
}
|
|
|
|
+ kref_init(&dev->kref);
|
|
++opts->refcnt;
|
|
dev->minor = opts->minor;
|
|
dev->pnp_string = opts->pnp_string;
|
|
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
|
|
index fbe96ef1ac7a4..891e9f7f40d59 100644
|
|
--- a/drivers/usb/gadget/function/u_ether.c
|
|
+++ b/drivers/usb/gadget/function/u_ether.c
|
|
@@ -93,7 +93,7 @@ struct eth_dev {
|
|
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
|
|
{
|
|
if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
|
|
- gadget->speed == USB_SPEED_SUPER))
|
|
+ gadget->speed >= USB_SPEED_SUPER))
|
|
return qmult * DEFAULT_QLEN;
|
|
else
|
|
return DEFAULT_QLEN;
|
|
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
|
|
index 3cfc6e2eba71a..e0e3cb2f6f3bc 100644
|
|
--- a/drivers/usb/gadget/function/u_serial.c
|
|
+++ b/drivers/usb/gadget/function/u_serial.c
|
|
@@ -1391,6 +1391,7 @@ void gserial_disconnect(struct gserial *gser)
|
|
if (port->port.tty)
|
|
tty_hangup(port->port.tty);
|
|
}
|
|
+ port->suspended = false;
|
|
spin_unlock_irqrestore(&port->port_lock, flags);
|
|
|
|
/* disable endpoints, aborting down any active I/O */
|
|
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
|
|
index 54501814dc3fd..aebe11829baa6 100644
|
|
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
|
|
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <linux/seq_file.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/timer.h>
|
|
+#include <linux/usb.h>
|
|
#include <linux/usb/ch9.h>
|
|
#include <linux/usb/gadget.h>
|
|
#include <linux/workqueue.h>
|
|
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
|
|
index 4de91653a2c7b..5eb62240c7f87 100644
|
|
--- a/drivers/usb/host/ohci-hcd.c
|
|
+++ b/drivers/usb/host/ohci-hcd.c
|
|
@@ -673,20 +673,24 @@ retry:
|
|
|
|
/* handle root hub init quirks ... */
|
|
val = roothub_a (ohci);
|
|
- val &= ~(RH_A_PSM | RH_A_OCPM);
|
|
+ /* Configure for per-port over-current protection by default */
|
|
+ val &= ~RH_A_NOCP;
|
|
+ val |= RH_A_OCPM;
|
|
if (ohci->flags & OHCI_QUIRK_SUPERIO) {
|
|
- /* NSC 87560 and maybe others */
|
|
+ /* NSC 87560 and maybe others.
|
|
+ * Ganged power switching, no over-current protection.
|
|
+ */
|
|
val |= RH_A_NOCP;
|
|
- val &= ~(RH_A_POTPGT | RH_A_NPS);
|
|
- ohci_writel (ohci, val, &ohci->regs->roothub.a);
|
|
+ val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM);
|
|
} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
|
|
(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
|
|
/* hub power always on; required for AMD-756 and some
|
|
- * Mac platforms. ganged overcurrent reporting, if any.
|
|
+ * Mac platforms.
|
|
*/
|
|
val |= RH_A_NPS;
|
|
- ohci_writel (ohci, val, &ohci->regs->roothub.a);
|
|
}
|
|
+ ohci_writel(ohci, val, &ohci->regs->roothub.a);
|
|
+
|
|
ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
|
|
ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
|
|
&ohci->regs->roothub.b);
|
|
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
|
|
index 113ab5d3cbfe5..f665da34a8f73 100644
|
|
--- a/drivers/usb/host/xhci.c
|
|
+++ b/drivers/usb/host/xhci.c
|
|
@@ -1915,8 +1915,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
|
|
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
|
|
trace_xhci_add_endpoint(ep_ctx);
|
|
|
|
- xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
|
|
-
|
|
xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
|
|
(unsigned int) ep->desc.bEndpointAddress,
|
|
udev->slot_id,
|
|
@@ -2949,6 +2947,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
|
|
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
|
|
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
|
|
virt_dev->eps[i].new_ring = NULL;
|
|
+ xhci_debugfs_create_endpoint(xhci, virt_dev, i);
|
|
}
|
|
command_cleanup:
|
|
kfree(command->completion);
|
|
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
|
|
index d98843feddce0..5076d0155bc3f 100644
|
|
--- a/drivers/vfio/pci/vfio_pci_config.c
|
|
+++ b/drivers/vfio/pci/vfio_pci_config.c
|
|
@@ -406,7 +406,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
|
|
* PF SR-IOV capability, there's therefore no need to trigger
|
|
* faults based on the virtual value.
|
|
*/
|
|
- return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
|
|
+ return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY);
|
|
}
|
|
|
|
/*
|
|
@@ -520,8 +520,8 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
|
|
|
|
count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
|
|
|
|
- /* Mask in virtual memory enable for SR-IOV devices */
|
|
- if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
|
|
+ /* Mask in virtual memory enable */
|
|
+ if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
|
|
u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
|
|
u32 tmp_val = le32_to_cpu(*val);
|
|
|
|
@@ -589,9 +589,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
|
|
* shows it disabled (phys_mem/io, then the device has
|
|
* undergone some kind of backdoor reset and needs to be
|
|
* restored before we allow it to enable the bars.
|
|
- * SR-IOV devices will trigger this, but we catch them later
|
|
+ * SR-IOV devices will trigger this - for mem enable let's
|
|
+ * catch this now and for io enable it will be caught later
|
|
*/
|
|
- if ((new_mem && virt_mem && !phys_mem) ||
|
|
+ if ((new_mem && virt_mem && !phys_mem &&
|
|
+ !pdev->no_command_memory) ||
|
|
(new_io && virt_io && !phys_io) ||
|
|
vfio_need_bar_restore(vdev))
|
|
vfio_bar_restore(vdev);
|
|
@@ -1734,12 +1736,14 @@ int vfio_config_init(struct vfio_pci_device *vdev)
|
|
vconfig[PCI_INTERRUPT_PIN]);
|
|
|
|
vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
|
|
-
|
|
+ }
|
|
+ if (pdev->no_command_memory) {
|
|
/*
|
|
- * VFs do no implement the memory enable bit of the COMMAND
|
|
- * register therefore we'll not have it set in our initial
|
|
- * copy of config space after pci_enable_device(). For
|
|
- * consistency with PFs, set the virtual enable bit here.
|
|
+ * VFs and devices that set pdev->no_command_memory do not
|
|
+ * implement the memory enable bit of the COMMAND register
|
|
+ * therefore we'll not have it set in our initial copy of
|
|
+ * config space after pci_enable_device(). For consistency
|
|
+ * with PFs, set the virtual enable bit here.
|
|
*/
|
|
*(__le16 *)&vconfig[PCI_COMMAND] |=
|
|
cpu_to_le16(PCI_COMMAND_MEMORY);
|
|
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
|
|
index 1d9fb25929459..869dce5f134dd 100644
|
|
--- a/drivers/vfio/pci/vfio_pci_intrs.c
|
|
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
|
|
@@ -352,11 +352,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
|
|
vdev->ctx[vector].producer.token = trigger;
|
|
vdev->ctx[vector].producer.irq = irq;
|
|
ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
|
|
- if (unlikely(ret))
|
|
+ if (unlikely(ret)) {
|
|
dev_info(&pdev->dev,
|
|
"irq bypass producer (token %p) registration fails: %d\n",
|
|
vdev->ctx[vector].producer.token, ret);
|
|
|
|
+ vdev->ctx[vector].producer.token = NULL;
|
|
+ }
|
|
vdev->ctx[vector].trigger = trigger;
|
|
|
|
return 0;
|
|
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
|
|
index 580099afeaffa..fbff5c4743c5e 100644
|
|
--- a/drivers/vfio/vfio.c
|
|
+++ b/drivers/vfio/vfio.c
|
|
@@ -1948,8 +1948,10 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
|
|
if (!group)
|
|
return -ENODEV;
|
|
|
|
- if (group->dev_counter > 1)
|
|
- return -EINVAL;
|
|
+ if (group->dev_counter > 1) {
|
|
+ ret = -EINVAL;
|
|
+ goto err_pin_pages;
|
|
+ }
|
|
|
|
ret = vfio_group_add_container_user(group);
|
|
if (ret)
|
|
@@ -2050,6 +2052,9 @@ int vfio_group_pin_pages(struct vfio_group *group,
|
|
if (!group || !user_iova_pfn || !phys_pfn || !npage)
|
|
return -EINVAL;
|
|
|
|
+ if (group->dev_counter > 1)
|
|
+ return -EINVAL;
|
|
+
|
|
if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
|
|
return -E2BIG;
|
|
|
|
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
|
|
index f48f0db908a46..00d3cf12e92c3 100644
|
|
--- a/drivers/vfio/vfio_iommu_type1.c
|
|
+++ b/drivers/vfio/vfio_iommu_type1.c
|
|
@@ -693,7 +693,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
|
|
|
|
ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
|
|
if (ret) {
|
|
- vfio_unpin_page_external(dma, iova, do_accounting);
|
|
+ if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
|
|
+ vfio_lock_acct(dma, -1, true);
|
|
goto pin_unwind;
|
|
}
|
|
|
|
@@ -2899,7 +2900,8 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
|
|
* size
|
|
*/
|
|
bitmap_set(dma->bitmap, offset >> pgshift,
|
|
- *copied >> pgshift);
|
|
+ ((offset + *copied - 1) >> pgshift) -
|
|
+ (offset >> pgshift) + 1);
|
|
}
|
|
} else
|
|
*copied = copy_from_user(data, (void __user *)vaddr,
|
|
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index 2355f00f57732..1f6301375fd33 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -196,6 +196,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
num_entry);
if (ret < 0) {
dev_err(dev, "led-sources node is invalid.\n");
+		of_node_put(np);
return ERR_PTR(-EINVAL);
}

diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index e116a3f9ad566..687bd2c0d5040 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -2311,7 +2311,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,

ret = radeon_kick_out_firmware_fb(pdev);
if (ret)
-		return ret;
+		goto err_release_fb;

/* request the mem regions */
ret = pci_request_region(pdev, 0, "radeonfb framebuffer");
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index da7c88ffaa6a8..1136b569ccb7c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1006,6 +1006,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
return 0;
}

+	/* bitfill_aligned() assumes that it's at least 8x8 */
+	if (var->xres < 8 || var->yres < 8)
+		return -EINVAL;
+
ret = info->fbops->fb_check_var(var, info);

if (ret)
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index dfe3eb769638b..fde27feae5d0c 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,

i = 0;

+	if (SiS_Pr->ChipType == SIS_730)
+		queuedata = &FQBQData730[0];
+	else
+		queuedata = &FQBQData[0];
+
if(ModeNo > 0x13) {

/* Get VCLK */
@@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
/* Get half colordepth */
colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)];

-	   if(SiS_Pr->ChipType == SIS_730) {
-		queuedata = &FQBQData730[0];
-	   } else {
-		queuedata = &FQBQData[0];
-	   }
-
do {
templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth;

diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 578d3541e3d6f..1e8a38a7967d8 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info)
}

static void vga16fb_clock_chip(struct vga16fb_par *par,
-			       unsigned int pixclock,
+			       unsigned int *pixclock,
const struct fb_info *info,
int mul, int div)
{
@@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
{     0 /* bad */,    0x00, 0x00}};
int err;

-	pixclock = (pixclock * mul) / div;
+	*pixclock = (*pixclock * mul) / div;
best = vgaclocks;
-	err = pixclock - best->pixclock;
+	err = *pixclock - best->pixclock;
if (err < 0) err = -err;
for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) {
int tmp;

-		tmp = pixclock - ptr->pixclock;
+		tmp = *pixclock - ptr->pixclock;
if (tmp < 0) tmp = -tmp;
if (tmp < err) {
err = tmp;
@@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
}
par->misc |= best->misc;
par->clkdiv = best->seq_clock_mode;
-	pixclock = (best->pixclock * div) / mul;
+	*pixclock = (best->pixclock * div) / mul;
}

#define FAIL(X) return -EINVAL
@@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,

if (mode & MODE_8BPP)
/* pixel clock == vga clock / 2 */
-		vga16fb_clock_chip(par, var->pixclock, info, 1, 2);
+		vga16fb_clock_chip(par, &var->pixclock, info, 1, 2);
else
/* pixel clock == vga clock */
-		vga16fb_clock_chip(par, var->pixclock, info, 1, 1);
+		vga16fb_clock_chip(par, &var->pixclock, info, 1, 1);

var->red.offset = var->green.offset = var->blue.offset =
var->transp.offset = 0;
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 1b0b11b55d2a0..46ee0a0998b6f 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)

unsigned int i;
long ret = 0;
-	int num_pinned; /* return value from get_user_pages() */
+	int num_pinned = 0; /* return value from get_user_pages_fast() */
phys_addr_t remote_paddr; /* The next address in the remote buffer */
uint32_t count; /* The number of bytes left to copy */

@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
return -EINVAL;

/*
-	 * The array of pages returned by get_user_pages() covers only
+	 * The array of pages returned by get_user_pages_fast() covers only
* page-aligned memory.  Since the user buffer is probably not
* page-aligned, we need to handle the discrepancy.
*
@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)

/*
* 'pages' is an array of struct page pointers that's initialized by
-	 * get_user_pages().
+	 * get_user_pages_fast().
*/
pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
if (!sg_list_unaligned) {
pr_debug("fsl-hv: could not allocate S/G list\n");
ret = -ENOMEM;
-		goto exit;
+		goto free_pages;
}
sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));

@@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);

if (num_pinned != num_pages) {
-		/* get_user_pages() failed */
pr_debug("fsl-hv: could not lock source buffer\n");
ret = (num_pinned < 0) ? num_pinned : -EFAULT;
goto exit;
@@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
virt_to_phys(sg_list), num_pages);

exit:
-	if (pages) {
-		for (i = 0; i < num_pages; i++)
-			if (pages[i])
-				put_page(pages[i]);
+	if (pages && (num_pinned > 0)) {
+		for (i = 0; i < num_pinned; i++)
+			put_page(pages[i]);
}

kfree(sg_list_unaligned);
+free_pages:
kfree(pages);

if (!ret)
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index 87eaf357ae01f..adf015aa4126f 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -70,7 +70,7 @@
#define EFCH_PM_DECODEEN_WDT_TMREN	BIT(7)


-#define EFCH_PM_DECODEEN3		0x00
+#define EFCH_PM_DECODEEN3		0x03
#define EFCH_PM_DECODEEN_SECOND_RES	GENMASK(1, 0)
#define EFCH_PM_WATCHDOG_DISABLE	((u8)GENMASK(3, 2))

diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index b535f5fa279b9..c2065615fd6ca 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -991,8 +991,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
wd_data->wdd = wdd;
wdd->wd_data = wd_data;

-	if (IS_ERR_OR_NULL(watchdog_kworker))
+	if (IS_ERR_OR_NULL(watchdog_kworker)) {
+		kfree(wd_data);
return -ENODEV;
+	}

device_initialize(&wd_data->dev);
wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
@@ -1018,7 +1020,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
pr_err("%s: a legacy watchdog module is probably present.\n",
wdd->info->identity);
old_wd_data = NULL;
-			kfree(wd_data);
+			put_device(&wd_data->dev);
return err;
}
}
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
|
|
index 5b79cdceefa0f..bc7ed46aaca9f 100644
|
|
--- a/fs/afs/cell.c
|
|
+++ b/fs/afs/cell.c
|
|
@@ -19,7 +19,8 @@ static unsigned __read_mostly afs_cell_gc_delay = 10;
|
|
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
|
|
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
|
|
|
|
-static void afs_manage_cell(struct work_struct *);
|
|
+static void afs_queue_cell_manager(struct afs_net *);
|
|
+static void afs_manage_cell_work(struct work_struct *);
|
|
|
|
static void afs_dec_cells_outstanding(struct afs_net *net)
|
|
{
|
|
@@ -37,19 +38,21 @@ static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
|
|
atomic_inc(&net->cells_outstanding);
|
|
if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
|
|
afs_dec_cells_outstanding(net);
|
|
+ } else {
|
|
+ afs_queue_cell_manager(net);
|
|
}
|
|
}
|
|
|
|
/*
|
|
- * Look up and get an activation reference on a cell record under RCU
|
|
- * conditions. The caller must hold the RCU read lock.
|
|
+ * Look up and get an activation reference on a cell record. The caller must
|
|
+ * hold net->cells_lock at least read-locked.
|
|
*/
|
|
-struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
|
|
- const char *name, unsigned int namesz)
|
|
+static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
|
|
+ const char *name, unsigned int namesz)
|
|
{
|
|
struct afs_cell *cell = NULL;
|
|
struct rb_node *p;
|
|
- int n, seq = 0, ret = 0;
|
|
+ int n;
|
|
|
|
_enter("%*.*s", namesz, namesz, name);
|
|
|
|
@@ -58,61 +61,47 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
|
|
if (namesz > AFS_MAXCELLNAME)
|
|
return ERR_PTR(-ENAMETOOLONG);
|
|
|
|
- do {
|
|
- /* Unfortunately, rbtree walking doesn't give reliable results
|
|
- * under just the RCU read lock, so we have to check for
|
|
- * changes.
|
|
- */
|
|
- if (cell)
|
|
- afs_put_cell(net, cell);
|
|
- cell = NULL;
|
|
- ret = -ENOENT;
|
|
-
|
|
- read_seqbegin_or_lock(&net->cells_lock, &seq);
|
|
-
|
|
- if (!name) {
|
|
- cell = rcu_dereference_raw(net->ws_cell);
|
|
- if (cell) {
|
|
- afs_get_cell(cell);
|
|
- ret = 0;
|
|
- break;
|
|
- }
|
|
- ret = -EDESTADDRREQ;
|
|
- continue;
|
|
- }
|
|
+ if (!name) {
|
|
+ cell = net->ws_cell;
|
|
+ if (!cell)
|
|
+ return ERR_PTR(-EDESTADDRREQ);
|
|
+ goto found;
|
|
+ }
|
|
|
|
- p = rcu_dereference_raw(net->cells.rb_node);
|
|
- while (p) {
|
|
- cell = rb_entry(p, struct afs_cell, net_node);
|
|
-
|
|
- n = strncasecmp(cell->name, name,
|
|
- min_t(size_t, cell->name_len, namesz));
|
|
- if (n == 0)
|
|
- n = cell->name_len - namesz;
|
|
- if (n < 0) {
|
|
- p = rcu_dereference_raw(p->rb_left);
|
|
- } else if (n > 0) {
|
|
- p = rcu_dereference_raw(p->rb_right);
|
|
- } else {
|
|
- if (atomic_inc_not_zero(&cell->usage)) {
|
|
- ret = 0;
|
|
- break;
|
|
- }
|
|
- /* We want to repeat the search, this time with
|
|
- * the lock properly locked.
|
|
- */
|
|
- }
|
|
- cell = NULL;
|
|
- }
|
|
+ p = net->cells.rb_node;
|
|
+ while (p) {
|
|
+ cell = rb_entry(p, struct afs_cell, net_node);
|
|
+
|
|
+ n = strncasecmp(cell->name, name,
|
|
+ min_t(size_t, cell->name_len, namesz));
|
|
+ if (n == 0)
|
|
+ n = cell->name_len - namesz;
|
|
+ if (n < 0)
|
|
+ p = p->rb_left;
|
|
+ else if (n > 0)
|
|
+ p = p->rb_right;
|
|
+ else
|
|
+ goto found;
|
|
+ }
|
|
|
|
- } while (need_seqretry(&net->cells_lock, seq));
|
|
+ return ERR_PTR(-ENOENT);
|
|
|
|
- done_seqretry(&net->cells_lock, seq);
|
|
+found:
|
|
+ return afs_use_cell(cell);
|
|
+}
|
|
|
|
- if (ret != 0 && cell)
|
|
- afs_put_cell(net, cell);
|
|
+/*
|
|
+ * Look up and get an activation reference on a cell record.
|
|
+ */
|
|
+struct afs_cell *afs_find_cell(struct afs_net *net,
|
|
+ const char *name, unsigned int namesz)
|
|
+{
|
|
+ struct afs_cell *cell;
|
|
|
|
- return ret == 0 ? cell : ERR_PTR(ret);
|
|
+ down_read(&net->cells_lock);
|
|
+ cell = afs_find_cell_locked(net, name, namesz);
|
|
+ up_read(&net->cells_lock);
|
|
+ return cell;
|
|
}
|
|
|
|
/*
|
|
@@ -166,8 +155,9 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
|
|
cell->name[i] = tolower(name[i]);
|
|
cell->name[i] = 0;
|
|
|
|
- atomic_set(&cell->usage, 2);
|
|
- INIT_WORK(&cell->manager, afs_manage_cell);
|
|
+ atomic_set(&cell->ref, 1);
|
|
+ atomic_set(&cell->active, 0);
|
|
+ INIT_WORK(&cell->manager, afs_manage_cell_work);
|
|
cell->volumes = RB_ROOT;
|
|
INIT_HLIST_HEAD(&cell->proc_volumes);
|
|
seqlock_init(&cell->volume_lock);
|
|
@@ -206,6 +196,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
|
|
cell->dns_source = vllist->source;
|
|
cell->dns_status = vllist->status;
|
|
smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
|
|
+ atomic_inc(&net->cells_outstanding);
|
|
|
|
_leave(" = %p", cell);
|
|
return cell;
|
|
@@ -245,9 +236,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
|
|
_enter("%s,%s", name, vllist);
|
|
|
|
if (!excl) {
|
|
- rcu_read_lock();
|
|
- cell = afs_lookup_cell_rcu(net, name, namesz);
|
|
- rcu_read_unlock();
|
|
+ cell = afs_find_cell(net, name, namesz);
|
|
if (!IS_ERR(cell))
|
|
goto wait_for_cell;
|
|
}
|
|
@@ -268,7 +257,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
|
|
/* Find the insertion point and check to see if someone else added a
|
|
* cell whilst we were allocating.
|
|
*/
|
|
- write_seqlock(&net->cells_lock);
|
|
+ down_write(&net->cells_lock);
|
|
|
|
pp = &net->cells.rb_node;
|
|
parent = NULL;
|
|
@@ -290,23 +279,23 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
|
|
|
|
cell = candidate;
|
|
candidate = NULL;
|
|
+ atomic_set(&cell->active, 2);
|
|
rb_link_node_rcu(&cell->net_node, parent, pp);
|
|
rb_insert_color(&cell->net_node, &net->cells);
|
|
- atomic_inc(&net->cells_outstanding);
|
|
- write_sequnlock(&net->cells_lock);
|
|
+ up_write(&net->cells_lock);
|
|
|
|
- queue_work(afs_wq, &cell->manager);
|
|
+ afs_queue_cell(cell);
|
|
|
|
wait_for_cell:
|
|
_debug("wait_for_cell");
|
|
wait_var_event(&cell->state,
|
|
({
|
|
state = smp_load_acquire(&cell->state); /* vs error */
|
|
- state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
|
|
+ state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
|
|
}));
|
|
|
|
/* Check the state obtained from the wait check. */
|
|
- if (state == AFS_CELL_FAILED) {
|
|
+ if (state == AFS_CELL_REMOVED) {
|
|
ret = cell->error;
|
|
goto error;
|
|
}
|
|
@@ -320,16 +309,17 @@ cell_already_exists:
|
|
if (excl) {
|
|
ret = -EEXIST;
|
|
} else {
|
|
- afs_get_cell(cursor);
|
|
+ afs_use_cell(cursor);
|
|
ret = 0;
|
|
}
|
|
- write_sequnlock(&net->cells_lock);
|
|
- kfree(candidate);
|
|
+ up_write(&net->cells_lock);
|
|
+ if (candidate)
|
|
+ afs_put_cell(candidate);
|
|
if (ret == 0)
|
|
goto wait_for_cell;
|
|
goto error_noput;
|
|
error:
|
|
- afs_put_cell(net, cell);
|
|
+ afs_unuse_cell(net, cell);
|
|
error_noput:
|
|
_leave(" = %d [error]", ret);
|
|
return ERR_PTR(ret);
|
|
@@ -374,15 +364,15 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
|
|
}
|
|
|
|
if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
|
|
- afs_get_cell(new_root);
|
|
+ afs_use_cell(new_root);
|
|
|
|
/* install the new cell */
|
|
- write_seqlock(&net->cells_lock);
|
|
- old_root = rcu_access_pointer(net->ws_cell);
|
|
- rcu_assign_pointer(net->ws_cell, new_root);
|
|
- write_sequnlock(&net->cells_lock);
|
|
+ down_write(&net->cells_lock);
|
|
+ old_root = net->ws_cell;
|
|
+ net->ws_cell = new_root;
|
|
+ up_write(&net->cells_lock);
|
|
|
|
- afs_put_cell(net, old_root);
|
|
+ afs_unuse_cell(net, old_root);
|
|
_leave(" = 0");
|
|
return 0;
|
|
}
|
|
@@ -488,18 +478,21 @@ out_wake:
|
|
static void afs_cell_destroy(struct rcu_head *rcu)
|
|
{
|
|
struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
|
|
+ struct afs_net *net = cell->net;
|
|
+ int u;
|
|
|
|
_enter("%p{%s}", cell, cell->name);
|
|
|
|
- ASSERTCMP(atomic_read(&cell->usage), ==, 0);
|
|
+ u = atomic_read(&cell->ref);
|
|
+ ASSERTCMP(u, ==, 0);
|
|
|
|
- afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
|
|
- afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
|
|
- afs_put_cell(cell->net, cell->alias_of);
|
|
+ afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
|
|
+ afs_unuse_cell(net, cell->alias_of);
|
|
key_put(cell->anonymous_key);
|
|
kfree(cell->name);
|
|
kfree(cell);
|
|
|
|
+ afs_dec_cells_outstanding(net);
|
|
_leave(" [destroyed]");
|
|
}
|
|
|
|
@@ -534,16 +527,50 @@ void afs_cells_timer(struct timer_list *timer)
|
|
*/
|
|
struct afs_cell *afs_get_cell(struct afs_cell *cell)
|
|
{
|
|
- atomic_inc(&cell->usage);
|
|
+ if (atomic_read(&cell->ref) <= 0)
|
|
+ BUG();
|
|
+
|
|
+ atomic_inc(&cell->ref);
|
|
return cell;
|
|
}
|
|
|
|
/*
|
|
* Drop a reference on a cell record.
|
|
*/
|
|
-void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
|
|
+void afs_put_cell(struct afs_cell *cell)
|
|
+{
|
|
+ if (cell) {
|
|
+ unsigned int u, a;
|
|
+
|
|
+ u = atomic_dec_return(&cell->ref);
|
|
+ if (u == 0) {
|
|
+ a = atomic_read(&cell->active);
|
|
+ WARN(a != 0, "Cell active count %u > 0\n", a);
|
|
+ call_rcu(&cell->rcu, afs_cell_destroy);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Note a cell becoming more active.
|
|
+ */
|
|
+struct afs_cell *afs_use_cell(struct afs_cell *cell)
|
|
+{
|
|
+ if (atomic_read(&cell->ref) <= 0)
|
|
+ BUG();
|
|
+
|
|
+ atomic_inc(&cell->active);
|
|
+ return cell;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Record a cell becoming less active. When the active counter reaches 1, it
|
|
+ * is scheduled for destruction, but may get reactivated.
|
|
+ */
|
|
+void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell)
|
|
{
|
|
time64_t now, expire_delay;
|
|
+ int a;
|
|
|
|
if (!cell)
|
|
return;
|
|
@@ -556,11 +583,21 @@ void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
|
|
if (cell->vl_servers->nr_servers)
|
|
expire_delay = afs_cell_gc_delay;
|
|
|
|
- if (atomic_dec_return(&cell->usage) > 1)
|
|
- return;
|
|
+ a = atomic_dec_return(&cell->active);
|
|
+ WARN_ON(a == 0);
|
|
+ if (a == 1)
|
|
+ /* 'cell' may now be garbage collected. */
|
|
+ afs_set_cell_timer(net, expire_delay);
|
|
+}
|
|
|
|
- /* 'cell' may now be garbage collected. */
|
|
- afs_set_cell_timer(net, expire_delay);
|
|
+/*
|
|
+ * Queue a cell for management, giving the workqueue a ref to hold.
|
|
+ */
|
|
+void afs_queue_cell(struct afs_cell *cell)
|
|
+{
|
|
+ afs_get_cell(cell);
|
|
+ if (!queue_work(afs_wq, &cell->manager))
|
|
+ afs_put_cell(cell);
|
|
}
|
|
|
|
/*
|
|
@@ -660,12 +697,10 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
|
|
* Manage a cell record, initialising and destroying it, maintaining its DNS
|
|
* records.
|
|
*/
|
|
-static void afs_manage_cell(struct work_struct *work)
|
|
+static void afs_manage_cell(struct afs_cell *cell)
|
|
{
|
|
- struct afs_cell *cell = container_of(work, struct afs_cell, manager);
|
|
struct afs_net *net = cell->net;
|
|
- bool deleted;
|
|
- int ret, usage;
|
|
+ int ret, active;
|
|
|
|
_enter("%s", cell->name);
|
|
|
|
@@ -674,14 +709,17 @@ again:
|
|
switch (cell->state) {
|
|
case AFS_CELL_INACTIVE:
|
|
case AFS_CELL_FAILED:
|
|
- write_seqlock(&net->cells_lock);
|
|
- usage = 1;
|
|
- deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
|
|
- if (deleted)
|
|
+ down_write(&net->cells_lock);
|
|
+ active = 1;
|
|
+ if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
|
|
rb_erase(&cell->net_node, &net->cells);
|
|
- write_sequnlock(&net->cells_lock);
|
|
- if (deleted)
|
|
+ smp_store_release(&cell->state, AFS_CELL_REMOVED);
|
|
+ }
|
|
+ up_write(&net->cells_lock);
|
|
+ if (cell->state == AFS_CELL_REMOVED) {
|
|
+ wake_up_var(&cell->state);
|
|
goto final_destruction;
|
|
+ }
|
|
if (cell->state == AFS_CELL_FAILED)
|
|
goto done;
|
|
smp_store_release(&cell->state, AFS_CELL_UNSET);
|
|
@@ -703,7 +741,7 @@ again:
|
|
goto again;
|
|
|
|
case AFS_CELL_ACTIVE:
|
|
- if (atomic_read(&cell->usage) > 1) {
|
|
+ if (atomic_read(&cell->active) > 1) {
|
|
if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
|
|
ret = afs_update_cell(cell);
|
|
if (ret < 0)
|
|
@@ -716,13 +754,16 @@ again:
|
|
goto again;
|
|
|
|
case AFS_CELL_DEACTIVATING:
|
|
- if (atomic_read(&cell->usage) > 1)
|
|
+ if (atomic_read(&cell->active) > 1)
|
|
goto reverse_deactivation;
|
|
afs_deactivate_cell(net, cell);
|
|
smp_store_release(&cell->state, AFS_CELL_INACTIVE);
|
|
wake_up_var(&cell->state);
|
|
goto again;
|
|
|
|
+ case AFS_CELL_REMOVED:
|
|
+ goto done;
|
|
+
|
|
default:
|
|
break;
|
|
}
|
|
@@ -748,9 +789,18 @@ done:
|
|
return;
|
|
|
|
final_destruction:
|
|
- call_rcu(&cell->rcu, afs_cell_destroy);
|
|
- afs_dec_cells_outstanding(net);
|
|
- _leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
|
|
+ /* The root volume is pinning the cell */
|
|
+ afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
|
|
+ cell->root_volume = NULL;
|
|
+ afs_put_cell(cell);
|
|
+}
|
|
+
|
|
+static void afs_manage_cell_work(struct work_struct *work)
|
|
+{
|
|
+ struct afs_cell *cell = container_of(work, struct afs_cell, manager);
|
|
+
|
|
+ afs_manage_cell(cell);
|
|
+ afs_put_cell(cell);
|
|
}
|
|
|
|
/*
|
|
@@ -779,26 +829,25 @@ void afs_manage_cells(struct work_struct *work)
|
|
* lack of use and cells whose DNS results have expired and dispatch
|
|
* their managers.
|
|
*/
|
|
- read_seqlock_excl(&net->cells_lock);
|
|
+ down_read(&net->cells_lock);
|
|
|
|
for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
|
|
struct afs_cell *cell =
|
|
rb_entry(cursor, struct afs_cell, net_node);
|
|
- unsigned usage;
|
|
+ unsigned active;
|
|
bool sched_cell = false;
|
|
|
|
- usage = atomic_read(&cell->usage);
|
|
- _debug("manage %s %u", cell->name, usage);
|
|
+ active = atomic_read(&cell->active);
|
|
+ _debug("manage %s %u %u", cell->name, atomic_read(&cell->ref), active);
|
|
|
|
- ASSERTCMP(usage, >=, 1);
|
|
+ ASSERTCMP(active, >=, 1);
|
|
|
|
if (purging) {
|
|
if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
|
|
- usage = atomic_dec_return(&cell->usage);
|
|
- ASSERTCMP(usage, ==, 1);
|
|
+ atomic_dec(&cell->active);
|
|
}
|
|
|
|
- if (usage == 1) {
|
|
+ if (active == 1) {
|
|
struct afs_vlserver_list *vllist;
|
|
time64_t expire_at = cell->last_inactive;
|
|
|
|
@@ -821,10 +870,10 @@ void afs_manage_cells(struct work_struct *work)
|
|
}
|
|
|
|
if (sched_cell)
|
|
- queue_work(afs_wq, &cell->manager);
|
|
+ afs_queue_cell(cell);
|
|
}
|
|
|
|
- read_sequnlock_excl(&net->cells_lock);
|
|
+ up_read(&net->cells_lock);
|
|
|
|
/* Update the timer on the way out. We have to pass an increment on
|
|
* cells_outstanding in the namespace that we are in to the timer or
|
|
@@ -854,11 +903,11 @@ void afs_cell_purge(struct afs_net *net)
|
|
|
|
_enter("");
|
|
|
|
- write_seqlock(&net->cells_lock);
|
|
- ws = rcu_access_pointer(net->ws_cell);
|
|
- RCU_INIT_POINTER(net->ws_cell, NULL);
|
|
- write_sequnlock(&net->cells_lock);
|
|
- afs_put_cell(net, ws);
|
|
+ down_write(&net->cells_lock);
|
|
+ ws = net->ws_cell;
|
|
+ net->ws_cell = NULL;
|
|
+ up_write(&net->cells_lock);
|
|
+ afs_unuse_cell(net, ws);
|
|
|
|
_debug("del timer");
|
|
if (del_timer_sync(&net->cells_timer))
|
|
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
|
|
index 7b784af604fd9..da32797dd4257 100644
|
|
--- a/fs/afs/dynroot.c
|
|
+++ b/fs/afs/dynroot.c
|
|
@@ -123,9 +123,9 @@ static int afs_probe_cell_name(struct dentry *dentry)
|
|
len--;
|
|
}
|
|
|
|
- cell = afs_lookup_cell_rcu(net, name, len);
|
|
+ cell = afs_find_cell(net, name, len);
|
|
if (!IS_ERR(cell)) {
|
|
- afs_put_cell(net, cell);
|
|
+ afs_unuse_cell(net, cell);
|
|
return 0;
|
|
}
|
|
|
|
@@ -179,7 +179,6 @@ static struct dentry *afs_lookup_atcell(struct dentry *dentry)
|
|
struct afs_cell *cell;
|
|
struct afs_net *net = afs_d2net(dentry);
|
|
struct dentry *ret;
|
|
- unsigned int seq = 0;
|
|
char *name;
|
|
int len;
|
|
|
|
@@ -191,17 +190,13 @@ static struct dentry *afs_lookup_atcell(struct dentry *dentry)
|
|
if (!name)
|
|
goto out_p;
|
|
|
|
- rcu_read_lock();
|
|
- do {
|
|
- read_seqbegin_or_lock(&net->cells_lock, &seq);
|
|
- cell = rcu_dereference_raw(net->ws_cell);
|
|
- if (cell) {
|
|
- len = cell->name_len;
|
|
- memcpy(name, cell->name, len + 1);
|
|
- }
|
|
- } while (need_seqretry(&net->cells_lock, seq));
|
|
- done_seqretry(&net->cells_lock, seq);
|
|
- rcu_read_unlock();
|
|
+ down_read(&net->cells_lock);
|
|
+ cell = net->ws_cell;
|
|
+ if (cell) {
|
|
+ len = cell->name_len;
|
|
+ memcpy(name, cell->name, len + 1);
|
|
+ }
|
|
+ up_read(&net->cells_lock);
|
|
|
|
ret = ERR_PTR(-ENOENT);
|
|
if (!cell)
|
|
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
|
|
index e1ebead2e505a..7689f4535ef9c 100644
|
|
--- a/fs/afs/internal.h
|
|
+++ b/fs/afs/internal.h
|
|
@@ -263,11 +263,11 @@ struct afs_net {
|
|
|
|
/* Cell database */
|
|
struct rb_root cells;
|
|
- struct afs_cell __rcu *ws_cell;
|
|
+ struct afs_cell *ws_cell;
|
|
struct work_struct cells_manager;
|
|
struct timer_list cells_timer;
|
|
atomic_t cells_outstanding;
|
|
- seqlock_t cells_lock;
|
|
+ struct rw_semaphore cells_lock;
|
|
struct mutex cells_alias_lock;
|
|
|
|
struct mutex proc_cells_lock;
|
|
@@ -326,6 +326,7 @@ enum afs_cell_state {
|
|
AFS_CELL_DEACTIVATING,
|
|
AFS_CELL_INACTIVE,
|
|
AFS_CELL_FAILED,
|
|
+ AFS_CELL_REMOVED,
|
|
};
|
|
|
|
/*
|
|
@@ -363,7 +364,8 @@ struct afs_cell {
|
|
#endif
|
|
time64_t dns_expiry; /* Time AFSDB/SRV record expires */
|
|
time64_t last_inactive; /* Time of last drop of usage count */
|
|
- atomic_t usage;
|
|
+ atomic_t ref; /* Struct refcount */
|
|
+ atomic_t active; /* Active usage counter */
|
|
unsigned long flags;
|
|
#define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */
|
|
#define AFS_CELL_FL_DO_LOOKUP 1 /* DNS lookup requested */
|
|
@@ -915,11 +917,14 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
|
|
* cell.c
|
|
*/
|
|
extern int afs_cell_init(struct afs_net *, const char *);
|
|
-extern struct afs_cell *afs_lookup_cell_rcu(struct afs_net *, const char *, unsigned);
|
|
+extern struct afs_cell *afs_find_cell(struct afs_net *, const char *, unsigned);
|
|
extern struct afs_cell *afs_lookup_cell(struct afs_net *, const char *, unsigned,
|
|
const char *, bool);
|
|
+extern struct afs_cell *afs_use_cell(struct afs_cell *);
|
|
+extern void afs_unuse_cell(struct afs_net *, struct afs_cell *);
|
|
extern struct afs_cell *afs_get_cell(struct afs_cell *);
|
|
-extern void afs_put_cell(struct afs_net *, struct afs_cell *);
|
|
+extern void afs_put_cell(struct afs_cell *);
|
|
+extern void afs_queue_cell(struct afs_cell *);
|
|
extern void afs_manage_cells(struct work_struct *);
|
|
extern void afs_cells_timer(struct timer_list *);
|
|
extern void __net_exit afs_cell_purge(struct afs_net *);
|
|
diff --git a/fs/afs/main.c b/fs/afs/main.c
|
|
index 31b472f7c734c..accdd8970e7c0 100644
|
|
--- a/fs/afs/main.c
|
|
+++ b/fs/afs/main.c
|
|
@@ -78,7 +78,7 @@ static int __net_init afs_net_init(struct net *net_ns)
|
|
mutex_init(&net->socket_mutex);
|
|
|
|
net->cells = RB_ROOT;
|
|
- seqlock_init(&net->cells_lock);
|
|
+ init_rwsem(&net->cells_lock);
|
|
INIT_WORK(&net->cells_manager, afs_manage_cells);
|
|
timer_setup(&net->cells_timer, afs_cells_timer, 0);
|
|
|
|
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
|
|
index 79bc5f1338edf..c69a0282960cc 100644
|
|
--- a/fs/afs/mntpt.c
|
|
+++ b/fs/afs/mntpt.c
|
|
@@ -88,7 +88,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
|
|
ctx->force = true;
|
|
}
|
|
if (ctx->cell) {
|
|
- afs_put_cell(ctx->net, ctx->cell);
|
|
+ afs_unuse_cell(ctx->net, ctx->cell);
|
|
ctx->cell = NULL;
|
|
}
|
|
if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
|
|
@@ -124,7 +124,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
|
|
char *buf;
|
|
|
|
if (src_as->cell)
|
|
- ctx->cell = afs_get_cell(src_as->cell);
|
|
+ ctx->cell = afs_use_cell(src_as->cell);
|
|
|
|
if (size < 2 || size > PAGE_SIZE - 1)
|
|
return -EINVAL;
|
|
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
|
|
index e817fc740ba01..855d7358933b4 100644
|
|
--- a/fs/afs/proc.c
|
|
+++ b/fs/afs/proc.c
|
|
@@ -38,7 +38,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
|
|
|
|
if (v == SEQ_START_TOKEN) {
|
|
/* display header on line 1 */
|
|
- seq_puts(m, "USE TTL SV ST NAME\n");
|
|
+ seq_puts(m, "USE ACT TTL SV ST NAME\n");
|
|
return 0;
|
|
}
|
|
|
|
@@ -46,10 +46,11 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
|
|
vllist = rcu_dereference(cell->vl_servers);
|
|
|
|
/* display one cell per line on subsequent lines */
|
|
- seq_printf(m, "%3u %6lld %2u %2u %s\n",
|
|
- atomic_read(&cell->usage),
|
|
+ seq_printf(m, "%3u %3u %6lld %2u %2u %s\n",
|
|
+ atomic_read(&cell->ref),
|
|
+ atomic_read(&cell->active),
|
|
cell->dns_expiry - ktime_get_real_seconds(),
|
|
- vllist->nr_servers,
|
|
+ vllist ? vllist->nr_servers : 0,
|
|
cell->state,
|
|
cell->name);
|
|
return 0;
|
|
@@ -128,7 +129,7 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
|
|
}
|
|
|
|
if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
|
|
- afs_put_cell(net, cell);
|
|
+ afs_unuse_cell(net, cell);
|
|
} else {
|
|
goto inval;
|
|
}
|
|
@@ -154,13 +155,11 @@ static int afs_proc_rootcell_show(struct seq_file *m, void *v)
|
|
struct afs_net *net;
|
|
|
|
net = afs_seq2net_single(m);
|
|
- if (rcu_access_pointer(net->ws_cell)) {
|
|
- rcu_read_lock();
|
|
- cell = rcu_dereference(net->ws_cell);
|
|
- if (cell)
|
|
- seq_printf(m, "%s\n", cell->name);
|
|
- rcu_read_unlock();
|
|
- }
|
|
+ down_read(&net->cells_lock);
|
|
+ cell = net->ws_cell;
|
|
+ if (cell)
|
|
+ seq_printf(m, "%s\n", cell->name);
|
|
+ up_read(&net->cells_lock);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/fs/afs/super.c b/fs/afs/super.c
|
|
index b552357b1d137..e72c223f831d2 100644
|
|
--- a/fs/afs/super.c
|
|
+++ b/fs/afs/super.c
|
|
@@ -294,7 +294,7 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
|
|
cellnamesz, cellnamesz, cellname ?: "");
|
|
return PTR_ERR(cell);
|
|
}
|
|
- afs_put_cell(ctx->net, ctx->cell);
|
|
+ afs_unuse_cell(ctx->net, ctx->cell);
|
|
ctx->cell = cell;
|
|
}
|
|
|
|
@@ -389,8 +389,8 @@ static int afs_validate_fc(struct fs_context *fc)
|
|
_debug("switch to alias");
|
|
key_put(ctx->key);
|
|
ctx->key = NULL;
|
|
- cell = afs_get_cell(ctx->cell->alias_of);
|
|
- afs_put_cell(ctx->net, ctx->cell);
|
|
+ cell = afs_use_cell(ctx->cell->alias_of);
|
|
+ afs_unuse_cell(ctx->net, ctx->cell);
|
|
ctx->cell = cell;
|
|
goto reget_key;
|
|
}
|
|
@@ -508,7 +508,7 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
|
|
if (ctx->dyn_root) {
|
|
as->dyn_root = true;
|
|
} else {
|
|
- as->cell = afs_get_cell(ctx->cell);
|
|
+ as->cell = afs_use_cell(ctx->cell);
|
|
as->volume = afs_get_volume(ctx->volume,
|
|
afs_volume_trace_get_alloc_sbi);
|
|
}
|
|
@@ -521,7 +521,7 @@ static void afs_destroy_sbi(struct afs_super_info *as)
|
|
if (as) {
|
|
struct afs_net *net = afs_net(as->net_ns);
|
|
afs_put_volume(net, as->volume, afs_volume_trace_put_destroy_sbi);
|
|
- afs_put_cell(net, as->cell);
|
|
+ afs_unuse_cell(net, as->cell);
|
|
put_net(as->net_ns);
|
|
kfree(as);
|
|
}
|
|
@@ -607,7 +607,7 @@ static void afs_free_fc(struct fs_context *fc)
|
|
|
|
afs_destroy_sbi(fc->s_fs_info);
|
|
afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_free_fc);
|
|
- afs_put_cell(ctx->net, ctx->cell);
|
|
+ afs_unuse_cell(ctx->net, ctx->cell);
|
|
key_put(ctx->key);
|
|
kfree(ctx);
|
|
}
|
|
@@ -634,9 +634,7 @@ static int afs_init_fs_context(struct fs_context *fc)
|
|
ctx->net = afs_net(fc->net_ns);
|
|
|
|
/* Default to the workstation cell. */
|
|
- rcu_read_lock();
|
|
- cell = afs_lookup_cell_rcu(ctx->net, NULL, 0);
|
|
- rcu_read_unlock();
|
|
+ cell = afs_find_cell(ctx->net, NULL, 0);
|
|
if (IS_ERR(cell))
|
|
cell = NULL;
|
|
ctx->cell = cell;
|
|
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
|
|
index 5082ef04e99c5..ddb4cb67d0fd9 100644
|
|
--- a/fs/afs/vl_alias.c
|
|
+++ b/fs/afs/vl_alias.c
|
|
@@ -177,7 +177,7 @@ static int afs_compare_cell_roots(struct afs_cell *cell)
|
|
|
|
is_alias:
|
|
rcu_read_unlock();
|
|
- cell->alias_of = afs_get_cell(p);
|
|
+ cell->alias_of = afs_use_cell(p);
|
|
return 1;
|
|
}
|
|
|
|
@@ -247,18 +247,18 @@ static int afs_query_for_alias(struct afs_cell *cell, struct key *key)
|
|
continue;
|
|
if (p->root_volume)
|
|
continue; /* Ignore cells that have a root.cell volume. */
|
|
- afs_get_cell(p);
|
|
+ afs_use_cell(p);
|
|
mutex_unlock(&cell->net->proc_cells_lock);
|
|
|
|
if (afs_query_for_alias_one(cell, key, p) != 0)
|
|
goto is_alias;
|
|
|
|
if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) {
|
|
- afs_put_cell(cell->net, p);
|
|
+ afs_unuse_cell(cell->net, p);
|
|
return -ERESTARTSYS;
|
|
}
|
|
|
|
- afs_put_cell(cell->net, p);
|
|
+ afs_unuse_cell(cell->net, p);
|
|
}
|
|
|
|
mutex_unlock(&cell->net->proc_cells_lock);
|
|
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
|
|
index f405ca8b240a5..750bd1579f212 100644
|
|
--- a/fs/afs/vl_rotate.c
|
|
+++ b/fs/afs/vl_rotate.c
|
|
@@ -45,7 +45,7 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
|
|
cell->dns_expiry <= ktime_get_real_seconds()) {
|
|
dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
|
|
set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
|
|
- queue_work(afs_wq, &cell->manager);
|
|
+ afs_queue_cell(cell);
|
|
|
|
if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
|
|
if (wait_var_event_interruptible(
|
|
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
|
|
index 9bc0509e3634c..a838030e95634 100644
|
|
--- a/fs/afs/volume.c
|
|
+++ b/fs/afs/volume.c
|
|
@@ -106,7 +106,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
|
|
return volume;
|
|
|
|
error_1:
|
|
- afs_put_cell(params->net, volume->cell);
|
|
+ afs_put_cell(volume->cell);
|
|
kfree(volume);
|
|
error_0:
|
|
return ERR_PTR(ret);
|
|
@@ -228,7 +228,7 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
|
|
|
|
afs_remove_volume_from_cell(volume);
|
|
afs_put_serverlist(net, rcu_access_pointer(volume->servers));
|
|
- afs_put_cell(net, volume->cell);
|
|
+ afs_put_cell(volume->cell);
|
|
trace_afs_volume(volume->vid, atomic_read(&volume->usage),
|
|
afs_volume_trace_free);
|
|
kfree_rcu(volume, rcu);
|
|
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
|
|
index 8bbb734f3f514..49384d55a908f 100644
|
|
--- a/fs/btrfs/extent-io-tree.h
|
|
+++ b/fs/btrfs/extent-io-tree.h
|
|
@@ -48,6 +48,7 @@ enum {
|
|
IO_TREE_INODE_FILE_EXTENT,
|
|
IO_TREE_LOG_CSUM_RANGE,
|
|
IO_TREE_SELFTEST,
|
|
+ IO_TREE_DEVICE_ALLOC_STATE,
|
|
};
|
|
|
|
struct extent_io_tree {
|
|
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
|
|
index 79e9a80bd37a0..f9d8bd3099488 100644
|
|
--- a/fs/btrfs/volumes.c
|
|
+++ b/fs/btrfs/volumes.c
|
|
@@ -406,7 +406,7 @@ void __exit btrfs_cleanup_fs_uuids(void)
|
|
* Returned struct is not linked onto any lists and must be destroyed using
|
|
* btrfs_free_device.
|
|
*/
|
|
-static struct btrfs_device *__alloc_device(void)
|
|
+static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
|
|
{
|
|
struct btrfs_device *dev;
|
|
|
|
@@ -433,7 +433,8 @@ static struct btrfs_device *__alloc_device(void)
|
|
btrfs_device_data_ordered_init(dev);
|
|
INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
|
|
INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
|
|
- extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);
|
|
+ extent_io_tree_init(fs_info, &dev->alloc_state,
|
|
+ IO_TREE_DEVICE_ALLOC_STATE, NULL);
|
|
|
|
return dev;
|
|
}
|
|
@@ -6545,7 +6546,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
|
|
if (WARN_ON(!devid && !fs_info))
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- dev = __alloc_device();
|
|
+ dev = __alloc_device(fs_info);
|
|
if (IS_ERR(dev))
|
|
return dev;
|
|
|
|
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
|
|
index 689162e2e1755..3150c19cdc2fb 100644
|
|
--- a/fs/cifs/asn1.c
|
|
+++ b/fs/cifs/asn1.c
|
|
@@ -530,8 +530,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
|
|
return 0;
|
|
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|
|
|| (tag != ASN1_EOC)) {
|
|
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
|
|
- cls, con, tag, end, *end);
|
|
+ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
|
|
+ cls, con, tag, end);
|
|
return 0;
|
|
}
|
|
|
|
@@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
|
|
return 0;
|
|
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|
|
|| (tag != ASN1_SEQ)) {
|
|
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
|
|
- cls, con, tag, end, *end);
|
|
+ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n",
|
|
+ cls, con, tag, end);
|
|
return 0;
|
|
}
|
|
|
|
@@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
|
|
return 0;
|
|
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|
|
|| (tag != ASN1_EOC)) {
|
|
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
|
|
- cls, con, tag, end, *end);
|
|
+ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
|
|
+ cls, con, tag, end);
|
|
return 0;
|
|
}
|
|
|
|
@@ -564,8 +564,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
|
|
return 0;
|
|
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|
|
|| (tag != ASN1_SEQ)) {
|
|
- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
|
|
- cls, con, tag, end, *end);
|
|
+ cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n",
|
|
+ cls, con, tag, sequence_end);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
|
|
index 6025d7fc7bbfd..d0658891b0a6d 100644
|
|
--- a/fs/cifs/cifsacl.c
|
|
+++ b/fs/cifs/cifsacl.c
|
|
@@ -338,7 +338,7 @@ invalidate_key:
|
|
goto out_key_put;
|
|
}
|
|
|
|
-static int
|
|
+int
|
|
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
|
|
struct cifs_fattr *fattr, uint sidtype)
|
|
{
|
|
@@ -359,7 +359,8 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
|
|
return -EIO;
|
|
}
|
|
|
|
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) {
|
|
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) ||
|
|
+ (cifs_sb_master_tcon(cifs_sb)->posix_extensions)) {
|
|
uint32_t unix_id;
|
|
bool is_group;
|
|
|
|
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
|
|
index 7a836ec0438e8..f4751cb391238 100644
|
|
--- a/fs/cifs/cifsproto.h
|
|
+++ b/fs/cifs/cifsproto.h
|
|
@@ -208,6 +208,8 @@ extern int cifs_set_file_info(struct inode *inode, struct iattr *attrs,
|
|
extern int cifs_rename_pending_delete(const char *full_path,
|
|
struct dentry *dentry,
|
|
const unsigned int xid);
|
|
+extern int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
|
|
+ struct cifs_fattr *fattr, uint sidtype);
|
|
extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
|
|
struct cifs_fattr *fattr, struct inode *inode,
|
|
bool get_mode_from_special_sid,
|
|
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
|
|
index a61abde09ffe1..f4ecc13b02c0a 100644
|
|
--- a/fs/cifs/connect.c
|
|
+++ b/fs/cifs/connect.c
|
|
@@ -3594,7 +3594,10 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
|
|
*/
|
|
tcon->retry = volume_info->retry;
|
|
tcon->nocase = volume_info->nocase;
|
|
- tcon->nohandlecache = volume_info->nohandlecache;
|
|
+ if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
|
|
+ tcon->nohandlecache = volume_info->nohandlecache;
|
|
+ else
|
|
+ tcon->nohandlecache = 1;
|
|
tcon->nodelete = volume_info->nodelete;
|
|
tcon->local_lease = volume_info->local_lease;
|
|
INIT_LIST_HEAD(&tcon->pending_opens);
|
|
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
|
|
index 6df0922e7e304..709fb53e9fee1 100644
|
|
--- a/fs/cifs/readdir.c
|
|
+++ b/fs/cifs/readdir.c
|
|
@@ -267,9 +267,8 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
|
|
if (reparse_file_needs_reval(fattr))
|
|
fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
|
|
|
|
- /* TODO map SIDs */
|
|
- fattr->cf_uid = cifs_sb->mnt_uid;
|
|
- fattr->cf_gid = cifs_sb->mnt_gid;
|
|
+ sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER);
|
|
+ sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP);
|
|
}
|
|
|
|
static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
|
|
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
|
|
index d44df8f95bcd4..09e1cd320ee56 100644
|
|
--- a/fs/cifs/smb2ops.c
|
|
+++ b/fs/cifs/smb2ops.c
|
|
@@ -3072,7 +3072,12 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
|
|
oparms.tcon = tcon;
|
|
oparms.desired_access = READ_CONTROL;
|
|
oparms.disposition = FILE_OPEN;
|
|
- oparms.create_options = cifs_create_options(cifs_sb, 0);
|
|
+ /*
|
|
+ * When querying an ACL, even if the file is a symlink we want to open
|
|
+ * the source not the target, and so the protocol requires that the
|
|
+ * client specify this flag when opening a reparse point
|
|
+ */
|
|
+ oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT;
|
|
oparms.fid = &fid;
|
|
oparms.reconnect = false;
|
|
|
|
@@ -3924,7 +3929,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
|
|
if (rc) {
|
|
cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
|
|
enc ? "en" : "de");
|
|
- return 0;
|
|
+ return rc;
|
|
}
|
|
|
|
rc = smb3_crypto_aead_allocate(server);
|
|
@@ -4103,7 +4108,8 @@ smb3_is_transform_hdr(void *buf)
|
|
static int
|
|
decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
|
|
unsigned int buf_data_size, struct page **pages,
|
|
- unsigned int npages, unsigned int page_data_size)
|
|
+ unsigned int npages, unsigned int page_data_size,
|
|
+ bool is_offloaded)
|
|
{
|
|
struct kvec iov[2];
|
|
struct smb_rqst rqst = {NULL};
|
|
@@ -4129,7 +4135,8 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
|
|
|
|
memmove(buf, iov[1].iov_base, buf_data_size);
|
|
|
|
- server->total_read = buf_data_size + page_data_size;
|
|
+ if (!is_offloaded)
|
|
+ server->total_read = buf_data_size + page_data_size;
|
|
|
|
return rc;
|
|
}
|
|
@@ -4342,7 +4349,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
|
|
struct mid_q_entry *mid;
|
|
|
|
rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
|
|
- dw->ppages, dw->npages, dw->len);
|
|
+ dw->ppages, dw->npages, dw->len, true);
|
|
if (rc) {
|
|
cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
|
|
goto free_pages;
|
|
@@ -4448,7 +4455,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
|
|
|
|
non_offloaded_decrypt:
|
|
rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
|
|
- pages, npages, len);
|
|
+ pages, npages, len, false);
|
|
if (rc)
|
|
goto free_pages;
|
|
|
|
@@ -4504,7 +4511,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
|
|
server->total_read += length;
|
|
|
|
buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
|
|
- length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
|
|
+ length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
|
|
if (length)
|
|
return length;
|
|
|
|
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
|
|
index d23ff162c78bc..0b32c64eb4053 100644
|
|
--- a/fs/crypto/policy.c
|
|
+++ b/fs/crypto/policy.c
|
|
@@ -178,10 +178,15 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
|
|
32, 32))
|
|
return false;
|
|
|
|
+ /*
|
|
+ * IV_INO_LBLK_32 hashes the inode number, so in principle it can
|
|
+ * support any ino_bits. However, currently the inode number is gotten
|
|
+ * from inode::i_ino which is 'unsigned long'. So for now the
|
|
+ * implementation limit is 32 bits.
|
|
+ */
|
|
if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
|
|
- /* This uses hashed inode numbers, so ino_bits doesn't matter. */
|
|
!supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32",
|
|
- INT_MAX, 32))
|
|
+ 32, 32))
|
|
return false;
|
|
|
|
if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
|
|
diff --git a/fs/d_path.c b/fs/d_path.c
|
|
index 0f1fc1743302f..a69e2cd36e6e3 100644
|
|
--- a/fs/d_path.c
|
|
+++ b/fs/d_path.c
|
|
@@ -102,6 +102,8 @@ restart:
|
|
|
|
if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
|
|
struct mount *parent = READ_ONCE(mnt->mnt_parent);
|
|
+ struct mnt_namespace *mnt_ns;
|
|
+
|
|
/* Escaped? */
|
|
if (dentry != vfsmnt->mnt_root) {
|
|
bptr = *buffer;
|
|
@@ -116,7 +118,9 @@ restart:
|
|
vfsmnt = &mnt->mnt;
|
|
continue;
|
|
}
|
|
- if (is_mounted(vfsmnt) && !is_anon_ns(mnt->mnt_ns))
|
|
+ mnt_ns = READ_ONCE(mnt->mnt_ns);
|
|
+ /* open-coded is_mounted() to use local mnt_ns */
|
|
+ if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns))
|
|
error = 1; // absolute root
|
|
else
|
|
error = 2; // detached or not attached yet
|
|
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
|
|
index 3b21082e1b550..3b1012a3c4396 100644
|
|
--- a/fs/dlm/config.c
|
|
+++ b/fs/dlm/config.c
|
|
@@ -216,6 +216,7 @@ struct dlm_space {
|
|
struct list_head members;
|
|
struct mutex members_lock;
|
|
int members_count;
|
|
+ struct dlm_nodes *nds;
|
|
};
|
|
|
|
struct dlm_comms {
|
|
@@ -424,6 +425,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
|
|
INIT_LIST_HEAD(&sp->members);
|
|
mutex_init(&sp->members_lock);
|
|
sp->members_count = 0;
|
|
+ sp->nds = nds;
|
|
return &sp->group;
|
|
|
|
fail:
|
|
@@ -445,6 +447,7 @@ static void drop_space(struct config_group *g, struct config_item *i)
|
|
static void release_space(struct config_item *i)
|
|
{
|
|
struct dlm_space *sp = config_item_to_space(i);
|
|
+ kfree(sp->nds);
|
|
kfree(sp);
|
|
}
|
|
|
|
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
|
|
index ff46defc65683..dc943e714d142 100644
|
|
--- a/fs/ext4/ext4.h
|
|
+++ b/fs/ext4/ext4.h
|
|
@@ -466,7 +466,7 @@ struct flex_groups {
|
|
|
|
/* Flags which are mutually exclusive to DAX */
|
|
#define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\
|
|
- EXT4_JOURNAL_DATA_FL)
|
|
+ EXT4_JOURNAL_DATA_FL | EXT4_INLINE_DATA_FL)
|
|
|
|
/* Mask out flags that are inappropriate for the given type of inode. */
|
|
static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
|
|
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
|
|
index dbccf46f17709..37347ba868b70 100644
|
|
--- a/fs/ext4/fsmap.c
|
|
+++ b/fs/ext4/fsmap.c
|
|
@@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb,
|
|
|
|
/* Are we just counting mappings? */
|
|
if (info->gfi_head->fmh_count == 0) {
|
|
+ if (info->gfi_head->fmh_entries == UINT_MAX)
|
|
+ return EXT4_QUERY_RANGE_ABORT;
|
|
+
|
|
if (rec_fsblk > info->gfi_next_fsblk)
|
|
info->gfi_head->fmh_entries++;
|
|
|
|
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
|
|
index e88eff999bd15..79d32ea606aa1 100644
|
|
--- a/fs/ext4/mballoc.c
|
|
+++ b/fs/ext4/mballoc.c
|
|
@@ -4037,7 +4037,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
|
|
struct ext4_buddy e4b;
|
|
int err;
|
|
int busy = 0;
|
|
- int free = 0;
|
|
+ int free, free_total = 0;
|
|
|
|
mb_debug(sb, "discard preallocation for group %u\n", group);
|
|
if (list_empty(&grp->bb_prealloc_list))
|
|
@@ -4065,8 +4065,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
|
|
|
|
INIT_LIST_HEAD(&list);
|
|
repeat:
|
|
+ free = 0;
|
|
ext4_lock_group(sb, group);
|
|
- this_cpu_inc(discard_pa_seq);
|
|
list_for_each_entry_safe(pa, tmp,
|
|
&grp->bb_prealloc_list, pa_group_list) {
|
|
spin_lock(&pa->pa_lock);
|
|
@@ -4083,6 +4083,9 @@ repeat:
|
|
/* seems this one can be freed ... */
|
|
ext4_mb_mark_pa_deleted(sb, pa);
|
|
|
|
+ if (!free)
|
|
+ this_cpu_inc(discard_pa_seq);
|
|
+
|
|
/* we can trust pa_free ... */
|
|
free += pa->pa_free;
|
|
|
|
@@ -4092,22 +4095,6 @@ repeat:
|
|
list_add(&pa->u.pa_tmp_list, &list);
|
|
}
|
|
|
|
- /* if we still need more blocks and some PAs were used, try again */
|
|
- if (free < needed && busy) {
|
|
- busy = 0;
|
|
- ext4_unlock_group(sb, group);
|
|
- cond_resched();
|
|
- goto repeat;
|
|
- }
|
|
-
|
|
- /* found anything to free? */
|
|
- if (list_empty(&list)) {
|
|
- BUG_ON(free != 0);
|
|
- mb_debug(sb, "Someone else may have freed PA for this group %u\n",
|
|
- group);
|
|
- goto out;
|
|
- }
|
|
-
|
|
/* now free all selected PAs */
|
|
list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
|
|
|
|
@@ -4125,14 +4112,22 @@ repeat:
|
|
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
|
|
}
|
|
|
|
-out:
|
|
+ free_total += free;
|
|
+
|
|
+ /* if we still need more blocks and some PAs were used, try again */
|
|
+ if (free_total < needed && busy) {
|
|
+ ext4_unlock_group(sb, group);
|
|
+ cond_resched();
|
|
+ busy = 0;
|
|
+ goto repeat;
|
|
+ }
|
|
ext4_unlock_group(sb, group);
|
|
ext4_mb_unload_buddy(&e4b);
|
|
put_bh(bitmap_bh);
|
|
out_dbg:
|
|
mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
|
|
- free, group, grp->bb_free);
|
|
- return free;
|
|
+ free_total, group, grp->bb_free);
|
|
+ return free_total;
|
|
}
|
|
|
|
/*
|
|
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
|
|
index 44582a4db513e..1e014535c2530 100644
|
|
--- a/fs/f2fs/inode.c
|
|
+++ b/fs/f2fs/inode.c
|
|
@@ -287,6 +287,13 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
|
|
return false;
|
|
}
|
|
|
|
+ if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
|
|
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
|
|
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
|
|
+ __func__, inode->i_ino);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
|
|
fi->i_flags & F2FS_COMPR_FL &&
|
|
F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
|
|
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
|
|
index c5e32ceb94827..e186d3af61368 100644
|
|
--- a/fs/f2fs/sysfs.c
|
|
+++ b/fs/f2fs/sysfs.c
|
|
@@ -964,4 +964,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
|
|
}
|
|
kobject_del(&sbi->s_kobj);
|
|
kobject_put(&sbi->s_kobj);
|
|
+ wait_for_completion(&sbi->s_kobj_unregister);
|
|
}
|
|
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
|
|
index bcfc288dba3fb..b115e7d47fcec 100644
|
|
--- a/fs/iomap/buffered-io.c
|
|
+++ b/fs/iomap/buffered-io.c
|
|
@@ -49,16 +49,8 @@ iomap_page_create(struct inode *inode, struct page *page)
|
|
if (iop || i_blocksize(inode) == PAGE_SIZE)
|
|
return iop;
|
|
|
|
- iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
|
|
- atomic_set(&iop->read_count, 0);
|
|
- atomic_set(&iop->write_count, 0);
|
|
+ iop = kzalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
|
|
spin_lock_init(&iop->uptodate_lock);
|
|
- bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
|
|
-
|
|
- /*
|
|
- * migrate_page_move_mapping() assumes that pages with private data have
|
|
- * their count elevated by 1.
|
|
- */
|
|
attach_page_private(page, iop);
|
|
return iop;
|
|
}
|
|
@@ -574,10 +566,10 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
|
|
loff_t block_start = pos & ~(block_size - 1);
|
|
loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
|
|
unsigned from = offset_in_page(pos), to = from + len, poff, plen;
|
|
- int status;
|
|
|
|
if (PageUptodate(page))
|
|
return 0;
|
|
+ ClearPageError(page);
|
|
|
|
do {
|
|
iomap_adjust_read_range(inode, iop, &block_start,
|
|
@@ -594,14 +586,13 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
|
|
if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
|
|
return -EIO;
|
|
zero_user_segments(page, poff, from, to, poff + plen);
|
|
- iomap_set_range_uptodate(page, poff, plen);
|
|
- continue;
|
|
+ } else {
|
|
+ int status = iomap_read_page_sync(block_start, page,
|
|
+ poff, plen, srcmap);
|
|
+ if (status)
|
|
+ return status;
|
|
}
|
|
-
|
|
- status = iomap_read_page_sync(block_start, page, poff, plen,
|
|
- srcmap);
|
|
- if (status)
|
|
- return status;
|
|
+ iomap_set_range_uptodate(page, poff, plen);
|
|
} while ((block_start += plen) < block_end);
|
|
|
|
return 0;
|
|
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
|
|
index ec7b78e6fecaf..28d656b15300b 100644
|
|
--- a/fs/iomap/direct-io.c
|
|
+++ b/fs/iomap/direct-io.c
|
|
@@ -387,6 +387,16 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
|
|
return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
|
|
case IOMAP_INLINE:
|
|
return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
|
|
+ case IOMAP_DELALLOC:
|
|
+ /*
|
|
+ * DIO is not serialised against mmap() access at all, and so
|
|
+ * if the page_mkwrite occurs between the writeback and the
|
|
+ * iomap_apply() call in the DIO path, then it will see the
|
|
+ * DELALLOC block that the page-mkwrite allocated.
|
|
+ */
|
|
+ pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
|
|
+ dio->iocb->ki_filp, current->comm);
|
|
+ return -EIO;
|
|
default:
|
|
WARN_ON_ONCE(1);
|
|
return -EIO;
|
|
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
|
|
index ccc88be88d6ae..a30b4bcb95a2c 100644
|
|
--- a/fs/nfs/fs_context.c
|
|
+++ b/fs/nfs/fs_context.c
|
|
@@ -94,6 +94,7 @@ enum {
|
|
static const struct constant_table nfs_param_enums_local_lock[] = {
|
|
{ "all", Opt_local_lock_all },
|
|
{ "flock", Opt_local_lock_flock },
|
|
+ { "posix", Opt_local_lock_posix },
|
|
{ "none", Opt_local_lock_none },
|
|
{}
|
|
};
|
|
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
|
|
index d4359a1df3d5e..84933a0af49b6 100644
|
|
--- a/fs/ntfs/inode.c
|
|
+++ b/fs/ntfs/inode.c
|
|
@@ -1809,6 +1809,12 @@ int ntfs_read_inode_mount(struct inode *vi)
|
|
brelse(bh);
|
|
}
|
|
|
|
+ if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) {
|
|
+ ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.",
|
|
+ le32_to_cpu(m->bytes_allocated), vol->mft_record_size);
|
|
+ goto err_out;
|
|
+ }
|
|
+
|
|
/* Apply the mst fixups. */
|
|
if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) {
|
|
/* FIXME: Try to use the $MFTMirr now. */
|
|
diff --git a/fs/proc/base.c b/fs/proc/base.c
|
|
index d86c0afc8a859..297ff606ae0f6 100644
|
|
--- a/fs/proc/base.c
|
|
+++ b/fs/proc/base.c
|
|
@@ -1046,7 +1046,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
|
|
|
|
static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
|
|
{
|
|
- static DEFINE_MUTEX(oom_adj_mutex);
|
|
struct mm_struct *mm = NULL;
|
|
struct task_struct *task;
|
|
int err = 0;
|
|
@@ -1086,7 +1085,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
|
|
struct task_struct *p = find_lock_task_mm(task);
|
|
|
|
if (p) {
|
|
- if (atomic_read(&p->mm->mm_users) > 1) {
|
|
+ if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) {
|
|
mm = p->mm;
|
|
mmgrab(mm);
|
|
}
|
|
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
|
|
index 58fc2a7c7fd19..e69a2bfdd81c0 100644
|
|
--- a/fs/quota/quota_v2.c
|
|
+++ b/fs/quota/quota_v2.c
|
|
@@ -282,6 +282,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
|
|
d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
|
|
d->dqb_btime = cpu_to_le64(m->dqb_btime);
|
|
d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
|
|
+ d->dqb_pad = 0;
|
|
if (qtree_entry_unused(info, dp))
|
|
d->dqb_itime = cpu_to_le64(1);
|
|
}
|
|
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
|
|
index 4146954549560..355523f4a4bf3 100644
|
|
--- a/fs/ramfs/file-nommu.c
|
|
+++ b/fs/ramfs/file-nommu.c
|
|
@@ -224,7 +224,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
|
|
if (!pages)
|
|
goto out_free;
|
|
|
|
- nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages);
|
|
+ nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages);
|
|
if (nr != lpages)
|
|
goto out_free_pages; /* leave if some pages were missing */
|
|
|
|
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
|
|
index e43fed96704d8..c76d563dec0e1 100644
|
|
--- a/fs/reiserfs/inode.c
|
|
+++ b/fs/reiserfs/inode.c
|
|
@@ -2159,7 +2159,8 @@ out_end_trans:
|
|
out_inserted_sd:
|
|
clear_nlink(inode);
|
|
th->t_trans_id = 0; /* so the caller can't use this handle later */
|
|
- unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
|
|
+ if (inode->i_state & I_NEW)
|
|
+ unlock_new_inode(inode);
|
|
iput(inode);
|
|
return err;
|
|
}
|
|
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
|
|
index a6bce5b1fb1dc..1b9c7a387dc71 100644
|
|
--- a/fs/reiserfs/super.c
|
|
+++ b/fs/reiserfs/super.c
|
|
@@ -1258,6 +1258,10 @@ static int reiserfs_parse_options(struct super_block *s,
|
|
"turned on.");
|
|
return 0;
|
|
}
|
|
+ if (qf_names[qtype] !=
+ REISERFS_SB(s)->s_qf_names[qtype])
+ kfree(qf_names[qtype]);
+ qf_names[qtype] = NULL;
if (*arg) { /* Some filename specified? */
if (REISERFS_SB(s)->s_qf_names[qtype]
&& strcmp(REISERFS_SB(s)->s_qf_names[qtype],
@@ -1287,10 +1291,6 @@ static int reiserfs_parse_options(struct super_block *s,
else
*mount_options |= 1 << REISERFS_GRPQUOTA;
} else {
- if (qf_names[qtype] !=
- REISERFS_SB(s)->s_qf_names[qtype])
- kfree(qf_names[qtype]);
- qf_names[qtype] = NULL;
if (qtype == USRQUOTA)
*mount_options &= ~(1 << REISERFS_USRQUOTA);
else
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index adaba8e8b326e..566118417e562 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -139,21 +139,24 @@ void udf_evict_inode(struct inode *inode)
struct udf_inode_info *iinfo = UDF_I(inode);
int want_delete = 0;
|
- if (!inode->i_nlink && !is_bad_inode(inode)) {
- want_delete = 1;
- udf_setsize(inode, 0);
- udf_update_inode(inode, IS_SYNC(inode));
+ if (!is_bad_inode(inode)) {
+ if (!inode->i_nlink) {
+ want_delete = 1;
+ udf_setsize(inode, 0);
+ udf_update_inode(inode, IS_SYNC(inode));
+ }
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
+ inode->i_size != iinfo->i_lenExtents) {
+ udf_warn(inode->i_sb,
+ "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
+ inode->i_ino, inode->i_mode,
+ (unsigned long long)inode->i_size,
+ (unsigned long long)iinfo->i_lenExtents);
+ }
}
truncate_inode_pages_final(&inode->i_data);
invalidate_inode_buffers(inode);
clear_inode(inode);
- if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
- inode->i_size != iinfo->i_lenExtents) {
- udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
- inode->i_ino, inode->i_mode,
- (unsigned long long)inode->i_size,
- (unsigned long long)iinfo->i_lenExtents);
- }
kfree(iinfo->i_ext.i_data);
iinfo->i_ext.i_data = NULL;
udf_clear_extent_cache(inode);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index f747bf72edbe0..a6ce0ddb392c7 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1353,6 +1353,12 @@ static int udf_load_sparable_map(struct super_block *sb,
(int)spm->numSparingTables);
return -EIO;
}
+ if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
+ udf_err(sb, "error loading logical volume descriptor: "
+ "Too big sparing table size (%u)\n",
+ le32_to_cpu(spm->sizeSparingTable));
+ return -EIO;
+ }
|
for (i = 0; i < spm->numSparingTables; i++) {
loc = le32_to_cpu(spm->locSparingTable[i]);
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 9498ced947be9..2a38576189307 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -1018,7 +1018,6 @@ xfs_rtalloc_query_range(
struct xfs_mount *mp = tp->t_mountp;
xfs_rtblock_t rtstart;
xfs_rtblock_t rtend;
- xfs_rtblock_t rem;
int is_free;
int error = 0;
|
@@ -1027,13 +1026,12 @@ xfs_rtalloc_query_range(
if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
low_rec->ar_startext == high_rec->ar_startext)
return 0;
- if (high_rec->ar_startext > mp->m_sb.sb_rextents)
- high_rec->ar_startext = mp->m_sb.sb_rextents;
+ high_rec->ar_startext = min(high_rec->ar_startext,
+ mp->m_sb.sb_rextents - 1);
|
/* Iterate the bitmap, looking for discrepancies. */
rtstart = low_rec->ar_startext;
- rem = high_rec->ar_startext - rtstart;
- while (rem) {
+ while (rtstart <= high_rec->ar_startext) {
/* Is the first block free? */
error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend,
&is_free);
@@ -1042,7 +1040,7 @@ xfs_rtalloc_query_range(
|
/* How long does the extent go for? */
error = xfs_rtfind_forw(mp, tp, rtstart,
- high_rec->ar_startext - 1, &rtend);
+ high_rec->ar_startext, &rtend);
if (error)
break;
|
@@ -1055,7 +1053,6 @@ xfs_rtalloc_query_range(
break;
}
|
- rem -= rtend - rtstart + 1;
rtstart = rtend + 1;
}
|
diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
|
|
index 04faa7310c4f0..8140bd870226a 100644
|
|
--- a/fs/xfs/xfs_buf_item_recover.c
|
|
+++ b/fs/xfs/xfs_buf_item_recover.c
|
|
@@ -721,6 +721,8 @@ xlog_recover_get_buf_lsn(
|
|
case XFS_ABTC_MAGIC:
|
|
case XFS_RMAP_CRC_MAGIC:
|
|
case XFS_REFC_CRC_MAGIC:
|
|
+ case XFS_FIBT_CRC_MAGIC:
|
|
+ case XFS_FIBT_MAGIC:
|
|
case XFS_IBT_CRC_MAGIC:
|
|
case XFS_IBT_MAGIC: {
|
|
struct xfs_btree_block *btb = blk;
|
|
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
|
|
index 4d7385426149c..3ebc73ccc1337 100644
|
|
--- a/fs/xfs/xfs_file.c
|
|
+++ b/fs/xfs/xfs_file.c
|
|
@@ -1005,6 +1005,21 @@ xfs_file_fadvise(
|
|
return ret;
|
|
}
|
|
|
|
+/* Does this file, inode, or mount want synchronous writes? */
|
|
+static inline bool xfs_file_sync_writes(struct file *filp)
|
|
+{
|
|
+ struct xfs_inode *ip = XFS_I(file_inode(filp));
|
|
+
|
|
+ if (ip->i_mount->m_flags & XFS_MOUNT_WSYNC)
|
|
+ return true;
|
|
+ if (filp->f_flags & (__O_SYNC | O_DSYNC))
|
|
+ return true;
|
|
+ if (IS_SYNC(file_inode(filp)))
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
STATIC loff_t
|
|
xfs_file_remap_range(
|
|
struct file *file_in,
|
|
@@ -1062,7 +1077,7 @@ xfs_file_remap_range(
|
|
if (ret)
|
|
goto out_unlock;
|
|
|
|
- if (mp->m_flags & XFS_MOUNT_WSYNC)
|
|
+ if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
|
|
xfs_log_force_inode(dest);
|
|
out_unlock:
|
|
xfs_reflink_remap_unlock(file_in, file_out);
|
|
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
|
|
index 4eebcec4aae6c..9ce5e7d5bf8f2 100644
|
|
--- a/fs/xfs/xfs_fsmap.c
|
|
+++ b/fs/xfs/xfs_fsmap.c
|
|
@@ -26,7 +26,7 @@
|
|
#include "xfs_rtalloc.h"
|
|
|
|
/* Convert an xfs_fsmap to an fsmap. */
|
|
-void
|
|
+static void
|
|
xfs_fsmap_from_internal(
|
|
struct fsmap *dest,
|
|
struct xfs_fsmap *src)
|
|
@@ -155,8 +155,7 @@ xfs_fsmap_owner_from_rmap(
|
|
/* getfsmap query state */
|
|
struct xfs_getfsmap_info {
|
|
struct xfs_fsmap_head *head;
|
|
- xfs_fsmap_format_t formatter; /* formatting fn */
|
|
- void *format_arg; /* format buffer */
|
|
+ struct fsmap *fsmap_recs; /* mapping records */
|
|
struct xfs_buf *agf_bp; /* AGF, for refcount queries */
|
|
xfs_daddr_t next_daddr; /* next daddr we expect */
|
|
u64 missing_owner; /* owner of holes */
|
|
@@ -224,6 +223,20 @@ xfs_getfsmap_is_shared(
|
|
return 0;
|
|
}
|
|
|
|
+static inline void
|
|
+xfs_getfsmap_format(
|
|
+ struct xfs_mount *mp,
|
|
+ struct xfs_fsmap *xfm,
|
|
+ struct xfs_getfsmap_info *info)
|
|
+{
|
|
+ struct fsmap *rec;
|
|
+
|
|
+ trace_xfs_getfsmap_mapping(mp, xfm);
|
|
+
|
|
+ rec = &info->fsmap_recs[info->head->fmh_entries++];
|
|
+ xfs_fsmap_from_internal(rec, xfm);
|
|
+}
|
|
+
|
|
/*
|
|
* Format a reverse mapping for getfsmap, having translated rm_startblock
|
|
* into the appropriate daddr units.
|
|
@@ -256,6 +269,9 @@ xfs_getfsmap_helper(
|
|
|
|
/* Are we just counting mappings? */
|
|
if (info->head->fmh_count == 0) {
|
|
+ if (info->head->fmh_entries == UINT_MAX)
|
|
+ return -ECANCELED;
|
|
+
|
|
if (rec_daddr > info->next_daddr)
|
|
info->head->fmh_entries++;
|
|
|
|
@@ -285,10 +301,7 @@ xfs_getfsmap_helper(
|
|
fmr.fmr_offset = 0;
|
|
fmr.fmr_length = rec_daddr - info->next_daddr;
|
|
fmr.fmr_flags = FMR_OF_SPECIAL_OWNER;
|
|
- error = info->formatter(&fmr, info->format_arg);
|
|
- if (error)
|
|
- return error;
|
|
- info->head->fmh_entries++;
|
|
+ xfs_getfsmap_format(mp, &fmr, info);
|
|
}
|
|
|
|
if (info->last)
|
|
@@ -320,11 +333,8 @@ xfs_getfsmap_helper(
|
|
if (shared)
|
|
fmr.fmr_flags |= FMR_OF_SHARED;
|
|
}
|
|
- error = info->formatter(&fmr, info->format_arg);
|
|
- if (error)
|
|
- return error;
|
|
- info->head->fmh_entries++;
|
|
|
|
+ xfs_getfsmap_format(mp, &fmr, info);
|
|
out:
|
|
rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
|
|
if (info->next_daddr < rec_daddr)
|
|
@@ -792,11 +802,11 @@ xfs_getfsmap_check_keys(
|
|
#endif /* CONFIG_XFS_RT */
|
|
|
|
/*
|
|
- * Get filesystem's extents as described in head, and format for
|
|
- * output. Calls formatter to fill the user's buffer until all
|
|
- * extents are mapped, until the passed-in head->fmh_count slots have
|
|
- * been filled, or until the formatter short-circuits the loop, if it
|
|
- * is tracking filled-in extents on its own.
|
|
+ * Get filesystem's extents as described in head, and format for output. Fills
|
|
+ * in the supplied records array until there are no more reverse mappings to
|
|
+ * return or head.fmh_entries == head.fmh_count. In the second case, this
|
|
+ * function returns -ECANCELED to indicate that more records would have been
|
|
+ * returned.
|
|
*
|
|
* Key to Confusion
|
|
* ----------------
|
|
@@ -816,8 +826,7 @@ int
|
|
xfs_getfsmap(
|
|
struct xfs_mount *mp,
|
|
struct xfs_fsmap_head *head,
|
|
- xfs_fsmap_format_t formatter,
|
|
- void *arg)
|
|
+ struct fsmap *fsmap_recs)
|
|
{
|
|
struct xfs_trans *tp = NULL;
|
|
struct xfs_fsmap dkeys[2]; /* per-dev keys */
|
|
@@ -892,8 +901,7 @@ xfs_getfsmap(
|
|
|
|
info.next_daddr = head->fmh_keys[0].fmr_physical +
|
|
head->fmh_keys[0].fmr_length;
|
|
- info.formatter = formatter;
|
|
- info.format_arg = arg;
|
|
+ info.fsmap_recs = fsmap_recs;
|
|
info.head = head;
|
|
|
|
/*
|
|
diff --git a/fs/xfs/xfs_fsmap.h b/fs/xfs/xfs_fsmap.h
|
|
index c6c57739b8626..a0775788e7b13 100644
|
|
--- a/fs/xfs/xfs_fsmap.h
|
|
+++ b/fs/xfs/xfs_fsmap.h
|
|
@@ -27,13 +27,9 @@ struct xfs_fsmap_head {
|
|
struct xfs_fsmap fmh_keys[2]; /* low and high keys */
|
|
};
|
|
|
|
-void xfs_fsmap_from_internal(struct fsmap *dest, struct xfs_fsmap *src);
|
|
void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src);
|
|
|
|
-/* fsmap to userspace formatter - copy to user & advance pointer */
|
|
-typedef int (*xfs_fsmap_format_t)(struct xfs_fsmap *, void *);
|
|
-
|
|
int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head,
|
|
- xfs_fsmap_format_t formatter, void *arg);
|
|
+ struct fsmap *out_recs);
|
|
|
|
#endif /* __XFS_FSMAP_H__ */
|
|
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
|
|
index a190212ca85d0..e2a8edcb367bb 100644
|
|
--- a/fs/xfs/xfs_ioctl.c
|
|
+++ b/fs/xfs/xfs_ioctl.c
|
|
@@ -1707,39 +1707,17 @@ out_free_buf:
|
|
return error;
|
|
}
|
|
|
|
-struct getfsmap_info {
|
|
- struct xfs_mount *mp;
|
|
- struct fsmap_head __user *data;
|
|
- unsigned int idx;
|
|
- __u32 last_flags;
|
|
-};
|
|
-
|
|
-STATIC int
|
|
-xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
|
|
-{
|
|
- struct getfsmap_info *info = priv;
|
|
- struct fsmap fm;
|
|
-
|
|
- trace_xfs_getfsmap_mapping(info->mp, xfm);
|
|
-
|
|
- info->last_flags = xfm->fmr_flags;
|
|
- xfs_fsmap_from_internal(&fm, xfm);
|
|
- if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
|
|
- sizeof(struct fsmap)))
|
|
- return -EFAULT;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
STATIC int
|
|
xfs_ioc_getfsmap(
|
|
struct xfs_inode *ip,
|
|
struct fsmap_head __user *arg)
|
|
{
|
|
- struct getfsmap_info info = { NULL };
|
|
struct xfs_fsmap_head xhead = {0};
|
|
struct fsmap_head head;
|
|
- bool aborted = false;
|
|
+ struct fsmap *recs;
|
|
+ unsigned int count;
|
|
+ __u32 last_flags = 0;
|
|
+ bool done = false;
|
|
int error;
|
|
|
|
if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
|
|
@@ -1751,38 +1729,112 @@ xfs_ioc_getfsmap(
|
|
sizeof(head.fmh_keys[1].fmr_reserved)))
|
|
return -EINVAL;
|
|
|
|
+ /*
|
|
+ * Use an internal memory buffer so that we don't have to copy fsmap
|
|
+ * data to userspace while holding locks. Start by trying to allocate
|
|
+ * up to 128k for the buffer, but fall back to a single page if needed.
|
|
+ */
|
|
+ count = min_t(unsigned int, head.fmh_count,
|
|
+ 131072 / sizeof(struct fsmap));
|
|
+ recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
|
|
+ if (!recs) {
|
|
+ count = min_t(unsigned int, head.fmh_count,
|
|
+ PAGE_SIZE / sizeof(struct fsmap));
|
|
+ recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
|
|
+ if (!recs)
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
xhead.fmh_iflags = head.fmh_iflags;
|
|
- xhead.fmh_count = head.fmh_count;
|
|
xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
|
|
xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
|
|
|
|
trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
|
|
trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
|
|
|
|
- info.mp = ip->i_mount;
|
|
- info.data = arg;
|
|
- error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
|
|
- if (error == -ECANCELED) {
|
|
- error = 0;
|
|
- aborted = true;
|
|
- } else if (error)
|
|
- return error;
|
|
+ head.fmh_entries = 0;
|
|
+ do {
|
|
+ struct fsmap __user *user_recs;
|
|
+ struct fsmap *last_rec;
|
|
+
|
|
+ user_recs = &arg->fmh_recs[head.fmh_entries];
|
|
+ xhead.fmh_entries = 0;
|
|
+ xhead.fmh_count = min_t(unsigned int, count,
|
|
+ head.fmh_count - head.fmh_entries);
|
|
+
|
|
+ /* Run query, record how many entries we got. */
|
|
+ error = xfs_getfsmap(ip->i_mount, &xhead, recs);
|
|
+ switch (error) {
|
|
+ case 0:
|
|
+ /*
|
|
+ * There are no more records in the result set. Copy
|
|
+ * whatever we got to userspace and break out.
|
|
+ */
|
|
+ done = true;
|
|
+ break;
|
|
+ case -ECANCELED:
|
|
+ /*
|
|
+ * The internal memory buffer is full. Copy whatever
|
|
+ * records we got to userspace and go again if we have
|
|
+ * not yet filled the userspace buffer.
|
|
+ */
|
|
+ error = 0;
|
|
+ break;
|
|
+ default:
|
|
+ goto out_free;
|
|
+ }
|
|
+ head.fmh_entries += xhead.fmh_entries;
|
|
+ head.fmh_oflags = xhead.fmh_oflags;
|
|
|
|
- /* If we didn't abort, set the "last" flag in the last fmx */
|
|
- if (!aborted && info.idx) {
|
|
- info.last_flags |= FMR_OF_LAST;
|
|
- if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
|
|
- &info.last_flags, sizeof(info.last_flags)))
|
|
- return -EFAULT;
|
|
+ /*
|
|
+ * If the caller wanted a record count or there aren't any
|
|
+ * new records to return, we're done.
|
|
+ */
|
|
+ if (head.fmh_count == 0 || xhead.fmh_entries == 0)
|
|
+ break;
|
|
+
|
|
+ /* Copy all the records we got out to userspace. */
|
|
+ if (copy_to_user(user_recs, recs,
|
|
+ xhead.fmh_entries * sizeof(struct fsmap))) {
|
|
+ error = -EFAULT;
|
|
+ goto out_free;
|
|
+ }
|
|
+
|
|
+ /* Remember the last record flags we copied to userspace. */
|
|
+ last_rec = &recs[xhead.fmh_entries - 1];
|
|
+ last_flags = last_rec->fmr_flags;
|
|
+
|
|
+ /* Set up the low key for the next iteration. */
|
|
+ xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
|
|
+ trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
|
|
+ } while (!done && head.fmh_entries < head.fmh_count);
|
|
+
|
|
+ /*
|
|
+ * If there are no more records in the query result set and we're not
|
|
+ * in counting mode, mark the last record returned with the LAST flag.
|
|
+ */
|
|
+ if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
|
|
+ struct fsmap __user *user_rec;
|
|
+
|
|
+ last_flags |= FMR_OF_LAST;
|
|
+ user_rec = &arg->fmh_recs[head.fmh_entries - 1];
|
|
+
|
|
+ if (copy_to_user(&user_rec->fmr_flags, &last_flags,
|
|
+ sizeof(last_flags))) {
|
|
+ error = -EFAULT;
|
|
+ goto out_free;
|
|
+ }
|
|
}
|
|
|
|
/* copy back header */
|
|
- head.fmh_entries = xhead.fmh_entries;
|
|
- head.fmh_oflags = xhead.fmh_oflags;
|
|
- if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
|
|
- return -EFAULT;
|
|
+ if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
|
|
+ error = -EFAULT;
|
|
+ goto out_free;
|
|
+ }
|
|
|
|
- return 0;
|
|
+out_free:
|
|
+ kmem_free(recs);
|
|
+ return error;
|
|
}
|
|
|
|
STATIC int
|
|
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
|
|
index 6209e7b6b895b..86994d7f7cba3 100644
|
|
--- a/fs/xfs/xfs_rtalloc.c
|
|
+++ b/fs/xfs/xfs_rtalloc.c
|
|
@@ -247,6 +247,9 @@ xfs_rtallocate_extent_block(
|
|
end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
|
|
i <= end;
|
|
i++) {
|
|
+ /* Make sure we don't scan off the end of the rt volume. */
|
|
+ maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i;
|
|
+
|
|
/*
|
|
* See if there's a free extent of maxlen starting at i.
|
|
* If it's not so then next will contain the first non-free.
|
|
@@ -442,6 +445,14 @@ xfs_rtallocate_extent_near(
|
|
*/
|
|
if (bno >= mp->m_sb.sb_rextents)
|
|
bno = mp->m_sb.sb_rextents - 1;
|
|
+
|
|
+ /* Make sure we don't run off the end of the rt volume. */
|
|
+ maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno;
|
|
+ if (maxlen < minlen) {
|
|
+ *rtblock = NULLRTBLOCK;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
/*
|
|
* Try the exact allocation first.
|
|
*/
|
|
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
|
|
index ca08db4ffb5f7..ce3f5231aa698 100644
|
|
--- a/include/linux/bpf_verifier.h
|
|
+++ b/include/linux/bpf_verifier.h
|
|
@@ -358,6 +358,7 @@ struct bpf_subprog_info {
|
|
u32 start; /* insn idx of function entry point */
|
|
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
|
|
u16 stack_depth; /* max. stack depth used by this function */
|
|
+ bool has_tail_call;
|
|
};
|
|
|
|
/* single container for all structs
|
|
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
|
|
index a4dc45fbec0a4..23bc366f6c3b3 100644
|
|
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
|
|
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
|
|
@@ -27,8 +27,7 @@
|
|
* bit 16-27: update value
|
|
* bit 31: 1 - update, 0 - no update
|
|
*/
|
|
-#define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \
|
|
- CMDQ_WFE_WAIT_VALUE)
|
|
+#define CMDQ_WFE_OPTION (CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE)
|
|
|
|
/** cmdq event maximum */
|
|
#define CMDQ_MAX_EVENT 0x3ff
|
|
diff --git a/include/linux/oom.h b/include/linux/oom.h
|
|
index c696c265f0193..b9df34326772c 100644
|
|
--- a/include/linux/oom.h
|
|
+++ b/include/linux/oom.h
|
|
@@ -55,6 +55,7 @@ struct oom_control {
|
|
};
|
|
|
|
extern struct mutex oom_lock;
|
|
+extern struct mutex oom_adj_mutex;
|
|
|
|
static inline void set_current_oom_origin(void)
|
|
{
|
|
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
|
|
index 93fcef105061b..ff3c48f0abc5b 100644
|
|
--- a/include/linux/overflow.h
|
|
+++ b/include/linux/overflow.h
|
|
@@ -3,6 +3,7 @@
|
|
#define __LINUX_OVERFLOW_H
|
|
|
|
#include <linux/compiler.h>
|
|
+#include <linux/limits.h>
|
|
|
|
/*
|
|
* In the fallback code below, we need to compute the minimum and
|
|
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
|
|
index 8679ccd722e89..3468794f83d23 100644
|
|
--- a/include/linux/page_owner.h
|
|
+++ b/include/linux/page_owner.h
|
|
@@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops;
|
|
extern void __reset_page_owner(struct page *page, unsigned int order);
|
|
extern void __set_page_owner(struct page *page,
|
|
unsigned int order, gfp_t gfp_mask);
|
|
-extern void __split_page_owner(struct page *page, unsigned int order);
|
|
+extern void __split_page_owner(struct page *page, unsigned int nr);
|
|
extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
|
|
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
|
|
extern void __dump_page_owner(struct page *page);
|
|
@@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page,
|
|
__set_page_owner(page, order, gfp_mask);
|
|
}
|
|
|
|
-static inline void split_page_owner(struct page *page, unsigned int order)
|
|
+static inline void split_page_owner(struct page *page, unsigned int nr)
|
|
{
|
|
if (static_branch_unlikely(&page_owner_inited))
|
|
- __split_page_owner(page, order);
|
|
+ __split_page_owner(page, nr);
|
|
}
|
|
static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
|
|
{
|
|
diff --git a/include/linux/pci.h b/include/linux/pci.h
|
|
index 34c1c4f45288f..1bc3c020672fd 100644
|
|
--- a/include/linux/pci.h
|
|
+++ b/include/linux/pci.h
|
|
@@ -439,6 +439,7 @@ struct pci_dev {
|
|
unsigned int is_probed:1; /* Device probing in progress */
|
|
unsigned int link_active_reporting:1;/* Device capable of reporting link active */
|
|
unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
|
|
+ unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
|
|
pci_dev_flags_t dev_flags;
|
|
atomic_t enable_cnt; /* pci_enable_device has been called */
|
|
|
|
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
|
|
index f3eaf9ec00a1b..70078be166e3c 100644
|
|
--- a/include/linux/platform_data/dma-dw.h
|
|
+++ b/include/linux/platform_data/dma-dw.h
|
|
@@ -21,6 +21,7 @@
|
|
* @dst_id: dst request line
|
|
* @m_master: memory master for transfers on allocated channel
|
|
* @p_master: peripheral master for transfers on allocated channel
|
|
+ * @channels: mask of the channels permitted for allocation (zero value means any)
|
|
* @hs_polarity:set active low polarity of handshake interface
|
|
*/
|
|
struct dw_dma_slave {
|
|
@@ -29,6 +30,7 @@ struct dw_dma_slave {
|
|
u8 dst_id;
|
|
u8 m_master;
|
|
u8 p_master;
|
|
+ u8 channels;
|
|
bool hs_polarity;
|
|
};
|
|
|
|
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
|
|
index ecdc6542070f1..dfd82eab29025 100644
|
|
--- a/include/linux/sched/coredump.h
|
|
+++ b/include/linux/sched/coredump.h
|
|
@@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm)
|
|
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
|
|
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
|
|
#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
|
|
+#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
|
|
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
|
|
|
|
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
|
|
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
|
|
index a74c1d5acdf3c..cb71dca985589 100644
|
|
--- a/include/linux/soc/mediatek/mtk-cmdq.h
|
|
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
|
|
@@ -105,11 +105,12 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
|
|
/**
|
|
* cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
|
|
* @pkt: the CMDQ packet
|
|
- * @event: the desired event type to "wait and CLEAR"
|
|
+ * @event: the desired event type to wait
|
|
+ * @clear: clear event or not after event arrive
|
|
*
|
|
* Return: 0 for success; else the error code is returned
|
|
*/
|
|
-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
|
|
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear);
|
|
|
|
/**
|
|
* cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
|
|
diff --git a/include/net/ip.h b/include/net/ip.h
|
|
index 04ebe7bf54c6a..d61c26ab4ee84 100644
|
|
--- a/include/net/ip.h
|
|
+++ b/include/net/ip.h
|
|
@@ -439,12 +439,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
|
|
bool forwarding)
|
|
{
|
|
struct net *net = dev_net(dst->dev);
|
|
+ unsigned int mtu;
|
|
|
|
if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
|
|
ip_mtu_locked(dst) ||
|
|
!forwarding)
|
|
return dst_mtu(dst);
|
|
|
|
+ /* 'forwarding = true' case should always honour route mtu */
|
|
+ mtu = dst_metric_raw(dst, RTAX_MTU);
|
|
+ if (mtu)
|
|
+ return mtu;
|
|
+
|
|
return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
|
|
}
|
|
|
|
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
|
|
index 0d3920896d502..716db4a0fed89 100644
|
|
--- a/include/net/netfilter/nf_log.h
|
|
+++ b/include/net/netfilter/nf_log.h
|
|
@@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
|
|
unsigned int logflags);
|
|
void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
|
|
struct sock *sk);
|
|
+void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb);
|
|
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
|
|
unsigned int hooknum, const struct sk_buff *skb,
|
|
const struct net_device *in,
|
|
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
|
|
index e1057b255f69a..879fe8cff5819 100644
|
|
--- a/include/net/tc_act/tc_tunnel_key.h
|
|
+++ b/include/net/tc_act/tc_tunnel_key.h
|
|
@@ -56,7 +56,10 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
|
|
{
|
|
#ifdef CONFIG_NET_CLS_ACT
|
|
struct tcf_tunnel_key *t = to_tunnel_key(a);
|
|
- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
|
|
+ struct tcf_tunnel_key_params *params;
|
|
+
|
|
+ params = rcu_dereference_protected(t->params,
|
|
+ lockdep_is_held(&a->tcfa_lock));
|
|
|
|
return ¶ms->tcft_enc_metadata->u.tun_info;
|
|
#else
|
|
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
|
|
index e3518fd6b95b1..9353910915d41 100644
|
|
--- a/include/rdma/ib_umem.h
|
|
+++ b/include/rdma/ib_umem.h
|
|
@@ -95,10 +95,11 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs
|
|
size_t length) {
|
|
return -EINVAL;
|
|
}
|
|
-static inline int ib_umem_find_best_pgsz(struct ib_umem *umem,
|
|
- unsigned long pgsz_bitmap,
|
|
- unsigned long virt) {
|
|
- return -EINVAL;
|
|
+static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
|
|
+ unsigned long pgsz_bitmap,
|
|
+ unsigned long virt)
|
|
+{
|
|
+ return 0;
|
|
}
|
|
|
|
#endif /* CONFIG_INFINIBAND_USER_MEM */
|
|
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
|
|
index ef2f3986c4933..d7809f203715f 100644
|
|
--- a/include/rdma/ib_verbs.h
|
|
+++ b/include/rdma/ib_verbs.h
|
|
@@ -2465,7 +2465,7 @@ struct ib_device_ops {
|
|
int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
|
|
struct ib_udata *udata);
|
|
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
|
|
- void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
|
|
+ int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
|
|
int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
|
|
struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
|
|
struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
|
|
@@ -3834,46 +3834,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
|
|
return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
|
|
}
|
|
|
|
-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
|
|
- int nr_cqe, int comp_vector,
|
|
- enum ib_poll_context poll_ctx,
|
|
- const char *caller, struct ib_udata *udata);
|
|
-
|
|
-/**
|
|
- * ib_alloc_cq_user: Allocate kernel/user CQ
|
|
- * @dev: The IB device
|
|
- * @private: Private data attached to the CQE
|
|
- * @nr_cqe: Number of CQEs in the CQ
|
|
- * @comp_vector: Completion vector used for the IRQs
|
|
- * @poll_ctx: Context used for polling the CQ
|
|
- * @udata: Valid user data or NULL for kernel objects
|
|
- */
|
|
-static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
|
|
- void *private, int nr_cqe,
|
|
- int comp_vector,
|
|
- enum ib_poll_context poll_ctx,
|
|
- struct ib_udata *udata)
|
|
-{
|
|
- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
|
|
- KBUILD_MODNAME, udata);
|
|
-}
|
|
-
|
|
-/**
|
|
- * ib_alloc_cq: Allocate kernel CQ
|
|
- * @dev: The IB device
|
|
- * @private: Private data attached to the CQE
|
|
- * @nr_cqe: Number of CQEs in the CQ
|
|
- * @comp_vector: Completion vector used for the IRQs
|
|
- * @poll_ctx: Context used for polling the CQ
|
|
- *
|
|
- * NOTE: for user cq use ib_alloc_cq_user with valid udata!
|
|
- */
|
|
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
|
|
+ int comp_vector, enum ib_poll_context poll_ctx,
|
|
+ const char *caller);
|
|
static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
|
|
int nr_cqe, int comp_vector,
|
|
enum ib_poll_context poll_ctx)
|
|
{
|
|
- return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
|
|
- NULL);
|
|
+ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
|
|
+ KBUILD_MODNAME);
|
|
}
|
|
|
|
struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
|
|
@@ -3895,26 +3864,7 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
|
|
KBUILD_MODNAME);
|
|
}
|
|
|
|
-/**
|
|
- * ib_free_cq_user - Free kernel/user CQ
|
|
- * @cq: The CQ to free
|
|
- * @udata: Valid user data or NULL for kernel objects
|
|
- *
|
|
- * NOTE: This function shouldn't be called on shared CQs.
|
|
- */
|
|
-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
|
|
-
|
|
-/**
|
|
- * ib_free_cq - Free kernel CQ
|
|
- * @cq: The CQ to free
|
|
- *
|
|
- * NOTE: for user cq use ib_free_cq_user with valid udata!
|
|
- */
|
|
-static inline void ib_free_cq(struct ib_cq *cq)
|
|
-{
|
|
- ib_free_cq_user(cq, NULL);
|
|
-}
|
|
-
|
|
+void ib_free_cq(struct ib_cq *cq);
|
|
int ib_process_cq_direct(struct ib_cq *cq, int budget);
|
|
|
|
/**
|
|
@@ -3972,7 +3922,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
|
|
*/
|
|
static inline void ib_destroy_cq(struct ib_cq *cq)
|
|
{
|
|
- ib_destroy_cq_user(cq, NULL);
|
|
+ int ret = ib_destroy_cq_user(cq, NULL);
|
|
+
|
|
+ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
|
|
}
|
|
|
|
/**
|
|
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
|
|
index 731ac09ed2313..5b567b43e1b16 100644
|
|
--- a/include/scsi/scsi_common.h
|
|
+++ b/include/scsi/scsi_common.h
|
|
@@ -25,6 +25,13 @@ scsi_command_size(const unsigned char *cmnd)
|
|
scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
|
|
}
|
|
|
|
+static inline unsigned char
|
|
+scsi_command_control(const unsigned char *cmnd)
|
|
+{
|
|
+ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
|
|
+ cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1];
|
|
+}
|
|
+
|
|
/* Returns a human-readable name for the device */
|
|
extern const char *scsi_device_type(unsigned type);
|
|
|
|
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
|
|
index d16a4229209b2..a2becf13293a3 100644
|
|
--- a/include/sound/hda_codec.h
|
|
+++ b/include/sound/hda_codec.h
|
|
@@ -253,6 +253,7 @@ struct hda_codec {
|
|
unsigned int force_pin_prefix:1; /* Add location prefix */
|
|
unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
|
|
unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
|
|
+ unsigned int forced_resume:1; /* forced resume for jack */
|
|
unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */
|
|
|
|
#ifdef CONFIG_PM
|
|
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
|
|
index 77408edd29d2a..67fad2677ed55 100644
|
|
--- a/include/trace/events/target.h
|
|
+++ b/include/trace/events/target.h
|
|
@@ -141,6 +141,7 @@ TRACE_EVENT(target_sequencer_start,
|
|
__field( unsigned int, opcode )
|
|
__field( unsigned int, data_length )
|
|
__field( unsigned int, task_attribute )
|
|
+ __field( unsigned char, control )
|
|
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
|
|
__string( initiator, cmd->se_sess->se_node_acl->initiatorname )
|
|
),
|
|
@@ -151,6 +152,7 @@ TRACE_EVENT(target_sequencer_start,
|
|
__entry->opcode = cmd->t_task_cdb[0];
|
|
__entry->data_length = cmd->data_length;
|
|
__entry->task_attribute = cmd->sam_task_attr;
|
|
+ __entry->control = scsi_command_control(cmd->t_task_cdb);
|
|
memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
|
|
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
|
|
),
|
|
@@ -160,9 +162,7 @@ TRACE_EVENT(target_sequencer_start,
|
|
__entry->tag, show_opcode_name(__entry->opcode),
|
|
__entry->data_length, __print_hex(__entry->cdb, 16),
|
|
show_task_attribute_name(__entry->task_attribute),
|
|
- scsi_command_size(__entry->cdb) <= 16 ?
|
|
- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
|
|
- __entry->cdb[1]
|
|
+ __entry->control
|
|
)
|
|
);
|
|
|
|
@@ -178,6 +178,7 @@ TRACE_EVENT(target_cmd_complete,
|
|
__field( unsigned int, opcode )
|
|
__field( unsigned int, data_length )
|
|
__field( unsigned int, task_attribute )
|
|
+ __field( unsigned char, control )
|
|
__field( unsigned char, scsi_status )
|
|
__field( unsigned char, sense_length )
|
|
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
|
|
@@ -191,6 +192,7 @@ TRACE_EVENT(target_cmd_complete,
|
|
__entry->opcode = cmd->t_task_cdb[0];
|
|
__entry->data_length = cmd->data_length;
|
|
__entry->task_attribute = cmd->sam_task_attr;
|
|
+ __entry->control = scsi_command_control(cmd->t_task_cdb);
|
|
__entry->scsi_status = cmd->scsi_status;
|
|
__entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
|
|
min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
|
|
@@ -208,9 +210,7 @@ TRACE_EVENT(target_cmd_complete,
|
|
show_opcode_name(__entry->opcode),
|
|
__entry->data_length, __print_hex(__entry->cdb, 16),
|
|
show_task_attribute_name(__entry->task_attribute),
|
|
- scsi_command_size(__entry->cdb) <= 16 ?
|
|
- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
|
|
- __entry->cdb[1]
|
|
+ __entry->control
|
|
)
|
|
);
|
|
|
|
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
|
|
index f9701410d3b52..57a222014cd20 100644
|
|
--- a/include/uapi/linux/pci_regs.h
|
|
+++ b/include/uapi/linux/pci_regs.h
|
|
@@ -76,6 +76,7 @@
|
|
#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
|
|
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
|
|
#define PCI_HEADER_TYPE 0x0e /* 8 bits */
|
|
+#define PCI_HEADER_TYPE_MASK 0x7f
|
|
#define PCI_HEADER_TYPE_NORMAL 0
|
|
#define PCI_HEADER_TYPE_BRIDGE 1
|
|
#define PCI_HEADER_TYPE_CARDBUS 2
|
|
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
|
|
index 7b2d6fc9e6ed7..dc33e3051819d 100644
|
|
--- a/include/uapi/linux/perf_event.h
|
|
+++ b/include/uapi/linux/perf_event.h
|
|
@@ -1155,7 +1155,7 @@ union perf_mem_data_src {
|
|
|
|
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
|
|
/* 1 free */
|
|
-#define PERF_MEM_SNOOPX_SHIFT 37
|
|
+#define PERF_MEM_SNOOPX_SHIFT 38
|
|
|
|
/* locked instruction */
|
|
#define PERF_MEM_LOCK_NA 0x01 /* not available */
|
|
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
|
|
index b367430e611c7..3d897de890612 100644
|
|
--- a/kernel/bpf/percpu_freelist.c
|
|
+++ b/kernel/bpf/percpu_freelist.c
|
|
@@ -17,6 +17,8 @@ int pcpu_freelist_init(struct pcpu_freelist *s)
|
|
raw_spin_lock_init(&head->lock);
|
|
head->first = NULL;
|
|
}
|
|
+ raw_spin_lock_init(&s->extralist.lock);
|
|
+ s->extralist.first = NULL;
|
|
return 0;
|
|
}
|
|
|
|
@@ -40,12 +42,50 @@ static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
|
|
raw_spin_unlock(&head->lock);
|
|
}
|
|
|
|
+static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
|
|
+ struct pcpu_freelist_node *node)
|
|
+{
|
|
+ if (!raw_spin_trylock(&s->extralist.lock))
|
|
+ return false;
|
|
+
|
|
+ pcpu_freelist_push_node(&s->extralist, node);
|
|
+ raw_spin_unlock(&s->extralist.lock);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
|
|
+ struct pcpu_freelist_node *node)
|
|
+{
|
|
+ int cpu, orig_cpu;
|
|
+
|
|
+ orig_cpu = cpu = raw_smp_processor_id();
|
|
+ while (1) {
|
|
+ struct pcpu_freelist_head *head;
|
|
+
|
|
+ head = per_cpu_ptr(s->freelist, cpu);
|
|
+ if (raw_spin_trylock(&head->lock)) {
|
|
+ pcpu_freelist_push_node(head, node);
|
|
+ raw_spin_unlock(&head->lock);
|
|
+ return;
|
|
+ }
|
|
+ cpu = cpumask_next(cpu, cpu_possible_mask);
|
|
+ if (cpu >= nr_cpu_ids)
|
|
+ cpu = 0;
|
|
+
|
|
+ /* cannot lock any per cpu lock, try extralist */
|
|
+ if (cpu == orig_cpu &&
|
|
+ pcpu_freelist_try_push_extra(s, node))
|
|
+ return;
|
|
+ }
|
|
+}
|
|
+
|
|
void __pcpu_freelist_push(struct pcpu_freelist *s,
|
|
struct pcpu_freelist_node *node)
|
|
{
|
|
- struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
|
|
-
|
|
- ___pcpu_freelist_push(head, node);
|
|
+ if (in_nmi())
|
|
+ ___pcpu_freelist_push_nmi(s, node);
|
|
+ else
|
|
+ ___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
|
|
}
|
|
|
|
void pcpu_freelist_push(struct pcpu_freelist *s,
|
|
@@ -81,7 +121,7 @@ again:
|
|
}
|
|
}
|
|
|
|
-struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
|
|
+static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
|
|
{
|
|
struct pcpu_freelist_head *head;
|
|
struct pcpu_freelist_node *node;
|
|
@@ -102,8 +142,59 @@ struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
|
|
if (cpu >= nr_cpu_ids)
|
|
cpu = 0;
|
|
if (cpu == orig_cpu)
|
|
- return NULL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* per cpu lists are all empty, try extralist */
|
|
+ raw_spin_lock(&s->extralist.lock);
|
|
+ node = s->extralist.first;
|
|
+ if (node)
|
|
+ s->extralist.first = node->next;
|
|
+ raw_spin_unlock(&s->extralist.lock);
|
|
+ return node;
|
|
+}
|
|
+
|
|
+static struct pcpu_freelist_node *
|
|
+___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
|
|
+{
|
|
+ struct pcpu_freelist_head *head;
|
|
+ struct pcpu_freelist_node *node;
|
|
+ int orig_cpu, cpu;
|
|
+
|
|
+ orig_cpu = cpu = raw_smp_processor_id();
|
|
+ while (1) {
|
|
+ head = per_cpu_ptr(s->freelist, cpu);
|
|
+ if (raw_spin_trylock(&head->lock)) {
|
|
+ node = head->first;
|
|
+ if (node) {
|
|
+ head->first = node->next;
|
|
+ raw_spin_unlock(&head->lock);
|
|
+ return node;
|
|
+ }
|
|
+ raw_spin_unlock(&head->lock);
|
|
+ }
|
|
+ cpu = cpumask_next(cpu, cpu_possible_mask);
|
|
+ if (cpu >= nr_cpu_ids)
|
|
+ cpu = 0;
|
|
+ if (cpu == orig_cpu)
|
|
+ break;
|
|
}
|
|
+
|
|
+ /* cannot pop from per cpu lists, try extralist */
|
|
+ if (!raw_spin_trylock(&s->extralist.lock))
|
|
+ return NULL;
|
|
+ node = s->extralist.first;
|
|
+ if (node)
|
|
+ s->extralist.first = node->next;
|
|
+ raw_spin_unlock(&s->extralist.lock);
|
|
+ return node;
|
|
+}
|
|
+
|
|
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
|
|
+{
|
|
+ if (in_nmi())
|
|
+ return ___pcpu_freelist_pop_nmi(s);
|
|
+ return ___pcpu_freelist_pop(s);
|
|
}
|
|
|
|
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
|
|
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
|
|
index fbf8a8a289791..3c76553cfe571 100644
|
|
--- a/kernel/bpf/percpu_freelist.h
|
|
+++ b/kernel/bpf/percpu_freelist.h
|
|
@@ -13,6 +13,7 @@ struct pcpu_freelist_head {
|
|
|
|
struct pcpu_freelist {
|
|
struct pcpu_freelist_head __percpu *freelist;
|
|
+ struct pcpu_freelist_head extralist;
|
|
};
|
|
|
|
struct pcpu_freelist_node {
|
|
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
|
|
index 89b07db146763..12eb9e47d101c 100644
|
|
--- a/kernel/bpf/verifier.c
|
|
+++ b/kernel/bpf/verifier.c
|
|
@@ -1470,6 +1470,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
|
|
for (i = 0; i < insn_cnt; i++) {
|
|
u8 code = insn[i].code;
|
|
|
|
+ if (code == (BPF_JMP | BPF_CALL) &&
|
|
+ insn[i].imm == BPF_FUNC_tail_call &&
|
|
+ insn[i].src_reg != BPF_PSEUDO_CALL)
|
|
+ subprog[cur_subprog].has_tail_call = true;
|
|
if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
|
|
goto next;
|
|
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
|
|
@@ -2951,6 +2955,31 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
|
|
int ret_prog[MAX_CALL_FRAMES];
|
|
|
|
process_func:
|
|
+ /* protect against potential stack overflow that might happen when
|
|
+ * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
|
|
+ * depth for such case down to 256 so that the worst case scenario
|
|
+ * would result in 8k stack size (32 which is tailcall limit * 256 =
|
|
+ * 8k).
|
|
+ *
|
|
+ * To get the idea what might happen, see an example:
|
|
+ * func1 -> sub rsp, 128
|
|
+ * subfunc1 -> sub rsp, 256
|
|
+ * tailcall1 -> add rsp, 256
|
|
+ * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
|
|
+ * subfunc2 -> sub rsp, 64
|
|
+ * subfunc22 -> sub rsp, 128
|
|
+ * tailcall2 -> add rsp, 128
|
|
+ * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
|
|
+ *
|
|
+ * tailcall will unwind the current stack frame but it will not get rid
|
|
+ * of caller's stack as shown on the example above.
|
|
+ */
|
|
+ if (idx && subprog[idx].has_tail_call && depth >= 256) {
|
|
+ verbose(env,
|
|
+ "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
|
|
+ depth);
|
|
+ return -EACCES;
|
|
+ }
|
|
/* round up to 32-bytes, since this is granularity
|
|
* of interpreter stack size
|
|
*/
|
|
@@ -10862,6 +10891,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
|
|
}
|
|
|
|
if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
|
|
+ if (tgt_prog) {
|
|
+ verbose(env, "can't modify return codes of BPF programs\n");
|
|
+ ret = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
ret = check_attach_modify_return(prog, addr);
|
|
if (ret)
|
|
verbose(env, "%s() is not modifiable\n",
|
|
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
|
|
index 683a799618ade..bc827bd547c81 100644
|
|
--- a/kernel/debug/kdb/kdb_io.c
|
|
+++ b/kernel/debug/kdb/kdb_io.c
|
|
@@ -706,12 +706,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
|
|
size_avail = sizeof(kdb_buffer) - len;
|
|
goto kdb_print_out;
|
|
}
|
|
- if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
|
|
+ if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) {
|
|
/*
|
|
* This was a interactive search (using '/' at more
|
|
- * prompt) and it has completed. Clear the flag.
|
|
+ * prompt) and it has completed. Replace the \0 with
|
|
+ * its original value to ensure multi-line strings
|
|
+ * are handled properly, and return to normal mode.
|
|
*/
|
|
+ *cphold = replaced_byte;
|
|
kdb_grepping_flag = 0;
|
|
+ }
|
|
/*
|
|
* at this point the string is a full line and
|
|
* should be printed, up to the null.
|
|
diff --git a/kernel/events/core.c b/kernel/events/core.c
|
|
index fd8cd00099dae..38eeb297255e4 100644
|
|
--- a/kernel/events/core.c
|
|
+++ b/kernel/events/core.c
|
|
@@ -5852,11 +5852,11 @@ static void perf_pmu_output_stop(struct perf_event *event);
|
|
static void perf_mmap_close(struct vm_area_struct *vma)
|
|
{
|
|
struct perf_event *event = vma->vm_file->private_data;
|
|
-
|
|
struct perf_buffer *rb = ring_buffer_get(event);
|
|
struct user_struct *mmap_user = rb->mmap_user;
|
|
int mmap_locked = rb->mmap_locked;
|
|
unsigned long size = perf_data_size(rb);
|
|
+ bool detach_rest = false;
|
|
|
|
if (event->pmu->event_unmapped)
|
|
event->pmu->event_unmapped(event, vma->vm_mm);
|
|
@@ -5887,7 +5887,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
|
|
mutex_unlock(&event->mmap_mutex);
|
|
}
|
|
|
|
- atomic_dec(&rb->mmap_count);
|
|
+ if (atomic_dec_and_test(&rb->mmap_count))
|
|
+ detach_rest = true;
|
|
|
|
if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
|
|
goto out_put;
|
|
@@ -5896,7 +5897,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
|
|
mutex_unlock(&event->mmap_mutex);
|
|
|
|
/* If there's still other mmap()s of this buffer, we're done. */
|
|
- if (atomic_read(&rb->mmap_count))
|
|
+ if (!detach_rest)
|
|
goto out_put;
|
|
|
|
/*
|
|
diff --git a/kernel/fork.c b/kernel/fork.c
|
|
index efc5493203ae0..0074bbe8c66f1 100644
|
|
--- a/kernel/fork.c
|
|
+++ b/kernel/fork.c
|
|
@@ -1830,6 +1830,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk)
|
|
free_task(tsk);
|
|
}
|
|
|
|
+static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
|
|
+{
|
|
+ /* Skip if kernel thread */
|
|
+ if (!tsk->mm)
|
|
+ return;
|
|
+
|
|
+ /* Skip if spawning a thread or using vfork */
|
|
+ if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
|
|
+ return;
|
|
+
|
|
+ /* We need to synchronize with __set_oom_adj */
|
|
+ mutex_lock(&oom_adj_mutex);
|
|
+ set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
|
|
+ /* Update the values in case they were changed after copy_signal */
|
|
+ tsk->signal->oom_score_adj = current->signal->oom_score_adj;
|
|
+ tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
|
|
+ mutex_unlock(&oom_adj_mutex);
|
|
+}
|
|
+
|
|
/*
|
|
* This creates a new process as a copy of the old one,
|
|
* but does not actually start it yet.
|
|
@@ -2310,6 +2329,8 @@ static __latent_entropy struct task_struct *copy_process(
|
|
trace_task_newtask(p, clone_flags);
|
|
uprobe_copy_process(p, clone_flags);
|
|
|
|
+ copy_oom_score_adj(clone_flags, p);
|
|
+
|
|
return p;
|
|
|
|
bad_fork_cancel_cgroup:
|
|
diff --git a/kernel/module.c b/kernel/module.c
|
|
index 08c46084d8cca..991395d60f59c 100644
|
|
--- a/kernel/module.c
|
|
+++ b/kernel/module.c
|
|
@@ -91,8 +91,9 @@ EXPORT_SYMBOL_GPL(module_mutex);
|
|
static LIST_HEAD(modules);
|
|
|
|
/* Work queue for freeing init sections in success case */
|
|
-static struct work_struct init_free_wq;
|
|
-static struct llist_head init_free_list;
|
|
+static void do_free_init(struct work_struct *w);
|
|
+static DECLARE_WORK(init_free_wq, do_free_init);
|
|
+static LLIST_HEAD(init_free_list);
|
|
|
|
#ifdef CONFIG_MODULES_TREE_LOOKUP
|
|
|
|
@@ -3551,14 +3552,6 @@ static void do_free_init(struct work_struct *w)
|
|
}
|
|
}
|
|
|
|
-static int __init modules_wq_init(void)
|
|
-{
|
|
- INIT_WORK(&init_free_wq, do_free_init);
|
|
- init_llist_head(&init_free_list);
|
|
- return 0;
|
|
-}
|
|
-module_init(modules_wq_init);
|
|
-
|
|
/*
|
|
* This is where the real work happens.
|
|
*
|
|
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
|
|
index 02ec716a49271..0e60e10ed66a3 100644
|
|
--- a/kernel/power/hibernate.c
|
|
+++ b/kernel/power/hibernate.c
|
|
@@ -851,17 +851,6 @@ static int software_resume(void)
|
|
|
|
/* Check if the device is there */
|
|
swsusp_resume_device = name_to_dev_t(resume_file);
|
|
-
|
|
- /*
|
|
- * name_to_dev_t is ineffective to verify parition if resume_file is in
|
|
- * integer format. (e.g. major:minor)
|
|
- */
|
|
- if (isdigit(resume_file[0]) && resume_wait) {
|
|
- int partno;
|
|
- while (!get_gendisk(swsusp_resume_device, &partno))
|
|
- msleep(10);
|
|
- }
|
|
-
|
|
if (!swsusp_resume_device) {
|
|
/*
|
|
* Some device discovery might still be in progress; we need
|
|
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
|
|
index efb792e13fca9..23ec68d8ff3aa 100644
|
|
--- a/kernel/rcu/rcutorture.c
|
|
+++ b/kernel/rcu/rcutorture.c
|
|
@@ -2154,9 +2154,20 @@ static int __init rcu_torture_fwd_prog_init(void)
|
|
return -ENOMEM;
|
|
spin_lock_init(&rfp->rcu_fwd_lock);
|
|
rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
|
|
+ rcu_fwds = rfp;
|
|
return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
|
|
}
|
|
|
|
+static void rcu_torture_fwd_prog_cleanup(void)
|
|
+{
|
|
+ struct rcu_fwd *rfp;
|
|
+
|
|
+ torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
|
|
+ rfp = rcu_fwds;
|
|
+ rcu_fwds = NULL;
|
|
+ kfree(rfp);
|
|
+}
|
|
+
|
|
/* Callback function for RCU barrier testing. */
|
|
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
|
|
{
|
|
@@ -2360,7 +2371,7 @@ rcu_torture_cleanup(void)
|
|
|
|
show_rcu_gp_kthreads();
|
|
rcu_torture_barrier_cleanup();
|
|
- torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
|
|
+ rcu_torture_fwd_prog_cleanup();
|
|
torture_stop_kthread(rcu_torture_stall, stall_task);
|
|
torture_stop_kthread(rcu_torture_writer, writer_task);
|
|
|
|
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
|
|
index 1e9e500ff7906..572a79b1a8510 100644
|
|
--- a/kernel/rcu/tree.c
|
|
+++ b/kernel/rcu/tree.c
|
|
@@ -1882,7 +1882,7 @@ static void rcu_gp_fqs_loop(void)
|
|
break;
|
|
/* If time for quiescent-state forcing, do it. */
|
|
if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
|
|
- (gf & RCU_GP_FLAG_FQS)) {
|
|
+ (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
|
|
trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
|
|
TPS("fqsstart"));
|
|
rcu_gp_fqs(first_gp_fqs);
|
|
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
|
index f788cd61df212..1c68621743ac2 100644
|
|
--- a/kernel/sched/core.c
|
|
+++ b/kernel/sched/core.c
|
|
@@ -39,7 +39,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
|
|
|
|
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
|
|
|
|
-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
|
|
+#ifdef CONFIG_SCHED_DEBUG
|
|
/*
|
|
* Debugging: various feature bits
|
|
*
|
|
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
|
|
index 6b3b59cc51d6c..f3496556b6992 100644
|
|
--- a/kernel/sched/fair.c
|
|
+++ b/kernel/sched/fair.c
|
|
@@ -1550,7 +1550,7 @@ struct task_numa_env {
|
|
|
|
static unsigned long cpu_load(struct rq *rq);
|
|
static unsigned long cpu_util(int cpu);
|
|
-static inline long adjust_numa_imbalance(int imbalance, int src_nr_running);
|
|
+static inline long adjust_numa_imbalance(int imbalance, int nr_running);
|
|
|
|
static inline enum
|
|
numa_type numa_classify(unsigned int imbalance_pct,
|
|
@@ -1927,7 +1927,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
|
|
src_running = env->src_stats.nr_running - 1;
|
|
dst_running = env->dst_stats.nr_running + 1;
|
|
imbalance = max(0, dst_running - src_running);
|
|
- imbalance = adjust_numa_imbalance(imbalance, src_running);
|
|
+ imbalance = adjust_numa_imbalance(imbalance, dst_running);
|
|
|
|
/* Use idle CPU if there is no imbalance */
|
|
if (!imbalance) {
|
|
@@ -6067,7 +6067,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
|
|
/*
|
|
* Scan the local SMT mask for idle CPUs.
|
|
*/
|
|
-static int select_idle_smt(struct task_struct *p, int target)
|
|
+static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
|
|
{
|
|
int cpu;
|
|
|
|
@@ -6075,7 +6075,8 @@ static int select_idle_smt(struct task_struct *p, int target)
|
|
return -1;
|
|
|
|
for_each_cpu(cpu, cpu_smt_mask(target)) {
|
|
- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
|
|
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
|
|
+ !cpumask_test_cpu(cpu, sched_domain_span(sd)))
|
|
continue;
|
|
if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
|
|
return cpu;
|
|
@@ -6091,7 +6092,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
|
|
return -1;
|
|
}
|
|
|
|
-static inline int select_idle_smt(struct task_struct *p, int target)
|
|
+static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
|
|
{
|
|
return -1;
|
|
}
|
|
@@ -6266,7 +6267,7 @@ symmetric:
|
|
if ((unsigned)i < nr_cpumask_bits)
|
|
return i;
|
|
|
|
- i = select_idle_smt(p, target);
|
|
+ i = select_idle_smt(p, sd, target);
|
|
if ((unsigned)i < nr_cpumask_bits)
|
|
return i;
|
|
|
|
@@ -6586,7 +6587,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
|
|
|
|
util = cpu_util_next(cpu, p, cpu);
|
|
cpu_cap = capacity_of(cpu);
|
|
- spare_cap = cpu_cap - util;
|
|
+ spare_cap = cpu_cap;
|
|
+ lsub_positive(&spare_cap, util);
|
|
|
|
/*
|
|
* Skip CPUs that cannot satisfy the capacity request.
|
|
@@ -8943,7 +8945,7 @@ next_group:
|
|
}
|
|
}
|
|
|
|
-static inline long adjust_numa_imbalance(int imbalance, int src_nr_running)
|
|
+static inline long adjust_numa_imbalance(int imbalance, int nr_running)
|
|
{
|
|
unsigned int imbalance_min;
|
|
|
|
@@ -8952,7 +8954,7 @@ static inline long adjust_numa_imbalance(int imbalance, int src_nr_running)
|
|
* tasks that remain local when the source domain is almost idle.
|
|
*/
|
|
imbalance_min = 2;
|
|
- if (src_nr_running <= imbalance_min)
|
|
+ if (nr_running <= imbalance_min)
|
|
return 0;
|
|
|
|
return imbalance;
|
|
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
|
|
index c82857e2e288a..0b1485ac19c4e 100644
|
|
--- a/kernel/sched/sched.h
|
|
+++ b/kernel/sched/sched.h
|
|
@@ -1600,7 +1600,7 @@ enum {
|
|
|
|
#undef SCHED_FEAT
|
|
|
|
-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
|
|
+#ifdef CONFIG_SCHED_DEBUG
|
|
|
|
/*
|
|
* To support run-time toggling of sched features, all the translation units
|
|
@@ -1608,6 +1608,7 @@ enum {
|
|
*/
|
|
extern const_debug unsigned int sysctl_sched_features;
|
|
|
|
+#ifdef CONFIG_JUMP_LABEL
|
|
#define SCHED_FEAT(name, enabled) \
|
|
static __always_inline bool static_branch_##name(struct static_key *key) \
|
|
{ \
|
|
@@ -1620,7 +1621,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
|
|
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
|
|
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
|
|
|
|
-#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
|
|
+#else /* !CONFIG_JUMP_LABEL */
|
|
+
|
|
+#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
|
|
+
|
|
+#endif /* CONFIG_JUMP_LABEL */
|
|
+
|
|
+#else /* !SCHED_DEBUG */
|
|
|
|
/*
|
|
* Each translation unit has its own copy of sysctl_sched_features to allow
|
|
@@ -1636,7 +1643,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
|
|
|
|
#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
|
|
|
|
-#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
|
|
+#endif /* SCHED_DEBUG */
|
|
|
|
extern struct static_key_false sched_numa_balancing;
|
|
extern struct static_key_false sched_schedstats;
|
|
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
|
|
index c6cca0d1d5840..c8892156db341 100644
|
|
--- a/kernel/trace/trace_events_synth.c
|
|
+++ b/kernel/trace/trace_events_synth.c
|
|
@@ -132,7 +132,7 @@ static int synth_field_string_size(char *type)
|
|
start += sizeof("char[") - 1;
|
|
|
|
end = strchr(type, ']');
|
|
- if (!end || end < start)
|
|
+ if (!end || end < start || type + strlen(type) > end + 1)
|
|
return -EINVAL;
|
|
|
|
len = end - start;
|
|
@@ -465,6 +465,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
|
|
struct synth_field *field;
|
|
const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
|
|
int len, ret = 0;
|
|
+ ssize_t size;
|
|
|
|
if (field_type[0] == ';')
|
|
field_type++;
|
|
@@ -501,8 +502,14 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
|
|
if (field_type[0] == ';')
|
|
field_type++;
|
|
len = strlen(field_type) + 1;
|
|
- if (array)
|
|
- len += strlen(array);
|
|
+
|
|
+ if (array) {
|
|
+ int l = strlen(array);
|
|
+
|
|
+ if (l && array[l - 1] == ';')
|
|
+ l--;
|
|
+ len += l;
|
|
+ }
|
|
if (prefix)
|
|
len += strlen(prefix);
|
|
|
|
@@ -520,11 +527,12 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
|
|
field->type[len - 1] = '\0';
|
|
}
|
|
|
|
- field->size = synth_field_size(field->type);
|
|
- if (!field->size) {
|
|
+ size = synth_field_size(field->type);
|
|
+ if (size <= 0) {
|
|
ret = -EINVAL;
|
|
goto free;
|
|
}
|
|
+ field->size = size;
|
|
|
|
if (synth_field_is_string(field->type))
|
|
field->is_string = true;
|
|
diff --git a/lib/crc32.c b/lib/crc32.c
|
|
index 4a20455d1f61e..bf60ef26a45c2 100644
|
|
--- a/lib/crc32.c
|
|
+++ b/lib/crc32.c
|
|
@@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
|
|
return crc;
|
|
}
|
|
|
|
-#if CRC_LE_BITS == 1
|
|
+#if CRC_BE_BITS == 1
|
|
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
|
|
{
|
|
return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
|
|
diff --git a/lib/idr.c b/lib/idr.c
|
|
index c2cf2c52bbde5..4d2eef0259d2c 100644
|
|
--- a/lib/idr.c
|
|
+++ b/lib/idr.c
|
|
@@ -470,6 +470,7 @@ alloc:
|
|
goto retry;
|
|
nospc:
|
|
xas_unlock_irqrestore(&xas, flags);
|
|
+ kfree(alloc);
|
|
return -ENOSPC;
|
|
}
|
|
EXPORT_SYMBOL(ida_alloc_range);
|
|
diff --git a/mm/filemap.c b/mm/filemap.c
|
|
index 385759c4ce4be..6c3b879116212 100644
|
|
--- a/mm/filemap.c
|
|
+++ b/mm/filemap.c
|
|
@@ -826,10 +826,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
|
|
}
|
|
EXPORT_SYMBOL_GPL(replace_page_cache_page);
|
|
|
|
-static int __add_to_page_cache_locked(struct page *page,
|
|
- struct address_space *mapping,
|
|
- pgoff_t offset, gfp_t gfp_mask,
|
|
- void **shadowp)
|
|
+noinline int __add_to_page_cache_locked(struct page *page,
|
|
+ struct address_space *mapping,
|
|
+ pgoff_t offset, gfp_t gfp_mask,
|
|
+ void **shadowp)
|
|
{
|
|
XA_STATE(xas, &mapping->i_pages, offset);
|
|
int huge = PageHuge(page);
|
|
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
|
|
index 74300e337c3c7..358403422104b 100644
|
|
--- a/mm/huge_memory.c
|
|
+++ b/mm/huge_memory.c
|
|
@@ -2449,7 +2449,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
ClearPageCompound(head);
- split_page_owner(head, HPAGE_PMD_ORDER);
+ split_page_owner(head, HPAGE_PMD_NR);
/* See comment in __split_huge_page_tail() */
if (PageAnon(head)) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 13f559af1ab6a..6795bdf662566 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5276,7 +5276,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
struct page *page = NULL;
swp_entry_t ent = pte_to_swp_entry(ptent);
- if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
+ if (!(mc.flags & MOVE_ANON))
return NULL;
/*
@@ -5295,6 +5295,9 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
return page;
}
+ if (non_swap_entry(ent))
+ return NULL;
+
/*
* Because lookup_swap_cache() updates some statistics counter,
* we call find_get_page() with swapper_space directly.
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6e94962893ee8..67e5bb0900b37 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -64,6 +64,8 @@ int sysctl_oom_dump_tasks = 1;
* and mark_oom_victim
*/
DEFINE_MUTEX(oom_lock);
+/* Serializes oom_score_adj and oom_score_adj_min updates */
+DEFINE_MUTEX(oom_adj_mutex);
static inline bool is_memcg_oom(struct oom_control *oc)
{
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 43f6d91f57156..8cc774340d490 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3213,7 +3213,7 @@ void split_page(struct page *page, unsigned int order)
for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
- split_page_owner(page, order);
+ split_page_owner(page, 1 << order);
}
EXPORT_SYMBOL_GPL(split_page);
@@ -3487,7 +3487,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
#endif /* CONFIG_FAIL_PAGE_ALLOC */
-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return __should_fail_alloc_page(gfp_mask, order);
}
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 3604615094235..4ca3051a10358 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -204,7 +204,7 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
page_owner->last_migrate_reason = reason;
}
-void __split_page_owner(struct page *page, unsigned int order)
+void __split_page_owner(struct page *page, unsigned int nr)
{
int i;
struct page_ext *page_ext = lookup_page_ext(page);
@@ -213,7 +213,7 @@ void __split_page_owner(struct page *page, unsigned int order)
if (unlikely(!page_ext))
return;
- for (i = 0; i < (1 << order); i++) {
+ for (i = 0; i < nr; i++) {
page_owner = get_page_owner(page_ext);
page_owner->order = 0;
page_ext = page_ext_next(page_ext);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 26707c5dc9fce..605294e4df684 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3336,7 +3336,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = inode_drain_writes(inode);
if (error) {
inode->i_flags &= ~S_SWAPFILE;
- goto bad_swap_unlock_inode;
+ goto free_swap_address_space;
}
mutex_lock(&swapon_mutex);
@@ -3361,6 +3361,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = 0;
goto out;
+free_swap_address_space:
+ exit_swap_address_space(p->type);
bad_swap_unlock_inode:
inode_unlock(inode);
bad_swap:
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
|
|
index fc28dc201b936..131d29e902a30 100644
|
|
--- a/net/bluetooth/hci_core.c
|
|
+++ b/net/bluetooth/hci_core.c
|
|
@@ -3280,6 +3280,16 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
|
|
}
|
|
}
|
|
|
|
+static void hci_suspend_clear_tasks(struct hci_dev *hdev)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
|
|
+ clear_bit(i, hdev->suspend_tasks);
|
|
+
|
|
+ wake_up(&hdev->suspend_wait_q);
|
|
+}
|
|
+
|
|
static int hci_suspend_wait_event(struct hci_dev *hdev)
|
|
{
|
|
#define WAKE_COND \
|
|
@@ -3608,6 +3618,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
|
|
|
|
cancel_work_sync(&hdev->power_on);
|
|
|
|
+ hci_suspend_clear_tasks(hdev);
|
|
unregister_pm_notifier(&hdev->suspend_notifier);
|
|
cancel_work_sync(&hdev->suspend_prepare);
|
|
|
|
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
|
|
index c7fc28a465fdb..fa66e27b73635 100644
|
|
--- a/net/bluetooth/l2cap_sock.c
|
|
+++ b/net/bluetooth/l2cap_sock.c
|
|
@@ -1521,8 +1521,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
|
|
|
|
parent = bt_sk(sk)->parent;
|
|
|
|
- sock_set_flag(sk, SOCK_ZAPPED);
|
|
-
|
|
switch (chan->state) {
|
|
case BT_OPEN:
|
|
case BT_BOUND:
|
|
@@ -1549,8 +1547,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
|
|
|
|
break;
|
|
}
|
|
-
|
|
release_sock(sk);
|
|
+
|
|
+ /* Only zap after cleanup to avoid use after free race */
|
|
+ sock_set_flag(sk, SOCK_ZAPPED);
|
|
+
|
|
}
|
|
|
|
static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
|
|
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
|
|
index 12a4f4d936810..3fda71a8579d1 100644
|
|
--- a/net/bridge/netfilter/ebt_dnat.c
|
|
+++ b/net/bridge/netfilter/ebt_dnat.c
|
|
@@ -21,7 +21,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
|
{
|
|
const struct ebt_nat_info *info = par->targinfo;
|
|
|
|
- if (skb_ensure_writable(skb, ETH_ALEN))
|
|
+ if (skb_ensure_writable(skb, 0))
|
|
return EBT_DROP;
|
|
|
|
ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
|
|
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
|
|
index 0cad62a4052b9..307790562b492 100644
|
|
--- a/net/bridge/netfilter/ebt_redirect.c
|
|
+++ b/net/bridge/netfilter/ebt_redirect.c
|
|
@@ -21,7 +21,7 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
|
{
|
|
const struct ebt_redirect_info *info = par->targinfo;
|
|
|
|
- if (skb_ensure_writable(skb, ETH_ALEN))
|
|
+ if (skb_ensure_writable(skb, 0))
|
|
return EBT_DROP;
|
|
|
|
if (xt_hooknum(par) != NF_BR_BROUTING)
|
|
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
|
|
index 27443bf229a3b..7dfbcdfc30e5d 100644
|
|
--- a/net/bridge/netfilter/ebt_snat.c
|
|
+++ b/net/bridge/netfilter/ebt_snat.c
|
|
@@ -22,7 +22,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
|
|
{
|
|
const struct ebt_nat_info *info = par->targinfo;
|
|
|
|
- if (skb_ensure_writable(skb, ETH_ALEN * 2))
|
|
+ if (skb_ensure_writable(skb, 0))
|
|
return EBT_DROP;
|
|
|
|
ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
|
|
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
|
|
index a8dd956b5e8e1..916fdf2464bc2 100644
|
|
--- a/net/can/j1939/transport.c
|
|
+++ b/net/can/j1939/transport.c
|
|
@@ -580,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
|
|
skb->dev = priv->ndev;
|
|
can_skb_reserve(skb);
|
|
can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
|
|
+ can_skb_prv(skb)->skbcnt = 0;
|
|
/* reserve CAN header */
|
|
skb_reserve(skb, offsetof(struct can_frame, data));
|
|
|
|
@@ -1487,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
|
|
skb->dev = priv->ndev;
|
|
can_skb_reserve(skb);
|
|
can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
|
|
+ can_skb_prv(skb)->skbcnt = 0;
|
|
skcb = j1939_skb_to_cb(skb);
|
|
memcpy(skcb, rel_skcb, sizeof(*skcb));
|
|
|
|
diff --git a/net/core/filter.c b/net/core/filter.c
|
|
index 0261531d4fda6..3e4de9e461bd0 100644
|
|
--- a/net/core/filter.c
|
|
+++ b/net/core/filter.c
|
|
@@ -4323,7 +4323,8 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
|
|
cmpxchg(&sk->sk_pacing_status,
|
|
SK_PACING_NONE,
|
|
SK_PACING_NEEDED);
|
|
- sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
|
|
+ sk->sk_max_pacing_rate = (val == ~0U) ?
|
|
+ ~0UL : (unsigned int)val;
|
|
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
|
|
sk->sk_max_pacing_rate);
|
|
break;
|
|
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
|
|
index 6a32a1fd34f8c..053472c48354b 100644
|
|
--- a/net/core/skmsg.c
|
|
+++ b/net/core/skmsg.c
|
|
@@ -662,15 +662,16 @@ static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
|
|
{
|
|
int ret;
|
|
|
|
+ /* strparser clones the skb before handing it to a upper layer,
|
|
+ * meaning we have the same data, but sk is NULL. We do want an
|
|
+ * sk pointer though when we run the BPF program. So we set it
|
|
+ * here and then NULL it to ensure we don't trigger a BUG_ON()
|
|
+ * in skb/sk operations later if kfree_skb is called with a
|
|
+ * valid skb->sk pointer and no destructor assigned.
|
|
+ */
|
|
skb->sk = psock->sk;
|
|
bpf_compute_data_end_sk_skb(skb);
|
|
ret = bpf_prog_run_pin_on_cpu(prog, skb);
|
|
- /* strparser clones the skb before handing it to a upper layer,
|
|
- * meaning skb_orphan has been called. We NULL sk on the way out
|
|
- * to ensure we don't trigger a BUG_ON() in skb/sk operations
|
|
- * later and because we are not charging the memory of this skb
|
|
- * to any socket yet.
|
|
- */
|
|
skb->sk = NULL;
|
|
return ret;
|
|
}
|
|
@@ -795,7 +796,6 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
|
|
}
|
|
prog = READ_ONCE(psock->progs.skb_verdict);
|
|
if (likely(prog)) {
|
|
- skb_orphan(skb);
|
|
tcp_skb_bpf_redirect_clear(skb);
|
|
ret = sk_psock_bpf_run(psock, prog, skb);
|
|
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
|
|
diff --git a/net/core/sock.c b/net/core/sock.c
|
|
index 78f8736be9c50..25968369fe7f6 100644
|
|
--- a/net/core/sock.c
|
|
+++ b/net/core/sock.c
|
|
@@ -777,7 +777,6 @@ static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
|
|
} else {
|
|
sock_reset_flag(sk, SOCK_RCVTSTAMP);
|
|
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
|
|
- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
|
|
}
|
|
}
|
|
|
|
@@ -1007,8 +1006,6 @@ set_sndbuf:
|
|
__sock_set_timestamps(sk, valbool, true, true);
|
|
break;
|
|
case SO_TIMESTAMPING_NEW:
|
|
- sock_set_flag(sk, SOCK_TSTAMP_NEW);
|
|
- /* fall through */
|
|
case SO_TIMESTAMPING_OLD:
|
|
if (val & ~SOF_TIMESTAMPING_MASK) {
|
|
ret = -EINVAL;
|
|
@@ -1037,16 +1034,14 @@ set_sndbuf:
|
|
}
|
|
|
|
sk->sk_tsflags = val;
|
|
+ sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
|
|
+
|
|
if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
|
|
sock_enable_timestamp(sk,
|
|
SOCK_TIMESTAMPING_RX_SOFTWARE);
|
|
- else {
|
|
- if (optname == SO_TIMESTAMPING_NEW)
|
|
- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
|
|
-
|
|
+ else
|
|
sock_disable_timestamp(sk,
|
|
(1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
|
|
- }
|
|
break;
|
|
|
|
case SO_RCVLOWAT:
|
|
@@ -1189,7 +1184,7 @@ set_sndbuf:
|
|
|
|
case SO_MAX_PACING_RATE:
|
|
{
|
|
- unsigned long ulval = (val == ~0U) ? ~0UL : val;
|
|
+ unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
|
|
|
|
if (sizeof(ulval) != sizeof(val) &&
|
|
optlen >= sizeof(ulval) &&
|
|
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
|
|
index e30515f898023..70a505a713a56 100644
|
|
--- a/net/ipv4/icmp.c
|
|
+++ b/net/ipv4/icmp.c
|
|
@@ -239,7 +239,7 @@ static struct {
|
|
/**
|
|
* icmp_global_allow - Are we allowed to send one more ICMP message ?
|
|
*
|
|
- * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
|
|
+ * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
|
|
* Returns false if we reached the limit and can not send another packet.
|
|
* Note: called with BH disabled
|
|
*/
|
|
@@ -267,7 +267,10 @@ bool icmp_global_allow(void)
|
|
}
|
|
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
|
|
if (credit) {
|
|
- credit--;
|
|
+ /* We want to use a credit of one in average, but need to randomize
|
|
+ * it for security reasons.
|
|
+ */
|
|
+ credit = max_t(int, credit - prandom_u32_max(3), 0);
|
|
rc = true;
|
|
}
|
|
WRITE_ONCE(icmp_global.credit, credit);
|
|
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
|
|
index 4e31f23e4117e..e70291748889b 100644
|
|
--- a/net/ipv4/ip_gre.c
|
|
+++ b/net/ipv4/ip_gre.c
|
|
@@ -625,9 +625,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
|
|
}
|
|
|
|
if (dev->header_ops) {
|
|
- /* Need space for new headers */
|
|
- if (skb_cow_head(skb, dev->needed_headroom -
|
|
- (tunnel->hlen + sizeof(struct iphdr))))
|
|
+ if (skb_cow_head(skb, 0))
|
|
goto free_skb;
|
|
|
|
tnl_params = (const struct iphdr *)skb->data;
|
|
@@ -748,7 +746,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
|
|
len = tunnel->tun_hlen - len;
|
|
tunnel->hlen = tunnel->hlen + len;
|
|
|
|
- dev->needed_headroom = dev->needed_headroom + len;
|
|
+ if (dev->header_ops)
|
|
+ dev->hard_header_len += len;
|
|
+ else
|
|
+ dev->needed_headroom += len;
|
|
+
|
|
if (set_mtu)
|
|
dev->mtu = max_t(int, dev->mtu - len, 68);
|
|
|
|
@@ -944,6 +946,7 @@ static void __gre_tunnel_init(struct net_device *dev)
|
|
tunnel->parms.iph.protocol = IPPROTO_GRE;
|
|
|
|
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
|
|
+ dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
|
|
|
|
dev->features |= GRE_FEATURES;
|
|
dev->hw_features |= GRE_FEATURES;
|
|
@@ -987,10 +990,14 @@ static int ipgre_tunnel_init(struct net_device *dev)
|
|
return -EINVAL;
|
|
dev->flags = IFF_BROADCAST;
|
|
dev->header_ops = &ipgre_header_ops;
|
|
+ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
|
|
+ dev->needed_headroom = 0;
|
|
}
|
|
#endif
|
|
} else if (!tunnel->collect_md) {
|
|
dev->header_ops = &ipgre_header_ops;
|
|
+ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
|
|
+ dev->needed_headroom = 0;
|
|
}
|
|
|
|
return ip_tunnel_init(dev);
|
|
diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
|
|
index 7a83f881efa9e..136030ad2e546 100644
|
|
--- a/net/ipv4/netfilter/nf_log_arp.c
|
|
+++ b/net/ipv4/netfilter/nf_log_arp.c
|
|
@@ -43,16 +43,31 @@ static void dump_arp_packet(struct nf_log_buf *m,
|
|
const struct nf_loginfo *info,
|
|
const struct sk_buff *skb, unsigned int nhoff)
|
|
{
|
|
- const struct arphdr *ah;
|
|
- struct arphdr _arph;
|
|
const struct arppayload *ap;
|
|
struct arppayload _arpp;
|
|
+ const struct arphdr *ah;
|
|
+ unsigned int logflags;
|
|
+ struct arphdr _arph;
|
|
|
|
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
|
|
if (ah == NULL) {
|
|
nf_log_buf_add(m, "TRUNCATED");
|
|
return;
|
|
}
|
|
+
|
|
+ if (info->type == NF_LOG_TYPE_LOG)
|
|
+ logflags = info->u.log.logflags;
|
|
+ else
|
|
+ logflags = NF_LOG_DEFAULT_MASK;
|
|
+
|
|
+ if (logflags & NF_LOG_MACDECODE) {
|
|
+ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
|
|
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
|
|
+ nf_log_dump_vlan(m, skb);
|
|
+ nf_log_buf_add(m, "MACPROTO=%04x ",
|
|
+ ntohs(eth_hdr(skb)->h_proto));
|
|
+ }
|
|
+
|
|
nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
|
|
ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
|
|
|
|
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
|
|
index 0c72156130b68..d07583fac8f8c 100644
|
|
--- a/net/ipv4/netfilter/nf_log_ipv4.c
|
|
+++ b/net/ipv4/netfilter/nf_log_ipv4.c
|
|
@@ -284,8 +284,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m,
|
|
|
|
switch (dev->type) {
|
|
case ARPHRD_ETHER:
|
|
- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
|
|
- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
|
|
+ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
|
|
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
|
|
+ nf_log_dump_vlan(m, skb);
|
|
+ nf_log_buf_add(m, "MACPROTO=%04x ",
|
|
ntohs(eth_hdr(skb)->h_proto));
|
|
return;
|
|
default:
|
|
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
|
|
index 134e923822750..355c4499fa1b5 100644
|
|
--- a/net/ipv4/nexthop.c
|
|
+++ b/net/ipv4/nexthop.c
|
|
@@ -842,7 +842,7 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
|
|
remove_nh_grp_entry(net, nhge, nlinfo);
|
|
|
|
/* make sure all see the newly published array before releasing rtnl */
|
|
- synchronize_rcu();
|
|
+ synchronize_net();
|
|
}
|
|
|
|
static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
|
|
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
|
|
index 37f1288894747..71a9b11b7126d 100644
|
|
--- a/net/ipv4/route.c
|
|
+++ b/net/ipv4/route.c
|
|
@@ -2764,10 +2764,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
|
|
if (IS_ERR(rt))
|
|
return rt;
|
|
|
|
- if (flp4->flowi4_proto)
|
|
+ if (flp4->flowi4_proto) {
|
|
+ flp4->flowi4_oif = rt->dst.dev->ifindex;
|
|
rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
|
|
flowi4_to_flowi(flp4),
|
|
sk, 0);
|
|
+ }
|
|
|
|
return rt;
|
|
}
|
|
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
|
|
index 518f04355fbf3..02cc972edd0b0 100644
|
|
--- a/net/ipv4/tcp_input.c
|
|
+++ b/net/ipv4/tcp_input.c
|
|
@@ -5716,6 +5716,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
|
|
tcp_data_snd_check(sk);
|
|
if (!inet_csk_ack_scheduled(sk))
|
|
goto no_ack;
|
|
+ } else {
|
|
+ tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
|
|
}
|
|
|
|
__tcp_ack_snd_check(sk, 0);
|
|
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
|
|
index 3c32dcb5fd8e2..c0a0d41b6c37d 100644
|
|
--- a/net/ipv6/ip6_fib.c
|
|
+++ b/net/ipv6/ip6_fib.c
|
|
@@ -2617,8 +2617,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
|
|
iter->skip = *pos;
|
|
|
|
if (iter->tbl) {
|
|
+ loff_t p = 0;
|
|
+
|
|
ipv6_route_seq_setup_walk(iter, net);
|
|
- return ipv6_route_seq_next(seq, NULL, pos);
|
|
+ return ipv6_route_seq_next(seq, NULL, &p);
|
|
} else {
|
|
return NULL;
|
|
}
|
|
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
|
|
index da64550a57075..8210ff34ed9b7 100644
|
|
--- a/net/ipv6/netfilter/nf_log_ipv6.c
|
|
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
|
|
@@ -297,9 +297,11 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m,
|
|
|
|
switch (dev->type) {
|
|
case ARPHRD_ETHER:
|
|
- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
|
|
- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
|
|
- ntohs(eth_hdr(skb)->h_proto));
|
|
+ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
|
|
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
|
|
+ nf_log_dump_vlan(m, skb);
|
|
+ nf_log_buf_add(m, "MACPROTO=%04x ",
|
|
+ ntohs(eth_hdr(skb)->h_proto));
|
|
return;
|
|
default:
|
|
break;
|
|
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
|
|
index 1079a07e43e49..d74cfec685477 100644
|
|
--- a/net/mac80211/cfg.c
|
|
+++ b/net/mac80211/cfg.c
|
|
@@ -709,7 +709,8 @@ void sta_set_rate_info_tx(struct sta_info *sta,
|
|
u16 brate;
|
|
|
|
sband = ieee80211_get_sband(sta->sdata);
|
|
- if (sband) {
|
|
+ WARN_ON_ONCE(sband && !sband->bitrates);
|
|
+ if (sband && sband->bitrates) {
|
|
brate = sband->bitrates[rate->idx].bitrate;
|
|
rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
|
|
}
|
|
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
|
|
index 05e966f1609e2..b93916c382cdb 100644
|
|
--- a/net/mac80211/sta_info.c
|
|
+++ b/net/mac80211/sta_info.c
|
|
@@ -2122,6 +2122,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
|
|
int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);
|
|
|
|
sband = local->hw.wiphy->bands[band];
|
|
+
|
|
+ if (WARN_ON_ONCE(!sband->bitrates))
|
|
+ break;
|
|
+
|
|
brate = sband->bitrates[rate_idx].bitrate;
|
|
if (rinfo->bw == RATE_INFO_BW_5)
|
|
shift = 2;
|
|
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
|
|
index 8f940be42f98a..430a9213a7bf9 100644
|
|
--- a/net/mptcp/options.c
|
|
+++ b/net/mptcp/options.c
|
|
@@ -296,6 +296,7 @@ void mptcp_get_options(const struct sk_buff *skb,
|
|
mp_opt->mp_capable = 0;
|
|
mp_opt->mp_join = 0;
|
|
mp_opt->add_addr = 0;
|
|
+ mp_opt->ahmac = 0;
|
|
mp_opt->rm_addr = 0;
|
|
mp_opt->dss = 0;
|
|
|
|
@@ -517,7 +518,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
|
|
return ret;
|
|
}
|
|
|
|
- if (subflow->use_64bit_ack) {
|
|
+ if (READ_ONCE(msk->use_64bit_ack)) {
|
|
ack_size = TCPOLEN_MPTCP_DSS_ACK64;
|
|
opts->ext_copy.data_ack = msk->ack_seq;
|
|
opts->ext_copy.ack64 = 1;
|
|
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
|
|
index c6eeaf3e8dcb7..4675a7bbebb15 100644
|
|
--- a/net/mptcp/protocol.h
|
|
+++ b/net/mptcp/protocol.h
|
|
@@ -199,6 +199,7 @@ struct mptcp_sock {
|
|
u32 token;
|
|
unsigned long flags;
|
|
bool can_ack;
|
|
+ bool use_64bit_ack; /* Set when we received a 64-bit DSN */
|
|
spinlock_t join_list_lock;
|
|
struct work_struct work;
|
|
struct list_head conn_list;
|
|
@@ -285,7 +286,6 @@ struct mptcp_subflow_context {
|
|
data_avail : 1,
|
|
rx_eof : 1,
|
|
data_fin_tx_enable : 1,
|
|
- use_64bit_ack : 1, /* Set when we received a 64-bit DSN */
|
|
can_ack : 1; /* only after processing the remote a key */
|
|
u64 data_fin_tx_seq;
|
|
u32 remote_nonce;
|
|
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
|
|
index 3838a0b3a21ff..2e145b53b81f4 100644
|
|
--- a/net/mptcp/subflow.c
|
|
+++ b/net/mptcp/subflow.c
|
|
@@ -682,12 +682,11 @@ static enum mapping_status get_mapping_status(struct sock *ssk)
|
|
if (!mpext->dsn64) {
|
|
map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
|
|
mpext->data_seq);
|
|
- subflow->use_64bit_ack = 0;
|
|
pr_debug("expanded seq=%llu", subflow->map_seq);
|
|
} else {
|
|
map_seq = mpext->data_seq;
|
|
- subflow->use_64bit_ack = 1;
|
|
}
|
|
+ WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
|
|
|
|
if (subflow->map_valid) {
|
|
/* Allow replacing only with an identical map */
|
|
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
|
|
index 412656c34f205..beeafa42aad76 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_ctl.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
|
|
@@ -2471,6 +2471,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
|
|
/* Set timeout values for (tcp tcpfin udp) */
|
|
ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
|
|
goto out_unlock;
|
|
+ } else if (!len) {
|
|
+ /* No more commands with len == 0 below */
|
|
+ ret = -EINVAL;
|
|
+ goto out_unlock;
|
|
}
|
|
|
|
usvc_compat = (struct ip_vs_service_user *)arg;
|
|
@@ -2547,9 +2551,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
|
|
break;
|
|
case IP_VS_SO_SET_DELDEST:
|
|
ret = ip_vs_del_dest(svc, &udest);
|
|
- break;
|
|
- default:
|
|
- ret = -EINVAL;
|
|
}
|
|
|
|
out_unlock:
|
|
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
|
|
index b00866d777fe0..d2e5a8f644b80 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_xmit.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
|
|
@@ -609,6 +609,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
|
|
if (ret == NF_ACCEPT) {
|
|
nf_reset_ct(skb);
|
|
skb_forward_csum(skb);
|
|
+ if (skb->dev)
|
|
+ skb->tstamp = 0;
|
|
}
|
|
return ret;
|
|
}
|
|
@@ -649,6 +651,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
|
|
|
|
if (!local) {
|
|
skb_forward_csum(skb);
|
|
+ if (skb->dev)
|
|
+ skb->tstamp = 0;
|
|
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
|
|
NULL, skb_dst(skb)->dev, dst_output);
|
|
} else
|
|
@@ -669,6 +673,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
|
|
if (!local) {
|
|
ip_vs_drop_early_demux_sk(skb);
|
|
skb_forward_csum(skb);
|
|
+ if (skb->dev)
|
|
+ skb->tstamp = 0;
|
|
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
|
|
NULL, skb_dst(skb)->dev, dst_output);
|
|
} else
|
|
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
|
|
index 1926fd56df56a..848b137151c26 100644
|
|
--- a/net/netfilter/nf_conntrack_proto_tcp.c
|
|
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
|
|
@@ -541,13 +541,20 @@ static bool tcp_in_window(const struct nf_conn *ct,
|
|
swin = win << sender->td_scale;
|
|
sender->td_maxwin = (swin == 0 ? 1 : swin);
|
|
sender->td_maxend = end + sender->td_maxwin;
|
|
- /*
|
|
- * We haven't seen traffic in the other direction yet
|
|
- * but we have to tweak window tracking to pass III
|
|
- * and IV until that happens.
|
|
- */
|
|
- if (receiver->td_maxwin == 0)
|
|
+ if (receiver->td_maxwin == 0) {
|
|
+ /* We haven't seen traffic in the other
|
|
+ * direction yet but we have to tweak window
|
|
+ * tracking to pass III and IV until that
|
|
+ * happens.
|
|
+ */
|
|
receiver->td_end = receiver->td_maxend = sack;
|
|
+ } else if (sack == receiver->td_end + 1) {
|
|
+ /* Likely a reply to a keepalive.
|
|
+ * Needed for III.
|
|
+ */
|
|
+ receiver->td_end++;
|
|
+ }
|
|
+
|
|
}
|
|
} else if (((state->state == TCP_CONNTRACK_SYN_SENT
|
|
&& dir == IP_CT_DIR_ORIGINAL)
|
|
diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
|
|
index 2b01a151eaa80..a579e59ee5c5e 100644
|
|
--- a/net/netfilter/nf_dup_netdev.c
|
|
+++ b/net/netfilter/nf_dup_netdev.c
|
|
@@ -19,6 +19,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
|
|
skb_push(skb, skb->mac_len);
|
|
|
|
skb->dev = dev;
|
|
+ skb->tstamp = 0;
|
|
dev_queue_xmit(skb);
|
|
}
|
|
|
|
diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
|
|
index ae5628ddbe6d7..fd7c5f0f5c25b 100644
|
|
--- a/net/netfilter/nf_log_common.c
|
|
+++ b/net/netfilter/nf_log_common.c
|
|
@@ -171,6 +171,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
|
|
}
|
|
EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
|
|
|
|
+void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
|
|
+{
|
|
+ u16 vid;
|
|
+
|
|
+ if (!skb_vlan_tag_present(skb))
|
|
+ return;
|
|
+
|
|
+ vid = skb_vlan_tag_get(skb);
|
|
+ nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(nf_log_dump_vlan);
|
|
+
|
|
/* bridge and netdev logging families share this code. */
|
|
void nf_log_l2packet(struct net *net, u_int8_t pf,
|
|
__be16 protocol,
|
|
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
|
|
index 3087e23297dbf..b77985986b24e 100644
|
|
--- a/net/netfilter/nft_fwd_netdev.c
|
|
+++ b/net/netfilter/nft_fwd_netdev.c
|
|
@@ -138,6 +138,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
|
|
return;
|
|
|
|
skb->dev = dev;
|
|
+ skb->tstamp = 0;
|
|
neigh_xmit(neigh_table, dev, addr, skb);
|
|
out:
|
|
regs->verdict.code = verdict;
|
|
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
|
|
index e894254c17d43..8709f3d4e7c4b 100644
|
|
--- a/net/nfc/netlink.c
|
|
+++ b/net/nfc/netlink.c
|
|
@@ -1217,7 +1217,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
|
|
u32 idx;
|
|
char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
|
|
|
|
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
|
|
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME])
|
|
return -EINVAL;
|
|
|
|
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
|
|
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
|
|
index aa69fc4ce39d9..3715b1261c6f3 100644
|
|
--- a/net/sched/act_api.c
|
|
+++ b/net/sched/act_api.c
|
|
@@ -722,13 +722,6 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
|
|
return ret;
|
|
}
|
|
|
|
-static int tcf_action_destroy_1(struct tc_action *a, int bind)
|
|
-{
|
|
- struct tc_action *actions[] = { a, NULL };
|
|
-
|
|
- return tcf_action_destroy(actions, bind);
|
|
-}
|
|
-
|
|
static int tcf_action_put(struct tc_action *p)
|
|
{
|
|
return __tcf_action_put(p, false);
|
|
@@ -1000,13 +993,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
|
|
if (err < 0)
|
|
goto err_mod;
|
|
|
|
- if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
|
|
- !rcu_access_pointer(a->goto_chain)) {
|
|
- tcf_action_destroy_1(a, bind);
|
|
- NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
|
|
- return ERR_PTR(-EINVAL);
|
|
- }
|
|
-
|
|
if (!name && tb[TCA_ACT_COOKIE])
|
|
tcf_set_action_cookie(&a->act_cookie, cookie);
|
|
|
|
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
|
|
index 0eb4722cf7cd9..1558126af0d4b 100644
|
|
--- a/net/sched/act_ct.c
|
|
+++ b/net/sched/act_ct.c
|
|
@@ -156,11 +156,11 @@ tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
|
|
__be16 target_dst = target.dst.u.udp.port;
|
|
|
|
if (target_src != tuple->src.u.udp.port)
|
|
- tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
|
|
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
|
|
offsetof(struct udphdr, source),
|
|
0xFFFF, be16_to_cpu(target_src));
|
|
if (target_dst != tuple->dst.u.udp.port)
|
|
- tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
|
|
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
|
|
offsetof(struct udphdr, dest),
|
|
0xFFFF, be16_to_cpu(target_dst));
|
|
}
|
|
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
|
|
index 23cf8469a2e7c..e167f0ddfbcd4 100644
|
|
--- a/net/sched/act_tunnel_key.c
|
|
+++ b/net/sched/act_tunnel_key.c
|
|
@@ -458,7 +458,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
|
|
|
|
metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
|
|
0, flags,
|
|
- key_id, 0);
|
|
+ key_id, opts_len);
|
|
} else {
|
|
NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
|
|
ret = -EINVAL;
|
|
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
|
|
index 4619cb3cb0a8f..8bf6bde1cfe59 100644
|
|
--- a/net/sched/cls_api.c
|
|
+++ b/net/sched/cls_api.c
|
|
@@ -3707,7 +3707,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
|
|
entry->gate.num_entries = tcf_gate_num_entries(act);
|
|
err = tcf_gate_get_entries(entry, act);
|
|
if (err)
|
|
- goto err_out;
|
|
+ goto err_out_locked;
|
|
} else {
|
|
err = -EOPNOTSUPP;
|
|
goto err_out_locked;
|
|
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
|
|
index f82a2e5999171..49696f464794f 100644
|
|
--- a/net/smc/smc_core.c
|
|
+++ b/net/smc/smc_core.c
|
|
@@ -1595,7 +1595,7 @@ out:
|
|
return rc;
|
|
}
|
|
|
|
-#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
|
|
+#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
|
|
|
|
static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
|
|
bool is_dmb, int bufsize)
|
|
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
|
|
index df5b0a6ea8488..398f1d9521351 100644
|
|
--- a/net/smc/smc_llc.c
|
|
+++ b/net/smc/smc_llc.c
|
|
@@ -233,8 +233,6 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
|
|
default:
|
|
flow->type = SMC_LLC_FLOW_NONE;
|
|
}
|
|
- if (qentry == lgr->delayed_event)
|
|
- lgr->delayed_event = NULL;
|
|
smc_llc_flow_qentry_set(flow, qentry);
|
|
spin_unlock_bh(&lgr->llc_flow_lock);
|
|
return true;
|
|
@@ -1590,13 +1588,12 @@ static void smc_llc_event_work(struct work_struct *work)
|
|
struct smc_llc_qentry *qentry;
|
|
|
|
if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
|
|
- if (smc_link_usable(lgr->delayed_event->link)) {
|
|
- smc_llc_event_handler(lgr->delayed_event);
|
|
- } else {
|
|
- qentry = lgr->delayed_event;
|
|
- lgr->delayed_event = NULL;
|
|
+ qentry = lgr->delayed_event;
|
|
+ lgr->delayed_event = NULL;
|
|
+ if (smc_link_usable(qentry->link))
|
|
+ smc_llc_event_handler(qentry);
|
|
+ else
|
|
kfree(qentry);
|
|
- }
|
|
}
|
|
|
|
again:
|
|
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
|
|
index c28051f7d217d..653c317694406 100644
|
|
--- a/net/sunrpc/auth_gss/svcauth_gss.c
|
|
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
|
|
@@ -1104,9 +1104,9 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
|
|
struct gssp_in_token *in_token)
|
|
{
|
|
struct kvec *argv = &rqstp->rq_arg.head[0];
|
|
- unsigned int page_base, length;
|
|
- int pages, i, res;
|
|
- size_t inlen;
|
|
+ unsigned int length, pgto_offs, pgfrom_offs;
|
|
+ int pages, i, res, pgto, pgfrom;
|
|
+ size_t inlen, to_offs, from_offs;
|
|
|
|
res = gss_read_common_verf(gc, argv, authp, in_handle);
|
|
if (res)
|
|
@@ -1134,17 +1134,24 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
|
|
memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
|
|
inlen -= length;
|
|
|
|
- i = 1;
|
|
- page_base = rqstp->rq_arg.page_base;
|
|
+ to_offs = length;
|
|
+ from_offs = rqstp->rq_arg.page_base;
|
|
while (inlen) {
|
|
- length = min_t(unsigned int, inlen, PAGE_SIZE);
|
|
- memcpy(page_address(in_token->pages[i]),
|
|
- page_address(rqstp->rq_arg.pages[i]) + page_base,
|
|
+ pgto = to_offs >> PAGE_SHIFT;
|
|
+ pgfrom = from_offs >> PAGE_SHIFT;
|
|
+ pgto_offs = to_offs & ~PAGE_MASK;
|
|
+ pgfrom_offs = from_offs & ~PAGE_MASK;
|
|
+
|
|
+ length = min_t(unsigned int, inlen,
|
|
+ min_t(unsigned int, PAGE_SIZE - pgto_offs,
|
|
+ PAGE_SIZE - pgfrom_offs));
|
|
+ memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
|
|
+ page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
|
|
length);
|
|
|
|
+ to_offs += length;
|
|
+ from_offs += length;
|
|
inlen -= length;
|
|
- page_base = 0;
|
|
- i++;
|
|
}
|
|
return 0;
|
|
}
|
|
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
|
|
index 38e7c3c8c4a9c..e4f410084c748 100644
|
|
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
|
|
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
|
|
@@ -637,10 +637,11 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
|
|
while (remaining) {
|
|
len = min_t(u32, PAGE_SIZE - pageoff, remaining);
|
|
|
|
- memcpy(dst, page_address(*ppages), len);
|
|
+ memcpy(dst, page_address(*ppages) + pageoff, len);
|
|
remaining -= len;
|
|
dst += len;
|
|
pageoff = 0;
|
|
+ ppages++;
|
|
}
|
|
}
|
|
|
|
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
|
|
index 383f87bc10615..f69fb54821e6b 100644
|
|
--- a/net/tipc/bcast.c
|
|
+++ b/net/tipc/bcast.c
|
|
@@ -108,6 +108,8 @@ static void tipc_bcbase_select_primary(struct net *net)
|
|
{
|
|
struct tipc_bc_base *bb = tipc_bc_base(net);
|
|
int all_dests = tipc_link_bc_peers(bb->link);
|
|
+ int max_win = tipc_link_max_win(bb->link);
|
|
+ int min_win = tipc_link_min_win(bb->link);
|
|
int i, mtu, prim;
|
|
|
|
bb->primary_bearer = INVALID_BEARER_ID;
|
|
@@ -121,8 +123,12 @@ static void tipc_bcbase_select_primary(struct net *net)
|
|
continue;
|
|
|
|
mtu = tipc_bearer_mtu(net, i);
|
|
- if (mtu < tipc_link_mtu(bb->link))
|
|
+ if (mtu < tipc_link_mtu(bb->link)) {
|
|
tipc_link_set_mtu(bb->link, mtu);
|
|
+ tipc_link_set_queue_limits(bb->link,
|
|
+ min_win,
|
|
+ max_win);
|
|
+ }
|
|
bb->bcast_support &= tipc_bearer_bcast_support(net, i);
|
|
if (bb->dests[i] < all_dests)
|
|
continue;
|
|
@@ -585,7 +591,7 @@ static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win)
|
|
if (max_win > TIPC_MAX_LINK_WIN)
|
|
return -EINVAL;
|
|
tipc_bcast_lock(net);
|
|
- tipc_link_set_queue_limits(l, BCLINK_WIN_MIN, max_win);
|
|
+ tipc_link_set_queue_limits(l, tipc_link_min_win(l), max_win);
|
|
tipc_bcast_unlock(net);
|
|
return 0;
|
|
}
|
|
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
|
|
index 2776a41e0dece..15b24fbcbe970 100644
|
|
--- a/net/tipc/msg.c
|
|
+++ b/net/tipc/msg.c
|
|
@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
|
|
if (fragid == FIRST_FRAGMENT) {
|
|
if (unlikely(head))
|
|
goto err;
|
|
- frag = skb_unshare(frag, GFP_ATOMIC);
|
|
+ if (skb_cloned(frag))
|
|
+ frag = skb_copy(frag, GFP_ATOMIC);
|
|
if (unlikely(!frag))
|
|
goto err;
|
|
head = *headbuf = frag;
|
|
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
|
|
index 0cbad566f2811..f19416371bb99 100644
|
|
--- a/net/tls/tls_device.c
|
|
+++ b/net/tls/tls_device.c
|
|
@@ -418,14 +418,14 @@ static int tls_push_data(struct sock *sk,
|
|
struct tls_context *tls_ctx = tls_get_ctx(sk);
|
|
struct tls_prot_info *prot = &tls_ctx->prot_info;
|
|
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
|
|
- int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
|
|
struct tls_record_info *record = ctx->open_record;
|
|
int tls_push_record_flags;
|
|
struct page_frag *pfrag;
|
|
size_t orig_size = size;
|
|
u32 max_open_record_len;
|
|
- int copy, rc = 0;
|
|
+ bool more = false;
|
|
bool done = false;
|
|
+ int copy, rc = 0;
|
|
long timeo;
|
|
|
|
if (flags &
|
|
@@ -492,9 +492,8 @@ handle_error:
|
|
if (!size) {
|
|
last_record:
|
|
tls_push_record_flags = flags;
|
|
- if (more) {
|
|
- tls_ctx->pending_open_record_frags =
|
|
- !!record->num_frags;
|
|
+ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
|
|
+ more = true;
|
|
break;
|
|
}
|
|
|
|
@@ -526,6 +525,8 @@ last_record:
|
|
}
|
|
} while (!done);
|
|
|
|
+ tls_ctx->pending_open_record_frags = more;
|
|
+
|
|
if (orig_size - size > 0)
|
|
rc = orig_size - size;
|
|
|
|
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
|
|
index 4d7b255067225..47ab86ee192ac 100644
|
|
--- a/net/wireless/nl80211.c
|
|
+++ b/net/wireless/nl80211.c
|
|
@@ -2355,7 +2355,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
|
|
* case we'll continue with more data in the next round,
|
|
* but break unconditionally so unsplit data stops here.
|
|
*/
|
|
- state->split_start++;
|
|
+ if (state->split)
|
|
+ state->split_start++;
|
|
+ else
|
|
+ state->split_start = 0;
|
|
break;
|
|
case 9:
|
|
if (rdev->wiphy.extended_capabilities &&
|
|
@@ -4683,16 +4686,14 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs,
|
|
if (err)
|
|
return err;
|
|
|
|
- if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] ||
|
|
- !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
|
|
- return -EINVAL;
|
|
-
|
|
- he_obss_pd->min_offset =
|
|
- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
|
|
- he_obss_pd->max_offset =
|
|
- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
|
|
+ if (tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET])
|
|
+ he_obss_pd->min_offset =
|
|
+ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
|
|
+ if (tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
|
|
+ he_obss_pd->max_offset =
|
|
+ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
|
|
|
|
- if (he_obss_pd->min_offset >= he_obss_pd->max_offset)
|
|
+ if (he_obss_pd->min_offset > he_obss_pd->max_offset)
|
|
return -EINVAL;
|
|
|
|
he_obss_pd->enable = true;
|
|
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
|
|
index c91e91362a0c6..0151bb0b2fc71 100644
|
|
--- a/samples/bpf/xdpsock_user.c
|
|
+++ b/samples/bpf/xdpsock_user.c
|
|
@@ -921,7 +921,7 @@ static void rx_drop_all(void)
|
|
}
|
|
}
|
|
|
|
-static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
|
|
+static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
|
|
{
|
|
u32 idx;
|
|
unsigned int i;
|
|
@@ -934,14 +934,14 @@ static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
|
|
for (i = 0; i < batch_size; i++) {
|
|
struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
|
|
idx + i);
|
|
- tx_desc->addr = (frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
|
|
+ tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
|
|
tx_desc->len = PKT_SIZE;
|
|
}
|
|
|
|
xsk_ring_prod__submit(&xsk->tx, batch_size);
|
|
xsk->outstanding_tx += batch_size;
|
|
- frame_nb += batch_size;
|
|
- frame_nb %= NUM_FRAMES;
|
|
+ *frame_nb += batch_size;
|
|
+ *frame_nb %= NUM_FRAMES;
|
|
complete_tx_only(xsk, batch_size);
|
|
}
|
|
|
|
@@ -997,7 +997,7 @@ static void tx_only_all(void)
|
|
}
|
|
|
|
for (i = 0; i < num_socks; i++)
|
|
- tx_only(xsks[i], frame_nb[i], batch_size);
|
|
+ tx_only(xsks[i], &frame_nb[i], batch_size);
|
|
|
|
pkt_cnt += batch_size;
|
|
|
|
diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
|
|
index a11bf6c5b53b4..cd3f16a6f5caf 100644
|
|
--- a/samples/mic/mpssd/mpssd.c
|
|
+++ b/samples/mic/mpssd/mpssd.c
|
|
@@ -403,9 +403,9 @@ mic_virtio_copy(struct mic_info *mic, int fd,
|
|
|
|
static inline unsigned _vring_size(unsigned int num, unsigned long align)
|
|
{
|
|
- return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
|
|
+ return _ALIGN_UP(((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
|
|
+ align - 1) & ~(align - 1))
|
|
- + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
|
|
+ + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num, 4);
|
|
}
|
|
|
|
/*
|
|
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
|
|
index 011c3c76af865..21989fa0c1074 100644
|
|
--- a/security/integrity/ima/ima_crypto.c
|
|
+++ b/security/integrity/ima/ima_crypto.c
|
|
@@ -829,6 +829,8 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
|
|
/* now accumulate with current aggregate */
|
|
rc = crypto_shash_update(shash, d.digest,
|
|
crypto_shash_digestsize(tfm));
|
|
+ if (rc != 0)
|
|
+ return rc;
|
|
}
|
|
/*
|
|
* Extend cumulative digest over TPM registers 8-9, which contain
|
|
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
|
|
index c1583d98c5e50..0b8f17570f210 100644
|
|
--- a/security/integrity/ima/ima_main.c
|
|
+++ b/security/integrity/ima/ima_main.c
|
|
@@ -531,6 +531,16 @@ int ima_file_hash(struct file *file, char *buf, size_t buf_size)
|
|
return -EOPNOTSUPP;
|
|
|
|
mutex_lock(&iint->mutex);
|
|
+
|
|
+ /*
|
|
+ * ima_file_hash can be called when ima_collect_measurement has still
|
|
+ * not been called, we might not always have a hash.
|
|
+ */
|
|
+ if (!iint->ima_hash) {
|
|
+ mutex_unlock(&iint->mutex);
|
|
+ return -EOPNOTSUPP;
|
|
+ }
|
|
+
|
|
if (buf) {
|
|
size_t copied_size;
|
|
|
|
diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
|
|
index c8b9c0b315d8f..250a92b187265 100644
|
|
--- a/sound/core/seq/oss/seq_oss.c
|
|
+++ b/sound/core/seq/oss/seq_oss.c
|
|
@@ -174,9 +174,12 @@ odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
if (snd_BUG_ON(!dp))
|
|
return -ENXIO;
|
|
|
|
- mutex_lock(&register_mutex);
|
|
+ if (cmd != SNDCTL_SEQ_SYNC &&
|
|
+ mutex_lock_interruptible(&register_mutex))
|
|
+ return -ERESTARTSYS;
|
|
rc = snd_seq_oss_ioctl(dp, cmd, arg);
|
|
- mutex_unlock(&register_mutex);
|
|
+ if (cmd != SNDCTL_SEQ_SYNC)
|
|
+ mutex_unlock(&register_mutex);
|
|
return rc;
|
|
}
|
|
|
|
diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
|
|
index 45b740f44c459..c362eb38ab906 100644
|
|
--- a/sound/firewire/bebob/bebob_hwdep.c
|
|
+++ b/sound/firewire/bebob/bebob_hwdep.c
|
|
@@ -36,12 +36,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
|
|
}
|
|
|
|
memset(&event, 0, sizeof(event));
|
|
+ count = min_t(long, count, sizeof(event.lock_status));
|
|
if (bebob->dev_lock_changed) {
|
|
event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
|
|
event.lock_status.status = (bebob->dev_lock_count > 0);
|
|
bebob->dev_lock_changed = false;
|
|
-
|
|
- count = min_t(long, count, sizeof(event.lock_status));
|
|
}
|
|
|
|
spin_unlock_irq(&bebob->lock);
|
|
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
|
|
index 4c23b169ac67e..cc51ef98752a9 100644
|
|
--- a/sound/pci/hda/hda_intel.c
|
|
+++ b/sound/pci/hda/hda_intel.c
|
|
@@ -1003,12 +1003,14 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
|
|
azx_init_pci(chip);
|
|
hda_intel_init_chip(chip, true);
|
|
|
|
- if (status && from_rt) {
|
|
- list_for_each_codec(codec, &chip->bus)
|
|
- if (!codec->relaxed_resume &&
|
|
- (status & (1 << codec->addr)))
|
|
- schedule_delayed_work(&codec->jackpoll_work,
|
|
- codec->jackpoll_interval);
|
|
+ if (from_rt) {
|
|
+ list_for_each_codec(codec, &chip->bus) {
|
|
+ if (codec->relaxed_resume)
|
|
+ continue;
|
|
+
|
|
+ if (codec->forced_resume || (status & (1 << codec->addr)))
|
|
+ pm_request_resume(hda_codec_dev(codec));
|
|
+ }
|
|
}
|
|
|
|
/* power down again for link-controlled chips */
|
|
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
|
|
index 02cc682caa55a..588059428d8f5 100644
|
|
--- a/sound/pci/hda/hda_jack.c
|
|
+++ b/sound/pci/hda/hda_jack.c
|
|
@@ -275,6 +275,23 @@ int snd_hda_jack_detect_state_mst(struct hda_codec *codec,
|
|
}
|
|
EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state_mst);
|
|
|
|
+static struct hda_jack_callback *
|
|
+find_callback_from_list(struct hda_jack_tbl *jack,
|
|
+ hda_jack_callback_fn func)
|
|
+{
|
|
+ struct hda_jack_callback *cb;
|
|
+
|
|
+ if (!func)
|
|
+ return NULL;
|
|
+
|
|
+ for (cb = jack->callback; cb; cb = cb->next) {
|
|
+ if (cb->func == func)
|
|
+ return cb;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
/**
|
|
* snd_hda_jack_detect_enable_mst - enable the jack-detection
|
|
* @codec: the HDA codec
|
|
@@ -297,7 +314,10 @@ snd_hda_jack_detect_enable_callback_mst(struct hda_codec *codec, hda_nid_t nid,
|
|
jack = snd_hda_jack_tbl_new(codec, nid, dev_id);
|
|
if (!jack)
|
|
return ERR_PTR(-ENOMEM);
|
|
- if (func) {
|
|
+
|
|
+ callback = find_callback_from_list(jack, func);
|
|
+
|
|
+ if (func && !callback) {
|
|
callback = kzalloc(sizeof(*callback), GFP_KERNEL);
|
|
if (!callback)
|
|
return ERR_PTR(-ENOMEM);
|
|
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
|
|
index 6dfa864d3fe7b..a49c322bdbe9d 100644
|
|
--- a/sound/pci/hda/patch_ca0132.c
|
|
+++ b/sound/pci/hda/patch_ca0132.c
|
|
@@ -1065,6 +1065,7 @@ enum {
|
|
QUIRK_R3DI,
|
|
QUIRK_R3D,
|
|
QUIRK_AE5,
|
|
+ QUIRK_AE7,
|
|
};
|
|
|
|
#ifdef CONFIG_PCI
|
|
@@ -1184,6 +1185,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
|
|
SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
|
|
SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
|
|
SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
|
|
+ SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7),
|
|
{}
|
|
};
|
|
|
|
@@ -4675,6 +4677,15 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
|
|
ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
|
|
tmp = FLOAT_THREE;
|
|
break;
|
|
+ case QUIRK_AE7:
|
|
+ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
|
|
+ tmp = FLOAT_THREE;
|
|
+ chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
|
|
+ SR_96_000);
|
|
+ chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
|
|
+ SR_96_000);
|
|
+ dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
|
|
+ break;
|
|
default:
|
|
tmp = FLOAT_ONE;
|
|
break;
|
|
@@ -4720,6 +4731,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
|
|
case QUIRK_AE5:
|
|
ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
|
|
break;
|
|
+ case QUIRK_AE7:
|
|
+ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f);
|
|
+ chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
|
|
+ SR_96_000);
|
|
+ chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
|
|
+ SR_96_000);
|
|
+ dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
|
|
+ break;
|
|
default:
|
|
break;
|
|
}
|
|
@@ -4729,7 +4748,10 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
|
|
if (ca0132_quirk(spec) == QUIRK_R3DI)
|
|
chipio_set_conn_rate(codec, 0x0F, SR_96_000);
|
|
|
|
- tmp = FLOAT_ZERO;
|
|
+ if (ca0132_quirk(spec) == QUIRK_AE7)
|
|
+ tmp = FLOAT_THREE;
|
|
+ else
|
|
+ tmp = FLOAT_ZERO;
|
|
dspio_set_uint_param(codec, 0x80, 0x00, tmp);
|
|
|
|
switch (ca0132_quirk(spec)) {
|
|
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
|
|
index 419f012b9853c..0d3e996beede1 100644
|
|
--- a/sound/pci/hda/patch_hdmi.c
|
|
+++ b/sound/pci/hda/patch_hdmi.c
|
|
@@ -1989,22 +1989,25 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
|
|
int pinctl;
|
|
int err = 0;
|
|
|
|
+ mutex_lock(&spec->pcm_lock);
|
|
if (hinfo->nid) {
|
|
pcm_idx = hinfo_to_pcm_index(codec, hinfo);
|
|
- if (snd_BUG_ON(pcm_idx < 0))
|
|
- return -EINVAL;
|
|
+ if (snd_BUG_ON(pcm_idx < 0)) {
|
|
+ err = -EINVAL;
|
|
+ goto unlock;
|
|
+ }
|
|
cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid);
|
|
- if (snd_BUG_ON(cvt_idx < 0))
|
|
- return -EINVAL;
|
|
+ if (snd_BUG_ON(cvt_idx < 0)) {
|
|
+ err = -EINVAL;
|
|
+ goto unlock;
|
|
+ }
|
|
per_cvt = get_cvt(spec, cvt_idx);
|
|
-
|
|
snd_BUG_ON(!per_cvt->assigned);
|
|
per_cvt->assigned = 0;
|
|
hinfo->nid = 0;
|
|
|
|
azx_stream(get_azx_dev(substream))->stripe = 0;
|
|
|
|
- mutex_lock(&spec->pcm_lock);
|
|
snd_hda_spdif_ctls_unassign(codec, pcm_idx);
|
|
clear_bit(pcm_idx, &spec->pcm_in_use);
|
|
pin_idx = hinfo_to_pin_index(codec, hinfo);
|
|
@@ -2034,10 +2037,11 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
|
|
per_pin->setup = false;
|
|
per_pin->channels = 0;
|
|
mutex_unlock(&per_pin->lock);
|
|
- unlock:
|
|
- mutex_unlock(&spec->pcm_lock);
|
|
}
|
|
|
|
+unlock:
|
|
+ mutex_unlock(&spec->pcm_lock);
|
|
+
|
|
return err;
|
|
}
|
|
|
|
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
|
|
index 601683e05ccca..e9593abd4e232 100644
|
|
--- a/sound/pci/hda/patch_realtek.c
|
|
+++ b/sound/pci/hda/patch_realtek.c
|
|
@@ -1142,6 +1142,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
|
|
codec->single_adc_amp = 1;
|
|
/* FIXME: do we need this for all Realtek codec models? */
|
|
codec->spdif_status_reset = 1;
|
|
+ codec->forced_resume = 1;
|
|
codec->patch_ops = alc_patch_ops;
|
|
|
|
err = alc_codec_rename_from_preset(codec);
|
|
@@ -1921,6 +1922,8 @@ enum {
|
|
ALC1220_FIXUP_CLEVO_P950,
|
|
ALC1220_FIXUP_CLEVO_PB51ED,
|
|
ALC1220_FIXUP_CLEVO_PB51ED_PINS,
|
|
+ ALC887_FIXUP_ASUS_AUDIO,
|
|
+ ALC887_FIXUP_ASUS_HMIC,
|
|
};
|
|
|
|
static void alc889_fixup_coef(struct hda_codec *codec,
|
|
@@ -2133,6 +2136,31 @@ static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
|
|
alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
|
|
}
|
|
|
|
+static void alc887_asus_hp_automute_hook(struct hda_codec *codec,
|
|
+ struct hda_jack_callback *jack)
|
|
+{
|
|
+ struct alc_spec *spec = codec->spec;
|
|
+ unsigned int vref;
|
|
+
|
|
+ snd_hda_gen_hp_automute(codec, jack);
|
|
+
|
|
+ if (spec->gen.hp_jack_present)
|
|
+ vref = AC_PINCTL_VREF_80;
|
|
+ else
|
|
+ vref = AC_PINCTL_VREF_HIZ;
|
|
+ snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref);
|
|
+}
|
|
+
|
|
+static void alc887_fixup_asus_jack(struct hda_codec *codec,
|
|
+ const struct hda_fixup *fix, int action)
|
|
+{
|
|
+ struct alc_spec *spec = codec->spec;
|
|
+ if (action != HDA_FIXUP_ACT_PROBE)
|
|
+ return;
|
|
+ snd_hda_set_pin_ctl_cache(codec, 0x1b, PIN_HP);
|
|
+ spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook;
|
|
+}
|
|
+
|
|
static const struct hda_fixup alc882_fixups[] = {
|
|
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
|
|
.type = HDA_FIXUP_PINS,
|
|
@@ -2390,6 +2418,20 @@ static const struct hda_fixup alc882_fixups[] = {
|
|
.chained = true,
|
|
.chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
|
|
},
|
|
+ [ALC887_FIXUP_ASUS_AUDIO] = {
|
|
+ .type = HDA_FIXUP_PINS,
|
|
+ .v.pins = (const struct hda_pintbl[]) {
|
|
+ { 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */
|
|
+ { 0x19, 0x22219420 },
|
|
+ {}
|
|
+ },
|
|
+ },
|
|
+ [ALC887_FIXUP_ASUS_HMIC] = {
|
|
+ .type = HDA_FIXUP_FUNC,
|
|
+ .v.func = alc887_fixup_asus_jack,
|
|
+ .chained = true,
|
|
+ .chain_id = ALC887_FIXUP_ASUS_AUDIO,
|
|
+ },
|
|
};
|
|
|
|
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
|
|
@@ -2423,6 +2465,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
|
|
SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
|
|
SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
|
|
+ SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC),
|
|
SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
|
|
SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
|
|
SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
|
|
@@ -6245,6 +6288,7 @@ enum {
|
|
ALC269_FIXUP_LEMOTE_A190X,
|
|
ALC256_FIXUP_INTEL_NUC8_RUGGED,
|
|
ALC255_FIXUP_XIAOMI_HEADSET_MIC,
|
|
+ ALC274_FIXUP_HP_MIC,
|
|
};
|
|
|
|
static const struct hda_fixup alc269_fixups[] = {
|
|
@@ -7624,6 +7668,14 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
.chained = true,
|
|
.chain_id = ALC289_FIXUP_ASUS_GA401
|
|
},
|
|
+ [ALC274_FIXUP_HP_MIC] = {
|
|
+ .type = HDA_FIXUP_VERBS,
|
|
+ .v.verbs = (const struct hda_verb[]) {
|
|
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
|
|
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
|
|
+ { }
|
|
+ },
|
|
+ },
|
|
};
|
|
|
|
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
@@ -7775,6 +7827,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
|
|
SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
|
|
SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
|
|
+ SND_PCI_QUIRK(0x103c, 0x874e, "HP", ALC274_FIXUP_HP_MIC),
|
|
+ SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
|
|
SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
|
|
SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
|
|
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
|
|
@@ -8100,6 +8154,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
|
|
{.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
|
|
{.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
|
|
{.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
|
|
+ {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
|
|
{}
|
|
};
|
|
#define ALC225_STANDARD_PINS \
|
|
@@ -9634,6 +9689,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
|
|
SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
|
|
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
|
|
+ SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
|
|
SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
|
|
SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
|
|
SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
|
|
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
|
|
index 986a6308818b2..2a8484f37496c 100644
|
|
--- a/sound/soc/codecs/Kconfig
|
|
+++ b/sound/soc/codecs/Kconfig
|
|
@@ -539,6 +539,7 @@ config SND_SOC_CQ0093VC
|
|
config SND_SOC_CROS_EC_CODEC
|
|
tristate "codec driver for ChromeOS EC"
|
|
depends on CROS_EC
|
|
+ select CRYPTO
|
|
select CRYPTO_LIB_SHA256
|
|
help
|
|
If you say yes here you will get support for the
|
|
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
|
|
index cf071121c8398..531bf32043813 100644
|
|
--- a/sound/soc/codecs/tas2770.c
|
|
+++ b/sound/soc/codecs/tas2770.c
|
|
@@ -16,7 +16,6 @@
|
|
#include <linux/i2c.h>
|
|
#include <linux/gpio.h>
|
|
#include <linux/gpio/consumer.h>
|
|
-#include <linux/pm_runtime.h>
|
|
#include <linux/regulator/consumer.h>
|
|
#include <linux/firmware.h>
|
|
#include <linux/regmap.h>
|
|
@@ -57,7 +56,12 @@ static int tas2770_set_bias_level(struct snd_soc_component *component,
|
|
TAS2770_PWR_CTRL_MASK,
|
|
TAS2770_PWR_CTRL_ACTIVE);
|
|
break;
|
|
-
|
|
+ case SND_SOC_BIAS_STANDBY:
|
|
+ case SND_SOC_BIAS_PREPARE:
|
|
+ snd_soc_component_update_bits(component,
|
|
+ TAS2770_PWR_CTRL,
|
|
+ TAS2770_PWR_CTRL_MASK, TAS2770_PWR_CTRL_MUTE);
|
|
+ break;
|
|
case SND_SOC_BIAS_OFF:
|
|
snd_soc_component_update_bits(component,
|
|
TAS2770_PWR_CTRL,
|
|
@@ -135,23 +139,18 @@ static int tas2770_dac_event(struct snd_soc_dapm_widget *w,
|
|
TAS2770_PWR_CTRL,
|
|
TAS2770_PWR_CTRL_MASK,
|
|
TAS2770_PWR_CTRL_MUTE);
|
|
- if (ret)
|
|
- goto end;
|
|
break;
|
|
case SND_SOC_DAPM_PRE_PMD:
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_PWR_CTRL,
|
|
TAS2770_PWR_CTRL_MASK,
|
|
TAS2770_PWR_CTRL_SHUTDOWN);
|
|
- if (ret)
|
|
- goto end;
|
|
break;
|
|
default:
|
|
dev_err(tas2770->dev, "Not supported evevt\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
-end:
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
@@ -243,6 +242,9 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
tas2770->channel_size = bitwidth;
|
|
|
|
ret = snd_soc_component_update_bits(component,
|
|
@@ -251,16 +253,15 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
|
|
TAS2770_TDM_CFG_REG5_50_MASK,
|
|
TAS2770_TDM_CFG_REG5_VSNS_ENABLE |
|
|
tas2770->v_sense_slot);
|
|
- if (ret)
|
|
- goto end;
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_TDM_CFG_REG6,
|
|
TAS2770_TDM_CFG_REG6_ISNS_MASK |
|
|
TAS2770_TDM_CFG_REG6_50_MASK,
|
|
TAS2770_TDM_CFG_REG6_ISNS_ENABLE |
|
|
tas2770->i_sense_slot);
|
|
-
|
|
-end:
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
@@ -278,36 +279,35 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_SMP_MASK,
|
|
TAS2770_TDM_CFG_REG0_SMP_48KHZ);
|
|
- if (ret)
|
|
- goto end;
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_31_MASK,
|
|
TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
|
|
- if (ret)
|
|
- goto end;
|
|
break;
|
|
case 44100:
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_SMP_MASK,
|
|
TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
|
|
- if (ret)
|
|
- goto end;
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_31_MASK,
|
|
TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
|
|
- if (ret)
|
|
- goto end;
|
|
break;
|
|
case 96000:
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_SMP_MASK,
|
|
TAS2770_TDM_CFG_REG0_SMP_48KHZ);
|
|
- if (ret)
|
|
- goto end;
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_31_MASK,
|
|
@@ -318,8 +318,9 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_SMP_MASK,
|
|
TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
|
|
- if (ret)
|
|
- goto end;
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_31_MASK,
|
|
@@ -330,22 +331,22 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_SMP_MASK,
|
|
TAS2770_TDM_CFG_REG0_SMP_48KHZ);
|
|
- if (ret)
|
|
- goto end;
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_31_MASK,
|
|
TAS2770_TDM_CFG_REG0_31_176_4_192KHZ);
|
|
- if (ret)
|
|
- goto end;
|
|
break;
|
|
case 17640:
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_SMP_MASK,
|
|
TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
|
|
- if (ret)
|
|
- goto end;
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
ret = snd_soc_component_update_bits(component,
|
|
TAS2770_TDM_CFG_REG0,
|
|
TAS2770_TDM_CFG_REG0_31_MASK,
|
|
@@ -355,7 +356,6 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
|
|
ret = -EINVAL;
|
|
}
|
|
|
|
-end:
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
@@ -574,6 +574,8 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
|
|
|
|
tas2770->component = component;
|
|
|
|
+ tas2770_reset(tas2770);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -700,29 +702,28 @@ static int tas2770_parse_dt(struct device *dev, struct tas2770_priv *tas2770)
|
|
rc = fwnode_property_read_u32(dev->fwnode, "ti,asi-format",
|
|
&tas2770->asi_format);
|
|
if (rc) {
|
|
- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
|
|
- "ti,asi-format", rc);
|
|
- goto end;
|
|
+ dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
|
|
+ "ti,asi-format");
|
|
+ tas2770->asi_format = 0;
|
|
}
|
|
|
|
rc = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
|
|
&tas2770->i_sense_slot);
|
|
if (rc) {
|
|
- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
|
|
- "ti,imon-slot-no", rc);
|
|
- goto end;
|
|
+ dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
|
|
+ "ti,imon-slot-no");
|
|
+ tas2770->i_sense_slot = 0;
|
|
}
|
|
|
|
rc = fwnode_property_read_u32(dev->fwnode, "ti,vmon-slot-no",
|
|
&tas2770->v_sense_slot);
|
|
if (rc) {
|
|
- dev_err(tas2770->dev, "Looking up %s property failed %d\n",
|
|
- "ti,vmon-slot-no", rc);
|
|
- goto end;
|
|
+ dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
|
|
+ "ti,vmon-slot-no");
|
|
+ tas2770->v_sense_slot = 2;
|
|
}
|
|
|
|
-end:
|
|
- return rc;
|
|
+ return 0;
|
|
}
|
|
|
|
static int tas2770_i2c_probe(struct i2c_client *client,
|
|
@@ -770,8 +771,6 @@ static int tas2770_i2c_probe(struct i2c_client *client,
|
|
tas2770->channel_size = 0;
|
|
tas2770->slot_width = 0;
|
|
|
|
- tas2770_reset(tas2770);
|
|
-
|
|
result = tas2770_register_codec(tas2770);
|
|
if (result)
|
|
dev_err(tas2770->dev, "Register codec failed.\n");
|
|
@@ -780,13 +779,6 @@ end:
|
|
return result;
|
|
}
|
|
|
|
-static int tas2770_i2c_remove(struct i2c_client *client)
|
|
-{
|
|
- pm_runtime_disable(&client->dev);
|
|
- return 0;
|
|
-}
|
|
-
|
|
-
|
|
static const struct i2c_device_id tas2770_i2c_id[] = {
|
|
{ "tas2770", 0},
|
|
{ }
|
|
@@ -807,7 +799,6 @@ static struct i2c_driver tas2770_i2c_driver = {
|
|
.of_match_table = of_match_ptr(tas2770_of_match),
|
|
},
|
|
.probe = tas2770_i2c_probe,
|
|
- .remove = tas2770_i2c_remove,
|
|
.id_table = tas2770_i2c_id,
|
|
};
|
|
|
|
diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
|
|
index 03fb50175d876..a6273ccb84013 100644
|
|
--- a/sound/soc/codecs/tlv320adcx140.c
|
|
+++ b/sound/soc/codecs/tlv320adcx140.c
|
|
@@ -154,7 +154,7 @@ static const struct regmap_config adcx140_i2c_regmap = {
|
|
};
|
|
|
|
/* Digital Volume control. From -100 to 27 dB in 0.5 dB steps */
|
|
-static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10000, 50, 0);
|
|
+static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10050, 50, 0);
|
|
|
|
/* ADC gain. From 0 to 42 dB in 1 dB steps */
|
|
static DECLARE_TLV_DB_SCALE(adc_tlv, 0, 100, 0);
|
|
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
|
|
index d087f3b20b1d5..50b66cf9ea8f9 100644
|
|
--- a/sound/soc/codecs/tlv320aic32x4.c
|
|
+++ b/sound/soc/codecs/tlv320aic32x4.c
|
|
@@ -665,7 +665,7 @@ static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
|
|
}
|
|
|
|
static int aic32x4_setup_clocks(struct snd_soc_component *component,
|
|
- unsigned int sample_rate)
|
|
+ unsigned int sample_rate, unsigned int channels)
|
|
{
|
|
u8 aosr;
|
|
u16 dosr;
|
|
@@ -753,7 +753,9 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
|
|
dosr);
|
|
|
|
clk_set_rate(clocks[5].clk,
|
|
- sample_rate * 32);
|
|
+ sample_rate * 32 *
|
|
+ channels);
|
|
+
|
|
return 0;
|
|
}
|
|
}
|
|
@@ -775,7 +777,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
|
|
u8 iface1_reg = 0;
|
|
u8 dacsetup_reg = 0;
|
|
|
|
- aic32x4_setup_clocks(component, params_rate(params));
|
|
+ aic32x4_setup_clocks(component, params_rate(params),
|
|
+ params_channels(params));
|
|
|
|
switch (params_width(params)) {
|
|
case 16:
|
|
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
|
|
index 519ca2e696372..18f62fde92537 100644
|
|
--- a/sound/soc/codecs/wm_adsp.c
|
|
+++ b/sound/soc/codecs/wm_adsp.c
|
|
@@ -2043,6 +2043,7 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
|
|
{
|
|
struct wm_coeff_ctl *ctl;
|
|
struct snd_kcontrol *kcontrol;
|
|
+ char ctl_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
|
|
int ret;
|
|
|
|
ctl = wm_adsp_get_ctl(dsp, name, type, alg);
|
|
@@ -2053,8 +2054,25 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
|
|
return -EINVAL;
|
|
|
|
ret = wm_coeff_write_ctrl(ctl, buf, len);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (ctl->flags & WMFW_CTL_FLAG_SYS)
|
|
+ return 0;
|
|
+
|
|
+ if (dsp->component->name_prefix)
|
|
+ snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s",
|
|
+ dsp->component->name_prefix, ctl->name);
|
|
+ else
|
|
+ snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s",
|
|
+ ctl->name);
|
|
+
|
|
+ kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl_name);
|
|
+ if (!kcontrol) {
|
|
+ adsp_err(dsp, "Can't find kcontrol %s\n", ctl_name);
|
|
+ return -EINVAL;
|
|
+ }
|
|
|
|
- kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl->name);
|
|
snd_ctl_notify(dsp->component->card->snd_card,
|
|
SNDRV_CTL_EVENT_MASK_VALUE, &kcontrol->id);
|
|
|
|
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
|
|
index 7031869a023a1..211e29a73a41a 100644
|
|
--- a/sound/soc/fsl/fsl_sai.c
|
|
+++ b/sound/soc/fsl/fsl_sai.c
|
|
@@ -694,7 +694,7 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
|
|
return 0;
|
|
}
|
|
|
|
-static struct snd_soc_dai_driver fsl_sai_dai = {
|
|
+static struct snd_soc_dai_driver fsl_sai_dai_template = {
|
|
.probe = fsl_sai_dai_probe,
|
|
.playback = {
|
|
.stream_name = "CPU-Playback",
|
|
@@ -966,12 +966,15 @@ static int fsl_sai_probe(struct platform_device *pdev)
|
|
return ret;
|
|
}
|
|
|
|
+ memcpy(&sai->cpu_dai_drv, &fsl_sai_dai_template,
|
|
+ sizeof(fsl_sai_dai_template));
|
|
+
|
|
/* Sync Tx with Rx as default by following old DT binding */
|
|
sai->synchronous[RX] = true;
|
|
sai->synchronous[TX] = false;
|
|
- fsl_sai_dai.symmetric_rates = 1;
|
|
- fsl_sai_dai.symmetric_channels = 1;
|
|
- fsl_sai_dai.symmetric_samplebits = 1;
|
|
+ sai->cpu_dai_drv.symmetric_rates = 1;
|
|
+ sai->cpu_dai_drv.symmetric_channels = 1;
|
|
+ sai->cpu_dai_drv.symmetric_samplebits = 1;
|
|
|
|
if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) &&
|
|
of_find_property(np, "fsl,sai-asynchronous", NULL)) {
|
|
@@ -988,9 +991,9 @@ static int fsl_sai_probe(struct platform_device *pdev)
|
|
/* Discard all settings for asynchronous mode */
|
|
sai->synchronous[RX] = false;
|
|
sai->synchronous[TX] = false;
|
|
- fsl_sai_dai.symmetric_rates = 0;
|
|
- fsl_sai_dai.symmetric_channels = 0;
|
|
- fsl_sai_dai.symmetric_samplebits = 0;
|
|
+ sai->cpu_dai_drv.symmetric_rates = 0;
|
|
+ sai->cpu_dai_drv.symmetric_channels = 0;
|
|
+ sai->cpu_dai_drv.symmetric_samplebits = 0;
|
|
}
|
|
|
|
if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
|
|
@@ -1019,7 +1022,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
|
|
pm_runtime_enable(&pdev->dev);
|
|
|
|
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
|
|
- &fsl_sai_dai, 1);
|
|
+ &sai->cpu_dai_drv, 1);
|
|
if (ret)
|
|
goto err_pm_disable;
|
|
|
|
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
|
|
index 6aba7d28f5f34..677ecfc1ec68f 100644
|
|
--- a/sound/soc/fsl/fsl_sai.h
|
|
+++ b/sound/soc/fsl/fsl_sai.h
|
|
@@ -180,6 +180,7 @@ struct fsl_sai {
|
|
unsigned int bclk_ratio;
|
|
|
|
const struct fsl_sai_soc_data *soc_data;
|
|
+ struct snd_soc_dai_driver cpu_dai_drv;
|
|
struct snd_dmaengine_dai_dma_data dma_params_rx;
|
|
struct snd_dmaengine_dai_dma_data dma_params_tx;
|
|
};
|
|
diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
|
|
index 15a27a2cd0cae..fad1eb6253d53 100644
|
|
--- a/sound/soc/fsl/imx-es8328.c
|
|
+++ b/sound/soc/fsl/imx-es8328.c
|
|
@@ -145,13 +145,13 @@ static int imx_es8328_probe(struct platform_device *pdev)
|
|
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
|
|
if (!data) {
|
|
ret = -ENOMEM;
|
|
- goto fail;
|
|
+ goto put_device;
|
|
}
|
|
|
|
comp = devm_kzalloc(dev, 3 * sizeof(*comp), GFP_KERNEL);
|
|
if (!comp) {
|
|
ret = -ENOMEM;
|
|
- goto fail;
|
|
+ goto put_device;
|
|
}
|
|
|
|
data->dev = dev;
|
|
@@ -182,12 +182,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
|
|
ret = snd_soc_of_parse_card_name(&data->card, "model");
|
|
if (ret) {
|
|
dev_err(dev, "Unable to parse card name\n");
|
|
- goto fail;
|
|
+ goto put_device;
|
|
}
|
|
ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
|
|
if (ret) {
|
|
dev_err(dev, "Unable to parse routing: %d\n", ret);
|
|
- goto fail;
|
|
+ goto put_device;
|
|
}
|
|
data->card.num_links = 1;
|
|
data->card.owner = THIS_MODULE;
|
|
@@ -196,10 +196,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
|
|
ret = snd_soc_register_card(&data->card);
|
|
if (ret) {
|
|
dev_err(dev, "Unable to register: %d\n", ret);
|
|
- goto fail;
|
|
+ goto put_device;
|
|
}
|
|
|
|
platform_set_drvdata(pdev, data);
|
|
+put_device:
|
|
+ put_device(&ssi_pdev->dev);
|
|
fail:
|
|
of_node_put(ssi_np);
|
|
of_node_put(codec_np);
|
|
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
|
|
index 13a48b0c35aef..11233c3aeadfb 100644
|
|
--- a/sound/soc/intel/boards/sof_rt5682.c
|
|
+++ b/sound/soc/intel/boards/sof_rt5682.c
|
|
@@ -118,6 +118,19 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
|
|
.driver_data = (void *)(SOF_RT5682_MCLK_EN |
|
|
SOF_RT5682_SSP_CODEC(0)),
|
|
},
|
|
+ {
|
|
+ .callback = sof_rt5682_quirk_cb,
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "Terrador"),
|
|
+ },
|
|
+ .driver_data = (void *)(SOF_RT5682_MCLK_EN |
|
|
+ SOF_RT5682_SSP_CODEC(0) |
|
|
+ SOF_SPEAKER_AMP_PRESENT |
|
|
+ SOF_MAX98373_SPEAKER_AMP_PRESENT |
|
|
+ SOF_RT5682_SSP_AMP(2) |
|
|
+ SOF_RT5682_NUM_HDMIDEV(4)),
|
|
+ },
|
|
{}
|
|
};
|
|
|
|
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
|
|
index e00a4af29c13f..f25da84f175ac 100644
|
|
--- a/sound/soc/qcom/lpass-cpu.c
|
|
+++ b/sound/soc/qcom/lpass-cpu.c
|
|
@@ -209,21 +209,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
|
|
return 0;
|
|
}
|
|
|
|
-static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream,
|
|
- struct snd_soc_dai *dai)
|
|
-{
|
|
- struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
|
|
- int ret;
|
|
-
|
|
- ret = regmap_write(drvdata->lpaif_map,
|
|
- LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id),
|
|
- 0);
|
|
- if (ret)
|
|
- dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
|
|
struct snd_soc_dai *dai)
|
|
{
|
|
@@ -304,7 +289,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
|
|
.startup = lpass_cpu_daiops_startup,
|
|
.shutdown = lpass_cpu_daiops_shutdown,
|
|
.hw_params = lpass_cpu_daiops_hw_params,
|
|
- .hw_free = lpass_cpu_daiops_hw_free,
|
|
.prepare = lpass_cpu_daiops_prepare,
|
|
.trigger = lpass_cpu_daiops_trigger,
|
|
};
|
|
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
|
|
index 34f7fd1bab1cf..693839deebfe8 100644
|
|
--- a/sound/soc/qcom/lpass-platform.c
|
|
+++ b/sound/soc/qcom/lpass-platform.c
|
|
@@ -61,7 +61,7 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
|
|
int ret, dma_ch, dir = substream->stream;
|
|
struct lpass_pcm_data *data;
|
|
|
|
- data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
|
|
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
|
|
if (!data)
|
|
return -ENOMEM;
|
|
|
|
@@ -118,6 +118,7 @@ static int lpass_platform_pcmops_close(struct snd_soc_component *component,
|
|
if (v->free_dma_channel)
|
|
v->free_dma_channel(drvdata, data->dma_ch);
|
|
|
|
+ kfree(data);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
|
|
index 6eaa00c210117..a5460155b3f64 100644
|
|
--- a/sound/soc/soc-topology.c
|
|
+++ b/sound/soc/soc-topology.c
|
|
@@ -592,6 +592,17 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
|
|
k->info = snd_soc_bytes_info_ext;
|
|
k->tlv.c = snd_soc_bytes_tlv_callback;
|
|
|
|
+ /*
|
|
+ * When a topology-based implementation abuses the
|
|
+ * control interface and uses bytes_ext controls of
|
|
+ * more than 512 bytes, we need to disable the size
|
|
+ * checks, otherwise accesses to such controls will
|
|
+ * return an -EINVAL error and prevent the card from
|
|
+ * being configured.
|
|
+ */
|
|
+ if (IS_ENABLED(CONFIG_SND_CTL_VALIDATION) && sbe->max > 512)
|
|
+ k->access |= SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK;
|
|
+
|
|
ext_ops = tplg->bytes_ext_ops;
|
|
num_ops = tplg->bytes_ext_ops_count;
|
|
for (i = 0; i < num_ops; i++) {
|
|
diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
|
|
index 186eea105bb15..009938d45ddd9 100644
|
|
--- a/sound/soc/sof/control.c
|
|
+++ b/sound/soc/sof/control.c
|
|
@@ -298,6 +298,10 @@ int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
|
|
const struct snd_ctl_tlv __user *tlvd =
|
|
(const struct snd_ctl_tlv __user *)binary_data;
|
|
|
|
+ /* make sure we have at least a header */
|
|
+ if (size < sizeof(struct snd_ctl_tlv))
|
|
+ return -EINVAL;
|
|
+
|
|
/*
|
|
* The beginning of bytes data contains a header from where
|
|
* the length (as bytes) is needed to know the correct copy
|
|
@@ -306,6 +310,13 @@ int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
|
|
if (copy_from_user(&header, tlvd, sizeof(const struct snd_ctl_tlv)))
|
|
return -EFAULT;
|
|
|
|
+ /* make sure TLV info is consistent */
|
|
+ if (header.length + sizeof(struct snd_ctl_tlv) > size) {
|
|
+ dev_err_ratelimited(scomp->dev, "error: inconsistent TLV, data %d + header %zu > %d\n",
|
|
+ header.length, sizeof(struct snd_ctl_tlv), size);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
/* be->max is coming from topology */
|
|
if (header.length > be->max) {
|
|
dev_err_ratelimited(scomp->dev, "error: Bytes data size %d exceeds max %d.\n",
|
|
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
|
|
index 63ca920c8e6e0..7152e6d1cf673 100644
|
|
--- a/sound/soc/sof/intel/hda.c
|
|
+++ b/sound/soc/sof/intel/hda.c
|
|
@@ -1179,7 +1179,13 @@ void hda_machine_select(struct snd_sof_dev *sdev)
|
|
|
|
mach = snd_soc_acpi_find_machine(desc->machines);
|
|
if (mach) {
|
|
- sof_pdata->tplg_filename = mach->sof_tplg_filename;
|
|
+ /*
|
|
+ * If tplg file name is overridden, use it instead of
|
|
+ * the one set in mach table
|
|
+ */
|
|
+ if (!sof_pdata->tplg_filename)
|
|
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
|
|
+
|
|
sof_pdata->machine = mach;
|
|
|
|
if (mach->link_mask) {
|
|
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
|
|
index aa3532ba14349..f3a8140773db5 100644
|
|
--- a/sound/soc/sof/sof-pci-dev.c
|
|
+++ b/sound/soc/sof/sof-pci-dev.c
|
|
@@ -35,8 +35,28 @@ static int sof_pci_debug;
|
|
module_param_named(sof_pci_debug, sof_pci_debug, int, 0444);
|
|
MODULE_PARM_DESC(sof_pci_debug, "SOF PCI debug options (0x0 all off)");
|
|
|
|
+static const char *sof_override_tplg_name;
|
|
+
|
|
#define SOF_PCI_DISABLE_PM_RUNTIME BIT(0)
|
|
|
|
+static int sof_tplg_cb(const struct dmi_system_id *id)
|
|
+{
|
|
+ sof_override_tplg_name = id->driver_data;
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static const struct dmi_system_id sof_tplg_table[] = {
|
|
+ {
|
|
+ .callback = sof_tplg_cb,
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "Terrador"),
|
|
+ },
|
|
+ .driver_data = "sof-tgl-rt5682-ssp0-max98373-ssp2.tplg",
|
|
+ },
|
|
+ {}
|
|
+};
|
|
+
|
|
static const struct dmi_system_id community_key_platforms[] = {
|
|
{
|
|
.ident = "Up Squared",
|
|
@@ -347,6 +367,10 @@ static int sof_pci_probe(struct pci_dev *pci,
|
|
sof_pdata->tplg_filename_prefix =
|
|
sof_pdata->desc->default_tplg_path;
|
|
|
|
+ dmi_check_system(sof_tplg_table);
|
|
+ if (sof_override_tplg_name)
|
|
+ sof_pdata->tplg_filename = sof_override_tplg_name;
|
|
+
|
|
#if IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
|
|
/* set callback to enable runtime_pm */
|
|
sof_pdata->sof_probe_complete = sof_pci_probe_complete;
|
|
diff --git a/sound/usb/format.c b/sound/usb/format.c
|
|
index 1b28d01d1f4cd..3bfead393aa34 100644
|
|
--- a/sound/usb/format.c
|
|
+++ b/sound/usb/format.c
|
|
@@ -406,6 +406,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
|
|
case USB_ID(0x0e41, 0x4242): /* Line6 Helix Rack */
|
|
case USB_ID(0x0e41, 0x4244): /* Line6 Helix LT */
|
|
case USB_ID(0x0e41, 0x4246): /* Line6 HX-Stomp */
|
|
+ case USB_ID(0x0e41, 0x4247): /* Line6 Pod Go */
|
|
case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
|
|
case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
|
|
case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
|
|
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
|
|
index e7818b44b48ee..6e5c907680b1a 100644
|
|
--- a/tools/build/Makefile.feature
|
|
+++ b/tools/build/Makefile.feature
|
|
@@ -38,8 +38,6 @@ FEATURE_TESTS_BASIC := \
|
|
get_current_dir_name \
|
|
gettid \
|
|
glibc \
|
|
- gtk2 \
|
|
- gtk2-infobar \
|
|
libbfd \
|
|
libcap \
|
|
libelf \
|
|
@@ -81,6 +79,8 @@ FEATURE_TESTS_EXTRA := \
|
|
compile-32 \
|
|
compile-x32 \
|
|
cplus-demangle \
|
|
+ gtk2 \
|
|
+ gtk2-infobar \
|
|
hello \
|
|
libbabeltrace \
|
|
libbfd-liberty \
|
|
@@ -110,7 +110,6 @@ FEATURE_DISPLAY ?= \
|
|
dwarf \
|
|
dwarf_getlocations \
|
|
glibc \
|
|
- gtk2 \
|
|
libbfd \
|
|
libcap \
|
|
libelf \
|
|
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
|
|
index 93b590d81209c..85d341e25eaec 100644
|
|
--- a/tools/build/feature/Makefile
|
|
+++ b/tools/build/feature/Makefile
|
|
@@ -89,7 +89,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
|
|
###############################
|
|
|
|
$(OUTPUT)test-all.bin:
|
|
- $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
|
|
+ $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd
|
|
|
|
$(OUTPUT)test-hello.bin:
|
|
$(BUILD)
|
|
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
|
|
index 5479e543b1947..d2623992ccd61 100644
|
|
--- a/tools/build/feature/test-all.c
|
|
+++ b/tools/build/feature/test-all.c
|
|
@@ -78,14 +78,6 @@
|
|
# include "test-libslang.c"
|
|
#undef main
|
|
|
|
-#define main main_test_gtk2
|
|
-# include "test-gtk2.c"
|
|
-#undef main
|
|
-
|
|
-#define main main_test_gtk2_infobar
|
|
-# include "test-gtk2-infobar.c"
|
|
-#undef main
|
|
-
|
|
#define main main_test_libbfd
|
|
# include "test-libbfd.c"
|
|
#undef main
|
|
@@ -205,8 +197,6 @@ int main(int argc, char *argv[])
|
|
main_test_libelf_getshdrstrndx();
|
|
main_test_libunwind();
|
|
main_test_libslang();
|
|
- main_test_gtk2(argc, argv);
|
|
- main_test_gtk2_infobar(argc, argv);
|
|
main_test_libbfd();
|
|
main_test_backtrace();
|
|
main_test_libnuma();
|
|
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
|
|
index 236c91aff48f8..3e71c2f69afe8 100644
|
|
--- a/tools/lib/bpf/libbpf.c
|
|
+++ b/tools/lib/bpf/libbpf.c
|
|
@@ -3677,6 +3677,36 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
|
|
return 0;
|
|
}
|
|
|
|
+static int init_map_slots(struct bpf_map *map)
|
|
+{
|
|
+ const struct bpf_map *targ_map;
|
|
+ unsigned int i;
|
|
+ int fd, err;
|
|
+
|
|
+ for (i = 0; i < map->init_slots_sz; i++) {
|
|
+ if (!map->init_slots[i])
|
|
+ continue;
|
|
+
|
|
+ targ_map = map->init_slots[i];
|
|
+ fd = bpf_map__fd(targ_map);
|
|
+ err = bpf_map_update_elem(map->fd, &i, &fd, 0);
|
|
+ if (err) {
|
|
+ err = -errno;
|
|
+ pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
|
|
+ map->name, i, targ_map->name,
|
|
+ fd, err);
|
|
+ return err;
|
|
+ }
|
|
+ pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
|
|
+ map->name, i, targ_map->name, fd);
|
|
+ }
|
|
+
|
|
+ zfree(&map->init_slots);
|
|
+ map->init_slots_sz = 0;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int
|
|
bpf_object__create_maps(struct bpf_object *obj)
|
|
{
|
|
@@ -3719,28 +3749,11 @@ bpf_object__create_maps(struct bpf_object *obj)
|
|
}
|
|
|
|
if (map->init_slots_sz) {
|
|
- for (j = 0; j < map->init_slots_sz; j++) {
|
|
- const struct bpf_map *targ_map;
|
|
- int fd;
|
|
-
|
|
- if (!map->init_slots[j])
|
|
- continue;
|
|
-
|
|
- targ_map = map->init_slots[j];
|
|
- fd = bpf_map__fd(targ_map);
|
|
- err = bpf_map_update_elem(map->fd, &j, &fd, 0);
|
|
- if (err) {
|
|
- err = -errno;
|
|
- pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
|
|
- map->name, j, targ_map->name,
|
|
- fd, err);
|
|
- goto err_out;
|
|
- }
|
|
- pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
|
|
- map->name, j, targ_map->name, fd);
|
|
+ err = init_map_slots(map);
|
|
+ if (err < 0) {
|
|
+ zclose(map->fd);
|
|
+ goto err_out;
|
|
}
|
|
- zfree(&map->init_slots);
|
|
- map->init_slots_sz = 0;
|
|
}
|
|
|
|
if (map->pin_path && !map->pinned) {
|
|
@@ -5253,7 +5266,7 @@ retry_load:
|
|
free(log_buf);
|
|
goto retry_load;
|
|
}
|
|
- ret = -errno;
|
|
+ ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
|
|
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
|
|
pr_warn("load bpf program failed: %s\n", cp);
|
|
pr_perm_msg(ret);
|
|
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
|
|
index 6a875a0f01bb0..233592c5a52c7 100644
|
|
--- a/tools/lib/perf/evlist.c
|
|
+++ b/tools/lib/perf/evlist.c
|
|
@@ -45,6 +45,9 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
|
|
if (!evsel->own_cpus || evlist->has_user_cpus) {
|
|
perf_cpu_map__put(evsel->cpus);
|
|
evsel->cpus = perf_cpu_map__get(evlist->cpus);
|
|
+ } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
|
|
+ perf_cpu_map__put(evsel->cpus);
|
|
+ evsel->cpus = perf_cpu_map__get(evlist->cpus);
|
|
} else if (evsel->cpus != evsel->own_cpus) {
|
|
perf_cpu_map__put(evsel->cpus);
|
|
evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
|
|
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
|
|
index 513633809c81e..ab6dbd8ef6cf6 100644
|
|
--- a/tools/perf/Makefile.config
|
|
+++ b/tools/perf/Makefile.config
|
|
@@ -716,12 +716,14 @@ ifndef NO_SLANG
|
|
endif
|
|
endif
|
|
|
|
-ifndef NO_GTK2
|
|
+ifdef GTK2
|
|
FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
|
|
+ $(call feature_check,gtk2)
|
|
ifneq ($(feature-gtk2), 1)
|
|
msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
|
|
NO_GTK2 := 1
|
|
else
|
|
+ $(call feature_check,gtk2-infobar)
|
|
ifeq ($(feature-gtk2-infobar), 1)
|
|
GTK_CFLAGS := -DHAVE_GTK_INFO_BAR_SUPPORT
|
|
endif
|
|
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
|
|
index 86dbb51bb2723..bc45b1a61d3a3 100644
|
|
--- a/tools/perf/Makefile.perf
|
|
+++ b/tools/perf/Makefile.perf
|
|
@@ -48,7 +48,7 @@ include ../scripts/utilities.mak
|
|
#
|
|
# Define NO_SLANG if you do not want TUI support.
|
|
#
|
|
-# Define NO_GTK2 if you do not want GTK+ GUI support.
|
|
+# Define GTK2 if you want GTK+ GUI support.
|
|
#
|
|
# Define NO_DEMANGLE if you do not want C++ symbol demangling.
|
|
#
|
|
@@ -384,7 +384,7 @@ ifneq ($(OUTPUT),)
|
|
CFLAGS += -I$(OUTPUT)
|
|
endif
|
|
|
|
-ifndef NO_GTK2
|
|
+ifdef GTK2
|
|
ALL_PROGRAMS += $(OUTPUT)libperf-gtk.so
|
|
GTK_IN := $(OUTPUT)gtk-in.o
|
|
endif
|
|
@@ -876,7 +876,7 @@ check: $(OUTPUT)common-cmds.h
|
|
|
|
### Installation rules
|
|
|
|
-ifndef NO_GTK2
|
|
+ifdef GTK2
|
|
install-gtk: $(OUTPUT)libperf-gtk.so
|
|
$(call QUIET_INSTALL, 'GTK UI') \
|
|
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \
|
|
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
|
|
index 6e2502de755a8..6494383687f89 100644
|
|
--- a/tools/perf/builtin-stat.c
|
|
+++ b/tools/perf/builtin-stat.c
|
|
@@ -1963,8 +1963,10 @@ static void setup_system_wide(int forks)
|
|
struct evsel *counter;
|
|
|
|
evlist__for_each_entry(evsel_list, counter) {
|
|
- if (!counter->core.system_wide)
|
|
+ if (!counter->core.system_wide &&
|
|
+ strcmp(counter->name, "duration_time")) {
|
|
return;
|
|
+ }
|
|
}
|
|
|
|
if (evsel_list->core.nr_entries)
|
|
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
|
|
index 4cbb64edc9983..83e8cd663b4e4 100644
|
|
--- a/tools/perf/builtin-trace.c
|
|
+++ b/tools/perf/builtin-trace.c
|
|
@@ -1762,7 +1762,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
|
|
if (table == NULL)
|
|
return -ENOMEM;
|
|
|
|
- memset(table + trace->sctbl->syscalls.max_id, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
|
|
+ // Need to memset from offset 0 and +1 members if brand new
|
|
+ if (trace->syscalls.table == NULL)
|
|
+ memset(table, 0, (id + 1) * sizeof(*sc));
|
|
+ else
|
|
+ memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
|
|
|
|
trace->syscalls.table = table;
|
|
trace->sctbl->syscalls.max_id = id;
|
|
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
|
|
index 05cf2af9e2c27..d09ec2f030719 100644
|
|
--- a/tools/perf/builtin-version.c
|
|
+++ b/tools/perf/builtin-version.c
|
|
@@ -60,7 +60,6 @@ static void library_status(void)
|
|
STATUS(HAVE_DWARF_SUPPORT, dwarf);
|
|
STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations);
|
|
STATUS(HAVE_GLIBC_SUPPORT, glibc);
|
|
- STATUS(HAVE_GTK2_SUPPORT, gtk2);
|
|
#ifndef HAVE_SYSCALL_TABLE_SUPPORT
|
|
STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit);
|
|
#endif
|
|
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
|
|
index 9357b5f62c273..bc88175e377ce 100644
|
|
--- a/tools/perf/util/intel-pt.c
|
|
+++ b/tools/perf/util/intel-pt.c
|
|
@@ -1071,6 +1071,8 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
|
|
|
|
if (queue->tid == -1 || pt->have_sched_switch) {
|
|
ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
|
|
+ if (ptq->tid == -1)
|
|
+ ptq->pid = -1;
|
|
thread__zput(ptq->thread);
|
|
}
|
|
|
|
@@ -2561,10 +2563,8 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
|
|
tid = sample->tid;
|
|
}
|
|
|
|
- if (tid == -1) {
|
|
- pr_err("context_switch event has no tid\n");
|
|
- return -EINVAL;
|
|
- }
|
|
+ if (tid == -1)
|
|
+ intel_pt_log("context_switch event has no tid\n");
|
|
|
|
intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
|
|
cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
|
|
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
|
|
index 8995092d541ec..3b796dd5e5772 100644
|
|
--- a/tools/testing/radix-tree/idr-test.c
|
|
+++ b/tools/testing/radix-tree/idr-test.c
|
|
@@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg)
|
|
return NULL;
|
|
}
|
|
|
|
+static void *ida_leak_fn(void *arg)
|
|
+{
|
|
+ struct ida *ida = arg;
|
|
+ time_t s = time(NULL);
|
|
+ int i, ret;
|
|
+
|
|
+ rcu_register_thread();
|
|
+
|
|
+ do for (i = 0; i < 1000; i++) {
|
|
+ ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
|
|
+ if (ret >= 0)
|
|
+ ida_free(ida, 128);
|
|
+ } while (time(NULL) < s + 2);
|
|
+
|
|
+ rcu_unregister_thread();
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
void ida_thread_tests(void)
|
|
{
|
|
+ DEFINE_IDA(ida);
|
|
pthread_t threads[20];
|
|
int i;
|
|
|
|
@@ -536,6 +555,16 @@ void ida_thread_tests(void)
|
|
|
|
while (i--)
|
|
pthread_join(threads[i], NULL);
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(threads); i++)
|
|
+ if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
|
|
+ perror("creating ida thread");
|
|
+ exit(1);
|
|
+ }
|
|
+
|
|
+ while (i--)
|
|
+ pthread_join(threads[i], NULL);
|
|
+ assert(ida_is_empty(&ida));
|
|
}
|
|
|
|
void ida_tests(void)
|
|
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
|
|
index 944ad4721c83c..da14eaac71d03 100644
|
|
--- a/tools/testing/selftests/bpf/bench.c
|
|
+++ b/tools/testing/selftests/bpf/bench.c
|
|
@@ -311,7 +311,6 @@ extern const struct bench bench_rename_kretprobe;
|
|
extern const struct bench bench_rename_rawtp;
|
|
extern const struct bench bench_rename_fentry;
|
|
extern const struct bench bench_rename_fexit;
|
|
-extern const struct bench bench_rename_fmodret;
|
|
extern const struct bench bench_trig_base;
|
|
extern const struct bench bench_trig_tp;
|
|
extern const struct bench bench_trig_rawtp;
|
|
@@ -332,7 +331,6 @@ static const struct bench *benchs[] = {
|
|
&bench_rename_rawtp,
|
|
&bench_rename_fentry,
|
|
&bench_rename_fexit,
|
|
- &bench_rename_fmodret,
|
|
&bench_trig_base,
|
|
&bench_trig_tp,
|
|
&bench_trig_rawtp,
|
|
@@ -462,4 +460,3 @@ int main(int argc, char **argv)
|
|
|
|
return 0;
|
|
}
|
|
-
|
|
diff --git a/tools/testing/selftests/bpf/benchs/bench_rename.c b/tools/testing/selftests/bpf/benchs/bench_rename.c
|
|
index e74cff40f4fea..a967674098ada 100644
|
|
--- a/tools/testing/selftests/bpf/benchs/bench_rename.c
|
|
+++ b/tools/testing/selftests/bpf/benchs/bench_rename.c
|
|
@@ -106,12 +106,6 @@ static void setup_fexit()
|
|
attach_bpf(ctx.skel->progs.prog5);
|
|
}
|
|
|
|
-static void setup_fmodret()
|
|
-{
|
|
- setup_ctx();
|
|
- attach_bpf(ctx.skel->progs.prog6);
|
|
-}
|
|
-
|
|
static void *consumer(void *input)
|
|
{
|
|
return NULL;
|
|
@@ -182,14 +176,3 @@ const struct bench bench_rename_fexit = {
|
|
.report_progress = hits_drops_report_progress,
|
|
.report_final = hits_drops_report_final,
|
|
};
|
|
-
|
|
-const struct bench bench_rename_fmodret = {
|
|
- .name = "rename-fmodret",
|
|
- .validate = validate,
|
|
- .setup = setup_fmodret,
|
|
- .producer_thread = producer,
|
|
- .consumer_thread = consumer,
|
|
- .measure = measure,
|
|
- .report_progress = hits_drops_report_progress,
|
|
- .report_final = hits_drops_report_final,
|
|
-};
|
|
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
|
|
index 47fa04adc1471..21c2d265c3e8e 100644
|
|
--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
|
|
+++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
|
|
@@ -265,7 +265,7 @@ void test_sk_assign(void)
|
|
TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false),
|
|
TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
|
|
};
|
|
- int server = -1;
|
|
+ __s64 server = -1;
|
|
int server_map;
|
|
int self_net;
|
|
|
|
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
|
|
index 5f54c6aec7f07..b25c9c45c1484 100644
|
|
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
|
|
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
|
|
@@ -45,9 +45,9 @@ static int getsetsockopt(void)
|
|
goto err;
|
|
}
|
|
|
|
- if (*(int *)big_buf != 0x08) {
|
|
+ if (*big_buf != 0x08) {
|
|
log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
|
|
- *(int *)big_buf);
|
|
+ (int)*big_buf);
|
|
goto err;
|
|
}
|
|
|
|
diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
|
|
index 2702df2b23433..9966685866fdf 100644
|
|
--- a/tools/testing/selftests/bpf/prog_tests/test_overhead.c
|
|
+++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
|
|
@@ -61,10 +61,9 @@ void test_test_overhead(void)
|
|
const char *raw_tp_name = "raw_tp/task_rename";
|
|
const char *fentry_name = "fentry/__set_task_comm";
|
|
const char *fexit_name = "fexit/__set_task_comm";
|
|
- const char *fmodret_name = "fmod_ret/__set_task_comm";
|
|
const char *kprobe_func = "__set_task_comm";
|
|
struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
|
|
- struct bpf_program *fentry_prog, *fexit_prog, *fmodret_prog;
|
|
+ struct bpf_program *fentry_prog, *fexit_prog;
|
|
struct bpf_object *obj;
|
|
struct bpf_link *link;
|
|
int err, duration = 0;
|
|
@@ -97,11 +96,6 @@ void test_test_overhead(void)
|
|
if (CHECK(!fexit_prog, "find_probe",
|
|
"prog '%s' not found\n", fexit_name))
|
|
goto cleanup;
|
|
- fmodret_prog = bpf_object__find_program_by_title(obj, fmodret_name);
|
|
- if (CHECK(!fmodret_prog, "find_probe",
|
|
- "prog '%s' not found\n", fmodret_name))
|
|
- goto cleanup;
|
|
-
|
|
err = bpf_object__load(obj);
|
|
if (CHECK(err, "obj_load", "err %d\n", err))
|
|
goto cleanup;
|
|
@@ -148,12 +142,6 @@ void test_test_overhead(void)
|
|
test_run("fexit");
|
|
bpf_link__destroy(link);
|
|
|
|
- /* attach fmod_ret */
|
|
- link = bpf_program__attach_trace(fmodret_prog);
|
|
- if (CHECK(IS_ERR(link), "attach fmod_ret", "err %ld\n", PTR_ERR(link)))
|
|
- goto cleanup;
|
|
- test_run("fmod_ret");
|
|
- bpf_link__destroy(link);
|
|
cleanup:
|
|
prctl(PR_SET_NAME, comm, 0L, 0L, 0L);
|
|
bpf_object__close(obj);
|
|
diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c
|
|
index 42403d088abc9..abb7344b531f4 100644
|
|
--- a/tools/testing/selftests/bpf/progs/test_overhead.c
|
|
+++ b/tools/testing/selftests/bpf/progs/test_overhead.c
|
|
@@ -39,10 +39,4 @@ int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec)
|
|
return 0;
|
|
}
|
|
|
|
-SEC("fmod_ret/__set_task_comm")
|
|
-int BPF_PROG(prog6, struct task_struct *tsk, const char *buf, bool exec)
|
|
-{
|
|
- return !tsk;
|
|
-}
|
|
-
|
|
char _license[] SEC("license") = "GPL";
|
|
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
|
|
index 458b0d69133e4..553a282d816ab 100644
|
|
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
|
|
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
|
|
@@ -18,11 +18,11 @@
|
|
#define MAX_ULONG_STR_LEN 7
|
|
#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
|
|
|
|
+const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
|
|
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
|
|
{
|
|
- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
|
|
unsigned char i;
|
|
- char name[64];
|
|
+ char name[sizeof(tcp_mem_name)];
|
|
int ret;
|
|
|
|
memset(name, 0, sizeof(name));
|
|
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
|
|
index b2e6f9b0894d8..2b64bc563a12e 100644
|
|
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
|
|
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
|
|
@@ -18,11 +18,11 @@
|
|
#define MAX_ULONG_STR_LEN 7
|
|
#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
|
|
|
|
+const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
|
|
static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
|
|
{
|
|
- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
|
|
unsigned char i;
|
|
- char name[64];
|
|
+ char name[sizeof(tcp_mem_name)];
|
|
int ret;
|
|
|
|
memset(name, 0, sizeof(name));
|
|
diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
|
|
index 5611b564d3b1c..f54b2293c490f 100644
|
|
--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c
|
|
+++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
|
|
@@ -19,12 +19,14 @@ SEC("tp/syscalls/sys_enter_nanosleep")
|
|
int handle__tp(struct trace_event_raw_sys_enter *args)
|
|
{
|
|
struct __kernel_timespec *ts;
|
|
+ long tv_nsec;
|
|
|
|
if (args->id != __NR_nanosleep)
|
|
return 0;
|
|
|
|
ts = (void *)args->args[0];
|
|
- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
|
|
+ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
|
|
+ tv_nsec != MY_TV_NSEC)
|
|
return 0;
|
|
|
|
tp_called = true;
|
|
@@ -35,12 +37,14 @@ SEC("raw_tp/sys_enter")
|
|
int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
|
|
{
|
|
struct __kernel_timespec *ts;
|
|
+ long tv_nsec;
|
|
|
|
if (id != __NR_nanosleep)
|
|
return 0;
|
|
|
|
ts = (void *)PT_REGS_PARM1_CORE(regs);
|
|
- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
|
|
+ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
|
|
+ tv_nsec != MY_TV_NSEC)
|
|
return 0;
|
|
|
|
raw_tp_called = true;
|
|
@@ -51,12 +55,14 @@ SEC("tp_btf/sys_enter")
|
|
int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
|
|
{
|
|
struct __kernel_timespec *ts;
|
|
+ long tv_nsec;
|
|
|
|
if (id != __NR_nanosleep)
|
|
return 0;
|
|
|
|
ts = (void *)PT_REGS_PARM1_CORE(regs);
|
|
- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
|
|
+ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
|
|
+ tv_nsec != MY_TV_NSEC)
|
|
return 0;
|
|
|
|
tp_btf_called = true;
|
|
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
|
|
index 7449a4b8f1f9a..9098f1e7433fd 100644
|
|
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
|
|
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
|
|
@@ -25,12 +25,12 @@ echo 'wakeup_latency u64 lat pid_t pid' >> synthetic_events
|
|
echo 'hist:keys=pid:ts1=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger
|
|
echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts1:onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,next_pid) if next_comm=="ping"' > events/sched/sched_switch/trigger
|
|
|
|
-echo 'waking+wakeup_latency u64 lat; pid_t pid' >> synthetic_events
|
|
-echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking+wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
|
|
-echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking+wakeup_latency/trigger
|
|
+echo 'waking_plus_wakeup_latency u64 lat; pid_t pid' >> synthetic_events
|
|
+echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking_plus_wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
|
|
+echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking_plus_wakeup_latency/trigger
|
|
|
|
ping $LOCALHOST -c 3
|
|
-if ! grep -q "pid:" events/synthetic/waking+wakeup_latency/hist; then
|
|
+if ! grep -q "pid:" events/synthetic/waking_plus_wakeup_latency/hist; then
|
|
fail "Failed to create combined histogram"
|
|
fi
|
|
|
|
diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh
|
|
index 8383eb89d88a9..bb7a1775307b8 100755
|
|
--- a/tools/testing/selftests/lkdtm/run.sh
|
|
+++ b/tools/testing/selftests/lkdtm/run.sh
|
|
@@ -82,7 +82,7 @@ dmesg > "$DMESG"
|
|
($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
|
|
|
|
# Record and dump the results
|
|
-dmesg | diff --changed-group-format='%>' --unchanged-group-format='' "$DMESG" - > "$LOG" || true
|
|
+dmesg | comm --nocheck-order -13 "$DMESG" - > "$LOG" || true
|
|
|
|
cat "$LOG"
|
|
# Check for expected output
|
|
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
|
|
index 3b42c06b59858..c5e50ab2ced60 100644
|
|
--- a/tools/testing/selftests/net/config
|
|
+++ b/tools/testing/selftests/net/config
|
|
@@ -31,3 +31,4 @@ CONFIG_NET_SCH_ETF=m
|
|
CONFIG_NET_SCH_NETEM=y
|
|
CONFIG_TEST_BLACKHOLE_DEV=m
|
|
CONFIG_KALLSYMS=y
|
|
+CONFIG_NET_FOU=m
|
|
diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
|
|
index a0b5f57d6bd31..0727e2012b685 100755
|
|
--- a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
|
|
+++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
|
|
@@ -215,10 +215,16 @@ switch_create()
|
|
|
|
bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
|
|
bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
|
|
+
|
|
+ sysctl_set net.ipv4.conf.all.rp_filter 0
|
|
+ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
|
|
+ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
|
|
}
|
|
|
|
switch_destroy()
|
|
{
|
|
+ sysctl_restore net.ipv4.conf.all.rp_filter
|
|
+
|
|
bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
|
|
bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
|
|
|
|
@@ -359,6 +365,10 @@ ns_switch_create()
|
|
|
|
bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
|
|
bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
|
|
+
|
|
+ sysctl_set net.ipv4.conf.all.rp_filter 0
|
|
+ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
|
|
+ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
|
|
}
|
|
export -f ns_switch_create
|
|
|
|
diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
|
|
index 1209031bc794d..5d97fa347d75a 100755
|
|
--- a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
|
|
+++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
|
|
@@ -237,10 +237,16 @@ switch_create()
|
|
|
|
bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
|
|
bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
|
|
+
|
|
+ sysctl_set net.ipv4.conf.all.rp_filter 0
|
|
+ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
|
|
+ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
|
|
}
|
|
|
|
switch_destroy()
|
|
{
|
|
+ sysctl_restore net.ipv4.conf.all.rp_filter
|
|
+
|
|
bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
|
|
bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
|
|
|
|
@@ -402,6 +408,10 @@ ns_switch_create()
|
|
|
|
bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
|
|
bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
|
|
+
|
|
+ sysctl_set net.ipv4.conf.all.rp_filter 0
|
|
+ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
|
|
+ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
|
|
}
|
|
export -f ns_switch_create
|
|
|
|
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
|
|
index acf02e156d20f..ed163e4ad4344 100755
|
|
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
|
|
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
|
|
@@ -421,9 +421,9 @@ do_transfer()
|
|
duration=$(printf "(duration %05sms)" $duration)
|
|
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
|
|
echo "$duration [ FAIL ] client exit code $retc, server $rets" 1>&2
|
|
- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
|
|
+ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
|
|
ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
|
|
- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
|
|
+ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
|
|
ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
|
|
|
|
cat "$capout"
|
|
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
|
|
index dd42c2f692d01..9cb0c6af326ba 100755
|
|
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
|
|
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
|
|
@@ -167,9 +167,9 @@ do_transfer()
|
|
|
|
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
|
|
echo " client exit code $retc, server $rets" 1>&2
|
|
- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
|
|
+ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
|
|
ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
|
|
- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
|
|
+ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
|
|
ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
|
|
|
|
cat "$capout"
|
|
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
|
|
index bdbf4b3125b6a..28ea3753da207 100755
|
|
--- a/tools/testing/selftests/net/rtnetlink.sh
|
|
+++ b/tools/testing/selftests/net/rtnetlink.sh
|
|
@@ -521,6 +521,11 @@ kci_test_encap_fou()
|
|
return $ksft_skip
|
|
fi
|
|
|
|
+ if ! /sbin/modprobe -q -n fou; then
|
|
+ echo "SKIP: module fou is not found"
|
|
+ return $ksft_skip
|
|
+ fi
|
|
+ /sbin/modprobe -q fou
|
|
ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
|
|
if [ $? -ne 0 ];then
|
|
echo "FAIL: can't add fou port 7777, skipping test"
|
|
diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
|
|
index 8a8d0f456946c..0d783e1065c86 100755
|
|
--- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
|
|
+++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
|
|
@@ -1,17 +1,19 @@
|
|
#!/bin/sh
|
|
# SPDX-License-Identifier: GPL-2.0-only
|
|
|
|
+KSELFTESTS_SKIP=4
|
|
+
|
|
. ./eeh-functions.sh
|
|
|
|
if ! eeh_supported ; then
|
|
echo "EEH not supported on this system, skipping"
|
|
- exit 0;
|
|
+ exit $KSELFTESTS_SKIP;
|
|
fi
|
|
|
|
if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
|
|
[ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
|
|
echo "debugfs EEH testing files are missing. Is debugfs mounted?"
|
|
- exit 1;
|
|
+ exit $KSELFTESTS_SKIP;
|
|
fi
|
|
|
|
pre_lspci=`mktemp`
|
|
@@ -84,4 +86,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
|
|
lspci | diff -u $pre_lspci -
|
|
rm -f $pre_lspci
|
|
|
|
-exit $failed
|
|
+test "$failed" == 0
|
|
+exit $?
|
|
diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
|
|
index 3ba674b64fa9f..69dd0d1aa30b2 100644
|
|
--- a/tools/testing/selftests/vm/config
|
|
+++ b/tools/testing/selftests/vm/config
|
|
@@ -3,3 +3,4 @@ CONFIG_USERFAULTFD=y
|
|
CONFIG_TEST_VMALLOC=m
|
|
CONFIG_DEVICE_PRIVATE=y
|
|
CONFIG_TEST_HMM=m
|
|
+CONFIG_GUP_BENCHMARK=y