mirror of https://github.com/Fishwaldo/build.git (synced 2025-03-28 01:31:35 +00:00)
Diff: 4927 lines, 168 KiB
diff --git a/Makefile b/Makefile
index a8c772b299aa..1bd1b17cd207 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 41
+SUBLEVEL = 42
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@@ -707,12 +707,9 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
endif

-ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED
-KBUILD_CFLAGS += -Wno-maybe-uninitialized
-endif
-
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races)

include scripts/Makefile.kcov
include scripts/Makefile.gcc-plugins
@@ -860,6 +857,17 @@ KBUILD_CFLAGS += -Wno-pointer-sign
# disable stringop warnings in gcc 8+
KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)

+# We'll want to enable this eventually, but it's not going away for 5.7 at least
+KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds)
+KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
+
+# Another good warning that we'll want to enable eventually
+KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
+
+# Enabled with W=2, disabled by default as noisy
+KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized)
+
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)

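Note, for context only (not part of the patch): -fno-allow-store-data-races (GCC 10+) and the older --param=allow-store-data-races=0 both forbid the compiler from inventing stores that other threads could observe. A minimal C sketch of the forbidden transform, with hypothetical names:

/* illustrative only -- not kernel code */
void maybe_set(int cond, int *shared, int val)
{
        if (cond)
                *shared = val;
        /*
         * Without the option the compiler may speculate the store, e.g.
         *      tmp = *shared;  *shared = cond ? val : tmp;
         * which writes *shared even when cond is false and can race with a
         * concurrent writer; the flag keeps the store conditional.
         */
}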
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index c6be65249f42..a6ef3d137c7a 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -172,6 +172,7 @@
#address-cells = <1>;
ranges = <0x51000000 0x51000000 0x3000
0x0 0x20000000 0x10000000>;
+ dma-ranges;
/**
* To enable PCI endpoint mode, disable the pcie1_rc
* node and enable pcie1_ep mode.
@@ -185,7 +186,6 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
- dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
@@ -230,6 +230,7 @@
#address-cells = <1>;
ranges = <0x51800000 0x51800000 0x3000
0x0 0x30000000 0x10000000>;
+ dma-ranges;
status = "disabled";
pcie2_rc: pcie@51800000 {
reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>;
@@ -240,7 +241,6 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
- dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
index 0cd75dadf292..188639738dc3 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
@@ -75,8 +75,8 @@
imx27-phycard-s-rdk {
pinctrl_i2c1: i2c1grp {
fsl,pins = <
- MX27_PAD_I2C2_SDA__I2C2_SDA 0x0
- MX27_PAD_I2C2_SCL__I2C2_SCL 0x0
+ MX27_PAD_I2C_DATA__I2C_DATA 0x0
+ MX27_PAD_I2C_CLK__I2C_CLK 0x0
>;
};

diff --git a/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts
index 0d594e4bd559..a1173bf5bff5 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts
+++ b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts
@@ -38,7 +38,7 @@
};

&switch_ports {
- /delete-node/ port@2;
+ /delete-node/ port@3;
};

&touchscreen {
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index dd865f3c2eda..4447f45f0cba 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -131,7 +131,14 @@
cmt1: timer@e6130000 {
compatible = "renesas,r8a73a4-cmt1", "renesas,rcar-gen2-cmt1";
reg = <0 0xe6130000 0 0x1004>;
- interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A73A4_CLK_CMT1>;
clock-names = "fck";
power-domains = <&pd_c5>;
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index 12ffe73bf2bc..155f58e6d4e8 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -479,7 +479,7 @@
cpg_clocks: cpg_clocks@e6150000 {
compatible = "renesas,r8a7740-cpg-clocks";
reg = <0xe6150000 0x10000>;
- clocks = <&extal1_clk>, <&extalr_clk>;
+ clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>;
#clock-cells = <1>;
clock-output-names = "system", "pllc0", "pllc1",
"pllc2", "r",
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 0ee8a369c547..2199a54c720c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -2365,7 +2365,7 @@
reg = <0x0 0xff400000 0x0 0x40000>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
- clock-names = "ddr";
+ clock-names = "otg";
phys = <&usb2_phy1>;
phy-names = "usb2-phy";
dr_mode = "peripheral";
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
index 554863429aa6..e2094575f528 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
@@ -152,6 +152,10 @@
clock-latency = <50000>;
};

+&frddr_a {
+ status = "okay";
+};
+
&frddr_b {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 43c4db312146..ac3a3b333efa 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -616,7 +616,7 @@
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>,
- <&clk IMX8MN_CLK_SDMA1_ROOT>;
+ <&clk IMX8MN_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 461a47ea656d..e81cd83b138b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -1318,6 +1318,7 @@
ipmmu_vip0: mmu@e7b00000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xe7b00000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 4>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
#iommu-cells = <1>;
};
@@ -1325,6 +1326,7 @@
ipmmu_vip1: mmu@e7960000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xe7960000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 11>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
#iommu-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
index 49c4b96da3d4..6abc6f4a86cf 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
@@ -92,7 +92,7 @@
&i2c1 {
status = "okay";

- rk805: rk805@18 {
+ rk805: pmic@18 {
compatible = "rockchip,rk805";
reg = <0x18>;
interrupt-parent = <&gpio2>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 62936b432f9a..304fad1a0b57 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -169,7 +169,7 @@
&i2c1 {
status = "okay";

- rk805: rk805@18 {
+ rk805: pmic@18 {
compatible = "rockchip,rk805";
reg = <0x18>;
interrupt-parent = <&gpio2>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index cede1ad81be2..cd97016b7c18 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -410,7 +410,7 @@
reset-names = "usb3-otg";
status = "disabled";

- usbdrd_dwc3_0: dwc3 {
+ usbdrd_dwc3_0: usb@fe800000 {
compatible = "snps,dwc3";
reg = <0x0 0xfe800000 0x0 0x100000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -446,7 +446,7 @@
reset-names = "usb3-otg";
status = "disabled";

- usbdrd_dwc3_1: dwc3 {
+ usbdrd_dwc3_1: usb@fe900000 {
compatible = "snps,dwc3";
reg = <0x0 0xfe900000 0x0 0x100000>;
interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 0df8493624e0..cc049ff5c6a5 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -189,6 +189,7 @@ void machine_kexec(struct kimage *kimage)
* the offline CPUs. Therefore, we must use the __* variant here.
*/
__flush_icache_range((uintptr_t)reboot_code_buffer,
+ (uintptr_t)reboot_code_buffer +
arm64_relocate_new_kernel_size);

/* Flush the kimage list and its buffers. */
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 91c8f1d9bcee..1a2c80e8be84 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -75,7 +75,7 @@

.macro kuap_check current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
- lwz \gpr2, KUAP(thread)
+ lwz \gpr, KUAP(thread)
999: twnei \gpr, 0
EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 33b16f4212f7..a4ee3a0e7d20 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -33,15 +33,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)

# We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO. With ld -R we can then refer to
-# these symbols in the kernel code rather than hand-coded addresses.
+# table and layout of the linked DSO. With ld --just-symbols we can then
+# refer to these symbols in the kernel code rather than hand-coded addresses.

SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-Wl,--build-id -Wl,--hash-style=both
$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
$(call if_changed,vdsold)

-LDFLAGS_vdso-syms.o := -r -R
+LDFLAGS_vdso-syms.o := -r --just-symbols
$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
$(call if_changed,ld)

diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 91e29b6a86a5..9804a7957f4e 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -55,8 +55,13 @@
/*
* Initialize the stackprotector canary value.
*
- * NOTE: this must only be called from functions that never return,
+ * NOTE: this must only be called from functions that never return
* and it must always be inlined.
+ *
+ * In addition, it should be called from a compilation unit for which
+ * stack protector is disabled. Alternatively, the caller should not end
+ * with a function call which gets tail-call optimized as that would
+ * lead to checking a modified canary value.
*/
static __always_inline void boot_init_stack_canary(void)
{
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 69881b2d446c..9674321ce3a3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -262,6 +262,14 @@ static void notrace start_secondary(void *unused)

wmb();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+
+ /*
+ * Prevent tail call to cpu_startup_entry() because the stack protector
+ * guard has been changed a couple of function calls up, in
+ * boot_init_stack_canary() and must not be checked before tail calling
+ * another function.
+ */
+ prevent_tail_call_optimization();
}

/**
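For context (illustrative sketch, not from the patch): with stack protector enabled the compiler saves a reference canary in the prologue and checks it in the epilogue, so a function that rewrites the canary mid-flight must never reach its own epilogue:

/* simplified stand-in for start_secondary()/cpu_bringup_and_idle() */
void secondary_entry(void)
{
        /* prologue: compiler stores the current canary in this stack frame */
        boot_init_stack_canary();                /* installs a fresh canary  */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); /* never returns            */
        /*
         * If the call above were tail-call optimized, the epilogue (and its
         * canary check) would run first and compare the stale saved value
         * against the freshly installed one -- a guaranteed false-positive
         * stack-smashing panic.  prevent_tail_call_optimization() keeps it
         * a real call, so the epilogue is never reached.
         */
        prevent_tail_call_optimization();
}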
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index fb37221a1532..647e6af0883d 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -608,23 +608,23 @@ EXPORT_SYMBOL_GPL(unwind_next_frame);
void __unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs, unsigned long *first_frame)
{
- if (!orc_init)
- goto done;
-
memset(state, 0, sizeof(*state));
state->task = task;

+ if (!orc_init)
+ goto err;
+
/*
* Refuse to unwind the stack of a task while it's executing on another
* CPU. This check is racy, but that's ok: the unwinder has other
* checks to prevent it from going off the rails.
*/
if (task_on_another_cpu(task))
- goto done;
+ goto err;

if (regs) {
if (user_mode(regs))
- goto done;
+ goto the_end;

state->ip = regs->ip;
state->sp = regs->sp;
@@ -657,6 +657,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
* generate some kind of backtrace if this happens.
*/
void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+ state->error = true;
if (get_stack_info(next_page, state->task, &state->stack_info,
&state->stack_mask))
return;
@@ -682,8 +683,9 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,

return;

-done:
+err:
+ state->error = true;
+the_end:
state->stack_info.type = STACK_TYPE_UNKNOWN;
- return;
}
EXPORT_SYMBOL_GPL(__unwind_start);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 56a0f9c18892..41408065574f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3682,7 +3682,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
unsigned bank_num = mcg_cap & 0xff, bank;

r = -EINVAL;
- if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
+ if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
goto out;
if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
goto out;
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 802ee5bba66c..0cebe5db691d 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -92,6 +92,7 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
cpu_bringup();
boot_init_stack_canary();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+ prevent_tail_call_optimization();
}

void xen_smp_intr_free_pv(unsigned int cpu)
diff --git a/crypto/lrw.c b/crypto/lrw.c
index be829f6afc8e..3d40e1f32bea 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -289,7 +289,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
crypto_free_skcipher(ctx->child);
}

-static void free(struct skcipher_instance *inst)
+static void free_inst(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
@@ -401,7 +401,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = encrypt;
inst->alg.decrypt = decrypt;

- inst->free = free;
+ inst->free = free_inst;

err = skcipher_register_instance(tmpl, inst);
if (err)
diff --git a/crypto/xts.c b/crypto/xts.c
index ab117633d64e..9d72429f666e 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -328,7 +328,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
crypto_free_cipher(ctx->tweak);
}

-static void free(struct skcipher_instance *inst)
+static void free_inst(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
@@ -439,7 +439,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = encrypt;
inst->alg.decrypt = decrypt;

- inst->free = free;
+ inst->free = free_inst;

err = skcipher_register_instance(tmpl, inst);
if (err)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 5e6c8bfc6612..5b53a66d403d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1962,23 +1962,31 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}

-bool acpi_ec_other_gpes_active(void)
-{
- return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
-}
-
bool acpi_ec_dispatch_gpe(void)
{
u32 ret;

if (!first_ec)
+ return acpi_any_gpe_status_set(U32_MAX);
+
+ /*
+ * Report wakeup if the status bit is set for any enabled GPE other
+ * than the EC one.
+ */
+ if (acpi_any_gpe_status_set(first_ec->gpe))
+ return true;
+
+ if (ec_no_wakeup)
return false;

+ /*
+ * Dispatch the EC GPE in-band, but do not report wakeup in any case
+ * to allow the caller to process events properly after that.
+ */
ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
- if (ret == ACPI_INTERRUPT_HANDLED) {
+ if (ret == ACPI_INTERRUPT_HANDLED)
pm_pr_dbg("EC GPE dispatched\n");
- return true;
- }
+
return false;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index cbf7f34c3ce7..afe6636f9ad3 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -201,7 +201,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);

#ifdef CONFIG_PM_SLEEP
void acpi_ec_flush_work(void);
-bool acpi_ec_other_gpes_active(void);
bool acpi_ec_dispatch_gpe(void);
#endif

diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index edad89e58c58..85514c0f3aa5 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1010,20 +1010,10 @@ static bool acpi_s2idle_wake(void)
if (acpi_check_wakeup_handlers())
return true;

- /*
- * If the status bit is set for any enabled GPE other than the
- * EC one, the wakeup is regarded as a genuine one.
- */
- if (acpi_ec_other_gpes_active())
+ /* Check non-EC GPE wakeups and dispatch the EC GPE. */
+ if (acpi_ec_dispatch_gpe())
return true;

- /*
- * If the EC GPE status bit has not been set, the wakeup is
- * regarded as a spurious one.
- */
- if (!acpi_ec_dispatch_gpe())
- return false;
-
/*
* Cancel the wakeup and process all pending events in case
* there are any wakeup ones in there.
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index a55383b139df..0cf2fe290230 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -33,6 +33,15 @@ struct virtio_blk_vq {
} ____cacheline_aligned_in_smp;

struct virtio_blk {
+ /*
+ * This mutex must be held by anything that may run after
+ * virtblk_remove() sets vblk->vdev to NULL.
+ *
+ * blk-mq, virtqueue processing, and sysfs attribute code paths are
+ * shut down before vblk->vdev is set to NULL and therefore do not need
+ * to hold this mutex.
+ */
+ struct mutex vdev_mutex;
struct virtio_device *vdev;

/* The disk structure for the kernel. */
@@ -44,6 +53,13 @@ struct virtio_blk {
/* Process context for config space updates */
struct work_struct config_work;

+ /*
+ * Tracks references from block_device_operations open/release and
+ * virtio_driver probe/remove so this object can be freed once no
+ * longer in use.
+ */
+ refcount_t refs;
+
/* What host tells us, plus 2 for header & tailer. */
unsigned int sg_elems;

@@ -388,10 +404,55 @@ out:
return err;
}

+static void virtblk_get(struct virtio_blk *vblk)
+{
+ refcount_inc(&vblk->refs);
+}
+
+static void virtblk_put(struct virtio_blk *vblk)
+{
+ if (refcount_dec_and_test(&vblk->refs)) {
+ ida_simple_remove(&vd_index_ida, vblk->index);
+ mutex_destroy(&vblk->vdev_mutex);
+ kfree(vblk);
+ }
+}
+
+static int virtblk_open(struct block_device *bd, fmode_t mode)
+{
+ struct virtio_blk *vblk = bd->bd_disk->private_data;
+ int ret = 0;
+
+ mutex_lock(&vblk->vdev_mutex);
+
+ if (vblk->vdev)
+ virtblk_get(vblk);
+ else
+ ret = -ENXIO;
+
+ mutex_unlock(&vblk->vdev_mutex);
+ return ret;
+}
+
+static void virtblk_release(struct gendisk *disk, fmode_t mode)
+{
+ struct virtio_blk *vblk = disk->private_data;
+
+ virtblk_put(vblk);
+}
+
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
struct virtio_blk *vblk = bd->bd_disk->private_data;
+ int ret = 0;
+
+ mutex_lock(&vblk->vdev_mutex);
+
+ if (!vblk->vdev) {
+ ret = -ENXIO;
+ goto out;
+ }

/* see if the host passed in geometry config */
if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
@@ -407,12 +468,16 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
geo->sectors = 1 << 5;
geo->cylinders = get_capacity(bd->bd_disk) >> 11;
}
- return 0;
+out:
+ mutex_unlock(&vblk->vdev_mutex);
+ return ret;
}

static const struct block_device_operations virtblk_fops = {
.ioctl = virtblk_ioctl,
.owner = THIS_MODULE,
+ .open = virtblk_open,
+ .release = virtblk_release,
.getgeo = virtblk_getgeo,
};
@@ -767,6 +832,10 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_index;
}

+ /* This reference is dropped in virtblk_remove(). */
+ refcount_set(&vblk->refs, 1);
+ mutex_init(&vblk->vdev_mutex);
+
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
@@ -932,8 +1001,6 @@ out:
static void virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
- int index = vblk->index;
- int refc;

/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
@@ -943,18 +1010,21 @@ static void virtblk_remove(struct virtio_device *vdev)

blk_mq_free_tag_set(&vblk->tag_set);

+ mutex_lock(&vblk->vdev_mutex);
+
/* Stop all the virtqueues. */
vdev->config->reset(vdev);

- refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
+ /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
+ vblk->vdev = NULL;
+
put_disk(vblk->disk);
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
- kfree(vblk);

- /* Only free device id if we don't have any users */
- if (refc == 1)
- ida_simple_remove(&vd_index_ida, index);
+ mutex_unlock(&vblk->vdev_mutex);
+
+ virtblk_put(vblk);
}

#ifdef CONFIG_PM_SLEEP
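The shape of the lifetime fix above, reduced to its essentials (a sketch with simplified names, not the driver code): the object is freed by whoever drops the last reference, and the mutex makes "is the device still there?" checks atomic with respect to remove():

struct handle {
        struct mutex lock;
        refcount_t   refs;      /* one ref for probe, one per open file */
        void        *dev;       /* NULL once the backing device is gone */
};

static int handle_open(struct handle *h)
{
        int ret = 0;

        mutex_lock(&h->lock);
        if (h->dev)
                refcount_inc(&h->refs);
        else
                ret = -ENXIO;           /* opened after removal */
        mutex_unlock(&h->lock);
        return ret;
}

static void handle_put(struct handle *h) /* called from release() and remove() */
{
        if (refcount_dec_and_test(&h->refs))
                kfree(h);
}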
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 80b029713722..9728d1282e43 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3448,6 +3448,9 @@ static int __clk_core_init(struct clk_core *core)
out:
clk_pm_runtime_put(core);
unlock:
+ if (ret)
+ hlist_del_init(&core->child_node);
+
clk_prepare_unlock();

if (!ret)
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index d17cfb7a3ff4..d7243c09cc84 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -156,8 +156,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" };
PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" };
PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" };

-PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" };
-
PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
@@ -468,16 +466,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(24), 6, 10, DFLAGS,
RK2928_CLKGATE_CON(2), 8, GFLAGS),

- GATE(0, "cpll_gpu", "cpll", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "gpll_gpu", "gpll", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "hdmiphy_gpu", "hdmiphy", 0,
- RK2928_CLKGATE_CON(3), 13, GFLAGS),
- GATE(0, "usb480m_gpu", "usb480m", 0,
+ COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0,
+ RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 13, GFLAGS),
- COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0,
- RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS),

COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS,
@@ -582,8 +573,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS),

/* PD_GPU */
- GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS),
- GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS),
+ GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
+ GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),

/* PD_BUS */
GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 45499e0b9f2f..d3d7c4ef7d04 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1058,7 +1058,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,

update_turbo_state();
if (global.turbo_disabled) {
- pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+ pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock);
mutex_unlock(&intel_pstate_driver_lock);
return -EPERM;
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index e7d1e12bf464..89d90c456c0c 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
size);
tdmac->desc_arr = NULL;
+ if (tdmac->status == DMA_ERROR)
+ tdmac->status = DMA_COMPLETE;

return;
}
@@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (!desc)
goto err_out;

- mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+ if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
+ goto err_out;

while (buf < buf_len) {
desc = &tdmac->desc_arr[i];
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 581e7a290d98..a3b0b4c56a19 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
}

pci_set_master(pdev);
+ pd->dma.dev = &pdev->dev;

err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
if (err) {
@@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
goto err_free_irq;
}

- pd->dma.dev = &pdev->dev;

INIT_LIST_HEAD(&pd->dma.channels);

diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 31f9f0e369b9..55b031d2c989 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -16,7 +16,7 @@
int efi_tpm_final_log_size;
EXPORT_SYMBOL(efi_tpm_final_log_size);

-static int tpm2_calc_event_log_size(void *data, int count, void *size_info)
+static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
{
struct tcg_pcr_event2_head *header;
int event_size, size = 0;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index de5d1383f28d..3edc1762803a 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -528,7 +528,7 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
{
struct pca953x_chip *chip = gpiochip_get_data(gc);

- switch (config) {
+ switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
return pca953x_gpio_set_pull_up_down(chip, offset, config);
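Background on the one-line gpio-pca953x fix above (an illustrative sketch, not from the patch): a generic pinconf value is a packed unsigned long, so switching on the raw value only matches the PIN_CONFIG_* constants when the argument part happens to be zero:

/* packing accessors from include/linux/pinctrl/pinconf-generic.h */
enum pin_config_param param = pinconf_to_config_param(config);   /* low 8 bits */
u32 argument = pinconf_to_config_argument(config);               /* remaining bits */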
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 143753d237e7..eaa5e7b7c19d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
u32 cpp;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ AMDGPU_GEM_CREATE_VRAM_CLEARED;

info = drm_get_format_info(adev->ddev, mode_cmd);
cpp = info->cpp[0];
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c45304f1047c..4af9acc2dc4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
u32 extra_bits = vmid & 0xf;

/* IB packet must end on a 8 DW boundary */
- cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);

amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
@@ -811,7 +811,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
u32 pad_count;
int i;

- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
index 074a9a09c0a7..a5b60c9a2418 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
@@ -73,6 +73,22 @@
#define SDMA_OP_AQL_COPY 0
#define SDMA_OP_AQL_BARRIER_OR 0

+#define SDMA_GCR_RANGE_IS_PA (1 << 18)
+#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16)
+#define SDMA_GCR_GL2_WB (1 << 15)
+#define SDMA_GCR_GL2_INV (1 << 14)
+#define SDMA_GCR_GL2_DISCARD (1 << 13)
+#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11)
+#define SDMA_GCR_GL2_US (1 << 10)
+#define SDMA_GCR_GL1_INV (1 << 9)
+#define SDMA_GCR_GLV_INV (1 << 8)
+#define SDMA_GCR_GLK_INV (1 << 7)
+#define SDMA_GCR_GLK_WB (1 << 6)
+#define SDMA_GCR_GLM_INV (1 << 5)
+#define SDMA_GCR_GLM_WB (1 << 4)
+#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2)
+#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0)
+
/*define for op field*/
#define SDMA_PKT_HEADER_op_offset 0
#define SDMA_PKT_HEADER_op_mask 0x000000FF
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a10175838013..b6af67f6f214 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);

/* IB packet must end on a 8 DW boundary */
- sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;

- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 5f4e2c616241..cd3ebed46d05 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);

/* IB packet must end on a 8 DW boundary */
- sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;

- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4554e72c8378..23de332f3c6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -698,7 +698,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);

/* IB packet must end on a 8 DW boundary */
- sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -1579,7 +1579,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;

- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 8493bfbbc148..bd715012185c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -382,8 +382,27 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);

- /* IB packet must end on a 8 DW boundary */
- sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ /* Invalidate L2, because if we don't do it, we might get stale cache
+ * lines from previous IBs.
+ */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
+ SDMA_GCR_GL2_WB |
+ SDMA_GCR_GLM_INV |
+ SDMA_GCR_GLM_WB) << 16);
+ amdgpu_ring_write(ring, 0xffffff80);
+ amdgpu_ring_write(ring, 0xffff);
+
+ /* An IB packet must end on a 8 DW boundary--the next dword
+ * must be on a 8-dword boundary. Our IB packet below is 6
+ * dwords long, thus add x number of NOPs, such that, in
+ * modular arithmetic,
+ * wptr + 6 + x = 8k, k >= 0, which in C is,
+ * (wptr + 6 + x) % 8 = 0.
+ * The expression below, is a solution of x.
+ */
+ sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
@@ -1086,10 +1105,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
}

/**
- * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw
- *
+ * sdma_v5_0_ring_pad_ib - pad the IB
* @ib: indirect buffer to fill with padding
*
+ * Pad the IB with NOPs to a boundary multiple of 8.
*/
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
@@ -1097,7 +1116,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
u32 pad_count;
int i;

- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 0x7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -1600,7 +1619,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
- .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
+ .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
.emit_ib = sdma_v5_0_ring_emit_ib,
.emit_fence = sdma_v5_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
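A worked example of the alignment arithmetic used in the sdma hunks above (illustrative only): the NOP count x must satisfy (wptr + x + 6) % 8 == 0, i.e. x = (2 - wptr) mod 8. With wptr % 8 == 3, x = (2 - 3) & 7 = 7, and 3 + 7 + 6 = 16, an 8-dword boundary. The pad_count rewrite relies on the same identity, dropping the extra modulo. A small self-check sketch, assuming plain userspace C:

#include <assert.h>
int main(void)
{
        for (unsigned int w = 0; w < 8; w++)
                assert(((2 - w) & 7) == ((10 - (w & 7)) % 8));  /* insert_nop forms */
        for (unsigned int len = 0; len < 16; len++)
                assert(((-len) & 7) == ((8 - (len & 7)) % 8));  /* pad_count forms  */
        return 0;
}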
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index be61ae1430ed..99906435dcf7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6921,6 +6921,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ struct amdgpu_crtc *new_acrtc;
bool needs_reset;
int ret = 0;
@@ -6930,9 +6931,30 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state = to_dm_plane_state(new_plane_state);
dm_old_plane_state = to_dm_plane_state(old_plane_state);

- /*TODO Implement atomic check for cursor plane */
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ /*TODO Implement better atomic check for cursor plane */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if (!enable || !new_plane_crtc ||
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+ if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
+ (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
+ DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
+ new_plane_state->crtc_w, new_plane_state->crtc_h);
+ return -EINVAL;
+ }
+
+ if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
+ new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
+ DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
+ new_plane_state->crtc_x, new_plane_state->crtc_y);
+ return -EINVAL;
+ }
+
return 0;
+ }

needs_reset = should_reset_plane(state, plane, old_plane_state,
new_plane_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e933f6a369f9..083c42e521f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2015,7 +2015,8 @@ static void dcn20_fpga_init_hw(struct dc *dc)

REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(REFCLK_CNTL, 0);
+ if (REG(REFCLK_CNTL))
+ REG_WRITE(REFCLK_CNTL, 0);
//

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 161bf7caf3ae..bb7add5ea227 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -247,7 +247,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.dram_channel_width_bytes = 4,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.dcn_downspread_percent = 0.5,
- .downspread_percent = 0.5,
+ .downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d306cc711997..8bb5fbef7de0 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1425,7 +1425,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
if (!hwmgr)
return -EINVAL;

- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
+ if (!(hwmgr->not_vf && amdgpu_dpm) ||
+ !hwmgr->hwmgr_func->get_asic_baco_capability)
return 0;

mutex_lock(&hwmgr->smu_lock);
@@ -1459,7 +1460,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
if (!hwmgr)
return -EINVAL;

- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
+ if (!(hwmgr->not_vf && amdgpu_dpm) ||
+ !hwmgr->hwmgr_func->set_asic_baco_state)
return 0;

mutex_lock(&hwmgr->smu_lock);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 07a038f21619..caf6166622e4 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -504,8 +504,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
if (!ret)
goto err_llb;
else if (ret > 1) {
- DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
+ DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}

fbc->threshold = ret;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6c79d16b381e..058dcd541644 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -374,7 +374,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
struct i915_page_directory * const pd =
i915_pd_entry(ppgtt->pd, i);
-
+ /* skip now as current i915 ppgtt alloc won't allocate
+ top level pdp for non 4-level table, won't impact
+ shadow ppgtt. */
+ if (!pd)
+ break;
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3ccfc025fde2..ade607d93e45 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4784,7 +4784,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
+ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
dev_priv->ipc_enabled)
latency += 4;

diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index 43688ecdd8a0..60ab7151b84d 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
break;
default:
DRM_ERROR("unsupported image bit depth\n");
- return -EINVAL; /* TODO: cleanup */
+ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+ return -EINVAL;
}
image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
image->u.bitmap.x = width;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index f83522717488..4f944ace665d 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -718,7 +718,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
struct mipi_dsi_device *device = dsi->device;
- union phy_configure_opts opts = { 0 };
+ union phy_configure_opts opts = { };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
u16 delay;

diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 53b517dbe7e6..4af2fc309c28 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -244,9 +244,9 @@ static ssize_t da9052_tsi_show(struct device *dev,
int channel = to_sensor_dev_attr(devattr)->index;
int ret;

- mutex_lock(&hwmon->hwmon_lock);
+ mutex_lock(&hwmon->da9052->auxadc_lock);
ret = __da9052_read_tsi(dev, channel);
- mutex_unlock(&hwmon->hwmon_lock);
+ mutex_unlock(&hwmon->da9052->auxadc_lock);

if (ret < 0)
return ret;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 65b10efca2b8..7affe6b4ae21 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1542,8 +1542,11 @@ int ib_cache_setup_one(struct ib_device *device)
if (err)
return err;

- rdma_for_each_port (device, p)
- ib_cache_update(device, p, true);
+ rdma_for_each_port (device, p) {
+ err = ib_cache_update(device, p, true);
+ if (err)
+ return err;
+ }

return 0;
}
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index ef4b0c7061e4..244ebf285fc3 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1248,10 +1248,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,

has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
- rdma_restrack_put(res);
if (ret)
goto err_free;

+ rdma_restrack_put(res);
nlmsg_end(msg, nlh);
ib_device_put(device);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index d82e0589cfd2..6b4e7235d2f5 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
srqidx = ABORT_RSS_SRQIDX_G(
be32_to_cpu(req->srqidx_status));
if (srqidx) {
- complete_cached_srq_buffers(ep,
- req->srqidx_status);
+ complete_cached_srq_buffers(ep, srqidx);
} else {
/* Hold ep ref until finish_peer_abort() */
c4iw_get_ep(&ep->com);
@@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}

- ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W,
- TCB_RQ_START_S);
+ ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
+ TCB_RQ_START_S);
cleanup:
pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);

diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 13e4203497b3..a92346e88628 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,

set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
pq->state = SDMA_PKT_Q_ACTIVE;
- /* Send the first N packets in the request to buy us some time */
- ret = user_sdma_send_pkts(req, pcount);
- if (unlikely(ret < 0 && ret != -EBUSY))
- goto free_req;

/*
* This is a somewhat blocking send implementation.
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 55a1fbf0e670..ae8b97c30665 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
int arp_index;

arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
- if (arp_index == -1)
+ if (arp_index < 0)
return;
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
if (!cqp_request)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bd4aa04416c6..6e2b3e2f83f1 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
int send_size;
int header_size;
int spc;
+ int err;
int i;

if (wr->wr.opcode != IB_WR_SEND)
@@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,

sqp->ud_header.lrh.virtual_lane = 0;
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
- ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+ if (err)
+ return err;
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
@@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
}
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
if (!sqp->qp.ibqp.qp_num)
- ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+ &pkey);
else
- ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
+ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+ &pkey);
+ if (err)
+ return err;
+
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 48f48122ddcb..6a413d73b95d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,

ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip)
- return NULL;
+ return ERR_PTR(-ENOMEM);

size = PAGE_ALIGN(size);

diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index ff92704de32f..245040c3a35d 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,

if (outbuf) {
ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
- if (!ip)
+ if (IS_ERR(ip)) {
+ err = PTR_ERR(ip);
goto err1;
+ }

- err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
- if (err)
+ if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
+ err = -EFAULT;
goto err2;
+ }

spin_lock_bh(&rxe->pending_lock);
list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
@@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
err2:
kfree(ip);
err1:
- return -EINVAL;
+ return err;
}

inline void rxe_queue_reset(struct rxe_queue *q)
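The pattern adopted in the two rxe hunks above, in general form (a sketch, not the driver code): encode the errno in the returned pointer so callers can propagate the real cause instead of collapsing every failure to -EINVAL:

ptr = allocate_something();             /* returns ERR_PTR(-ENOMEM) on failure */
if (IS_ERR(ptr))
        return PTR_ERR(ptr);            /* propagate -ENOMEM, -EFAULT, ...     */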
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 95b41c0891d0..9d01b5dca519 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1417,6 +1417,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
struct mmc_request *mrq = &mqrq->brq.mrq;
struct request_queue *q = req->q;
struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
unsigned long flags;
bool put_card;
int err;
@@ -1446,7 +1447,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)

spin_lock_irqsave(&mq->lock, flags);

- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+ mq->in_flight[issue_type] -= 1;

put_card = (mmc_tot_in_flight(mq) == 0);

diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 9edc08685e86..9c0ccb3744c2 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -107,11 +107,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
case MMC_ISSUE_DCMD:
if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
if (recovery_needed)
- __mmc_cqe_recovery_notifier(mq);
+ mmc_cqe_recovery_notifier(mrq);
return BLK_EH_RESET_TIMER;
}
- /* No timeout (XXX: huh? comment doesn't make much sense) */
- blk_mq_complete_request(req);
+ /* The request has gone already */
return BLK_EH_DONE;
default:
/* Timeout is handled by mmc core */
@@ -125,18 +124,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
unsigned long flags;
- int ret;
+ bool ignore_tout;

spin_lock_irqsave(&mq->lock, flags);
-
- if (mq->recovery_needed || !mq->use_cqe)
- ret = BLK_EH_RESET_TIMER;
- else
- ret = mmc_cqe_timed_out(req);
-
+ ignore_tout = mq->recovery_needed || !mq->use_cqe;
spin_unlock_irqrestore(&mq->lock, flags);

- return ret;
+ return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

static void mmc_mq_recovery_handler(struct work_struct *work)
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index 1aee485d56d4..026ca9194ce5 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)

if (ret) {
dev_err(&pdev->dev, "Failed to get irq for data line\n");
- return ret;
+ goto free_host;
}

mutex_init(&host->cmd_mutex);
@@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, host);
mmc_add_host(mmc);
return 0;
+
+free_host:
+ mmc_free_host(mmc);
+ return ret;
}

static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 1604f512c7bd..01fc437ed965 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -602,10 +602,12 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
}

static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
- .chip = &sdhci_acpi_chip_amd,
- .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
- .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
- SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
};

diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
|
|
index ce15a05f23d4..fd76aa672e02 100644
|
|
--- a/drivers/mmc/host/sdhci-pci-gli.c
|
|
+++ b/drivers/mmc/host/sdhci-pci-gli.c
|
|
@@ -26,6 +26,9 @@
|
|
#define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26)
|
|
#define GLI_9750_DRIVING_1_VALUE 0xFFF
|
|
#define GLI_9750_DRIVING_2_VALUE 0x3
|
|
+#define SDHCI_GLI_9750_SEL_1 BIT(29)
|
|
+#define SDHCI_GLI_9750_SEL_2 BIT(31)
|
|
+#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30))
|
|
|
|
#define SDHCI_GLI_9750_PLL 0x864
|
|
#define SDHCI_GLI_9750_PLL_TX2_INV BIT(23)
|
|
@@ -122,6 +125,8 @@ static void gli_set_9750(struct sdhci_host *host)
|
|
GLI_9750_DRIVING_1_VALUE);
|
|
driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2,
|
|
GLI_9750_DRIVING_2_VALUE);
|
|
+ driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST);
|
|
+ driving_value |= SDHCI_GLI_9750_SEL_2;
|
|
sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING);
|
|
|
|
sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4;
|
|
@@ -334,6 +339,18 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
|
|
return value;
|
|
}
|
|
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
|
|
+{
|
|
+ struct sdhci_pci_slot *slot = chip->slots[0];
|
|
+
|
|
+ pci_free_irq_vectors(slot->chip->pdev);
|
|
+ gli_pcie_enable_msi(slot);
|
|
+
|
|
+ return sdhci_pci_resume_host(chip);
|
|
+}
|
|
+#endif
|
|
+
|
|
static const struct sdhci_ops sdhci_gl9755_ops = {
|
|
.set_clock = sdhci_set_clock,
|
|
.enable_dma = sdhci_pci_enable_dma,
|
|
@@ -348,6 +365,9 @@ const struct sdhci_pci_fixes sdhci_gl9755 = {
|
|
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
|
|
.probe_slot = gli_probe_slot_gl9755,
|
|
.ops = &sdhci_gl9755_ops,
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+ .resume = sdhci_pci_gli_resume,
|
|
+#endif
|
|
};
|
|
|
|
static const struct sdhci_ops sdhci_gl9750_ops = {
|
|
@@ -366,4 +386,7 @@ const struct sdhci_pci_fixes sdhci_gl9750 = {
|
|
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
|
|
.probe_slot = gli_probe_slot_gl9750,
|
|
.ops = &sdhci_gl9750_ops,
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+ .resume = sdhci_pci_gli_resume,
|
|
+#endif
|
|
};
|
|
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
|
|
index 925ed135a4d9..0df6c2b9484a 100644
|
|
--- a/drivers/net/dsa/dsa_loop.c
|
|
+++ b/drivers/net/dsa/dsa_loop.c
|
|
@@ -356,6 +356,7 @@ static void __exit dsa_loop_exit(void)
|
|
}
|
|
module_exit(dsa_loop_exit);
|
|
|
|
+MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("Florian Fainelli");
|
|
MODULE_DESCRIPTION("DSA loopback driver");
|
|
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
|
|
index bf5add954181..a935b20effa3 100644
|
|
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
|
|
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
|
|
@@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
|
|
for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
|
|
addr = dpaa2_sg_get_addr(&sgt[i]);
|
|
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
|
|
- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
|
|
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
|
|
DMA_BIDIRECTIONAL);
|
|
|
|
free_pages((unsigned long)sg_vaddr, 0);
|
|
@@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
|
|
/* Get the address and length from the S/G entry */
|
|
sg_addr = dpaa2_sg_get_addr(sge);
|
|
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
|
|
- dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
|
|
+ dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
|
|
DMA_BIDIRECTIONAL);
|
|
|
|
sg_length = dpaa2_sg_get_len(sge);
|
|
@@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
|
|
(page_address(page) - page_address(head_page));
|
|
|
|
skb_add_rx_frag(skb, i - 1, head_page, page_offset,
|
|
- sg_length, DPAA2_ETH_RX_BUF_SIZE);
|
|
+ sg_length, priv->rx_buf_size);
|
|
}
|
|
|
|
if (dpaa2_sg_is_final(sge))
|
|
@@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
|
|
|
|
for (i = 0; i < count; i++) {
|
|
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
|
|
- dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
|
|
+ dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
|
|
DMA_BIDIRECTIONAL);
|
|
free_pages((unsigned long)vaddr, 0);
|
|
}
|
|
@@ -331,7 +331,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
|
|
break;
|
|
case XDP_REDIRECT:
|
|
dma_unmap_page(priv->net_dev->dev.parent, addr,
|
|
- DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
|
|
+ priv->rx_buf_size, DMA_BIDIRECTIONAL);
|
|
ch->buf_count--;
|
|
xdp.data_hard_start = vaddr;
|
|
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
|
|
@@ -370,7 +370,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
|
|
trace_dpaa2_rx_fd(priv->net_dev, fd);
|
|
|
|
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
|
|
- dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
|
|
+ dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
|
|
DMA_BIDIRECTIONAL);
|
|
|
|
fas = dpaa2_get_fas(vaddr, false);
|
|
@@ -389,13 +389,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
|
|
return;
|
|
}
|
|
|
|
- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
|
|
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
|
|
DMA_BIDIRECTIONAL);
|
|
skb = build_linear_skb(ch, fd, vaddr);
|
|
} else if (fd_format == dpaa2_fd_sg) {
|
|
WARN_ON(priv->xdp_prog);
|
|
|
|
- dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
|
|
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
|
|
DMA_BIDIRECTIONAL);
|
|
skb = build_frag_skb(priv, ch, buf_data);
|
|
free_pages((unsigned long)vaddr, 0);
|
|
@@ -963,7 +963,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
|
|
if (!page)
|
|
goto err_alloc;
|
|
|
|
- addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
|
|
+ addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
|
|
DMA_BIDIRECTIONAL);
|
|
if (unlikely(dma_mapping_error(dev, addr)))
|
|
goto err_map;
|
|
@@ -973,7 +973,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
|
|
/* tracing point */
|
|
trace_dpaa2_eth_buf_seed(priv->net_dev,
|
|
page, DPAA2_ETH_RX_BUF_RAW_SIZE,
|
|
- addr, DPAA2_ETH_RX_BUF_SIZE,
|
|
+ addr, priv->rx_buf_size,
|
|
bpid);
|
|
}
|
|
|
|
@@ -1680,7 +1680,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
|
|
int mfl, linear_mfl;
|
|
|
|
mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
|
|
- linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
|
|
+ linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
|
|
dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
|
|
|
|
if (mfl > linear_mfl) {
|
|
@@ -2432,6 +2432,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
|
|
else
|
|
rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
|
|
|
|
+ /* We need to ensure that the buffer size seen by WRIOP is a multiple
|
|
+ * of 64 or 256 bytes depending on the WRIOP version.
|
|
+ */
|
|
+ priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
|
|
+
|
|
/* tx buffer */
|
|
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
|
|
buf_layout.pass_timestamp = true;
|
|
@@ -3096,7 +3101,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
|
|
pools_params.num_dpbp = 1;
|
|
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
|
|
pools_params.pools[0].backup_pool = 0;
|
|
- pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
|
|
+ pools_params.pools[0].buffer_size = priv->rx_buf_size;
|
|
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
|
|
if (err) {
|
|
dev_err(dev, "dpni_set_pools() failed\n");
|
|
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
|
|
index 8a0e65b3267f..4570ed53c6c7 100644
|
|
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
|
|
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
|
|
@@ -373,6 +373,7 @@ struct dpaa2_eth_priv {
|
|
u16 tx_data_offset;
|
|
|
|
struct fsl_mc_device *dpbp_dev;
|
|
+ u16 rx_buf_size;
|
|
u16 bpid;
|
|
struct iommu_domain *iommu_domain;
|
|
|
|
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
|
|
index dc9a6c36cac0..e4d9fb0e72bf 100644
|
|
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
|
|
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
|
|
@@ -590,7 +590,7 @@ static int num_rules(struct dpaa2_eth_priv *priv)
|
|
|
|
static int update_cls_rule(struct net_device *net_dev,
|
|
struct ethtool_rx_flow_spec *new_fs,
|
|
- int location)
|
|
+ unsigned int location)
|
|
{
|
|
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
|
|
struct dpaa2_eth_cls_rule *rule;
|
|
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
|
|
index 8995e32dd1c0..992908e6eebf 100644
|
|
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
|
|
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
|
|
@@ -45,6 +45,8 @@
|
|
|
|
#define MGMT_MSG_TIMEOUT 5000
|
|
|
|
+#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
|
|
+
|
|
#define mgmt_to_pfhwdev(pf_mgmt) \
|
|
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
|
|
|
|
@@ -238,12 +240,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
|
|
u8 *buf_in, u16 in_size,
|
|
u8 *buf_out, u16 *out_size,
|
|
enum mgmt_direction_type direction,
|
|
- u16 resp_msg_id)
|
|
+ u16 resp_msg_id, u32 timeout)
|
|
{
|
|
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
|
|
struct pci_dev *pdev = hwif->pdev;
|
|
struct hinic_recv_msg *recv_msg;
|
|
struct completion *recv_done;
|
|
+ unsigned long timeo;
|
|
u16 msg_id;
|
|
int err;
|
|
|
|
@@ -267,8 +270,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
|
|
goto unlock_sync_msg;
|
|
}
|
|
|
|
- if (!wait_for_completion_timeout(recv_done,
|
|
- msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
|
|
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
|
|
+
|
|
+ if (!wait_for_completion_timeout(recv_done, timeo)) {
|
|
dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
|
|
err = -ETIMEDOUT;
|
|
goto unlock_sync_msg;
|
|
@@ -342,6 +346,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
|
|
{
|
|
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
|
|
struct pci_dev *pdev = hwif->pdev;
|
|
+ u32 timeout = 0;
|
|
|
|
if (sync != HINIC_MGMT_MSG_SYNC) {
|
|
dev_err(&pdev->dev, "Invalid MGMT msg type\n");
|
|
@@ -353,9 +358,12 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
|
|
+ timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
|
|
+
|
|
return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
|
|
buf_out, out_size, MGMT_DIRECT_SEND,
|
|
- MSG_NOT_RESP);
|
|
+ MSG_NOT_RESP, timeout);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
|
|
index 42d00b049c6e..3f739ce40201 100644
|
|
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
|
|
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
|
|
@@ -483,7 +483,6 @@ static int hinic_close(struct net_device *netdev)
|
|
{
|
|
struct hinic_dev *nic_dev = netdev_priv(netdev);
|
|
unsigned int flags;
|
|
- int err;
|
|
|
|
down(&nic_dev->mgmt_lock);
|
|
|
|
@@ -497,20 +496,9 @@ static int hinic_close(struct net_device *netdev)
|
|
|
|
up(&nic_dev->mgmt_lock);
|
|
|
|
- err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
|
|
- if (err) {
|
|
- netif_err(nic_dev, drv, netdev,
|
|
- "Failed to set func port state\n");
|
|
- nic_dev->flags |= (flags & HINIC_INTF_UP);
|
|
- return err;
|
|
- }
|
|
+ hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
|
|
|
|
- err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
|
|
- if (err) {
|
|
- netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
|
|
- nic_dev->flags |= (flags & HINIC_INTF_UP);
|
|
- return err;
|
|
- }
|
|
+ hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
|
|
|
|
if (nic_dev->flags & HINIC_RSS_ENABLE) {
|
|
hinic_rss_deinit(nic_dev);
|
|
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
|
|
index e1651756bf9d..f70bb81e1ed6 100644
|
|
--- a/drivers/net/ethernet/moxa/moxart_ether.c
|
|
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
|
|
@@ -564,7 +564,7 @@ static int moxart_remove(struct platform_device *pdev)
|
|
struct net_device *ndev = platform_get_drvdata(pdev);
|
|
|
|
unregister_netdev(ndev);
|
|
- free_irq(ndev->irq, ndev);
|
|
+ devm_free_irq(&pdev->dev, ndev->irq, ndev);
|
|
moxart_mac_free_memory(ndev);
|
|
free_netdev(ndev);
|
|
|
|
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
|
|
index 51fa82b429a3..40970352d208 100644
|
|
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
|
|
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
|
|
@@ -235,11 +235,13 @@ static int jazz_sonic_probe(struct platform_device *pdev)
|
|
|
|
err = register_netdev(dev);
|
|
if (err)
|
|
- goto out1;
|
|
+ goto undo_probe1;
|
|
|
|
return 0;
|
|
|
|
-out1:
|
|
+undo_probe1:
|
|
+ dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
|
|
+ lp->descriptors, lp->descriptors_laddr);
|
|
release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
|
|
out:
|
|
free_netdev(dev);
|
|
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
|
|
index 354efffac0f9..bdbf0726145e 100644
|
|
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
|
|
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
|
|
@@ -333,8 +333,10 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
|
|
goto err_free_alink;
|
|
|
|
alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
|
|
- if (!alink->prio_map)
|
|
+ if (!alink->prio_map) {
|
|
+ err = -ENOMEM;
|
|
goto err_free_alink;
|
|
+ }
|
|
|
|
/* This is a multi-host app, make sure MAC/PHY is up, but don't
|
|
* make the MAC/PHY state follow the state of any of the ports.
|
|
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
|
|
index 3bc6d1ef29ec..6fa9852e3f97 100644
|
|
--- a/drivers/net/ethernet/realtek/r8169_main.c
|
|
+++ b/drivers/net/ethernet/realtek/r8169_main.c
|
|
@@ -2202,6 +2202,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
|
|
{ 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
|
|
{ 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
|
|
{ 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
|
|
+ /* RTL8401, reportedly works if treated as RTL8101e */
|
|
+ { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 },
|
|
{ 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
|
|
{ 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
|
|
{ 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
|
|
index e0a5fe83d8e0..bfc4a92f1d92 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
|
|
@@ -75,6 +75,11 @@ struct ethqos_emac_por {
|
|
unsigned int value;
|
|
};
|
|
|
|
+struct ethqos_emac_driver_data {
|
|
+ const struct ethqos_emac_por *por;
|
|
+ unsigned int num_por;
|
|
+};
|
|
+
|
|
struct qcom_ethqos {
|
|
struct platform_device *pdev;
|
|
void __iomem *rgmii_base;
|
|
@@ -171,6 +176,11 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
|
|
{ .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
|
|
};
|
|
|
|
+static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
|
|
+ .por = emac_v2_3_0_por,
|
|
+ .num_por = ARRAY_SIZE(emac_v2_3_0_por),
|
|
+};
|
|
+
|
|
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
|
|
{
|
|
unsigned int val;
|
|
@@ -442,6 +452,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
|
|
struct device_node *np = pdev->dev.of_node;
|
|
struct plat_stmmacenet_data *plat_dat;
|
|
struct stmmac_resources stmmac_res;
|
|
+ const struct ethqos_emac_driver_data *data;
|
|
struct qcom_ethqos *ethqos;
|
|
struct resource *res;
|
|
int ret;
|
|
@@ -471,7 +482,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
|
|
goto err_mem;
|
|
}
|
|
|
|
- ethqos->por = of_device_get_match_data(&pdev->dev);
|
|
+ data = of_device_get_match_data(&pdev->dev);
|
|
+ ethqos->por = data->por;
|
|
+ ethqos->num_por = data->num_por;
|
|
|
|
ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
|
|
if (IS_ERR(ethqos->rgmii_clk)) {
|
|
@@ -526,7 +539,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
|
|
}
|
|
|
|
static const struct of_device_id qcom_ethqos_match[] = {
|
|
- { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
|
|
+ { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
|
|
{ }
|
|
};
|
|
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
|
|
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
|
|
index 001def4509c2..fed3e395f18e 100644
|
|
--- a/drivers/net/phy/microchip_t1.c
|
|
+++ b/drivers/net/phy/microchip_t1.c
|
|
@@ -3,9 +3,21 @@
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
+#include <linux/delay.h>
|
|
#include <linux/mii.h>
|
|
#include <linux/phy.h>
|
|
|
|
+/* External Register Control Register */
|
|
+#define LAN87XX_EXT_REG_CTL (0x14)
|
|
+#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
|
|
+#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800)
|
|
+
|
|
+/* External Register Read Data Register */
|
|
+#define LAN87XX_EXT_REG_RD_DATA (0x15)
|
|
+
|
|
+/* External Register Write Data Register */
|
|
+#define LAN87XX_EXT_REG_WR_DATA (0x16)
|
|
+
|
|
/* Interrupt Source Register */
|
|
#define LAN87XX_INTERRUPT_SOURCE (0x18)
|
|
|
|
@@ -14,9 +26,160 @@
|
|
#define LAN87XX_MASK_LINK_UP (0x0004)
|
|
#define LAN87XX_MASK_LINK_DOWN (0x0002)
|
|
|
|
+/* phyaccess nested types */
|
|
+#define PHYACC_ATTR_MODE_READ 0
|
|
+#define PHYACC_ATTR_MODE_WRITE 1
|
|
+#define PHYACC_ATTR_MODE_MODIFY 2
|
|
+
|
|
+#define PHYACC_ATTR_BANK_SMI 0
|
|
+#define PHYACC_ATTR_BANK_MISC 1
|
|
+#define PHYACC_ATTR_BANK_PCS 2
|
|
+#define PHYACC_ATTR_BANK_AFE 3
|
|
+#define PHYACC_ATTR_BANK_MAX 7
|
|
+
|
|
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
|
|
#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
|
|
|
|
+struct access_ereg_val {
|
|
+ u8 mode;
|
|
+ u8 bank;
|
|
+ u8 offset;
|
|
+ u16 val;
|
|
+ u16 mask;
|
|
+};
|
|
+
|
|
+static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
|
|
+ u8 offset, u16 val)
|
|
+{
|
|
+ u16 ereg = 0;
|
|
+ int rc = 0;
|
|
+
|
|
+ if (mode > PHYACC_ATTR_MODE_WRITE || bank > PHYACC_ATTR_BANK_MAX)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (bank == PHYACC_ATTR_BANK_SMI) {
|
|
+ if (mode == PHYACC_ATTR_MODE_WRITE)
|
|
+ rc = phy_write(phydev, offset, val);
|
|
+ else
|
|
+ rc = phy_read(phydev, offset);
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ if (mode == PHYACC_ATTR_MODE_WRITE) {
|
|
+ ereg = LAN87XX_EXT_REG_CTL_WR_CTL;
|
|
+ rc = phy_write(phydev, LAN87XX_EXT_REG_WR_DATA, val);
|
|
+ if (rc < 0)
|
|
+ return rc;
|
|
+ } else {
|
|
+ ereg = LAN87XX_EXT_REG_CTL_RD_CTL;
|
|
+ }
|
|
+
|
|
+ ereg |= (bank << 8) | offset;
|
|
+
|
|
+ rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg);
|
|
+ if (rc < 0)
|
|
+ return rc;
|
|
+
|
|
+ if (mode == PHYACC_ATTR_MODE_READ)
|
|
+ rc = phy_read(phydev, LAN87XX_EXT_REG_RD_DATA);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+static int access_ereg_modify_changed(struct phy_device *phydev,
|
|
+ u8 bank, u8 offset, u16 val, u16 mask)
|
|
+{
|
|
+ int new = 0, rc = 0;
|
|
+
|
|
+ if (bank > PHYACC_ATTR_BANK_MAX)
|
|
+ return -EINVAL;
|
|
+
|
|
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, bank, offset, val);
|
|
+ if (rc < 0)
|
|
+ return rc;
|
|
+
|
|
+ new = val | (rc & (mask ^ 0xFFFF));
|
|
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, bank, offset, new);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+static int lan87xx_phy_init(struct phy_device *phydev)
|
|
+{
|
|
+ static const struct access_ereg_val init[] = {
|
|
+ /* TX Amplitude = 5 */
|
|
+ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B,
|
|
+ 0x000A, 0x001E},
|
|
+ /* Clear SMI interrupts */
|
|
+ {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18,
|
|
+ 0, 0},
|
|
+ /* Clear MISC interrupts */
|
|
+ {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08,
|
|
+ 0, 0},
|
|
+ /* Turn on TC10 Ring Oscillator (ROSC) */
|
|
+ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20,
|
|
+ 0x0020, 0x0020},
|
|
+ /* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */
|
|
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20,
|
|
+ 0x283C, 0},
|
|
+ /* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */
|
|
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21,
|
|
+ 0x274F, 0},
|
|
+ /* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep,
|
|
+ * and Wake_In to wake PHY
|
|
+ */
|
|
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20,
|
|
+ 0x80A7, 0},
|
|
+ /* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer
|
|
+ * to 128 uS
|
|
+ */
|
|
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24,
|
|
+ 0xF110, 0},
|
|
+ /* Enable HW Init */
|
|
+ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A,
|
|
+ 0x0100, 0x0100},
|
|
+ };
|
|
+ int rc, i;
|
|
+
|
|
+ /* Start manual initialization procedures in Managed Mode */
|
|
+ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
|
|
+ 0x1a, 0x0000, 0x0100);
|
|
+ if (rc < 0)
|
|
+ return rc;
|
|
+
|
|
+ /* Soft Reset the SMI block */
|
|
+ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
|
|
+ 0x00, 0x8000, 0x8000);
|
|
+ if (rc < 0)
|
|
+ return rc;
|
|
+
|
|
+ /* Check to see if the self-clearing bit is cleared */
|
|
+ usleep_range(1000, 2000);
|
|
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
|
|
+ PHYACC_ATTR_BANK_SMI, 0x00, 0);
|
|
+ if (rc < 0)
|
|
+ return rc;
|
|
+ if ((rc & 0x8000) != 0)
|
|
+ return -ETIMEDOUT;
|
|
+
|
|
+ /* PHY Initialization */
|
|
+ for (i = 0; i < ARRAY_SIZE(init); i++) {
|
|
+ if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) {
|
|
+ rc = access_ereg_modify_changed(phydev, init[i].bank,
|
|
+ init[i].offset,
|
|
+ init[i].val,
|
|
+ init[i].mask);
|
|
+ } else {
|
|
+ rc = access_ereg(phydev, init[i].mode, init[i].bank,
|
|
+ init[i].offset, init[i].val);
|
|
+ }
|
|
+ if (rc < 0)
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int lan87xx_phy_config_intr(struct phy_device *phydev)
|
|
{
|
|
int rc, val = 0;
|
|
@@ -40,6 +203,13 @@ static int lan87xx_phy_ack_interrupt(struct phy_device *phydev)
|
|
return rc < 0 ? rc : 0;
|
|
}
|
|
|
|
+static int lan87xx_config_init(struct phy_device *phydev)
|
|
+{
|
|
+ int rc = lan87xx_phy_init(phydev);
|
|
+
|
|
+ return rc < 0 ? rc : 0;
|
|
+}
|
|
+
|
|
static struct phy_driver microchip_t1_phy_driver[] = {
|
|
{
|
|
.phy_id = 0x0007c150,
|
|
@@ -48,6 +218,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
|
|
|
|
.features = PHY_BASIC_T1_FEATURES,
|
|
|
|
+ .config_init = lan87xx_config_init,
|
|
.config_aneg = genphy_config_aneg,
|
|
|
|
.ack_interrupt = lan87xx_phy_ack_interrupt,
|
|
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
|
|
index ea890d802ffe..54e5d4f9622c 100644
|
|
--- a/drivers/net/phy/phy.c
|
|
+++ b/drivers/net/phy/phy.c
|
|
@@ -1160,9 +1160,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
|
|
/* Restart autonegotiation so the new modes get sent to the
|
|
* link partner.
|
|
*/
|
|
- ret = phy_restart_aneg(phydev);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
+ if (phydev->autoneg == AUTONEG_ENABLE) {
|
|
+ ret = phy_restart_aneg(phydev);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ }
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
|
|
index a44dd3c8af63..087b01684135 100644
|
|
--- a/drivers/net/ppp/pppoe.c
|
|
+++ b/drivers/net/ppp/pppoe.c
|
|
@@ -492,6 +492,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
|
|
if (!skb)
|
|
goto out;
|
|
|
|
+ if (skb->pkt_type != PACKET_HOST)
|
|
+ goto abort;
|
|
+
|
|
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
|
|
goto abort;
|
|
|
|
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
|
|
index 5a635f028bdc..030d30603c29 100644
|
|
--- a/drivers/net/virtio_net.c
|
|
+++ b/drivers/net/virtio_net.c
|
|
@@ -1231,9 +1231,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
|
|
break;
|
|
} while (rq->vq->num_free);
|
|
if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
|
|
- u64_stats_update_begin(&rq->stats.syncp);
|
|
+ unsigned long flags;
|
|
+
|
|
+ flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
|
|
rq->stats.kicks++;
|
|
- u64_stats_update_end(&rq->stats.syncp);
|
|
+ u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
|
|
}
|
|
|
|
return !oom;
|
|
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
|
|
index 606fe216f902..cae7caf5ab28 100644
|
|
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
|
|
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
|
|
@@ -1297,6 +1297,7 @@ static const struct gpio_chip byt_gpio_chip = {
|
|
.direction_output = byt_gpio_direction_output,
|
|
.get = byt_gpio_get,
|
|
.set = byt_gpio_set,
|
|
+ .set_config = gpiochip_generic_config,
|
|
.dbg_show = byt_gpio_dbg_show,
|
|
};
|
|
|
|
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
|
|
index 2c419fa5d1c1..8f06445a8e39 100644
|
|
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
|
|
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
|
|
@@ -1474,11 +1474,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
|
|
struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
|
|
struct irq_chip *chip = irq_desc_get_chip(desc);
|
|
unsigned long pending;
|
|
+ unsigned long flags;
|
|
u32 intr_line;
|
|
|
|
chained_irq_enter(chip, desc);
|
|
|
|
+ raw_spin_lock_irqsave(&chv_lock, flags);
|
|
pending = readl(pctrl->regs + CHV_INTSTAT);
|
|
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
|
|
+
|
|
for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
|
|
unsigned irq, offset;
|
|
|
|
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
|
|
index d936e7aa74c4..7b7736abe9d8 100644
|
|
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
|
|
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
|
|
@@ -15,17 +15,18 @@
|
|
|
|
#include "pinctrl-intel.h"
|
|
|
|
-#define SPT_PAD_OWN 0x020
|
|
-#define SPT_PADCFGLOCK 0x0a0
|
|
-#define SPT_HOSTSW_OWN 0x0d0
|
|
-#define SPT_GPI_IS 0x100
|
|
-#define SPT_GPI_IE 0x120
|
|
+#define SPT_PAD_OWN 0x020
|
|
+#define SPT_H_PADCFGLOCK 0x090
|
|
+#define SPT_LP_PADCFGLOCK 0x0a0
|
|
+#define SPT_HOSTSW_OWN 0x0d0
|
|
+#define SPT_GPI_IS 0x100
|
|
+#define SPT_GPI_IE 0x120
|
|
|
|
#define SPT_COMMUNITY(b, s, e) \
|
|
{ \
|
|
.barno = (b), \
|
|
.padown_offset = SPT_PAD_OWN, \
|
|
- .padcfglock_offset = SPT_PADCFGLOCK, \
|
|
+ .padcfglock_offset = SPT_LP_PADCFGLOCK, \
|
|
.hostown_offset = SPT_HOSTSW_OWN, \
|
|
.is_offset = SPT_GPI_IS, \
|
|
.ie_offset = SPT_GPI_IE, \
|
|
@@ -47,7 +48,7 @@
|
|
{ \
|
|
.barno = (b), \
|
|
.padown_offset = SPT_PAD_OWN, \
|
|
- .padcfglock_offset = SPT_PADCFGLOCK, \
|
|
+ .padcfglock_offset = SPT_H_PADCFGLOCK, \
|
|
.hostown_offset = SPT_HOSTSW_OWN, \
|
|
.is_offset = SPT_GPI_IS, \
|
|
.ie_offset = SPT_GPI_IE, \
|
|
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
|
|
index 763da0be10d6..44320322037d 100644
|
|
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
|
|
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
|
|
@@ -688,7 +688,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
|
|
|
|
pol = msm_readl_intr_cfg(pctrl, g);
|
|
pol ^= BIT(g->intr_polarity_bit);
|
|
- msm_writel_intr_cfg(val, pctrl, g);
|
|
+ msm_writel_intr_cfg(pol, pctrl, g);
|
|
|
|
val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit);
|
|
intstat = msm_readl_intr_status(pctrl, g);
|
|
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
|
|
index 4fc2056bd227..e615dc240150 100644
|
|
--- a/drivers/s390/net/ism_drv.c
|
|
+++ b/drivers/s390/net/ism_drv.c
|
|
@@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
|
|
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
|
|
ISM_NR_DMBS);
|
|
- if (!ism->smcd)
|
|
+ if (!ism->smcd) {
|
|
+ ret = -ENOMEM;
|
|
goto err_resource;
|
|
+ }
|
|
|
|
ism->smcd->priv = ism;
|
|
ret = ism_dev_init(ism);
|
|
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
|
|
index 94af30f768f7..9c6bf13daaee 100644
|
|
--- a/drivers/scsi/sg.c
|
|
+++ b/drivers/scsi/sg.c
|
|
@@ -689,8 +689,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
|
|
hp->flags = input_size; /* structure abuse ... */
|
|
hp->pack_id = old_hdr.pack_id;
|
|
hp->usr_ptr = NULL;
|
|
- if (__copy_from_user(cmnd, buf, cmd_size))
|
|
+ if (__copy_from_user(cmnd, buf, cmd_size)) {
|
|
+ sg_remove_request(sfp, srp);
|
|
return -EFAULT;
|
|
+ }
|
|
/*
|
|
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
|
|
* but is is possible that the app intended SG_DXFER_TO_DEV, because there
|
|
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
|
|
index f624cc87cbab..856c34010021 100644
|
|
--- a/drivers/usb/cdns3/gadget.c
|
|
+++ b/drivers/usb/cdns3/gadget.c
|
|
@@ -2105,7 +2105,7 @@ found:
|
|
link_trb = priv_req->trb;
|
|
|
|
/* Update ring only if removed request is on pending_req_list list */
|
|
- if (req_on_hw_ring) {
|
|
+ if (req_on_hw_ring && link_trb) {
|
|
link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
|
|
((priv_req->end_trb + 1) * TRB_SIZE));
|
|
link_trb->control = (link_trb->control & TRB_CYCLE) |
|
|
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
|
|
index 6ca40d135430..e26a6f18f421 100644
|
|
--- a/drivers/usb/core/devio.c
|
|
+++ b/drivers/usb/core/devio.c
|
|
@@ -217,6 +217,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
|
|
{
|
|
struct usb_memory *usbm = NULL;
|
|
struct usb_dev_state *ps = file->private_data;
|
|
+ struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
|
|
size_t size = vma->vm_end - vma->vm_start;
|
|
void *mem;
|
|
unsigned long flags;
|
|
@@ -250,11 +251,19 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
|
|
usbm->vma_use_count = 1;
|
|
INIT_LIST_HEAD(&usbm->memlist);
|
|
|
|
- if (remap_pfn_range(vma, vma->vm_start,
|
|
- virt_to_phys(usbm->mem) >> PAGE_SHIFT,
|
|
- size, vma->vm_page_prot) < 0) {
|
|
- dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
|
|
- return -EAGAIN;
|
|
+ if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
|
|
+ if (remap_pfn_range(vma, vma->vm_start,
|
|
+ virt_to_phys(usbm->mem) >> PAGE_SHIFT,
|
|
+ size, vma->vm_page_prot) < 0) {
|
|
+ dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+ } else {
|
|
+ if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle,
|
|
+ size)) {
|
|
+ dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
|
|
+ return -EAGAIN;
|
|
+ }
|
|
}
|
|
|
|
vma->vm_flags |= VM_IO;
|
|
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
|
|
index 847c85430b05..4d3de33885ff 100644
|
|
--- a/drivers/usb/core/hub.c
|
|
+++ b/drivers/usb/core/hub.c
|
|
@@ -38,6 +38,7 @@
|
|
|
|
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
|
|
#define USB_VENDOR_SMSC 0x0424
|
|
+#define USB_PRODUCT_USB5534B 0x5534
|
|
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
|
|
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
|
|
|
|
@@ -5506,8 +5507,11 @@ out_hdev_lock:
|
|
}
|
|
|
|
static const struct usb_device_id hub_id_table[] = {
|
|
- { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
|
|
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
|
|
+ | USB_DEVICE_ID_MATCH_PRODUCT
|
|
+ | USB_DEVICE_ID_MATCH_INT_CLASS,
|
|
.idVendor = USB_VENDOR_SMSC,
|
|
+ .idProduct = USB_PRODUCT_USB5534B,
|
|
.bInterfaceClass = USB_CLASS_HUB,
|
|
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
|
|
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
|
|
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
|
|
index 3d30dec42c81..c30c5b1c478c 100644
|
|
--- a/drivers/usb/dwc3/gadget.c
|
|
+++ b/drivers/usb/dwc3/gadget.c
|
|
@@ -2480,9 +2480,6 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
|
|
for_each_sg(sg, s, pending, i) {
|
|
trb = &dep->trb_pool[dep->trb_dequeue];
|
|
|
|
- if (trb->ctrl & DWC3_TRB_CTRL_HWO)
|
|
- break;
|
|
-
|
|
req->sg = sg_next(s);
|
|
req->num_pending_sgs--;
|
|
|
|
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
|
|
index ab9ac48a751a..a7709d126b29 100644
|
|
--- a/drivers/usb/gadget/configfs.c
|
|
+++ b/drivers/usb/gadget/configfs.c
|
|
@@ -260,6 +260,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
|
|
char *name;
|
|
int ret;
|
|
|
|
+ if (strlen(page) < len)
|
|
+ return -EOVERFLOW;
|
|
+
|
|
name = kstrdup(page, GFP_KERNEL);
|
|
if (!name)
|
|
return -ENOMEM;
|
|
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
|
|
index dd81fd538cb8..a748ed0842e8 100644
|
|
--- a/drivers/usb/gadget/legacy/audio.c
|
|
+++ b/drivers/usb/gadget/legacy/audio.c
|
|
@@ -300,8 +300,10 @@ static int audio_bind(struct usb_composite_dev *cdev)
|
|
struct usb_descriptor_header *usb_desc;
|
|
|
|
usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
|
|
- if (!usb_desc)
|
|
+ if (!usb_desc) {
|
|
+ status = -ENOMEM;
|
|
goto fail;
|
|
+ }
|
|
usb_otg_descriptor_init(cdev->gadget, usb_desc);
|
|
otg_desc[0] = usb_desc;
|
|
otg_desc[1] = NULL;
|
|
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
|
|
index 8d7a556ece30..563363aba48f 100644
|
|
--- a/drivers/usb/gadget/legacy/cdc2.c
|
|
+++ b/drivers/usb/gadget/legacy/cdc2.c
|
|
@@ -179,8 +179,10 @@ static int cdc_bind(struct usb_composite_dev *cdev)
|
|
struct usb_descriptor_header *usb_desc;
|
|
|
|
usb_desc = usb_otg_descriptor_alloc(gadget);
|
|
- if (!usb_desc)
|
|
+ if (!usb_desc) {
|
|
+ status = -ENOMEM;
|
|
goto fail1;
|
|
+ }
|
|
usb_otg_descriptor_init(gadget, usb_desc);
|
|
otg_desc[0] = usb_desc;
|
|
otg_desc[1] = NULL;
|
|
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
|
|
index c61e71ba7045..0f1b45e3abd1 100644
|
|
--- a/drivers/usb/gadget/legacy/ncm.c
|
|
+++ b/drivers/usb/gadget/legacy/ncm.c
|
|
@@ -156,8 +156,10 @@ static int gncm_bind(struct usb_composite_dev *cdev)
|
|
struct usb_descriptor_header *usb_desc;
|
|
|
|
usb_desc = usb_otg_descriptor_alloc(gadget);
|
|
- if (!usb_desc)
|
|
+ if (!usb_desc) {
|
|
+ status = -ENOMEM;
|
|
goto fail;
|
|
+ }
|
|
usb_otg_descriptor_init(gadget, usb_desc);
|
|
otg_desc[0] = usb_desc;
|
|
otg_desc[1] = NULL;
|
|
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
|
|
index 247de0faaeb7..5980540a8fff 100644
|
|
--- a/drivers/usb/gadget/udc/net2272.c
|
|
+++ b/drivers/usb/gadget/udc/net2272.c
|
|
@@ -2647,6 +2647,8 @@ net2272_plat_probe(struct platform_device *pdev)
|
|
err_req:
|
|
release_mem_region(base, len);
|
|
err:
|
|
+ kfree(dev);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
|
|
index 315b4552693c..52c625c02341 100644
|
|
--- a/drivers/usb/host/xhci-plat.c
|
|
+++ b/drivers/usb/host/xhci-plat.c
|
|
@@ -363,6 +363,7 @@ static int xhci_plat_remove(struct platform_device *dev)
|
|
struct clk *reg_clk = xhci->reg_clk;
|
|
struct usb_hcd *shared_hcd = xhci->shared_hcd;
|
|
|
|
+ pm_runtime_get_sync(&dev->dev);
|
|
xhci->xhc_state |= XHCI_STATE_REMOVING;
|
|
|
|
usb_remove_hcd(shared_hcd);
|
|
@@ -376,8 +377,9 @@ static int xhci_plat_remove(struct platform_device *dev)
|
|
clk_disable_unprepare(reg_clk);
|
|
usb_put_hcd(hcd);
|
|
|
|
- pm_runtime_set_suspended(&dev->dev);
|
|
pm_runtime_disable(&dev->dev);
|
|
+ pm_runtime_put_noidle(&dev->dev);
|
|
+ pm_runtime_set_suspended(&dev->dev);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
|
|
index a54f8f3234f9..49894541ea9a 100644
|
|
--- a/drivers/usb/host/xhci-ring.c
|
|
+++ b/drivers/usb/host/xhci-ring.c
|
|
@@ -3421,8 +3421,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
/* New sg entry */
|
|
--num_sgs;
|
|
sent_len -= block_len;
|
|
- if (num_sgs != 0) {
|
|
- sg = sg_next(sg);
|
|
+ sg = sg_next(sg);
|
|
+ if (num_sgs != 0 && sg) {
|
|
block_len = sg_dma_len(sg);
|
|
addr = (u64) sg_dma_address(sg);
|
|
addr += sent_len;
|
|
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
|
|
index c8494fa5e19d..4b8632eda2bd 100644
|
|
--- a/fs/cifs/cifssmb.c
|
|
+++ b/fs/cifs/cifssmb.c
|
|
@@ -2135,8 +2135,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
|
|
}
|
|
}
|
|
|
|
+ kref_put(&wdata2->refcount, cifs_writedata_release);
|
|
if (rc) {
|
|
- kref_put(&wdata2->refcount, cifs_writedata_release);
|
|
if (is_retryable_error(rc))
|
|
continue;
|
|
i += nr_pages;
|
|
diff --git a/fs/exec.c b/fs/exec.c
|
|
index fc2870f2aca9..d62cd1d71098 100644
|
|
--- a/fs/exec.c
|
|
+++ b/fs/exec.c
|
|
@@ -1274,6 +1274,8 @@ int flush_old_exec(struct linux_binprm * bprm)
|
|
*/
|
|
set_mm_exe_file(bprm->mm, bprm->file);
|
|
|
|
+ would_dump(bprm, bprm->file);
|
|
+
|
|
/*
|
|
* Release all of the old mmap stuff
|
|
*/
|
|
@@ -1817,8 +1819,6 @@ static int __do_execve_file(int fd, struct filename *filename,
|
|
if (retval < 0)
|
|
goto out;
|
|
|
|
- would_dump(bprm, bprm->file);
|
|
-
|
|
retval = exec_binprm(bprm);
|
|
if (retval < 0)
|
|
goto out;
|
|
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
|
|
index f63df54a08c6..adbb8fef2216 100644
|
|
--- a/fs/gfs2/bmap.c
|
|
+++ b/fs/gfs2/bmap.c
|
|
@@ -528,10 +528,12 @@ lower_metapath:
|
|
|
|
/* Advance in metadata tree. */
|
|
(mp->mp_list[hgt])++;
|
|
- if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
|
|
- if (!hgt)
|
|
+ if (hgt) {
|
|
+ if (mp->mp_list[hgt] >= sdp->sd_inptrs)
|
|
+ goto lower_metapath;
|
|
+ } else {
|
|
+ if (mp->mp_list[hgt] >= sdp->sd_diptrs)
|
|
break;
|
|
- goto lower_metapath;
|
|
}
|
|
|
|
fill_up_metapath:
|
|
@@ -876,10 +878,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
|
|
ret = -ENOENT;
|
|
goto unlock;
|
|
} else {
|
|
- /* report a hole */
|
|
iomap->offset = pos;
|
|
iomap->length = length;
|
|
- goto do_alloc;
|
|
+ goto hole_found;
|
|
}
|
|
}
|
|
iomap->length = size;
|
|
@@ -933,8 +934,6 @@ unlock:
|
|
return ret;
|
|
|
|
do_alloc:
|
|
- iomap->addr = IOMAP_NULL_ADDR;
|
|
- iomap->type = IOMAP_HOLE;
|
|
if (flags & IOMAP_REPORT) {
|
|
if (pos >= size)
|
|
ret = -ENOENT;
|
|
@@ -956,6 +955,9 @@ do_alloc:
|
|
if (pos < size && height == ip->i_height)
|
|
ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
|
|
}
|
|
+hole_found:
|
|
+ iomap->addr = IOMAP_NULL_ADDR;
|
|
+ iomap->type = IOMAP_HOLE;
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
|
|
index 7ca84be20cf6..8303b44a5068 100644
|
|
--- a/fs/gfs2/lops.c
|
|
+++ b/fs/gfs2/lops.c
|
|
@@ -264,7 +264,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
|
|
struct super_block *sb = sdp->sd_vfs;
|
|
struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
|
|
|
|
- bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9);
|
|
+ bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
|
|
bio_set_dev(bio, sb->s_bdev);
|
|
bio->bi_end_io = end_io;
|
|
bio->bi_private = sdp;
|
|
@@ -504,7 +504,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
|
|
unsigned int bsize = sdp->sd_sb.sb_bsize, off;
|
|
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
|
|
unsigned int shift = PAGE_SHIFT - bsize_shift;
|
|
- unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
|
|
+ unsigned int max_bio_size = 2 * 1024 * 1024;
|
|
struct gfs2_journal_extent *je;
|
|
int sz, ret = 0;
|
|
struct bio *bio = NULL;
|
|
@@ -532,12 +532,17 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
|
|
off = 0;
|
|
}
|
|
|
|
- if (!bio || (bio_chained && !off)) {
|
|
+ if (!bio || (bio_chained && !off) ||
|
|
+ bio->bi_iter.bi_size >= max_bio_size) {
|
|
/* start new bio */
|
|
} else {
|
|
- sz = bio_add_page(bio, page, bsize, off);
|
|
- if (sz == bsize)
|
|
- goto block_added;
|
|
+ sector_t sector = dblock << sdp->sd_fsb2bb_shift;
|
|
+
|
|
+ if (bio_end_sector(bio) == sector) {
|
|
+ sz = bio_add_page(bio, page, bsize, off);
|
|
+ if (sz == bsize)
|
|
+ goto block_added;
|
|
+ }
|
|
if (off) {
|
|
unsigned int blocks =
|
|
(PAGE_SIZE - off) >> bsize_shift;
|
|
@@ -563,7 +568,7 @@ block_added:
|
|
off += bsize;
|
|
if (off == PAGE_SIZE)
|
|
page = NULL;
|
|
- if (blocks_submitted < blocks_read + readahead_blocks) {
|
|
+ if (blocks_submitted < 2 * max_bio_size >> bsize_shift) {
|
|
/* Keep at least one bio in flight */
|
|
continue;
|
|
}
|
|
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
|
|
index 15f271401dcc..573b1da9342c 100644
|
|
--- a/fs/nfs/fscache-index.c
|
|
+++ b/fs/nfs/fscache-index.c
|
|
@@ -84,8 +84,10 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
|
|
return FSCACHE_CHECKAUX_OBSOLETE;
|
|
|
|
memset(&auxdata, 0, sizeof(auxdata));
|
|
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
|
|
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
|
|
+ auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
|
|
+ auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
|
|
+ auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
|
|
+ auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
|
|
|
|
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
|
|
auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
|
|
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
|
|
index a6dcc2151e77..7d6721ec31d4 100644
|
|
--- a/fs/nfs/fscache.c
|
|
+++ b/fs/nfs/fscache.c
|
|
@@ -188,7 +188,8 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
|
|
/* create a cache index for looking up filehandles */
|
|
nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
|
|
&nfs_fscache_super_index_def,
|
|
- key, sizeof(*key) + ulen,
|
|
+ &key->key,
|
|
+ sizeof(key->key) + ulen,
|
|
NULL, 0,
|
|
nfss, 0, true);
|
|
dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
|
|
@@ -226,6 +227,19 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
|
|
}
|
|
}
|
|
|
|
+static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
|
|
+ struct nfs_inode *nfsi)
|
|
+{
|
|
+ memset(auxdata, 0, sizeof(*auxdata));
|
|
+ auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
|
|
+ auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
|
|
+ auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
|
|
+ auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
|
|
+
|
|
+ if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
|
|
+ auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
|
|
+}
|
|
+
|
|
/*
|
|
* Initialise the per-inode cache cookie pointer for an NFS inode.
|
|
*/
|
|
@@ -239,12 +253,7 @@ void nfs_fscache_init_inode(struct inode *inode)
|
|
if (!(nfss->fscache && S_ISREG(inode->i_mode)))
|
|
return;
|
|
|
|
- memset(&auxdata, 0, sizeof(auxdata));
|
|
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
|
|
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
|
|
-
|
|
- if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
|
|
- auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
|
|
+ nfs_fscache_update_auxdata(&auxdata, nfsi);
|
|
|
|
nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
|
|
&nfs_fscache_inode_object_def,
|
|
@@ -264,9 +273,7 @@ void nfs_fscache_clear_inode(struct inode *inode)
|
|
|
|
dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
|
|
|
|
- memset(&auxdata, 0, sizeof(auxdata));
|
|
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
|
|
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
|
|
+ nfs_fscache_update_auxdata(&auxdata, nfsi);
|
|
fscache_relinquish_cookie(cookie, &auxdata, false);
|
|
nfsi->fscache = NULL;
|
|
}
|
|
@@ -306,9 +313,7 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp)
|
|
if (!fscache_cookie_valid(cookie))
|
|
return;
|
|
|
|
- memset(&auxdata, 0, sizeof(auxdata));
|
|
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
|
|
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
|
|
+ nfs_fscache_update_auxdata(&auxdata, nfsi);
|
|
|
|
if (inode_is_open_for_write(inode)) {
|
|
dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
|
|
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
|
|
index ad041cfbf9ec..6754c8607230 100644
|
|
--- a/fs/nfs/fscache.h
|
|
+++ b/fs/nfs/fscache.h
|
|
@@ -62,9 +62,11 @@ struct nfs_fscache_key {
|
|
* cache object.
|
|
*/
|
|
struct nfs_fscache_inode_auxdata {
|
|
- struct timespec mtime;
|
|
- struct timespec ctime;
|
|
- u64 change_attr;
|
|
+ s64 mtime_sec;
|
|
+ s64 mtime_nsec;
|
|
+ s64 ctime_sec;
|
|
+ s64 ctime_nsec;
|
|
+ u64 change_attr;
|
|
};
|
|
|
|
/*
|
|
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
|
|
index cb7c10e9721e..a2593b787cc7 100644
|
|
--- a/fs/nfs/mount_clnt.c
|
|
+++ b/fs/nfs/mount_clnt.c
|
|
@@ -32,6 +32,7 @@
|
|
#define MNT_fhs_status_sz (1)
|
|
#define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE)
|
|
#define MNT_fhandle3_sz (1 + XDR_QUADLEN(NFS3_FHSIZE))
|
|
+#define MNT_fhandlev3_sz XDR_QUADLEN(NFS3_FHSIZE)
|
|
#define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS)
|
|
|
|
/*
|
|
@@ -39,7 +40,7 @@
|
|
*/
|
|
#define MNT_enc_dirpath_sz encode_dirpath_sz
|
|
#define MNT_dec_mountres_sz (MNT_status_sz + MNT_fhandle_sz)
|
|
-#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandle_sz + \
|
|
+#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandlev3_sz + \
|
|
MNT_authflav3_sz)
|
|
|
|
/*
|
|
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
|
|
index b53bcf40e2a7..ea680f619438 100644
|
|
--- a/fs/nfs/nfs4state.c
|
|
+++ b/fs/nfs/nfs4state.c
|
|
@@ -733,9 +733,9 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
|
|
state = new;
|
|
state->owner = owner;
|
|
atomic_inc(&owner->so_count);
|
|
- list_add_rcu(&state->inode_states, &nfsi->open_states);
|
|
ihold(inode);
|
|
state->inode = inode;
|
|
+ list_add_rcu(&state->inode_states, &nfsi->open_states);
|
|
spin_unlock(&inode->i_lock);
|
|
/* Note: The reclaim code dictates that we add stateless
|
|
* and read-only stateids to the end of the list */
|
|
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
|
|
index f5d30573f4a9..deb13f0a0f7d 100644
|
|
--- a/fs/notify/fanotify/fanotify.c
|
|
+++ b/fs/notify/fanotify/fanotify.c
|
|
@@ -171,6 +171,13 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
|
|
if (!fsnotify_iter_should_report_type(iter_info, type))
|
|
continue;
|
|
mark = iter_info->marks[type];
|
|
+ /*
|
|
+ * If the event is on dir and this mark doesn't care about
|
|
+ * events on dir, don't send it!
|
|
+ */
|
|
+ if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR))
|
|
+ continue;
|
|
+
|
|
/*
|
|
* If the event is for a child and this mark doesn't care about
|
|
* events on a child, don't send it!
|
|
@@ -203,10 +210,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
|
|
user_mask &= ~FAN_ONDIR;
|
|
}
|
|
|
|
- if (event_mask & FS_ISDIR &&
|
|
- !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
|
|
- return 0;
|
|
-
|
|
return test_mask & user_mask;
|
|
}
|
|
|
|
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
|
|
index 034b0a644efc..448c91bf543b 100644
|
|
--- a/include/linux/compiler.h
|
|
+++ b/include/linux/compiler.h
|
|
@@ -356,4 +356,10 @@ static inline void *offset_to_ptr(const int *off)
|
|
/* &a[0] degrades to a pointer: a different type from an array */
|
|
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
|
|
|
|
+/*
|
|
+ * This is needed in functions which generate the stack canary, see
|
|
+ * arch/x86/kernel/smpboot.c::start_secondary() for an example.
|
|
+ */
|
|
+#define prevent_tail_call_optimization() mb()
|
|
+
|
|
#endif /* __LINUX_COMPILER_H */
|
|
diff --git a/include/linux/fs.h b/include/linux/fs.h
|
|
index 06668379109e..5bd384dbdca5 100644
|
|
--- a/include/linux/fs.h
|
|
+++ b/include/linux/fs.h
|
|
@@ -978,7 +978,7 @@ struct file_handle {
|
|
__u32 handle_bytes;
|
|
int handle_type;
|
|
/* file identifier */
|
|
- unsigned char f_handle[0];
|
|
+ unsigned char f_handle[];
|
|
};
|
|
|
|
static inline struct file *get_file(struct file *f)
|
|
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
|
|
index 8faca7b52543..fb5b2a41bd45 100644
|
|
--- a/include/linux/memcontrol.h
|
|
+++ b/include/linux/memcontrol.h
|
|
@@ -793,6 +793,8 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
|
|
atomic_long_inc(&memcg->memory_events[event]);
|
|
cgroup_file_notify(&memcg->events_file);
|
|
|
|
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
|
|
+ break;
|
|
if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
|
|
break;
|
|
} while ((memcg = parent_mem_cgroup(memcg)) &&
|
|
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
|
|
index 3b12fd28af78..fc4df3ccefc9 100644
|
|
--- a/include/linux/pnp.h
|
|
+++ b/include/linux/pnp.h
|
|
@@ -220,10 +220,8 @@ struct pnp_card {
|
|
#define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list)
|
|
#define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list)
|
|
#define to_pnp_card(n) container_of(n, struct pnp_card, dev)
|
|
-#define pnp_for_each_card(card) \
|
|
- for((card) = global_to_pnp_card(pnp_cards.next); \
|
|
- (card) != global_to_pnp_card(&pnp_cards); \
|
|
- (card) = global_to_pnp_card((card)->global_list.next))
|
|
+#define pnp_for_each_card(card) \
|
|
+ list_for_each_entry(card, &pnp_cards, global_list)
|
|
|
|
struct pnp_card_link {
|
|
struct pnp_card *card;
|
|
@@ -276,14 +274,9 @@ struct pnp_dev {
|
|
#define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list)
|
|
#define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list)
|
|
#define to_pnp_dev(n) container_of(n, struct pnp_dev, dev)
|
|
-#define pnp_for_each_dev(dev) \
|
|
- for((dev) = global_to_pnp_dev(pnp_global.next); \
|
|
- (dev) != global_to_pnp_dev(&pnp_global); \
|
|
- (dev) = global_to_pnp_dev((dev)->global_list.next))
|
|
-#define card_for_each_dev(card,dev) \
|
|
- for((dev) = card_to_pnp_dev((card)->devices.next); \
|
|
- (dev) != card_to_pnp_dev(&(card)->devices); \
|
|
- (dev) = card_to_pnp_dev((dev)->card_list.next))
|
|
+#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list)
|
|
+#define card_for_each_dev(card, dev) \
|
|
+ list_for_each_entry(dev, &(card)->devices, card_list)
|
|
#define pnp_dev_name(dev) (dev)->name
|
|
|
|
static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
|
|
@@ -437,14 +430,10 @@ struct pnp_protocol {
|
|
};
|
|
|
|
#define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
|
|
-#define protocol_for_each_card(protocol,card) \
|
|
- for((card) = protocol_to_pnp_card((protocol)->cards.next); \
|
|
- (card) != protocol_to_pnp_card(&(protocol)->cards); \
|
|
- (card) = protocol_to_pnp_card((card)->protocol_list.next))
|
|
-#define protocol_for_each_dev(protocol,dev) \
|
|
- for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \
|
|
- (dev) != protocol_to_pnp_dev(&(protocol)->devices); \
|
|
- (dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
|
|
+#define protocol_for_each_card(protocol, card) \
|
|
+ list_for_each_entry(card, &(protocol)->cards, protocol_list)
|
|
+#define protocol_for_each_dev(protocol, dev) \
|
|
+ list_for_each_entry(dev, &(protocol)->devices, protocol_list)
|
|
|
|
extern struct bus_type pnp_bus_type;
|
|
|
|
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
|
|
index 7eb6a8754f19..a3adbe593505 100644
|
|
--- a/include/linux/skmsg.h
|
|
+++ b/include/linux/skmsg.h
|
|
@@ -186,6 +186,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
|
|
dst->sg.data[which] = src->sg.data[which];
|
|
dst->sg.data[which].length = size;
|
|
dst->sg.size += size;
|
|
+ src->sg.size -= size;
|
|
src->sg.data[which].length -= size;
|
|
src->sg.data[which].offset += size;
|
|
}
|
|
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
|
|
index 5ac5db4d295f..d4326d6662a4 100644
|
|
--- a/include/linux/sunrpc/gss_api.h
|
|
+++ b/include/linux/sunrpc/gss_api.h
|
|
@@ -22,6 +22,7 @@
|
|
struct gss_ctx {
|
|
struct gss_api_mech *mech_type;
|
|
void *internal_ctx_id;
|
|
+ unsigned int slack, align;
|
|
};
|
|
|
|
#define GSS_C_NO_BUFFER ((struct xdr_netobj) 0)
|
|
@@ -67,6 +68,7 @@ u32 gss_wrap(
|
|
u32 gss_unwrap(
|
|
struct gss_ctx *ctx_id,
|
|
int offset,
|
|
+ int len,
|
|
struct xdr_buf *inbuf);
|
|
u32 gss_delete_sec_context(
|
|
struct gss_ctx **ctx_id);
|
|
@@ -127,6 +129,7 @@ struct gss_api_ops {
|
|
u32 (*gss_unwrap)(
|
|
struct gss_ctx *ctx_id,
|
|
int offset,
|
|
+ int len,
|
|
struct xdr_buf *buf);
|
|
void (*gss_delete_sec_context)(
|
|
void *internal_ctx_id);
|
|
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
|
|
index 02c0412e368c..07930bc9ad60 100644
|
|
--- a/include/linux/sunrpc/gss_krb5.h
|
|
+++ b/include/linux/sunrpc/gss_krb5.h
|
|
@@ -83,7 +83,7 @@ struct gss_krb5_enctype {
|
|
u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
|
|
struct xdr_buf *buf,
|
|
struct page **pages); /* v2 encryption function */
|
|
- u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
|
|
+ u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
|
|
struct xdr_buf *buf, u32 *headskip,
|
|
u32 *tailskip); /* v2 decryption function */
|
|
};
|
|
@@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
|
|
struct xdr_buf *outbuf, struct page **pages);
|
|
|
|
u32
|
|
-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
|
|
+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
|
|
struct xdr_buf *buf);
|
|
|
|
|
|
@@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
|
|
struct page **pages);
|
|
|
|
u32
|
|
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
|
|
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
|
|
struct xdr_buf *buf, u32 *plainoffset,
|
|
u32 *plainlen);
|
|
|
|
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
|
|
index f33e5013bdfb..9db6097c22c5 100644
|
|
--- a/include/linux/sunrpc/xdr.h
|
|
+++ b/include/linux/sunrpc/xdr.h
|
|
@@ -186,6 +186,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
|
|
extern void xdr_shift_buf(struct xdr_buf *, size_t);
|
|
extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
|
|
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
|
|
+extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
|
|
extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int);
|
|
extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
|
|
extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
|
|
diff --git a/include/linux/tty.h b/include/linux/tty.h
|
|
index bd5fe0e907e8..a99e9b8e4e31 100644
|
|
--- a/include/linux/tty.h
|
|
+++ b/include/linux/tty.h
|
|
@@ -66,7 +66,7 @@ struct tty_buffer {
|
|
int read;
|
|
int flags;
|
|
/* Data points here */
|
|
- unsigned long data[0];
|
|
+ unsigned long data[];
|
|
};
|
|
|
|
/* Values for .flags field of tty_buffer */
|
|
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 9f551f3b69c6..90690e37a56f 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -87,7 +87,7 @@ struct nf_conn {
	struct hlist_node nat_bysource;
#endif
	/* all members below initialized via memset */
-	u8 __nfct_init_offset[0];
+	struct { } __nfct_init_offset;

	/* If we were expected by an expectation, this will be it */
	struct nf_conn *master;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 9fb7cf1cdf36..3d03756e1069 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -407,6 +407,7 @@ struct tcf_block {
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
+	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cce285f70c8e..7cf1b4972c66 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1401,6 +1401,19 @@ static inline int tcp_full_space(const struct sock *sk)
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

+/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
+ * If 87.5 % (7/8) of the space has been consumed, we want to override
+ * SO_RCVLOWAT constraint, since we are receiving skbs with too small
+ * len/truesize ratio.
+ */
+static inline bool tcp_rmem_pressure(const struct sock *sk)
+{
+	int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+	int threshold = rcvbuf - (rcvbuf >> 3);
+
+	return atomic_read(&sk->sk_rmem_alloc) > threshold;
+}
+
extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index a36b7227a15a..334842daa904 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -61,6 +61,7 @@ struct snd_rawmidi_runtime {
	size_t avail_min; /* min avail for wakeup */
	size_t avail; /* max used buffer for wakeup */
	size_t xruns; /* over/underruns counter */
+	int buffer_ref; /* buffer reference count */
	/* misc */
	spinlock_t lock;
	wait_queue_head_t sleep;
diff --git a/init/Kconfig b/init/Kconfig
index 0bffc8fdbf3d..6db3e310a5e4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -36,22 +36,6 @@ config TOOLS_SUPPORT_RELR
config CC_HAS_ASM_INLINE
	def_bool $(success,echo 'void foo(void) { asm inline (""); }' | $(CC) -x c - -c -o /dev/null)

-config CC_HAS_WARN_MAYBE_UNINITIALIZED
-	def_bool $(cc-option,-Wmaybe-uninitialized)
-	help
-	  GCC >= 4.7 supports this option.
-
-config CC_DISABLE_WARN_MAYBE_UNINITIALIZED
-	bool
-	depends on CC_HAS_WARN_MAYBE_UNINITIALIZED
-	default CC_IS_GCC && GCC_VERSION < 40900 # unreliable for GCC < 4.9
-	help
-	  GCC's -Wmaybe-uninitialized is not reliable by definition.
-	  Lots of false positive warnings are produced in some cases.
-
-	  If this option is enabled, -Wno-maybe-uninitialzed is passed
-	  to the compiler to suppress maybe-uninitialized warnings.
-
config CONSTRUCTORS
	bool
	depends on !UML
@@ -1226,14 +1210,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
config CC_OPTIMIZE_FOR_PERFORMANCE_O3
	bool "Optimize more for performance (-O3)"
	depends on ARC
-	imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
	help
	  Choosing this option will pass "-O3" to your compiler to optimize
	  the kernel yet more for performance.

config CC_OPTIMIZE_FOR_SIZE
	bool "Optimize for size (-Os)"
-	imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
	help
	  Choosing this option will pass "-Os" to your compiler resulting
	  in a smaller kernel.
diff --git a/init/initramfs.c b/init/initramfs.c
index c47dad0884f7..5feee4f616d5 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -534,7 +534,7 @@ void __weak free_initrd_mem(unsigned long start, unsigned long end)
}

#ifdef CONFIG_KEXEC_CORE
-static bool kexec_free_initrd(void)
+static bool __init kexec_free_initrd(void)
{
	unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
	unsigned long crashk_end = (unsigned long)__va(crashk_res.end);
diff --git a/init/main.c b/init/main.c
index 5cbb9fe937e0..8c7d6b8ee6bd 100644
--- a/init/main.c
+++ b/init/main.c
@@ -782,6 +782,8 @@ asmlinkage __visible void __init start_kernel(void)

	/* Do the rest non-__init'ed, we're now alive */
	arch_call_rest_init();
+
+	prevent_tail_call_optimization();
}

/* Call all constructor functions linked into the kernel. */
diff --git a/ipc/util.c b/ipc/util.c
index 594871610d45..1821b6386d3b 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -764,21 +764,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
		total++;
	}

-	*new_pos = pos + 1;
+	ipc = NULL;
	if (total >= ids->in_use)
-		return NULL;
+		goto out;

	for (; pos < ipc_mni; pos++) {
		ipc = idr_find(&ids->ipcs_idr, pos);
		if (ipc != NULL) {
			rcu_read_lock();
			ipc_lock_object(ipc);
-			return ipc;
+			break;
		}
	}
-
-	/* Out of range - return NULL to terminate iteration */
-	return NULL;
+out:
+	*new_pos = pos + 1;
+	return ipc;
}

static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 14f4a76b44d5..946cfdd3b2cc 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1146,8 +1146,10 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
	if (err)
		goto free_value;

-	if (copy_to_user(uvalue, value, value_size) != 0)
+	if (copy_to_user(uvalue, value, value_size) != 0) {
+		err = -EFAULT;
		goto free_value;
+	}

	err = 0;

diff --git a/kernel/fork.c b/kernel/fork.c
index 27c0ef30002e..9180f4416dba 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2412,11 +2412,11 @@ long do_fork(unsigned long clone_flags,
	      int __user *child_tidptr)
{
	struct kernel_clone_args args = {
-		.flags = (clone_flags & ~CSIGNAL),
+		.flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
		.pidfd = parent_tidptr,
		.child_tid = child_tidptr,
		.parent_tid = parent_tidptr,
-		.exit_signal = (clone_flags & CSIGNAL),
+		.exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
		.stack = stack_start,
		.stack_size = stack_size,
	};
@@ -2434,8 +2434,9 @@ long do_fork(unsigned long clone_flags,
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
-		.flags = ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL),
-		.exit_signal = (flags & CSIGNAL),
+		.flags = ((lower_32_bits(flags) | CLONE_VM |
+			   CLONE_UNTRACED) & ~CSIGNAL),
+		.exit_signal = (lower_32_bits(flags) & CSIGNAL),
		.stack = (unsigned long)fn,
		.stack_size = (unsigned long)arg,
	};
@@ -2496,11 +2497,11 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
#endif
{
	struct kernel_clone_args args = {
-		.flags = (clone_flags & ~CSIGNAL),
+		.flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
		.pidfd = parent_tidptr,
		.child_tid = child_tidptr,
		.parent_tid = parent_tidptr,
-		.exit_signal = (clone_flags & CSIGNAL),
+		.exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
		.stack = newsp,
		.tls = tls,
	};
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e08527f50d2a..f3f2fc8ad81a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -371,7 +371,6 @@ config PROFILE_ANNOTATED_BRANCHES
config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals" if !FORTIFY_SOURCE
	select TRACE_BRANCH_PROFILING
-	imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded whether it hit or miss.
diff --git a/kernel/umh.c b/kernel/umh.c
index 11bf5eea474c..3474d6aa55d8 100644
--- a/kernel/umh.c
+++ b/kernel/umh.c
@@ -475,6 +475,12 @@ static void umh_clean_and_save_pid(struct subprocess_info *info)
{
	struct umh_info *umh_info = info->data;

+	/* cleanup if umh_pipe_setup() was successful but exec failed */
+	if (info->pid && info->retval) {
+		fput(umh_info->pipe_to_umh);
+		fput(umh_info->pipe_from_umh);
+	}
+
	argv_free(info->argv);
	umh_info->pid = info->pid;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index e71b15da1985..98802ca76a5c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2183,7 +2183,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

-	spin_lock_irq(&info->lock);
+	/*
+	 * What serializes the accesses to info->flags?
+	 * ipc_lock_object() when called from shmctl_do_lock(),
+	 * no serialization needed when called from shm_destroy().
+	 */
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
@@ -2198,7 +2202,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
	retval = 0;

out_nomem:
-	spin_unlock_irq(&info->lock);
	return retval;
}

diff --git a/net/core/dev.c b/net/core/dev.c
|
|
index 8ad1e8f00958..120b994af31c 100644
|
|
--- a/net/core/dev.c
|
|
+++ b/net/core/dev.c
|
|
@@ -8595,11 +8595,13 @@ static void netdev_sync_lower_features(struct net_device *upper,
|
|
netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
|
|
&feature, lower->name);
|
|
lower->wanted_features &= ~feature;
|
|
- netdev_update_features(lower);
|
|
+ __netdev_update_features(lower);
|
|
|
|
if (unlikely(lower->features & feature))
|
|
netdev_WARN(upper, "failed to disable %pNF on %s!\n",
|
|
&feature, lower->name);
|
|
+ else
|
|
+ netdev_features_change(lower);
|
|
}
|
|
}
|
|
}
|
|
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
|
|
index 246a258b1fac..af0130039f37 100644
|
|
--- a/net/core/drop_monitor.c
|
|
+++ b/net/core/drop_monitor.c
|
|
@@ -212,6 +212,7 @@ static void sched_send_work(struct timer_list *t)
|
|
static void trace_drop_common(struct sk_buff *skb, void *location)
|
|
{
|
|
struct net_dm_alert_msg *msg;
|
|
+ struct net_dm_drop_point *point;
|
|
struct nlmsghdr *nlh;
|
|
struct nlattr *nla;
|
|
int i;
|
|
@@ -230,11 +231,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
|
|
nlh = (struct nlmsghdr *)dskb->data;
|
|
nla = genlmsg_data(nlmsg_data(nlh));
|
|
msg = nla_data(nla);
|
|
+ point = msg->points;
|
|
for (i = 0; i < msg->entries; i++) {
|
|
- if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
|
|
- msg->points[i].count++;
|
|
+ if (!memcmp(&location, &point->pc, sizeof(void *))) {
|
|
+ point->count++;
|
|
goto out;
|
|
}
|
|
+ point++;
|
|
}
|
|
if (msg->entries == dm_hit_limit)
|
|
goto out;
|
|
@@ -243,8 +246,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
|
|
*/
|
|
__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
|
|
nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
|
|
- memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
|
|
- msg->points[msg->entries].count = 1;
|
|
+ memcpy(point->pc, &location, sizeof(void *));
|
|
+ point->count = 1;
|
|
msg->entries++;
|
|
|
|
if (!timer_pending(&data->send_timer)) {
|
|
diff --git a/net/core/filter.c b/net/core/filter.c
|
|
index d59dbc88fef5..f1f2304822e3 100644
|
|
--- a/net/core/filter.c
|
|
+++ b/net/core/filter.c
|
|
@@ -2590,8 +2590,8 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
|
|
}
|
|
pop = 0;
|
|
} else if (pop >= sge->length - a) {
|
|
- sge->length = a;
|
|
pop -= (sge->length - a);
|
|
+ sge->length = a;
|
|
}
|
|
}
|
|
|
|
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
|
|
index 256b7954b720..8618242c677a 100644
|
|
--- a/net/core/netprio_cgroup.c
|
|
+++ b/net/core/netprio_cgroup.c
|
|
@@ -236,6 +236,8 @@ static void net_prio_attach(struct cgroup_taskset *tset)
|
|
struct task_struct *p;
|
|
struct cgroup_subsys_state *css;
|
|
|
|
+ cgroup_sk_alloc_disable();
|
|
+
|
|
cgroup_taskset_for_each(p, css, tset) {
|
|
void *v = (void *)(unsigned long)css->cgroup->id;
|
|
|
|
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
|
|
index 716d265ba8ca..0f7f38c29579 100644
|
|
--- a/net/dsa/dsa2.c
|
|
+++ b/net/dsa/dsa2.c
|
|
@@ -461,18 +461,12 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
|
|
|
|
err = dsa_port_setup(dp);
|
|
if (err)
|
|
- goto ports_teardown;
|
|
+ continue;
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
|
|
-ports_teardown:
|
|
- for (i = 0; i < port; i++)
|
|
- dsa_port_teardown(&ds->ports[i]);
|
|
-
|
|
- dsa_switch_teardown(ds);
|
|
-
|
|
switch_teardown:
|
|
for (i = 0; i < device; i++) {
|
|
ds = dst->ds[i];
|
|
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
|
|
index 0bd10a1f477f..a23094b050f8 100644
|
|
--- a/net/ipv4/cipso_ipv4.c
|
|
+++ b/net/ipv4/cipso_ipv4.c
|
|
@@ -1258,7 +1258,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
|
|
return ret_val;
|
|
}
|
|
|
|
- secattr->flags |= NETLBL_SECATTR_MLS_CAT;
|
|
+ if (secattr->attr.mls.cat)
|
|
+ secattr->flags |= NETLBL_SECATTR_MLS_CAT;
|
|
}
|
|
|
|
return 0;
|
|
@@ -1439,7 +1440,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
|
|
return ret_val;
|
|
}
|
|
|
|
- secattr->flags |= NETLBL_SECATTR_MLS_CAT;
|
|
+ if (secattr->attr.mls.cat)
|
|
+ secattr->flags |= NETLBL_SECATTR_MLS_CAT;
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
|
|
index fe34e9e0912a..558ddf7ab395 100644
|
|
--- a/net/ipv4/route.c
|
|
+++ b/net/ipv4/route.c
|
|
@@ -914,7 +914,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
|
|
/* Check for load limit; set rate_last to the latest sent
|
|
* redirect.
|
|
*/
|
|
- if (peer->rate_tokens == 0 ||
|
|
+ if (peer->n_redirects == 0 ||
|
|
time_after(jiffies,
|
|
(peer->rate_last +
|
|
(ip_rt_redirect_load << peer->n_redirects)))) {
|
|
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
|
|
index e378ff17f8c6..fe3cdeddd097 100644
|
|
--- a/net/ipv4/tcp.c
|
|
+++ b/net/ipv4/tcp.c
|
|
@@ -477,9 +477,17 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
|
|
static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
|
|
int target, struct sock *sk)
|
|
{
|
|
- return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
|
|
- (sk->sk_prot->stream_memory_read ?
|
|
- sk->sk_prot->stream_memory_read(sk) : false);
|
|
+ int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
|
|
+
|
|
+ if (avail > 0) {
|
|
+ if (avail >= target)
|
|
+ return true;
|
|
+ if (tcp_rmem_pressure(sk))
|
|
+ return true;
|
|
+ }
|
|
+ if (sk->sk_prot->stream_memory_read)
|
|
+ return sk->sk_prot->stream_memory_read(sk);
|
|
+ return false;
|
|
}
|
|
|
|
/*
|
|
@@ -1757,10 +1765,11 @@ static int tcp_zerocopy_receive(struct sock *sk,
|
|
|
|
down_read(¤t->mm->mmap_sem);
|
|
|
|
- ret = -EINVAL;
|
|
vma = find_vma(current->mm, address);
|
|
- if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops)
|
|
- goto out;
|
|
+ if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) {
|
|
+ up_read(¤t->mm->mmap_sem);
|
|
+ return -EINVAL;
|
|
+ }
|
|
zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
|
|
|
|
tp = tcp_sk(sk);
|
|
@@ -2149,13 +2158,15 @@ skip_copy:
|
|
tp->urg_data = 0;
|
|
tcp_fast_path_check(sk);
|
|
}
|
|
- if (used + offset < skb->len)
|
|
- continue;
|
|
|
|
if (TCP_SKB_CB(skb)->has_rxtstamp) {
|
|
tcp_update_recv_tstamps(skb, &tss);
|
|
cmsg_flags |= 2;
|
|
}
|
|
+
|
|
+ if (used + offset < skb->len)
|
|
+ continue;
|
|
+
|
|
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
|
|
goto found_fin_ok;
|
|
if (!(flags & MSG_PEEK))
|
|
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
|
|
index 8a01428f80c1..69b025408390 100644
|
|
--- a/net/ipv4/tcp_bpf.c
|
|
+++ b/net/ipv4/tcp_bpf.c
|
|
@@ -121,14 +121,17 @@ int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
|
|
struct sk_psock *psock;
|
|
int copied, ret;
|
|
|
|
+ if (unlikely(flags & MSG_ERRQUEUE))
|
|
+ return inet_recv_error(sk, msg, len, addr_len);
|
|
+
|
|
psock = sk_psock_get(sk);
|
|
if (unlikely(!psock))
|
|
return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
|
|
- if (unlikely(flags & MSG_ERRQUEUE))
|
|
- return inet_recv_error(sk, msg, len, addr_len);
|
|
if (!skb_queue_empty(&sk->sk_receive_queue) &&
|
|
- sk_psock_queue_empty(psock))
|
|
+ sk_psock_queue_empty(psock)) {
|
|
+ sk_psock_put(sk, psock);
|
|
return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
|
|
+ }
|
|
lock_sock(sk);
|
|
msg_bytes_ready:
|
|
copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
|
|
@@ -200,7 +203,6 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
|
|
|
|
if (!ret) {
|
|
msg->sg.start = i;
|
|
- msg->sg.size -= apply_bytes;
|
|
sk_psock_queue_msg(psock, tmp);
|
|
sk_psock_data_ready(sk, psock);
|
|
} else {
|
|
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
|
|
index 5af22c9712a6..677facbeed26 100644
|
|
--- a/net/ipv4/tcp_input.c
|
|
+++ b/net/ipv4/tcp_input.c
|
|
@@ -4751,7 +4751,8 @@ void tcp_data_ready(struct sock *sk)
|
|
const struct tcp_sock *tp = tcp_sk(sk);
|
|
int avail = tp->rcv_nxt - tp->copied_seq;
|
|
|
|
- if (avail < sk->sk_rcvlowat && !sock_flag(sk, SOCK_DONE))
|
|
+ if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
|
|
+ !sock_flag(sk, SOCK_DONE))
|
|
return;
|
|
|
|
sk->sk_data_ready(sk);
|
|
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
|
|
index 221c81f85cbf..8d3f66c310db 100644
|
|
--- a/net/ipv6/calipso.c
|
|
+++ b/net/ipv6/calipso.c
|
|
@@ -1047,7 +1047,8 @@ static int calipso_opt_getattr(const unsigned char *calipso,
|
|
goto getattr_return;
|
|
}
|
|
|
|
- secattr->flags |= NETLBL_SECATTR_MLS_CAT;
|
|
+ if (secattr->attr.mls.cat)
|
|
+ secattr->flags |= NETLBL_SECATTR_MLS_CAT;
|
|
}
|
|
|
|
secattr->type = NETLBL_NLTYPE_CALIPSO;
|
|
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
|
|
index c81d8e9e5169..3b4af0a8bca6 100644
|
|
--- a/net/ipv6/route.c
|
|
+++ b/net/ipv6/route.c
|
|
@@ -2728,8 +2728,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
|
|
const struct in6_addr *daddr, *saddr;
|
|
struct rt6_info *rt6 = (struct rt6_info *)dst;
|
|
|
|
- if (dst_metric_locked(dst, RTAX_MTU))
|
|
- return;
|
|
+ /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
|
|
+ * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
|
|
+ * [see also comment in rt6_mtu_change_route()]
|
|
+ */
|
|
|
|
if (iph) {
|
|
daddr = &iph->daddr;
|
|
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
|
|
index 5cd610b547e0..c2ad462f33f1 100644
|
|
--- a/net/netfilter/nf_conntrack_core.c
|
|
+++ b/net/netfilter/nf_conntrack_core.c
|
|
@@ -1381,9 +1381,9 @@ __nf_conntrack_alloc(struct net *net,
|
|
ct->status = 0;
|
|
ct->timeout = 0;
|
|
write_pnet(&ct->ct_net, net);
|
|
- memset(&ct->__nfct_init_offset[0], 0,
|
|
+ memset(&ct->__nfct_init_offset, 0,
|
|
offsetof(struct nf_conn, proto) -
|
|
- offsetof(struct nf_conn, __nfct_init_offset[0]));
|
|
+ offsetof(struct nf_conn, __nfct_init_offset));
|
|
|
|
nf_ct_zone_add(ct, zone);
|
|
|
|
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
|
|
index a9f804f7a04a..ee7c29e0a9d7 100644
|
|
--- a/net/netfilter/nft_set_rbtree.c
|
|
+++ b/net/netfilter/nft_set_rbtree.c
|
|
@@ -33,6 +33,11 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
|
|
(*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
|
|
}
|
|
|
|
+static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
|
|
+{
|
|
+ return !nft_rbtree_interval_end(rbe);
|
|
+}
|
|
+
|
|
static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
|
|
const struct nft_rbtree_elem *interval)
|
|
{
|
|
@@ -64,7 +69,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
|
|
if (interval &&
|
|
nft_rbtree_equal(set, this, interval) &&
|
|
nft_rbtree_interval_end(rbe) &&
|
|
- !nft_rbtree_interval_end(interval))
|
|
+ nft_rbtree_interval_start(interval))
|
|
continue;
|
|
interval = rbe;
|
|
} else if (d > 0)
|
|
@@ -74,6 +79,10 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
|
|
parent = rcu_dereference_raw(parent->rb_left);
|
|
continue;
|
|
}
|
|
+
|
|
+ if (nft_set_elem_expired(&rbe->ext))
|
|
+ return false;
|
|
+
|
|
if (nft_rbtree_interval_end(rbe)) {
|
|
if (nft_set_is_anonymous(set))
|
|
return false;
|
|
@@ -89,7 +98,8 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
|
|
|
|
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
|
|
nft_set_elem_active(&interval->ext, genmask) &&
|
|
- !nft_rbtree_interval_end(interval)) {
|
|
+ !nft_set_elem_expired(&interval->ext) &&
|
|
+ nft_rbtree_interval_start(interval)) {
|
|
*ext = &interval->ext;
|
|
return true;
|
|
}
|
|
@@ -149,6 +159,9 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
|
|
continue;
|
|
}
|
|
|
|
+ if (nft_set_elem_expired(&rbe->ext))
|
|
+ return false;
|
|
+
|
|
if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
|
|
(*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
|
|
(flags & NFT_SET_ELEM_INTERVAL_END)) {
|
|
@@ -165,6 +178,7 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
|
|
|
|
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
|
|
nft_set_elem_active(&interval->ext, genmask) &&
|
|
+ !nft_set_elem_expired(&interval->ext) &&
|
|
((!nft_rbtree_interval_end(interval) &&
|
|
!(flags & NFT_SET_ELEM_INTERVAL_END)) ||
|
|
(nft_rbtree_interval_end(interval) &&
|
|
@@ -224,9 +238,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
|
|
p = &parent->rb_right;
|
|
else {
|
|
if (nft_rbtree_interval_end(rbe) &&
|
|
- !nft_rbtree_interval_end(new)) {
|
|
+ nft_rbtree_interval_start(new)) {
|
|
p = &parent->rb_left;
|
|
- } else if (!nft_rbtree_interval_end(rbe) &&
|
|
+ } else if (nft_rbtree_interval_start(rbe) &&
|
|
nft_rbtree_interval_end(new)) {
|
|
p = &parent->rb_right;
|
|
} else if (nft_set_elem_active(&rbe->ext, genmask)) {
|
|
@@ -317,10 +331,10 @@ static void *nft_rbtree_deactivate(const struct net *net,
|
|
parent = parent->rb_right;
|
|
else {
|
|
if (nft_rbtree_interval_end(rbe) &&
|
|
- !nft_rbtree_interval_end(this)) {
|
|
+ nft_rbtree_interval_start(this)) {
|
|
parent = parent->rb_left;
|
|
continue;
|
|
- } else if (!nft_rbtree_interval_end(rbe) &&
|
|
+ } else if (nft_rbtree_interval_start(rbe) &&
|
|
nft_rbtree_interval_end(this)) {
|
|
parent = parent->rb_right;
|
|
continue;
|
|
@@ -350,6 +364,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
|
|
|
|
if (iter->count < iter->skip)
|
|
goto cont;
|
|
+ if (nft_set_elem_expired(&rbe->ext))
|
|
+ goto cont;
|
|
if (!nft_set_elem_active(&rbe->ext, iter->genmask))
|
|
goto cont;
|
|
|
|
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
|
|
index 409a3ae47ce2..5e1239cef000 100644
|
|
--- a/net/netlabel/netlabel_kapi.c
|
|
+++ b/net/netlabel/netlabel_kapi.c
|
|
@@ -734,6 +734,12 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
|
|
if ((off & (BITS_PER_LONG - 1)) != 0)
|
|
return -EINVAL;
|
|
|
|
+ /* a null catmap is equivalent to an empty one */
|
|
+ if (!catmap) {
|
|
+ *offset = (u32)-1;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
if (off < catmap->startbit) {
|
|
off = catmap->startbit;
|
|
*offset = off;
|
|
diff --git a/net/rds/message.c b/net/rds/message.c
|
|
index 50f13f1d4ae0..2d43e13d6dd5 100644
|
|
--- a/net/rds/message.c
|
|
+++ b/net/rds/message.c
|
|
@@ -308,26 +308,20 @@ out:
|
|
/*
|
|
* RDS ops use this to grab SG entries from the rm's sg pool.
|
|
*/
|
|
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
|
|
- int *ret)
|
|
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
|
|
{
|
|
struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
|
|
struct scatterlist *sg_ret;
|
|
|
|
- if (WARN_ON(!ret))
|
|
- return NULL;
|
|
-
|
|
if (nents <= 0) {
|
|
pr_warn("rds: alloc sgs failed! nents <= 0\n");
|
|
- *ret = -EINVAL;
|
|
- return NULL;
|
|
+ return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
if (rm->m_used_sgs + nents > rm->m_total_sgs) {
|
|
pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
|
|
rm->m_total_sgs, rm->m_used_sgs, nents);
|
|
- *ret = -ENOMEM;
|
|
- return NULL;
|
|
+ return ERR_PTR(-ENOMEM);
|
|
}
|
|
|
|
sg_ret = &sg_first[rm->m_used_sgs];
|
|
@@ -343,7 +337,6 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
|
|
unsigned int i;
|
|
int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
|
|
int extra_bytes = num_sgs * sizeof(struct scatterlist);
|
|
- int ret;
|
|
|
|
rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
|
|
if (!rm)
|
|
@@ -352,10 +345,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
|
|
set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
|
|
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
|
|
rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
|
|
- rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
|
|
- if (!rm->data.op_sg) {
|
|
+ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
|
|
+ if (IS_ERR(rm->data.op_sg)) {
|
|
rds_message_put(rm);
|
|
- return ERR_PTR(ret);
|
|
+ return ERR_CAST(rm->data.op_sg);
|
|
}
|
|
|
|
for (i = 0; i < rm->data.op_nents; ++i) {
|
|
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
|
|
index 916f5ec373d8..8e10f954a22f 100644
|
|
--- a/net/rds/rdma.c
|
|
+++ b/net/rds/rdma.c
|
|
@@ -624,9 +624,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
|
|
op->op_active = 1;
|
|
op->op_recverr = rs->rs_recverr;
|
|
WARN_ON(!nr_pages);
|
|
- op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
|
|
- if (!op->op_sg)
|
|
+ op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
|
|
+ if (IS_ERR(op->op_sg)) {
|
|
+ ret = PTR_ERR(op->op_sg);
|
|
goto out_pages;
|
|
+ }
|
|
|
|
if (op->op_notify || op->op_recverr) {
|
|
/* We allocate an uninitialized notifier here, because
|
|
@@ -828,9 +830,11 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
|
|
rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
|
|
rm->atomic.op_active = 1;
|
|
rm->atomic.op_recverr = rs->rs_recverr;
|
|
- rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
|
|
- if (!rm->atomic.op_sg)
|
|
+ rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
|
|
+ if (IS_ERR(rm->atomic.op_sg)) {
|
|
+ ret = PTR_ERR(rm->atomic.op_sg);
|
|
goto err;
|
|
+ }
|
|
|
|
/* verify 8 byte-aligned */
|
|
if (args->local_addr & 0x7) {
|
|
diff --git a/net/rds/rds.h b/net/rds/rds.h
|
|
index 53e86911773a..2ac5b5e55901 100644
|
|
--- a/net/rds/rds.h
|
|
+++ b/net/rds/rds.h
|
|
@@ -849,8 +849,7 @@ rds_conn_connecting(struct rds_connection *conn)
|
|
|
|
/* message.c */
|
|
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
|
|
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
|
|
- int *ret);
|
|
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
|
|
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
|
|
bool zcopy);
|
|
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
|
|
diff --git a/net/rds/send.c b/net/rds/send.c
|
|
index 82dcd8b84fe7..68e2bdb08fd0 100644
|
|
--- a/net/rds/send.c
|
|
+++ b/net/rds/send.c
|
|
@@ -1274,9 +1274,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
|
|
|
|
/* Attach data to the rm */
|
|
if (payload_len) {
|
|
- rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
|
|
- if (!rm->data.op_sg)
|
|
+ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
|
|
+ if (IS_ERR(rm->data.op_sg)) {
|
|
+ ret = PTR_ERR(rm->data.op_sg);
|
|
goto out;
|
|
+ }
|
|
ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
|
|
if (ret)
|
|
goto out;
|
|
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
|
|
index c2cdd0fc2e70..68c8fc6f535c 100644
|
|
--- a/net/sched/cls_api.c
|
|
+++ b/net/sched/cls_api.c
|
|
@@ -2005,6 +2005,7 @@ replay:
|
|
err = PTR_ERR(block);
|
|
goto errout;
|
|
}
|
|
+ block->classid = parent;
|
|
|
|
chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
|
|
if (chain_index > TC_ACT_EXT_VAL_MASK) {
|
|
@@ -2547,12 +2548,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
|
|
return skb->len;
|
|
|
|
parent = tcm->tcm_parent;
|
|
- if (!parent) {
|
|
+ if (!parent)
|
|
q = dev->qdisc;
|
|
- parent = q->handle;
|
|
- } else {
|
|
+ else
|
|
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
|
|
- }
|
|
if (!q)
|
|
goto out;
|
|
cops = q->ops->cl_ops;
|
|
@@ -2568,6 +2567,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
|
|
block = cops->tcf_block(q, cl, NULL);
|
|
if (!block)
|
|
goto out;
|
|
+ parent = block->classid;
|
|
if (tcf_block_shared(block))
|
|
q = NULL;
|
|
}
|
|
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
|
|
index ff5fcb3e1208..5fc6c028f89c 100644
|
|
--- a/net/sunrpc/auth_gss/auth_gss.c
|
|
+++ b/net/sunrpc/auth_gss/auth_gss.c
|
|
@@ -2030,7 +2030,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
|
|
struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
|
|
struct kvec *head = rqstp->rq_rcv_buf.head;
|
|
struct rpc_auth *auth = cred->cr_auth;
|
|
- unsigned int savedlen = rcv_buf->len;
|
|
u32 offset, opaque_len, maj_stat;
|
|
__be32 *p;
|
|
|
|
@@ -2041,9 +2040,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
|
|
offset = (u8 *)(p) - (u8 *)head->iov_base;
|
|
if (offset + opaque_len > rcv_buf->len)
|
|
goto unwrap_failed;
|
|
- rcv_buf->len = offset + opaque_len;
|
|
|
|
- maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
|
|
+ maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
|
|
+ offset + opaque_len, rcv_buf);
|
|
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
|
|
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
|
if (maj_stat != GSS_S_COMPLETE)
|
|
@@ -2057,10 +2056,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
|
|
*/
|
|
xdr_init_decode(xdr, rcv_buf, p, rqstp);
|
|
|
|
- auth->au_rslack = auth->au_verfsize + 2 +
|
|
- XDR_QUADLEN(savedlen - rcv_buf->len);
|
|
- auth->au_ralign = auth->au_verfsize + 2 +
|
|
- XDR_QUADLEN(savedlen - rcv_buf->len);
|
|
+ auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack;
|
|
+ auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align;
|
|
+
|
|
return 0;
|
|
unwrap_failed:
|
|
trace_rpcgss_unwrap_failed(task);
|
|
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
|
|
index 6f2d30d7b766..e7180da1fc6a 100644
|
|
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
|
|
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
|
|
@@ -851,8 +851,8 @@ out_err:
|
|
}
|
|
|
|
u32
|
|
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
|
|
- u32 *headskip, u32 *tailskip)
|
|
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
|
|
+ struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
|
|
{
|
|
struct xdr_buf subbuf;
|
|
u32 ret = 0;
|
|
@@ -881,7 +881,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
|
|
|
|
/* create a segment skipping the header and leaving out the checksum */
|
|
xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
|
|
- (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
|
|
+ (len - offset - GSS_KRB5_TOK_HDR_LEN -
|
|
kctx->gk5e->cksumlength));
|
|
|
|
nblocks = (subbuf.len + blocksize - 1) / blocksize;
|
|
@@ -926,7 +926,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
|
|
goto out_err;
|
|
|
|
/* Get the packet's hmac value */
|
|
- ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
|
|
+ ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
|
|
pkt_hmac, kctx->gk5e->cksumlength);
|
|
if (ret)
|
|
goto out_err;
|
|
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
|
|
index 14a0aff0cd84..683755d95075 100644
|
|
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
|
|
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
|
|
@@ -261,7 +261,9 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
|
|
}
|
|
|
|
static u32
|
|
-gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
|
|
+gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
|
|
+ struct xdr_buf *buf, unsigned int *slack,
|
|
+ unsigned int *align)
|
|
{
|
|
int signalg;
|
|
int sealalg;
|
|
@@ -279,12 +281,13 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
|
|
u32 conflen = kctx->gk5e->conflen;
|
|
int crypt_offset;
|
|
u8 *cksumkey;
|
|
+ unsigned int saved_len = buf->len;
|
|
|
|
dprintk("RPC: gss_unwrap_kerberos\n");
|
|
|
|
ptr = (u8 *)buf->head[0].iov_base + offset;
|
|
if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
|
|
- buf->len - offset))
|
|
+ len - offset))
|
|
return GSS_S_DEFECTIVE_TOKEN;
|
|
|
|
if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
|
|
@@ -324,6 +327,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
|
|
(!kctx->initiate && direction != 0))
|
|
return GSS_S_BAD_SIG;
|
|
|
|
+ buf->len = len;
|
|
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
|
|
struct crypto_sync_skcipher *cipher;
|
|
int err;
|
|
@@ -376,11 +380,15 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
|
|
data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
|
|
memmove(orig_start, data_start, data_len);
|
|
buf->head[0].iov_len -= (data_start - orig_start);
|
|
- buf->len -= (data_start - orig_start);
|
|
+ buf->len = len - (data_start - orig_start);
|
|
|
|
if (gss_krb5_remove_padding(buf, blocksize))
|
|
return GSS_S_DEFECTIVE_TOKEN;
|
|
|
|
+ /* slack must include room for krb5 padding */
|
|
+ *slack = XDR_QUADLEN(saved_len - buf->len);
|
|
+ /* The GSS blob always precedes the RPC message payload */
|
|
+ *align = *slack;
|
|
return GSS_S_COMPLETE;
|
|
}
|
|
|
|
@@ -486,7 +494,9 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
|
|
}
|
|
|
|
static u32
|
|
-gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
|
|
+gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
|
|
+ struct xdr_buf *buf, unsigned int *slack,
|
|
+ unsigned int *align)
|
|
{
|
|
s32 now;
|
|
u8 *ptr;
|
|
@@ -532,7 +542,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
|
|
if (rrc != 0)
|
|
rotate_left(offset + 16, buf, rrc);
|
|
|
|
- err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
|
|
+ err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf,
|
|
&headskip, &tailskip);
|
|
if (err)
|
|
return GSS_S_FAILURE;
|
|
@@ -542,7 +552,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
|
|
* it against the original
|
|
*/
|
|
err = read_bytes_from_xdr_buf(buf,
|
|
- buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
|
|
+ len - GSS_KRB5_TOK_HDR_LEN - tailskip,
|
|
decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
|
|
if (err) {
|
|
dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
|
|
@@ -568,18 +578,19 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
|
|
* Note that buf->head[0].iov_len may indicate the available
|
|
* head buffer space rather than that actually occupied.
|
|
*/
|
|
- movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
|
|
+ movelen = min_t(unsigned int, buf->head[0].iov_len, len);
|
|
movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
|
|
- if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
|
|
- buf->head[0].iov_len)
|
|
- return GSS_S_FAILURE;
|
|
+ BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
|
|
+ buf->head[0].iov_len);
|
|
memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
|
|
buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
|
|
- buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
|
|
+ buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip;
|
|
|
|
/* Trim off the trailing "extra count" and checksum blob */
|
|
- buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
|
|
+ xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
|
|
|
|
+ *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
|
|
+ *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
|
|
return GSS_S_COMPLETE;
|
|
}
|
|
|
|
@@ -603,7 +614,8 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
|
|
}
|
|
|
|
u32
|
|
-gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
|
|
+gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
|
|
+ int len, struct xdr_buf *buf)
|
|
{
|
|
struct krb5_ctx *kctx = gctx->internal_ctx_id;
|
|
|
|
@@ -613,9 +625,11 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
|
|
case ENCTYPE_DES_CBC_RAW:
|
|
case ENCTYPE_DES3_CBC_RAW:
|
|
case ENCTYPE_ARCFOUR_HMAC:
|
|
- return gss_unwrap_kerberos_v1(kctx, offset, buf);
|
|
+ return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
|
|
+ &gctx->slack, &gctx->align);
|
|
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
|
|
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
|
|
- return gss_unwrap_kerberos_v2(kctx, offset, buf);
|
|
+ return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
|
|
+ &gctx->slack, &gctx->align);
|
|
}
|
|
}
|
|
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
|
|
index 82060099a429..8fa924c8e282 100644
|
|
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
|
|
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
|
|
@@ -438,10 +438,11 @@ gss_wrap(struct gss_ctx *ctx_id,
|
|
u32
|
|
gss_unwrap(struct gss_ctx *ctx_id,
|
|
int offset,
|
|
+ int len,
|
|
struct xdr_buf *buf)
|
|
{
|
|
return ctx_id->mech_type->gm_ops
|
|
- ->gss_unwrap(ctx_id, offset, buf);
|
|
+ ->gss_unwrap(ctx_id, offset, len, buf);
|
|
}
|
|
|
|
|
|
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
|
|
index ed20fa8a6f70..d9f7439e2431 100644
|
|
--- a/net/sunrpc/auth_gss/svcauth_gss.c
|
|
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
|
|
@@ -897,7 +897,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
|
|
if (svc_getnl(&buf->head[0]) != seq)
|
|
goto out;
|
|
/* trim off the mic and padding at the end before returning */
|
|
- buf->len -= 4 + round_up_to_quad(mic.len);
|
|
+ xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
|
|
stat = 0;
|
|
out:
|
|
kfree(mic.data);
|
|
@@ -925,7 +925,7 @@ static int
|
|
unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
|
|
{
|
|
u32 priv_len, maj_stat;
|
|
- int pad, saved_len, remaining_len, offset;
|
|
+ int pad, remaining_len, offset;
|
|
|
|
clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
|
|
|
|
@@ -945,12 +945,8 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
|
|
buf->len -= pad;
|
|
fix_priv_head(buf, pad);
|
|
|
|
- /* Maybe it would be better to give gss_unwrap a length parameter: */
|
|
- saved_len = buf->len;
|
|
- buf->len = priv_len;
|
|
- maj_stat = gss_unwrap(ctx, 0, buf);
|
|
+ maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
|
|
pad = priv_len - buf->len;
|
|
- buf->len = saved_len;
|
|
buf->len -= pad;
|
|
/* The upper layers assume the buffer is aligned on 4-byte boundaries.
|
|
* In the krb5p case, at least, the data ends up offset, so we need to
|
|
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
|
|
index f7f78566be46..f1088ca39d44 100644
|
|
--- a/net/sunrpc/clnt.c
|
|
+++ b/net/sunrpc/clnt.c
|
|
@@ -2422,6 +2422,11 @@ rpc_check_timeout(struct rpc_task *task)
|
|
{
|
|
struct rpc_clnt *clnt = task->tk_client;
|
|
|
|
+ if (RPC_SIGNALLED(task)) {
|
|
+ rpc_call_rpcerror(task, -ERESTARTSYS);
|
|
+ return;
|
|
+ }
|
|
+
|
|
if (xprt_adjust_timeout(task->tk_rqstp) == 0)
|
|
return;
|
|
|
|
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
|
|
index f3104be8ff5d..451ca7ec321c 100644
|
|
--- a/net/sunrpc/xdr.c
|
|
+++ b/net/sunrpc/xdr.c
|
|
@@ -1150,6 +1150,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
|
|
}
|
|
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
|
|
|
|
+/**
|
|
+ * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
|
|
+ * @buf: buf to be trimmed
|
|
+ * @len: number of bytes to reduce "buf" by
|
|
+ *
|
|
+ * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
|
|
+ * that it's possible that we'll trim less than that amount if the xdr_buf is
|
|
+ * too small, or if (for instance) it's all in the head and the parser has
|
|
+ * already read too far into it.
|
|
+ */
|
|
+void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
|
|
+{
|
|
+ size_t cur;
|
|
+ unsigned int trim = len;
|
|
+
|
|
+ if (buf->tail[0].iov_len) {
|
|
+ cur = min_t(size_t, buf->tail[0].iov_len, trim);
|
|
+ buf->tail[0].iov_len -= cur;
|
|
+ trim -= cur;
|
|
+ if (!trim)
|
|
+ goto fix_len;
|
|
+ }
|
|
+
|
|
+ if (buf->page_len) {
|
|
+ cur = min_t(unsigned int, buf->page_len, trim);
|
|
+ buf->page_len -= cur;
|
|
+ trim -= cur;
|
|
+ if (!trim)
|
|
+ goto fix_len;
|
|
+ }
|
|
+
|
|
+ if (buf->head[0].iov_len) {
|
|
+ cur = min_t(size_t, buf->head[0].iov_len, trim);
|
|
+ buf->head[0].iov_len -= cur;
|
|
+ trim -= cur;
|
|
+ }
|
|
+fix_len:
|
|
+ buf->len -= (len - trim);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(xdr_buf_trim);
|
|
+
|
|
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
|
|
{
|
|
unsigned int this_len;
|
|
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
|
|
index 8a12a7538d63..94db4683cfaf 100644
|
|
--- a/sound/core/rawmidi.c
|
|
+++ b/sound/core/rawmidi.c
|
|
@@ -97,6 +97,17 @@ static void snd_rawmidi_input_event_work(struct work_struct *work)
|
|
runtime->event(runtime->substream);
|
|
}
|
|
|
|
+/* buffer refcount management: call with runtime->lock held */
|
|
+static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime)
|
|
+{
|
|
+ runtime->buffer_ref++;
|
|
+}
|
|
+
|
|
+static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime)
|
|
+{
|
|
+ runtime->buffer_ref--;
|
|
+}
|
|
+
|
|
static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
|
|
{
|
|
struct snd_rawmidi_runtime *runtime;
|
|
@@ -646,6 +657,11 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
|
|
if (!newbuf)
|
|
return -ENOMEM;
|
|
spin_lock_irq(&runtime->lock);
|
|
+ if (runtime->buffer_ref) {
|
|
+ spin_unlock_irq(&runtime->lock);
|
|
+ kvfree(newbuf);
|
|
+ return -EBUSY;
|
|
+ }
|
|
oldbuf = runtime->buffer;
|
|
runtime->buffer = newbuf;
|
|
runtime->buffer_size = params->buffer_size;
|
|
@@ -945,8 +961,10 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
|
|
long result = 0, count1;
|
|
struct snd_rawmidi_runtime *runtime = substream->runtime;
|
|
unsigned long appl_ptr;
|
|
+ int err = 0;
|
|
|
|
spin_lock_irqsave(&runtime->lock, flags);
|
|
+ snd_rawmidi_buffer_ref(runtime);
|
|
while (count > 0 && runtime->avail) {
|
|
count1 = runtime->buffer_size - runtime->appl_ptr;
|
|
if (count1 > count)
|
|
@@ -965,16 +983,19 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
|
|
if (userbuf) {
|
|
spin_unlock_irqrestore(&runtime->lock, flags);
|
|
if (copy_to_user(userbuf + result,
|
|
- runtime->buffer + appl_ptr, count1)) {
|
|
- return result > 0 ? result : -EFAULT;
|
|
- }
|
|
+ runtime->buffer + appl_ptr, count1))
|
|
+ err = -EFAULT;
|
|
spin_lock_irqsave(&runtime->lock, flags);
|
|
+ if (err)
|
|
+ goto out;
|
|
}
|
|
result += count1;
|
|
count -= count1;
|
|
}
|
|
+ out:
|
|
+ snd_rawmidi_buffer_unref(runtime);
|
|
spin_unlock_irqrestore(&runtime->lock, flags);
|
|
- return result;
|
|
+ return result > 0 ? result : err;
|
|
}
|
|
|
|
long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream,
|
|
@@ -1268,6 +1289,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
|
|
return -EAGAIN;
|
|
}
|
|
}
|
|
+ snd_rawmidi_buffer_ref(runtime);
|
|
while (count > 0 && runtime->avail > 0) {
|
|
count1 = runtime->buffer_size - runtime->appl_ptr;
|
|
if (count1 > count)
|
|
@@ -1299,6 +1321,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
|
|
}
|
|
__end:
|
|
count1 = runtime->avail < runtime->buffer_size;
|
|
+ snd_rawmidi_buffer_unref(runtime);
|
|
spin_unlock_irqrestore(&runtime->lock, flags);
|
|
if (count1)
|
|
snd_rawmidi_output_trigger(substream, 1);
|
|
diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
|
|
index 16c7f6605511..26e7cb555d3c 100644
|
|
--- a/sound/firewire/amdtp-stream-trace.h
|
|
+++ b/sound/firewire/amdtp-stream-trace.h
|
|
@@ -66,8 +66,7 @@ TRACE_EVENT(amdtp_packet,
|
|
__entry->irq,
|
|
__entry->index,
|
|
__print_array(__get_dynamic_array(cip_header),
|
|
- __get_dynamic_array_len(cip_header),
|
|
- sizeof(u8)))
|
|
+ __get_dynamic_array_len(cip_header), 1))
|
|
);
|
|
|
|
#endif
|
|
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
|
|
index 663168ddce72..d48263d1f6a2 100644
|
|
--- a/sound/pci/hda/patch_hdmi.c
|
|
+++ b/sound/pci/hda/patch_hdmi.c
|
|
@@ -2234,7 +2234,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
|
|
|
|
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
|
|
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
|
|
+ struct hdmi_eld *pin_eld = &per_pin->sink_eld;
|
|
|
|
+ pin_eld->eld_valid = false;
|
|
hdmi_present_sense(per_pin, 0);
|
|
}
|
|
|
|
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
|
|
index 64270983ab7d..004d2f638cf2 100644
|
|
--- a/sound/pci/hda/patch_realtek.c
|
|
+++ b/sound/pci/hda/patch_realtek.c
|
|
@@ -5743,6 +5743,15 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
|
|
}
|
|
}
|
|
|
|
+static void alc225_fixup_s3_pop_noise(struct hda_codec *codec,
|
|
+ const struct hda_fixup *fix, int action)
|
|
+{
|
|
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
|
|
+ return;
|
|
+
|
|
+ codec->power_save_node = 1;
|
|
+}
|
|
+
|
|
/* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */
|
|
static void alc274_fixup_bind_dacs(struct hda_codec *codec,
|
|
const struct hda_fixup *fix, int action)
|
|
@@ -5847,6 +5856,7 @@ enum {
|
|
ALC269_FIXUP_HP_LINE1_MIC1_LED,
|
|
ALC269_FIXUP_INV_DMIC,
|
|
ALC269_FIXUP_LENOVO_DOCK,
|
|
+ ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST,
|
|
ALC269_FIXUP_NO_SHUTUP,
|
|
ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
|
|
ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
|
|
@@ -5932,6 +5942,7 @@ enum {
|
|
ALC233_FIXUP_ACER_HEADSET_MIC,
|
|
ALC294_FIXUP_LENOVO_MIC_LOCATION,
|
|
ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
|
|
+ ALC225_FIXUP_S3_POP_NOISE,
|
|
ALC700_FIXUP_INTEL_REFERENCE,
|
|
ALC274_FIXUP_DELL_BIND_DACS,
|
|
ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
|
|
@@ -5967,6 +5978,7 @@ enum {
|
|
ALC294_FIXUP_ASUS_DUAL_SPK,
|
|
ALC285_FIXUP_THINKPAD_HEADSET_JACK,
|
|
ALC294_FIXUP_ASUS_HPE,
|
|
+ ALC294_FIXUP_ASUS_COEF_1B,
|
|
ALC285_FIXUP_HP_GPIO_LED,
|
|
};
|
|
|
|
@@ -6165,6 +6177,12 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
.chained = true,
|
|
.chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
|
|
},
|
|
+ [ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST] = {
|
|
+ .type = HDA_FIXUP_FUNC,
|
|
+ .v.func = alc269_fixup_limit_int_mic_boost,
|
|
+ .chained = true,
|
|
+ .chain_id = ALC269_FIXUP_LENOVO_DOCK,
|
|
+ },
|
|
[ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
|
|
.type = HDA_FIXUP_FUNC,
|
|
.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
|
|
@@ -6817,6 +6835,12 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
{ }
|
|
},
|
|
.chained = true,
|
|
+ .chain_id = ALC225_FIXUP_S3_POP_NOISE
|
|
+ },
|
|
+ [ALC225_FIXUP_S3_POP_NOISE] = {
|
|
+ .type = HDA_FIXUP_FUNC,
|
|
+ .v.func = alc225_fixup_s3_pop_noise,
|
|
+ .chained = true,
|
|
.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
|
|
},
|
|
[ALC700_FIXUP_INTEL_REFERENCE] = {
|
|
@@ -7089,6 +7113,17 @@ static const struct hda_fixup alc269_fixups[] = {
|
|
.chained = true,
|
|
.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
|
|
},
|
|
+ [ALC294_FIXUP_ASUS_COEF_1B] = {
|
|
+ .type = HDA_FIXUP_VERBS,
|
|
+ .v.verbs = (const struct hda_verb[]) {
|
|
+ /* Set bit 10 to correct noisy output after reboot from
|
|
+ * Windows 10 (due to pop noise reduction?)
|
|
+ */
|
|
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x1b },
|
|
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b },
|
|
+ { }
|
|
+ },
|
|
+ },
|
|
[ALC285_FIXUP_HP_GPIO_LED] = {
|
|
.type = HDA_FIXUP_FUNC,
|
|
.v.func = alc285_fixup_hp_gpio_led,
|
|
@@ -7260,6 +7295,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
|
|
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
|
|
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
|
|
+ SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
|
|
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
|
|
SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
|
|
SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
|
|
@@ -7301,7 +7337,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
|
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
|
|
SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
|
|
SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
|
|
- SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
|
|
+ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
|
|
SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
|
|
SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
|
|
SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
|
|
@@ -7440,6 +7476,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
|
|
{.id = ALC269_FIXUP_HEADSET_MODE, .name = "headset-mode"},
|
|
{.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
|
|
{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
|
|
+ {.id = ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, .name = "lenovo-dock-limit-boost"},
|
|
{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
|
|
{.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
|
|
{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
|
|
@@ -8113,8 +8150,6 @@ static int patch_alc269(struct hda_codec *codec)
|
|
spec->gen.mixer_nid = 0;
|
|
break;
|
|
case 0x10ec0225:
|
|
- codec->power_save_node = 1;
|
|
- /* fall through */
|
|
case 0x10ec0295:
|
|
case 0x10ec0299:
|
|
spec->codec_variant = ALC269_TYPE_ALC225;
|
|
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
|
|
index 5a81c444a18b..092720ce2c55 100644
|
|
--- a/sound/usb/quirks.c
|
|
+++ b/sound/usb/quirks.c
|
|
@@ -1592,13 +1592,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
|
|
&& (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
|
|
msleep(20);
|
|
|
|
- /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
|
|
- * otherwise requests like get/set frequency return as failed despite
|
|
- * actually succeeding.
|
|
+ /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny
|
|
+ * delay here, otherwise requests like get/set frequency return as
|
|
+ * failed despite actually succeeding.
|
|
*/
|
|
if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
|
|
chip->usb_id == USB_ID(0x046d, 0x0a46) ||
|
|
- chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
|
|
+ chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
|
|
+ chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
|
|
(requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
|
|
usleep_range(1000, 2000);
|
|
}
|
|
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
|
|
index b6403712c2f4..281cc65276e0 100644
|
|
--- a/tools/lib/bpf/libbpf.c
|
|
+++ b/tools/lib/bpf/libbpf.c
|
|
@@ -5905,62 +5905,104 @@ void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
|
|
}
|
|
}
|
|
|
|
-int libbpf_num_possible_cpus(void)
|
|
+int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
|
|
{
|
|
- static const char *fcpu = "/sys/devices/system/cpu/possible";
|
|
- int len = 0, n = 0, il = 0, ir = 0;
|
|
- unsigned int start = 0, end = 0;
|
|
- int tmp_cpus = 0;
|
|
- static int cpus;
|
|
- char buf[128];
|
|
- int error = 0;
|
|
- int fd = -1;
|
|
+ int err = 0, n, len, start, end = -1;
|
|
+ bool *tmp;
|
|
|
|
- tmp_cpus = READ_ONCE(cpus);
|
|
- if (tmp_cpus > 0)
|
|
- return tmp_cpus;
|
|
+ *mask = NULL;
|
|
+ *mask_sz = 0;
|
|
+
|
|
+ /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
|
|
+ while (*s) {
|
|
+ if (*s == ',' || *s == '\n') {
|
|
+ s++;
|
|
+ continue;
|
|
+ }
|
|
+ n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
|
|
+ if (n <= 0 || n > 2) {
|
|
+ pr_warning("Failed to get CPU range %s: %d\n", s, n);
|
|
+ err = -EINVAL;
|
|
+ goto cleanup;
|
|
+ } else if (n == 1) {
|
|
+ end = start;
|
|
+ }
|
|
+ if (start < 0 || start > end) {
|
|
+ pr_warning("Invalid CPU range [%d,%d] in %s\n",
|
|
+ start, end, s);
|
|
+ err = -EINVAL;
|
|
+ goto cleanup;
|
|
+ }
|
|
+ tmp = realloc(*mask, end + 1);
|
|
+ if (!tmp) {
|
|
+ err = -ENOMEM;
|
|
+ goto cleanup;
|
|
+ }
|
|
+ *mask = tmp;
|
|
+ memset(tmp + *mask_sz, 0, start - *mask_sz);
|
|
+ memset(tmp + start, 1, end - start + 1);
|
|
+ *mask_sz = end + 1;
|
|
+ s += len;
|
|
+ }
|
|
+ if (!*mask_sz) {
|
|
+ pr_warning("Empty CPU range\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ return 0;
|
|
+cleanup:
|
|
+ free(*mask);
|
|
+ *mask = NULL;
|
|
+ return err;
|
|
+}
|
|
+
|
|
+int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
|
|
+{
|
|
+ int fd, err = 0, len;
|
|
+ char buf[128];
|
|
|
|
fd = open(fcpu, O_RDONLY);
|
|
if (fd < 0) {
|
|
- error = errno;
|
|
- pr_warning("Failed to open file %s: %s\n",
|
|
- fcpu, strerror(error));
|
|
- return -error;
|
|
+ err = -errno;
|
|
+ pr_warning("Failed to open cpu mask file %s: %d\n", fcpu, err);
|
|
+ return err;
|
|
}
|
|
len = read(fd, buf, sizeof(buf));
|
|
close(fd);
|
|
if (len <= 0) {
|
|
- error = len ? errno : EINVAL;
|
|
- pr_warning("Failed to read # of possible cpus from %s: %s\n",
|
|
- fcpu, strerror(error));
|
|
- return -error;
|
|
+ err = len ? -errno : -EINVAL;
|
|
+ pr_warning("Failed to read cpu mask from %s: %d\n", fcpu, err);
|
|
+ return err;
|
|
}
|
|
- if (len == sizeof(buf)) {
|
|
- pr_warning("File %s size overflow\n", fcpu);
|
|
- return -EOVERFLOW;
|
|
+ if (len >= sizeof(buf)) {
|
|
+ pr_warning("CPU mask is too big in file %s\n", fcpu);
|
|
+ return -E2BIG;
|
|
}
|
|
buf[len] = '\0';
|
|
|
|
- for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
|
|
- /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
|
|
- if (buf[ir] == ',' || buf[ir] == '\0') {
|
|
- buf[ir] = '\0';
|
|
- n = sscanf(&buf[il], "%u-%u", &start, &end);
|
|
- if (n <= 0) {
|
|
- pr_warning("Failed to get # CPUs from %s\n",
|
|
- &buf[il]);
|
|
- return -EINVAL;
|
|
- } else if (n == 1) {
|
|
- end = start;
|
|
- }
|
|
- tmp_cpus += end - start + 1;
|
|
- il = ir + 1;
|
|
- }
|
|
- }
|
|
- if (tmp_cpus <= 0) {
|
|
- pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
|
|
- return -EINVAL;
|
|
+ return parse_cpu_mask_str(buf, mask, mask_sz);
|
|
+}
|
|
+
|
|
+int libbpf_num_possible_cpus(void)
|
|
+{
|
|
+ static const char *fcpu = "/sys/devices/system/cpu/possible";
|
|
+ static int cpus;
|
|
+ int err, n, i, tmp_cpus;
|
|
+ bool *mask;
|
|
+
|
|
+ tmp_cpus = READ_ONCE(cpus);
|
|
+ if (tmp_cpus > 0)
|
|
+ return tmp_cpus;
|
|
+
|
|
+ err = parse_cpu_mask_file(fcpu, &mask, &n);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ tmp_cpus = 0;
|
|
+ for (i = 0; i < n; i++) {
|
|
+ if (mask[i])
|
|
+ tmp_cpus++;
|
|
}
|
|
+ free(mask);
|
|
|
|
WRITE_ONCE(cpus, tmp_cpus);
|
|
return tmp_cpus;
|
|
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 98216a69c32f..92940ae26ada 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -63,6 +63,8 @@ do { \
#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)

+int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
+int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);

diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
index 1735faf17536..437cb93e72ac 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -52,7 +52,7 @@ retry:
if (pmu_fd < 0 && errno == ENOENT) {
printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
test__skip();
- goto cleanup;
+ goto close_prog;
}
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
pmu_fd, errno))
diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c
index 8941a41c2a55..cce6d605c017 100644
--- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c
+++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
+#include "bpf_helpers.h"

#define MAX_STACK_RAWTP 10

diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c
index 079d0f5a2909..7e4c91f2238d 100644
--- a/tools/testing/selftests/bpf/test_select_reuseport.c
+++ b/tools/testing/selftests/bpf/test_select_reuseport.c
@@ -668,12 +668,12 @@ static void cleanup_per_test(void)

for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY);
- RET_IF(err, "reset elem in result_map",
+ CHECK(err, "reset elem in result_map",
"i:%u err:%d errno:%d\n", i, err, errno);
}

err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY);
- RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n",
+ CHECK(err, "reset line number in linum_map", "err:%d errno:%d\n",
err, errno);

for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++)
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index 063ecb290a5a..144308a757b7 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -29,8 +29,25 @@ err_ret=1
# kselftest skip code is 4
err_skip=4

+# cgroup RT scheduling prevents chrt commands from succeeding, which
+# induces failures in test wakeup tests. Disable for the duration of
+# the tests.
+
+readonly sched_rt_runtime=/proc/sys/kernel/sched_rt_runtime_us
+
+sched_rt_runtime_orig=$(cat $sched_rt_runtime)
+
+setup() {
+ echo -1 > $sched_rt_runtime
+}
+
+cleanup() {
+ echo $sched_rt_runtime_orig > $sched_rt_runtime
+}
+
errexit() { # message
echo "Error: $1" 1>&2
+ cleanup
exit $err_ret
}

@@ -39,6 +56,8 @@ if [ `id -u` -ne 0 ]; then
errexit "this must be run by root user"
fi

+setup
+
# Utilities
absdir() { # file_path
(cd `dirname $1`; pwd)
@@ -235,6 +254,7 @@ TOTAL_RESULT=0

INSTANCE=
CASENO=0
+
testcase() { # testfile
CASENO=$((CASENO+1))
desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:`
@@ -406,5 +426,7 @@ prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w`
prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w`
prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`

+cleanup
+
# if no error, return 0
exit $TOTAL_RESULT
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
index 1bcb67dcae26..81490ecaaa92 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
@@ -38,7 +38,7 @@ for width in 64 32 16 8; do
echo 0 > events/kprobes/testprobe/enable

: "Confirm the arguments is recorded in given types correctly"
- ARGS=`grep "testprobe" trace | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
+ ARGS=`grep "testprobe" trace | head -n 1 | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
check_types $ARGS $width

: "Clear event for next loop"
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 5945f062d749..d63881f60e1a 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -422,11 +422,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
vgic_mmio_read_active, vgic_mmio_write_sactive,
- NULL, vgic_mmio_uaccess_write_sactive, 1,
+ vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
vgic_mmio_read_active, vgic_mmio_write_cactive,
- NULL, vgic_mmio_uaccess_write_cactive, 1,
+ vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 7dfd15dbb308..4c5909e38f78 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -491,11 +491,11 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
vgic_mmio_read_active, vgic_mmio_write_sactive,
- NULL, vgic_mmio_uaccess_write_sactive, 1,
+ vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
vgic_mmio_read_active, vgic_mmio_write_cactive,
- NULL, vgic_mmio_uaccess_write_cactive,
+ vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
1, VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
@@ -563,12 +563,12 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
vgic_mmio_read_active, vgic_mmio_write_sactive,
- NULL, vgic_mmio_uaccess_write_sactive,
- 4, VGIC_ACCESS_32bit),
+ vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
vgic_mmio_read_active, vgic_mmio_write_cactive,
- NULL, vgic_mmio_uaccess_write_cactive,
- 4, VGIC_ACCESS_32bit),
+ vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 7eacf00e5abe..fb1dcd397b93 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -300,8 +300,39 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
}
}

-unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
- gpa_t addr, unsigned int len)
+
+/*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+ * is not queued on some running VCPU's LRs, because then the change to the
+ * active state can be overwritten when the VCPU's state is synced coming back
+ * from the guest.
+ *
+ * For shared interrupts as well as GICv3 private interrupts, we have to
+ * stop all the VCPUs because interrupts can be migrated while we don't hold
+ * the IRQ locks and we don't want to be chasing moving targets.
+ *
+ * For GICv2 private interrupts we don't have to do anything because
+ * userspace accesses to the VGIC state already require all VCPUs to be
+ * stopped, and only the VCPU itself can modify its private interrupts
+ * active state, which guarantees that the VCPU is not running.
+ */
+static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+{
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+ intid >= VGIC_NR_PRIVATE_IRQS)
+ kvm_arm_halt_guest(vcpu->kvm);
+}
+
+/* See vgic_access_active_prepare */
+static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+{
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+ intid >= VGIC_NR_PRIVATE_IRQS)
+ kvm_arm_resume_guest(vcpu->kvm);
+}
+
+static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
u32 value = 0;
@@ -311,6 +342,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
for (i = 0; i < len * 8; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

+ /*
+ * Even for HW interrupts, don't evaluate the HW state as
+ * all the guest is interested in is the virtual state.
+ */
if (irq->active)
value |= (1U << i);

@@ -320,6 +355,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
return value;
}

+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ u32 val;
+
+ mutex_lock(&vcpu->kvm->lock);
+ vgic_access_active_prepare(vcpu, intid);
+
+ val = __vgic_mmio_read_active(vcpu, addr, len);
+
+ vgic_access_active_finish(vcpu, intid);
+ mutex_unlock(&vcpu->kvm->lock);
+
+ return val;
+}
+
+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ return __vgic_mmio_read_active(vcpu, addr, len);
+}
+
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
bool active, bool is_uaccess)
@@ -371,36 +429,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

-/*
- * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
- * is not queued on some running VCPU's LRs, because then the change to the
- * active state can be overwritten when the VCPU's state is synced coming back
- * from the guest.
- *
- * For shared interrupts, we have to stop all the VCPUs because interrupts can
- * be migrated while we don't hold the IRQ locks and we don't want to be
- * chasing moving targets.
- *
- * For private interrupts we don't have to do anything because userspace
- * accesses to the VGIC state already require all VCPUs to be stopped, and
- * only the VCPU itself can modify its private interrupts active state, which
- * guarantees that the VCPU is not running.
- */
-static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
-{
- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
- intid >= VGIC_NR_PRIVATE_IRQS)
- kvm_arm_halt_guest(vcpu->kvm);
-}
-
-/* See vgic_change_active_prepare */
-static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
-{
- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
- intid >= VGIC_NR_PRIVATE_IRQS)
- kvm_arm_resume_guest(vcpu->kvm);
-}
-
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
@@ -422,11 +450,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

mutex_lock(&vcpu->kvm->lock);
- vgic_change_active_prepare(vcpu, intid);
+ vgic_access_active_prepare(vcpu, intid);

__vgic_mmio_write_cactive(vcpu, addr, len, val);

- vgic_change_active_finish(vcpu, intid);
+ vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->lock);
}

@@ -459,11 +487,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

mutex_lock(&vcpu->kvm->lock);
- vgic_change_active_prepare(vcpu, intid);
+ vgic_access_active_prepare(vcpu, intid);

__vgic_mmio_write_sactive(vcpu, addr, len, val);

- vgic_change_active_finish(vcpu, intid);
+ vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->lock);
}

diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 836f418f1ee8..b6aff5252429 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -157,6 +157,9 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);

+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len);
+
void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);