mirror of
https://github.com/Fishwaldo/build.git
synced 2025-03-27 09:11:49 +00:00
6337 lines
206 KiB
Diff
6337 lines
206 KiB
Diff
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.txt b/Documentation/devicetree/bindings/display/panel/panel-common.txt
|
|
index ec52c472c845..0603af877155 100644
|
|
--- a/Documentation/devicetree/bindings/display/panel/panel-common.txt
|
|
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.txt
|
|
@@ -38,7 +38,7 @@ Display Timings
|
|
require specific display timings. The panel-timing subnode expresses those
|
|
timings as specified in the timing subnode section of the display timing
|
|
bindings defined in
|
|
- Documentation/devicetree/bindings/display/display-timing.txt.
|
|
+ Documentation/devicetree/bindings/display/panel/display-timing.txt.
|
|
|
|
|
|
Connectivity
|
|
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
|
|
index 891db41e9420..98d7898fcd78 100644
|
|
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
|
|
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
|
|
@@ -25,6 +25,7 @@ Required Properties:
|
|
- "renesas,dmac-r8a7794" (R-Car E2)
|
|
- "renesas,dmac-r8a7795" (R-Car H3)
|
|
- "renesas,dmac-r8a7796" (R-Car M3-W)
|
|
+ - "renesas,dmac-r8a77965" (R-Car M3-N)
|
|
- "renesas,dmac-r8a77970" (R-Car V3M)
|
|
|
|
- reg: base address and length of the registers block for the DMAC
|
|
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
|
|
index 6f2ec9af0de2..dee9520224a9 100644
|
|
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
|
|
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
|
|
@@ -55,9 +55,9 @@ pins it needs, and how they should be configured, with regard to muxer
|
|
configuration, drive strength and pullups. If one of these options is
|
|
not set, its actual value will be unspecified.
|
|
|
|
-This driver supports the generic pin multiplexing and configuration
|
|
-bindings. For details on each properties, you can refer to
|
|
-./pinctrl-bindings.txt.
|
|
+Allwinner A1X Pin Controller supports the generic pin multiplexing and
|
|
+configuration bindings. For details on each properties, you can refer to
|
|
+ ./pinctrl-bindings.txt.
|
|
|
|
Required sub-node properties:
|
|
- pins
|
|
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
|
|
index 8ff65fa632fd..c06c045126fc 100644
|
|
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
|
|
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
|
|
@@ -21,7 +21,7 @@ Required properties:
|
|
- interrupts : identifier to the device interrupt
|
|
- clocks : a list of phandle + clock-specifier pairs, one for each
|
|
entry in clock names.
|
|
-- clocks-names :
|
|
+- clock-names :
|
|
* "xtal" for external xtal clock identifier
|
|
* "pclk" for the bus core clock, either the clk81 clock or the gate clock
|
|
* "baud" for the source of the baudrate generator, can be either the xtal
|
|
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
|
|
index cf504d0380ae..88f947c47adc 100644
|
|
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
|
|
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
|
|
@@ -41,6 +41,8 @@ Required properties:
|
|
- "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
|
|
- "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
|
|
- "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
|
|
+ - "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART.
|
|
+ - "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART.
|
|
- "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
|
|
- "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
|
|
- "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART.
|
|
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
|
|
index 1afd298eddd7..f4a98c85340a 100644
|
|
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
|
|
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
|
|
@@ -172,6 +172,7 @@ karo Ka-Ro electronics GmbH
|
|
keithkoep Keith & Koep GmbH
|
|
keymile Keymile GmbH
|
|
khadas Khadas
|
|
+kiebackpeter Kieback & Peter GmbH
|
|
kinetic Kinetic Technologies
|
|
kingnovel Kingnovel Technology Co., Ltd.
|
|
kosagi Sutajio Ko-Usagi PTE Ltd.
|
|
diff --git a/Makefile b/Makefile
|
|
index 84374c5ba60e..a33376204c17 100644
|
|
--- a/Makefile
|
|
+++ b/Makefile
|
|
@@ -1,7 +1,7 @@
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
VERSION = 4
|
|
PATCHLEVEL = 14
|
|
-SUBLEVEL = 50
|
|
+SUBLEVEL = 51
|
|
EXTRAVERSION =
|
|
NAME = Petit Gorille
|
|
|
|
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
|
|
index 8a756870c238..5f687ba1eaa7 100644
|
|
--- a/arch/arm/boot/compressed/head.S
|
|
+++ b/arch/arm/boot/compressed/head.S
|
|
@@ -29,19 +29,19 @@
|
|
#if defined(CONFIG_DEBUG_ICEDCC)
|
|
|
|
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
|
|
- .macro loadsp, rb, tmp
|
|
+ .macro loadsp, rb, tmp1, tmp2
|
|
.endm
|
|
.macro writeb, ch, rb
|
|
mcr p14, 0, \ch, c0, c5, 0
|
|
.endm
|
|
#elif defined(CONFIG_CPU_XSCALE)
|
|
- .macro loadsp, rb, tmp
|
|
+ .macro loadsp, rb, tmp1, tmp2
|
|
.endm
|
|
.macro writeb, ch, rb
|
|
mcr p14, 0, \ch, c8, c0, 0
|
|
.endm
|
|
#else
|
|
- .macro loadsp, rb, tmp
|
|
+ .macro loadsp, rb, tmp1, tmp2
|
|
.endm
|
|
.macro writeb, ch, rb
|
|
mcr p14, 0, \ch, c1, c0, 0
|
|
@@ -57,7 +57,7 @@
|
|
.endm
|
|
|
|
#if defined(CONFIG_ARCH_SA1100)
|
|
- .macro loadsp, rb, tmp
|
|
+ .macro loadsp, rb, tmp1, tmp2
|
|
mov \rb, #0x80000000 @ physical base address
|
|
#ifdef CONFIG_DEBUG_LL_SER3
|
|
add \rb, \rb, #0x00050000 @ Ser3
|
|
@@ -66,8 +66,8 @@
|
|
#endif
|
|
.endm
|
|
#else
|
|
- .macro loadsp, rb, tmp
|
|
- addruart \rb, \tmp
|
|
+ .macro loadsp, rb, tmp1, tmp2
|
|
+ addruart \rb, \tmp1, \tmp2
|
|
.endm
|
|
#endif
|
|
#endif
|
|
@@ -559,8 +559,6 @@ not_relocated: mov r0, #0
|
|
bl decompress_kernel
|
|
bl cache_clean_flush
|
|
bl cache_off
|
|
- mov r1, r7 @ restore architecture number
|
|
- mov r2, r8 @ restore atags pointer
|
|
|
|
#ifdef CONFIG_ARM_VIRT_EXT
|
|
mrs r0, spsr @ Get saved CPU boot mode
|
|
@@ -1295,7 +1293,7 @@ phex: adr r3, phexbuf
|
|
b 1b
|
|
|
|
@ puts corrupts {r0, r1, r2, r3}
|
|
-puts: loadsp r3, r1
|
|
+puts: loadsp r3, r2, r1
|
|
1: ldrb r2, [r0], #1
|
|
teq r2, #0
|
|
moveq pc, lr
|
|
@@ -1312,8 +1310,8 @@ puts: loadsp r3, r1
|
|
@ putc corrupts {r0, r1, r2, r3}
|
|
putc:
|
|
mov r2, r0
|
|
+ loadsp r3, r1, r0
|
|
mov r0, #0
|
|
- loadsp r3, r1
|
|
b 2b
|
|
|
|
@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
|
|
@@ -1363,6 +1361,8 @@ __hyp_reentry_vectors:
|
|
|
|
__enter_kernel:
|
|
mov r0, #0 @ must be 0
|
|
+ mov r1, r7 @ restore architecture number
|
|
+ mov r2, r8 @ restore atags pointer
|
|
ARM( mov pc, r4 ) @ call kernel
|
|
M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class
|
|
THUMB( bx r4 ) @ entry point is always ARM for A/R classes
|
|
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
|
|
index 7c957ea06c66..9a9902974b1b 100644
|
|
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
|
|
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
|
|
@@ -69,7 +69,7 @@
|
|
timer@20200 {
|
|
compatible = "arm,cortex-a9-global-timer";
|
|
reg = <0x20200 0x100>;
|
|
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
|
|
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
|
|
clocks = <&periph_clk>;
|
|
};
|
|
|
|
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
|
|
index af68ef7b0caa..8a15f7193c82 100644
|
|
--- a/arch/arm/boot/dts/da850.dtsi
|
|
+++ b/arch/arm/boot/dts/da850.dtsi
|
|
@@ -34,8 +34,6 @@
|
|
pmx_core: pinmux@14120 {
|
|
compatible = "pinctrl-single";
|
|
reg = <0x14120 0x50>;
|
|
- #address-cells = <1>;
|
|
- #size-cells = <0>;
|
|
#pinctrl-cells = <2>;
|
|
pinctrl-single,bit-per-mux;
|
|
pinctrl-single,register-width = <32>;
|
|
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
|
|
index e262fa9ef334..c335b923753a 100644
|
|
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
|
|
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
|
|
@@ -26,7 +26,7 @@
|
|
gpio = <&gpio1 3 0>; /* gpio_3 */
|
|
startup-delay-us = <70000>;
|
|
enable-active-high;
|
|
- vin-supply = <&vmmc2>;
|
|
+ vin-supply = <&vaux3>;
|
|
};
|
|
|
|
/* HS USB Host PHY on PORT 1 */
|
|
@@ -108,6 +108,7 @@
|
|
twl_audio: audio {
|
|
compatible = "ti,twl4030-audio";
|
|
codec {
|
|
+ ti,hs_extmute_gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>;
|
|
};
|
|
};
|
|
};
|
|
@@ -221,6 +222,7 @@
|
|
pinctrl-single,pins = <
|
|
OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
|
|
OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
|
|
+ OMAP3_CORE1_IOPAD(0x20ba, PIN_OUTPUT | MUX_MODE4) /* gpmc_ncs6.gpio_57 */
|
|
>;
|
|
};
|
|
};
|
|
@@ -235,7 +237,7 @@
|
|
};
|
|
wl127x_gpio: pinmux_wl127x_gpio_pin {
|
|
pinctrl-single,pins = <
|
|
- OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */
|
|
+ OMAP3_WKUP_IOPAD(0x2a0a, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */
|
|
OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
|
|
>;
|
|
};
|
|
@@ -270,6 +272,11 @@
|
|
#include "twl4030.dtsi"
|
|
#include "twl4030_omap3.dtsi"
|
|
|
|
+&vaux3 {
|
|
+ regulator-min-microvolt = <2800000>;
|
|
+ regulator-max-microvolt = <2800000>;
|
|
+};
|
|
+
|
|
&twl {
|
|
twl_power: power {
|
|
compatible = "ti,twl4030-power-idle-osc-off", "ti,twl4030-power-idle";
|
|
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
|
|
index 6b38d7a634c1..c15318431986 100644
|
|
--- a/arch/arm/kernel/machine_kexec.c
|
|
+++ b/arch/arm/kernel/machine_kexec.c
|
|
@@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused)
|
|
cpu_relax();
|
|
}
|
|
|
|
+void crash_smp_send_stop(void)
|
|
+{
|
|
+ static int cpus_stopped;
|
|
+ unsigned long msecs;
|
|
+
|
|
+ if (cpus_stopped)
|
|
+ return;
|
|
+
|
|
+ atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
|
|
+ smp_call_function(machine_crash_nonpanic_core, NULL, false);
|
|
+ msecs = 1000; /* Wait at most a second for the other cpus to stop */
|
|
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
|
|
+ mdelay(1);
|
|
+ msecs--;
|
|
+ }
|
|
+ if (atomic_read(&waiting_for_crash_ipi) > 0)
|
|
+ pr_warn("Non-crashing CPUs did not react to IPI\n");
|
|
+
|
|
+ cpus_stopped = 1;
|
|
+}
|
|
+
|
|
static void machine_kexec_mask_interrupts(void)
|
|
{
|
|
unsigned int i;
|
|
@@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void)
|
|
|
|
void machine_crash_shutdown(struct pt_regs *regs)
|
|
{
|
|
- unsigned long msecs;
|
|
-
|
|
local_irq_disable();
|
|
-
|
|
- atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
|
|
- smp_call_function(machine_crash_nonpanic_core, NULL, false);
|
|
- msecs = 1000; /* Wait at most a second for the other cpus to stop */
|
|
- while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
|
|
- mdelay(1);
|
|
- msecs--;
|
|
- }
|
|
- if (atomic_read(&waiting_for_crash_ipi) > 0)
|
|
- pr_warn("Non-crashing CPUs did not react to IPI\n");
|
|
+ crash_smp_send_stop();
|
|
|
|
crash_save_cpu(regs, smp_processor_id());
|
|
machine_kexec_mask_interrupts();
|
|
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
|
|
index f673cd7a6766..fb7c44cdadcb 100644
|
|
--- a/arch/arm/mach-davinci/board-da830-evm.c
|
|
+++ b/arch/arm/mach-davinci/board-da830-evm.c
|
|
@@ -205,12 +205,17 @@ static const short da830_evm_mmc_sd_pins[] = {
|
|
-1
|
|
};
|
|
|
|
+#define DA830_MMCSD_WP_PIN GPIO_TO_PIN(2, 1)
|
|
+#define DA830_MMCSD_CD_PIN GPIO_TO_PIN(2, 2)
|
|
+
|
|
static struct gpiod_lookup_table mmc_gpios_table = {
|
|
.dev_id = "da830-mmc.0",
|
|
.table = {
|
|
/* gpio chip 1 contains gpio range 32-63 */
|
|
- GPIO_LOOKUP("davinci_gpio.1", 2, "cd", GPIO_ACTIVE_LOW),
|
|
- GPIO_LOOKUP("davinci_gpio.1", 1, "wp", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd",
|
|
+ GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp",
|
|
+ GPIO_ACTIVE_LOW),
|
|
},
|
|
};
|
|
|
|
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
|
|
index cbde0030c092..25f12118c364 100644
|
|
--- a/arch/arm/mach-davinci/board-da850-evm.c
|
|
+++ b/arch/arm/mach-davinci/board-da850-evm.c
|
|
@@ -763,12 +763,17 @@ static const short da850_evm_mcasp_pins[] __initconst = {
|
|
-1
|
|
};
|
|
|
|
+#define DA850_MMCSD_CD_PIN GPIO_TO_PIN(4, 0)
|
|
+#define DA850_MMCSD_WP_PIN GPIO_TO_PIN(4, 1)
|
|
+
|
|
static struct gpiod_lookup_table mmc_gpios_table = {
|
|
.dev_id = "da830-mmc.0",
|
|
.table = {
|
|
/* gpio chip 2 contains gpio range 64-95 */
|
|
- GPIO_LOOKUP("davinci_gpio.2", 0, "cd", GPIO_ACTIVE_LOW),
|
|
- GPIO_LOOKUP("davinci_gpio.2", 1, "wp", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
|
|
+ GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
|
|
+ GPIO_ACTIVE_LOW),
|
|
},
|
|
};
|
|
|
|
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
|
|
index 62e7bc3018f0..8e64685b1941 100644
|
|
--- a/arch/arm/mach-davinci/board-dm355-evm.c
|
|
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
|
|
@@ -18,6 +18,7 @@
|
|
#include <linux/i2c.h>
|
|
#include <linux/gpio.h>
|
|
#include <linux/clk.h>
|
|
+#include <linux/dm9000.h>
|
|
#include <linux/videodev2.h>
|
|
#include <media/i2c/tvp514x.h>
|
|
#include <linux/spi/spi.h>
|
|
@@ -168,11 +169,16 @@ static struct resource dm355evm_dm9000_rsrc[] = {
|
|
},
|
|
};
|
|
|
|
+static struct dm9000_plat_data dm335evm_dm9000_platdata;
|
|
+
|
|
static struct platform_device dm355evm_dm9000 = {
|
|
.name = "dm9000",
|
|
.id = -1,
|
|
.resource = dm355evm_dm9000_rsrc,
|
|
.num_resources = ARRAY_SIZE(dm355evm_dm9000_rsrc),
|
|
+ .dev = {
|
|
+ .platform_data = &dm335evm_dm9000_platdata,
|
|
+ },
|
|
};
|
|
|
|
static struct tvp514x_platform_data tvp5146_pdata = {
|
|
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
|
|
index cb0a41e83582..4c458f714101 100644
|
|
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
|
|
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
|
|
@@ -534,11 +534,12 @@ static struct vpif_display_config dm646x_vpif_display_config = {
|
|
.set_clock = set_vpif_clock,
|
|
.subdevinfo = dm646x_vpif_subdev,
|
|
.subdev_count = ARRAY_SIZE(dm646x_vpif_subdev),
|
|
+ .i2c_adapter_id = 1,
|
|
.chan_config[0] = {
|
|
.outputs = dm6467_ch0_outputs,
|
|
.output_count = ARRAY_SIZE(dm6467_ch0_outputs),
|
|
},
|
|
- .card_name = "DM646x EVM",
|
|
+ .card_name = "DM646x EVM Video Display",
|
|
};
|
|
|
|
/**
|
|
@@ -676,6 +677,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = {
|
|
.setup_input_channel_mode = setup_vpif_input_channel_mode,
|
|
.subdev_info = vpif_capture_sdev_info,
|
|
.subdev_count = ARRAY_SIZE(vpif_capture_sdev_info),
|
|
+ .i2c_adapter_id = 1,
|
|
.chan_config[0] = {
|
|
.inputs = dm6467_ch0_inputs,
|
|
.input_count = ARRAY_SIZE(dm6467_ch0_inputs),
|
|
@@ -696,6 +698,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = {
|
|
.fid_pol = 0,
|
|
},
|
|
},
|
|
+ .card_name = "DM646x EVM Video Capture",
|
|
};
|
|
|
|
static void __init evm_init_video(void)
|
|
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
|
|
index 62eb7d668890..10a027253250 100644
|
|
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
|
|
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
|
|
@@ -123,12 +123,16 @@ static const short hawk_mmcsd0_pins[] = {
|
|
-1
|
|
};
|
|
|
|
+#define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12)
|
|
+#define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13)
|
|
+
|
|
static struct gpiod_lookup_table mmc_gpios_table = {
|
|
.dev_id = "da830-mmc.0",
|
|
.table = {
|
|
- /* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/
|
|
- GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW),
|
|
- GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd",
|
|
+ GPIO_ACTIVE_LOW),
|
|
+ GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp",
|
|
+ GPIO_ACTIVE_LOW),
|
|
},
|
|
};
|
|
|
|
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
|
|
index da21353cac45..d869369ca2bc 100644
|
|
--- a/arch/arm/mach-davinci/dm646x.c
|
|
+++ b/arch/arm/mach-davinci/dm646x.c
|
|
@@ -495,7 +495,8 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
|
|
[IRQ_DM646X_MCASP0TXINT] = 7,
|
|
[IRQ_DM646X_MCASP0RXINT] = 7,
|
|
[IRQ_DM646X_RESERVED_3] = 7,
|
|
- [IRQ_DM646X_MCASP1TXINT] = 7, /* clockevent */
|
|
+ [IRQ_DM646X_MCASP1TXINT] = 7,
|
|
+ [IRQ_TINT0_TINT12] = 7, /* clockevent */
|
|
[IRQ_TINT0_TINT34] = 7, /* clocksource */
|
|
[IRQ_TINT1_TINT12] = 7, /* DSP timer */
|
|
[IRQ_TINT1_TINT34] = 7, /* system tick */
|
|
diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
|
|
index fe57e2692629..abca83d22ff3 100644
|
|
--- a/arch/arm/mach-keystone/pm_domain.c
|
|
+++ b/arch/arm/mach-keystone/pm_domain.c
|
|
@@ -29,6 +29,7 @@ static struct dev_pm_domain keystone_pm_domain = {
|
|
|
|
static struct pm_clk_notifier_block platform_domain_notifier = {
|
|
.pm_domain = &keystone_pm_domain,
|
|
+ .con_ids = { NULL },
|
|
};
|
|
|
|
static const struct of_device_id of_keystone_table[] = {
|
|
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
|
|
index 793a24a53c52..d7ca9e2b40d2 100644
|
|
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
|
|
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
|
|
@@ -58,22 +58,24 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
|
|
irq_num = gpio_to_irq(gpio);
|
|
fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio];
|
|
|
|
- while (irq_counter[gpio] < fiq_count) {
|
|
- if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
|
|
- struct irq_data *d = irq_get_irq_data(irq_num);
|
|
-
|
|
- /*
|
|
- * It looks like handle_edge_irq() that
|
|
- * OMAP GPIO edge interrupts default to,
|
|
- * expects interrupt already unmasked.
|
|
- */
|
|
- if (irq_chip && irq_chip->irq_unmask)
|
|
+ if (irq_counter[gpio] < fiq_count &&
|
|
+ gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
|
|
+ struct irq_data *d = irq_get_irq_data(irq_num);
|
|
+
|
|
+ /*
|
|
+ * handle_simple_irq() that OMAP GPIO edge
|
|
+ * interrupts default to since commit 80ac93c27441
|
|
+ * requires interrupt already acked and unmasked.
|
|
+ */
|
|
+ if (irq_chip) {
|
|
+ if (irq_chip->irq_ack)
|
|
+ irq_chip->irq_ack(d);
|
|
+ if (irq_chip->irq_unmask)
|
|
irq_chip->irq_unmask(d);
|
|
}
|
|
- generic_handle_irq(irq_num);
|
|
-
|
|
- irq_counter[gpio]++;
|
|
}
|
|
+ for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
|
|
+ generic_handle_irq(irq_num);
|
|
}
|
|
return IRQ_HANDLED;
|
|
}
|
|
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
|
|
index 76eb6ec5f157..1e6a967cd2d5 100644
|
|
--- a/arch/arm/mach-omap2/powerdomain.c
|
|
+++ b/arch/arm/mach-omap2/powerdomain.c
|
|
@@ -188,7 +188,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
|
|
((prev & OMAP_POWERSTATE_MASK) << 0));
|
|
trace_power_domain_target_rcuidle(pwrdm->name,
|
|
trace_state,
|
|
- smp_processor_id());
|
|
+ raw_smp_processor_id());
|
|
}
|
|
break;
|
|
default:
|
|
@@ -518,7 +518,7 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
|
|
if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
|
|
/* Trace the pwrdm desired target state */
|
|
trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
|
|
- smp_processor_id());
|
|
+ raw_smp_processor_id());
|
|
/* Program the pwrdm desired target state */
|
|
ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
|
|
}
|
|
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
|
|
index 54f418d05e15..2306b1a0c09a 100644
|
|
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
|
|
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
|
|
@@ -46,7 +46,7 @@
|
|
compatible = "ethernet-phy-ieee802.3-c22";
|
|
reg = <0x0>;
|
|
interrupt-parent = <&gpio>;
|
|
- interrupts = <TEGRA_MAIN_GPIO(M, 5) IRQ_TYPE_LEVEL_HIGH>;
|
|
+ interrupts = <TEGRA_MAIN_GPIO(M, 5) IRQ_TYPE_LEVEL_LOW>;
|
|
};
|
|
};
|
|
};
|
|
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
|
|
index 30da0918d046..04569aa267fd 100644
|
|
--- a/arch/arm64/include/asm/cputype.h
|
|
+++ b/arch/arm64/include/asm/cputype.h
|
|
@@ -75,6 +75,7 @@
|
|
#define ARM_CPU_IMP_CAVIUM 0x43
|
|
#define ARM_CPU_IMP_BRCM 0x42
|
|
#define ARM_CPU_IMP_QCOM 0x51
|
|
+#define ARM_CPU_IMP_NVIDIA 0x4E
|
|
|
|
#define ARM_CPU_PART_AEM_V8 0xD0F
|
|
#define ARM_CPU_PART_FOUNDATION 0xD00
|
|
@@ -98,6 +99,9 @@
|
|
#define QCOM_CPU_PART_FALKOR 0xC00
|
|
#define QCOM_CPU_PART_KRYO 0x200
|
|
|
|
+#define NVIDIA_CPU_PART_DENVER 0x003
|
|
+#define NVIDIA_CPU_PART_CARMEL 0x004
|
|
+
|
|
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
|
|
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
|
|
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
|
|
@@ -112,6 +116,8 @@
|
|
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
|
|
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
|
|
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
|
|
+#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
|
|
+#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
|
|
index 9cbb6123208f..edaf346d13d5 100644
|
|
--- a/arch/arm64/kernel/ptrace.c
|
|
+++ b/arch/arm64/kernel/ptrace.c
|
|
@@ -25,6 +25,7 @@
|
|
#include <linux/sched/signal.h>
|
|
#include <linux/sched/task_stack.h>
|
|
#include <linux/mm.h>
|
|
+#include <linux/nospec.h>
|
|
#include <linux/smp.h>
|
|
#include <linux/ptrace.h>
|
|
#include <linux/user.h>
|
|
@@ -247,15 +248,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
|
|
|
|
switch (note_type) {
|
|
case NT_ARM_HW_BREAK:
|
|
- if (idx < ARM_MAX_BRP)
|
|
- bp = tsk->thread.debug.hbp_break[idx];
|
|
+ if (idx >= ARM_MAX_BRP)
|
|
+ goto out;
|
|
+ idx = array_index_nospec(idx, ARM_MAX_BRP);
|
|
+ bp = tsk->thread.debug.hbp_break[idx];
|
|
break;
|
|
case NT_ARM_HW_WATCH:
|
|
- if (idx < ARM_MAX_WRP)
|
|
- bp = tsk->thread.debug.hbp_watch[idx];
|
|
+ if (idx >= ARM_MAX_WRP)
|
|
+ goto out;
|
|
+ idx = array_index_nospec(idx, ARM_MAX_WRP);
|
|
+ bp = tsk->thread.debug.hbp_watch[idx];
|
|
break;
|
|
}
|
|
|
|
+out:
|
|
return bp;
|
|
}
|
|
|
|
@@ -1194,9 +1200,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
|
|
{
|
|
int ret;
|
|
u32 kdata;
|
|
- mm_segment_t old_fs = get_fs();
|
|
|
|
- set_fs(KERNEL_DS);
|
|
/* Watchpoint */
|
|
if (num < 0) {
|
|
ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
|
|
@@ -1207,7 +1211,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
|
|
} else {
|
|
ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
|
|
}
|
|
- set_fs(old_fs);
|
|
|
|
if (!ret)
|
|
ret = put_user(kdata, data);
|
|
@@ -1220,7 +1223,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
|
|
{
|
|
int ret;
|
|
u32 kdata = 0;
|
|
- mm_segment_t old_fs = get_fs();
|
|
|
|
if (num == 0)
|
|
return 0;
|
|
@@ -1229,12 +1231,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
|
|
if (ret)
|
|
return ret;
|
|
|
|
- set_fs(KERNEL_DS);
|
|
if (num < 0)
|
|
ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
|
|
else
|
|
ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
|
|
- set_fs(old_fs);
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
|
|
index 66f5e9a61efc..7288e31d3713 100644
|
|
--- a/arch/hexagon/include/asm/io.h
|
|
+++ b/arch/hexagon/include/asm/io.h
|
|
@@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
|
|
memcpy((void *) dst, src, count);
|
|
}
|
|
|
|
+static inline void memset_io(volatile void __iomem *addr, int value,
|
|
+ size_t size)
|
|
+{
|
|
+ memset((void __force *)addr, value, size);
|
|
+}
|
|
+
|
|
#define PCI_IO_ADDR (volatile void __iomem *)
|
|
|
|
/*
|
|
diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c
|
|
index 617506d1a559..7cd0a2259269 100644
|
|
--- a/arch/hexagon/lib/checksum.c
|
|
+++ b/arch/hexagon/lib/checksum.c
|
|
@@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
|
|
memcpy(dst, src, len);
|
|
return csum_partial(dst, len, sum);
|
|
}
|
|
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
|
|
diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
|
|
index 2cd49b60e030..f7aad80c69ab 100644
|
|
--- a/arch/mips/boot/dts/img/boston.dts
|
|
+++ b/arch/mips/boot/dts/img/boston.dts
|
|
@@ -51,6 +51,8 @@
|
|
ranges = <0x02000000 0 0x40000000
|
|
0x40000000 0 0x40000000>;
|
|
|
|
+ bus-range = <0x00 0xff>;
|
|
+
|
|
interrupt-map-mask = <0 0 0 7>;
|
|
interrupt-map = <0 0 0 1 &pci0_intc 1>,
|
|
<0 0 0 2 &pci0_intc 2>,
|
|
@@ -79,6 +81,8 @@
|
|
ranges = <0x02000000 0 0x20000000
|
|
0x20000000 0 0x20000000>;
|
|
|
|
+ bus-range = <0x00 0xff>;
|
|
+
|
|
interrupt-map-mask = <0 0 0 7>;
|
|
interrupt-map = <0 0 0 1 &pci1_intc 1>,
|
|
<0 0 0 2 &pci1_intc 2>,
|
|
@@ -107,6 +111,8 @@
|
|
ranges = <0x02000000 0 0x16000000
|
|
0x16000000 0 0x100000>;
|
|
|
|
+ bus-range = <0x00 0xff>;
|
|
+
|
|
interrupt-map-mask = <0 0 0 7>;
|
|
interrupt-map = <0 0 0 1 &pci2_intc 1>,
|
|
<0 0 0 2 &pci2_intc 2>,
|
|
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
|
|
index 0cbf3af37eca..a7d0b836f2f7 100644
|
|
--- a/arch/mips/include/asm/io.h
|
|
+++ b/arch/mips/include/asm/io.h
|
|
@@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr)
|
|
#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
|
|
#define war_io_reorder_wmb() wmb()
|
|
#else
|
|
-#define war_io_reorder_wmb() do { } while (0)
|
|
+#define war_io_reorder_wmb() barrier()
|
|
#endif
|
|
|
|
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
|
|
@@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
|
|
BUG(); \
|
|
} \
|
|
\
|
|
+ /* prevent prefetching of coherent DMA data prematurely */ \
|
|
+ rmb(); \
|
|
return pfx##ioswab##bwlq(__mem, __val); \
|
|
}
|
|
|
|
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
|
|
index 513826a43efd..6a71d3151a23 100644
|
|
--- a/arch/parisc/kernel/drivers.c
|
|
+++ b/arch/parisc/kernel/drivers.c
|
|
@@ -448,7 +448,8 @@ static int match_by_id(struct device * dev, void * data)
|
|
* Checks all the children of @parent for a matching @id. If none
|
|
* found, it allocates a new device and returns it.
|
|
*/
|
|
-static struct parisc_device * alloc_tree_node(struct device *parent, char id)
|
|
+static struct parisc_device * __init alloc_tree_node(
|
|
+ struct device *parent, char id)
|
|
{
|
|
struct match_id_data d = {
|
|
.id = id,
|
|
@@ -825,8 +826,8 @@ void walk_lower_bus(struct parisc_device *dev)
|
|
* devices which are not physically connected (such as extra serial &
|
|
* keyboard ports). This problem is not yet solved.
|
|
*/
|
|
-static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
|
|
- struct device *parent)
|
|
+static void __init walk_native_bus(unsigned long io_io_low,
|
|
+ unsigned long io_io_high, struct device *parent)
|
|
{
|
|
int i, devices_found = 0;
|
|
unsigned long hpa = io_io_low;
|
|
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
|
|
index 30c28ab14540..ab4d5580bb02 100644
|
|
--- a/arch/parisc/kernel/smp.c
|
|
+++ b/arch/parisc/kernel/smp.c
|
|
@@ -418,8 +418,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
|
|
}
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
-int __init
|
|
-setup_profiling_timer(unsigned int multiplier)
|
|
+int setup_profiling_timer(unsigned int multiplier)
|
|
{
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
|
|
index f7e684560186..42a873226a04 100644
|
|
--- a/arch/parisc/kernel/time.c
|
|
+++ b/arch/parisc/kernel/time.c
|
|
@@ -205,7 +205,7 @@ static int __init rtc_init(void)
|
|
device_initcall(rtc_init);
|
|
#endif
|
|
|
|
-void read_persistent_clock(struct timespec *ts)
|
|
+void read_persistent_clock64(struct timespec64 *ts)
|
|
{
|
|
static struct pdc_tod tod_data;
|
|
if (pdc_tod_read(&tod_data) == 0) {
|
|
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
|
|
index de470caf0784..fc222a0c2ac4 100644
|
|
--- a/arch/powerpc/platforms/powernv/memtrace.c
|
|
+++ b/arch/powerpc/platforms/powernv/memtrace.c
|
|
@@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = {
|
|
.open = simple_open,
|
|
};
|
|
|
|
-static void flush_memory_region(u64 base, u64 size)
|
|
-{
|
|
- unsigned long line_size = ppc64_caches.l1d.size;
|
|
- u64 end = base + size;
|
|
- u64 addr;
|
|
-
|
|
- base = round_down(base, line_size);
|
|
- end = round_up(end, line_size);
|
|
-
|
|
- for (addr = base; addr < end; addr += line_size)
|
|
- asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory");
|
|
-}
|
|
-
|
|
static int check_memblock_online(struct memory_block *mem, void *arg)
|
|
{
|
|
if (mem->state != MEM_ONLINE)
|
|
@@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
|
|
walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
|
|
change_memblock_state);
|
|
|
|
- /* RCU grace period? */
|
|
- flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT),
|
|
- nr_pages << PAGE_SHIFT);
|
|
-
|
|
lock_device_hotplug();
|
|
remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
|
|
unlock_device_hotplug();
|
|
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
|
|
index 4205f6d42b69..a5bd03642678 100644
|
|
--- a/arch/sh/kernel/cpu/sh2/probe.c
|
|
+++ b/arch/sh/kernel/cpu/sh2/probe.c
|
|
@@ -43,7 +43,11 @@ void __ref cpu_probe(void)
|
|
#endif
|
|
|
|
#if defined(CONFIG_CPU_J2)
|
|
+#if defined(CONFIG_SMP)
|
|
unsigned cpu = hard_smp_processor_id();
|
|
+#else
|
|
+ unsigned cpu = 0;
|
|
+#endif
|
|
if (cpu == 0) of_scan_flat_dt(scan_cache, NULL);
|
|
if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu);
|
|
if (cpu != 0) return;
|
|
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
|
|
index 6965ee8c4b8a..228732654cfe 100644
|
|
--- a/arch/x86/events/intel/core.c
|
|
+++ b/arch/x86/events/intel/core.c
|
|
@@ -3331,7 +3331,8 @@ static void intel_pmu_cpu_starting(int cpu)
|
|
|
|
cpuc->lbr_sel = NULL;
|
|
|
|
- flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
|
|
+ if (x86_pmu.version > 1)
|
|
+ flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
|
|
|
|
if (!cpuc->shared_regs)
|
|
return;
|
|
@@ -3494,6 +3495,8 @@ static __initconst const struct x86_pmu core_pmu = {
|
|
.cpu_dying = intel_pmu_cpu_dying,
|
|
};
|
|
|
|
+static struct attribute *intel_pmu_attrs[];
|
|
+
|
|
static __initconst const struct x86_pmu intel_pmu = {
|
|
.name = "Intel",
|
|
.handle_irq = intel_pmu_handle_irq,
|
|
@@ -3524,6 +3527,8 @@ static __initconst const struct x86_pmu intel_pmu = {
|
|
.format_attrs = intel_arch3_formats_attr,
|
|
.events_sysfs_show = intel_event_sysfs_show,
|
|
|
|
+ .attrs = intel_pmu_attrs,
|
|
+
|
|
.cpu_prepare = intel_pmu_cpu_prepare,
|
|
.cpu_starting = intel_pmu_cpu_starting,
|
|
.cpu_dying = intel_pmu_cpu_dying,
|
|
@@ -3902,8 +3907,6 @@ __init int intel_pmu_init(void)
|
|
|
|
x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
|
|
|
|
-
|
|
- x86_pmu.attrs = intel_pmu_attrs;
|
|
/*
|
|
* Quirk: v2 perfmon does not report fixed-purpose events, so
|
|
* assume at least 3 events, when not running in a hypervisor:
|
|
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
|
|
index b3e32b010ab1..c2c01f84df75 100644
|
|
--- a/arch/x86/include/asm/insn.h
|
|
+++ b/arch/x86/include/asm/insn.h
|
|
@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
|
|
return insn_offset_displacement(insn) + insn->displacement.nbytes;
|
|
}
|
|
|
|
+#define POP_SS_OPCODE 0x1f
|
|
+#define MOV_SREG_OPCODE 0x8e
|
|
+
|
|
+/*
|
|
+ * Intel SDM Vol.3A 6.8.3 states;
|
|
+ * "Any single-step trap that would be delivered following the MOV to SS
|
|
+ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
|
|
+ * suppressed."
|
|
+ * This function returns true if @insn is MOV SS or POP SS. On these
|
|
+ * instructions, single stepping is suppressed.
|
|
+ */
|
|
+static inline int insn_masking_exception(struct insn *insn)
|
|
+{
|
|
+ return insn->opcode.bytes[0] == POP_SS_OPCODE ||
|
|
+ (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
|
|
+ X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
|
|
+}
|
|
+
|
|
#endif /* _ASM_X86_INSN_H */
|
|
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
|
|
index c895f38a7a5e..0b2330e19169 100644
|
|
--- a/arch/x86/kernel/cpu/intel.c
|
|
+++ b/arch/x86/kernel/cpu/intel.c
|
|
@@ -751,6 +751,9 @@ static const struct _tlb_table intel_tlb_table[] = {
|
|
{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
|
|
{ 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
|
|
{ 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
|
|
+ { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
|
|
+ { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
|
|
+ { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
|
|
{ 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
|
|
{ 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
|
|
{ 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
|
|
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
|
|
index fb095ba0c02f..f24cd9f1799a 100644
|
|
--- a/arch/x86/kernel/kexec-bzimage64.c
|
|
+++ b/arch/x86/kernel/kexec-bzimage64.c
|
|
@@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
|
|
* little bit simple
|
|
*/
|
|
efi_map_sz = efi_get_runtime_map_size();
|
|
- efi_map_sz = ALIGN(efi_map_sz, 16);
|
|
params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
|
|
MAX_ELFCOREHDR_STR_LEN;
|
|
params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
|
|
- kbuf.bufsz = params_cmdline_sz + efi_map_sz +
|
|
+ kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
|
|
sizeof(struct setup_data) +
|
|
sizeof(struct efi_setup_data);
|
|
|
|
@@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
|
|
if (!params)
|
|
return ERR_PTR(-ENOMEM);
|
|
efi_map_offset = params_cmdline_sz;
|
|
- efi_setup_data_offset = efi_map_offset + efi_map_sz;
|
|
+ efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16);
|
|
|
|
/* Copy setup header onto bootparams. Documentation/x86/boot.txt */
|
|
setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;
|
|
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
|
|
index ce06ec9c2323..f1030c522e06 100644
|
|
--- a/arch/x86/kernel/kprobes/core.c
|
|
+++ b/arch/x86/kernel/kprobes/core.c
|
|
@@ -369,6 +369,10 @@ int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
|
|
if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
|
|
return 0;
|
|
|
|
+ /* We should not singlestep on the exception masking instructions */
|
|
+ if (insn_masking_exception(insn))
|
|
+ return 0;
|
|
+
|
|
#ifdef CONFIG_X86_64
|
|
/* Only x86_64 has RIP relative instructions */
|
|
if (insn_rip_relative(insn)) {
|
|
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
|
|
index 495c776de4b4..e1ea13ae53b9 100644
|
|
--- a/arch/x86/kernel/uprobes.c
|
|
+++ b/arch/x86/kernel/uprobes.c
|
|
@@ -296,6 +296,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
|
|
if (is_prefix_bad(insn))
|
|
return -ENOTSUPP;
|
|
|
|
+ /* We should not singlestep on the exception masking instructions */
|
|
+ if (insn_masking_exception(insn))
|
|
+ return -ENOTSUPP;
|
|
+
|
|
if (x86_64)
|
|
good_insns = good_insns_64;
|
|
else
|
|
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
|
|
index dc97f2544b6f..5d13abecb384 100644
|
|
--- a/arch/x86/kvm/hyperv.c
|
|
+++ b/arch/x86/kvm/hyperv.c
|
|
@@ -1223,7 +1223,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
|
|
struct kvm_run *run = vcpu->run;
|
|
|
|
kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
|
|
- return 1;
|
|
+ return kvm_skip_emulated_instruction(vcpu);
|
|
}
|
|
|
|
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
|
|
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
|
|
index 029aa1318874..cfa155078ebb 100644
|
|
--- a/arch/x86/kvm/svm.c
|
|
+++ b/arch/x86/kvm/svm.c
|
|
@@ -4756,9 +4756,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
|
|
}
|
|
|
|
if (!ret && svm) {
|
|
- trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
|
|
- host_irq, e->gsi,
|
|
- vcpu_info.vector,
|
|
+ trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
|
|
+ e->gsi, vcpu_info.vector,
|
|
vcpu_info.pi_desc_addr, set);
|
|
}
|
|
|
|
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
|
|
index b1556166a06d..90747865205d 100644
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -10318,6 +10318,16 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
|
|
return true;
|
|
}
|
|
|
|
+static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
|
|
+ struct vmcs12 *vmcs12)
|
|
+{
|
|
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
|
|
+ !page_address_valid(vcpu, vmcs12->apic_access_addr))
|
|
+ return -EINVAL;
|
|
+ else
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
|
|
struct vmcs12 *vmcs12)
|
|
{
|
|
@@ -10961,6 +10971,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
|
|
if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
|
|
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
|
|
|
|
+ if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
|
|
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
|
|
+
|
|
if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
|
|
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
|
|
|
|
@@ -12171,7 +12184,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
|
|
vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
|
|
vcpu_info.vector = irq.vector;
|
|
|
|
- trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi,
|
|
+ trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
|
|
vcpu_info.vector, vcpu_info.pi_desc_addr, set);
|
|
|
|
if (set)
|
|
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
|
|
index b62328cd4cb0..2f3fe25639b3 100644
|
|
--- a/arch/x86/kvm/x86.c
|
|
+++ b/arch/x86/kvm/x86.c
|
|
@@ -6297,12 +6297,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
|
|
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
|
|
{
|
|
unsigned long nr, a0, a1, a2, a3, ret;
|
|
- int op_64_bit, r;
|
|
+ int op_64_bit;
|
|
|
|
- r = kvm_skip_emulated_instruction(vcpu);
|
|
-
|
|
- if (kvm_hv_hypercall_enabled(vcpu->kvm))
|
|
- return kvm_hv_hypercall(vcpu);
|
|
+ if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
|
|
+ if (!kvm_hv_hypercall(vcpu))
|
|
+ return 0;
|
|
+ goto out;
|
|
+ }
|
|
|
|
nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
|
|
a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
|
|
@@ -6323,7 +6324,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
|
|
|
|
if (kvm_x86_ops->get_cpl(vcpu) != 0) {
|
|
ret = -KVM_EPERM;
|
|
- goto out;
|
|
+ goto out_error;
|
|
}
|
|
|
|
switch (nr) {
|
|
@@ -6343,12 +6344,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
|
|
ret = -KVM_ENOSYS;
|
|
break;
|
|
}
|
|
-out:
|
|
+out_error:
|
|
if (!op_64_bit)
|
|
ret = (u32)ret;
|
|
kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
|
|
+
|
|
+out:
|
|
++vcpu->stat.hypercalls;
|
|
- return r;
|
|
+ return kvm_skip_emulated_instruction(vcpu);
|
|
}
|
|
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
|
|
|
|
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
|
|
index bb77606d04e0..a9deb2b0397d 100644
|
|
--- a/arch/x86/net/bpf_jit_comp.c
|
|
+++ b/arch/x86/net/bpf_jit_comp.c
|
|
@@ -1159,6 +1159,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|
for (pass = 0; pass < 20 || image; pass++) {
|
|
proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
|
|
if (proglen <= 0) {
|
|
+out_image:
|
|
image = NULL;
|
|
if (header)
|
|
bpf_jit_binary_free(header);
|
|
@@ -1169,8 +1170,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|
if (proglen != oldproglen) {
|
|
pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
|
|
proglen, oldproglen);
|
|
- prog = orig_prog;
|
|
- goto out_addrs;
|
|
+ goto out_image;
|
|
}
|
|
break;
|
|
}
|
|
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
|
|
index 754d5391d9fa..854508b00bbb 100644
|
|
--- a/arch/x86/xen/enlighten_hvm.c
|
|
+++ b/arch/x86/xen/enlighten_hvm.c
|
|
@@ -64,6 +64,19 @@ static void __init xen_hvm_init_mem_mapping(void)
|
|
{
|
|
early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
|
|
HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
|
|
+
|
|
+ /*
|
|
+ * The virtual address of the shared_info page has changed, so
|
|
+ * the vcpu_info pointer for VCPU 0 is now stale.
|
|
+ *
|
|
+ * The prepare_boot_cpu callback will re-initialize it via
|
|
+ * xen_vcpu_setup, but we can't rely on that to be called for
|
|
+ * old Xen versions (xen_have_vector_callback == 0).
|
|
+ *
|
|
+ * It is, in any case, bad to have a stale vcpu_info pointer
|
|
+ * so reset it now.
|
|
+ */
|
|
+ xen_vcpu_info_reset(0);
|
|
}
|
|
|
|
static void __init init_hvm_pv_info(void)
|
|
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
|
|
index d3f56baee936..3dc7c0b4adcb 100644
|
|
--- a/block/blk-cgroup.c
|
|
+++ b/block/blk-cgroup.c
|
|
@@ -1149,18 +1149,16 @@ int blkcg_init_queue(struct request_queue *q)
|
|
rcu_read_lock();
|
|
spin_lock_irq(q->queue_lock);
|
|
blkg = blkg_create(&blkcg_root, q, new_blkg);
|
|
+ if (IS_ERR(blkg))
|
|
+ goto err_unlock;
|
|
+ q->root_blkg = blkg;
|
|
+ q->root_rl.blkg = blkg;
|
|
spin_unlock_irq(q->queue_lock);
|
|
rcu_read_unlock();
|
|
|
|
if (preloaded)
|
|
radix_tree_preload_end();
|
|
|
|
- if (IS_ERR(blkg))
|
|
- return PTR_ERR(blkg);
|
|
-
|
|
- q->root_blkg = blkg;
|
|
- q->root_rl.blkg = blkg;
|
|
-
|
|
ret = blk_throtl_init(q);
|
|
if (ret) {
|
|
spin_lock_irq(q->queue_lock);
|
|
@@ -1168,6 +1166,13 @@ int blkcg_init_queue(struct request_queue *q)
|
|
spin_unlock_irq(q->queue_lock);
|
|
}
|
|
return ret;
|
|
+
|
|
+err_unlock:
|
|
+ spin_unlock_irq(q->queue_lock);
|
|
+ rcu_read_unlock();
|
|
+ if (preloaded)
|
|
+ radix_tree_preload_end();
|
|
+ return PTR_ERR(blkg);
|
|
}
|
|
|
|
/**
|
|
@@ -1374,17 +1379,12 @@ void blkcg_deactivate_policy(struct request_queue *q,
|
|
__clear_bit(pol->plid, q->blkcg_pols);
|
|
|
|
list_for_each_entry(blkg, &q->blkg_list, q_node) {
|
|
- /* grab blkcg lock too while removing @pd from @blkg */
|
|
- spin_lock(&blkg->blkcg->lock);
|
|
-
|
|
if (blkg->pd[pol->plid]) {
|
|
if (pol->pd_offline_fn)
|
|
pol->pd_offline_fn(blkg->pd[pol->plid]);
|
|
pol->pd_free_fn(blkg->pd[pol->plid]);
|
|
blkg->pd[pol->plid] = NULL;
|
|
}
|
|
-
|
|
- spin_unlock(&blkg->blkcg->lock);
|
|
}
|
|
|
|
spin_unlock_irq(q->queue_lock);
|
|
diff --git a/block/blk-mq.c b/block/blk-mq.c
|
|
index 007f96611364..74c35513ada5 100644
|
|
--- a/block/blk-mq.c
|
|
+++ b/block/blk-mq.c
|
|
@@ -118,6 +118,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
|
|
blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
|
|
}
|
|
|
|
+static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
|
|
+ struct request *rq, void *priv,
|
|
+ bool reserved)
|
|
+{
|
|
+ struct mq_inflight *mi = priv;
|
|
+
|
|
+ if (rq->part == mi->part)
|
|
+ mi->inflight[rq_data_dir(rq)]++;
|
|
+}
|
|
+
|
|
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
|
|
+ unsigned int inflight[2])
|
|
+{
|
|
+ struct mq_inflight mi = { .part = part, .inflight = inflight, };
|
|
+
|
|
+ inflight[0] = inflight[1] = 0;
|
|
+ blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
|
|
+}
|
|
+
|
|
void blk_freeze_queue_start(struct request_queue *q)
|
|
{
|
|
int freeze_depth;
|
|
diff --git a/block/blk-mq.h b/block/blk-mq.h
|
|
index 4933af9d61f7..877237e09083 100644
|
|
--- a/block/blk-mq.h
|
|
+++ b/block/blk-mq.h
|
|
@@ -136,6 +136,8 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
|
|
}
|
|
|
|
void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
|
|
- unsigned int inflight[2]);
|
|
+ unsigned int inflight[2]);
|
|
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
|
|
+ unsigned int inflight[2]);
|
|
|
|
#endif
|
|
diff --git a/block/genhd.c b/block/genhd.c
|
|
index dd305c65ffb0..449ef56bba70 100644
|
|
--- a/block/genhd.c
|
|
+++ b/block/genhd.c
|
|
@@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
|
|
}
|
|
}
|
|
|
|
+void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
|
|
+ unsigned int inflight[2])
|
|
+{
|
|
+ if (q->mq_ops) {
|
|
+ blk_mq_in_flight_rw(q, part, inflight);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ inflight[0] = atomic_read(&part->in_flight[0]);
|
|
+ inflight[1] = atomic_read(&part->in_flight[1]);
|
|
+}
|
|
+
|
|
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
|
|
{
|
|
struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
|
|
diff --git a/block/partition-generic.c b/block/partition-generic.c
|
|
index 08dabcd8b6ae..db57cced9b98 100644
|
|
--- a/block/partition-generic.c
|
|
+++ b/block/partition-generic.c
|
|
@@ -145,13 +145,15 @@ ssize_t part_stat_show(struct device *dev,
|
|
jiffies_to_msecs(part_stat_read(p, time_in_queue)));
|
|
}
|
|
|
|
-ssize_t part_inflight_show(struct device *dev,
|
|
- struct device_attribute *attr, char *buf)
|
|
+ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
|
|
+ char *buf)
|
|
{
|
|
struct hd_struct *p = dev_to_part(dev);
|
|
+ struct request_queue *q = part_to_disk(p)->queue;
|
|
+ unsigned int inflight[2];
|
|
|
|
- return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
|
|
- atomic_read(&p->in_flight[1]));
|
|
+ part_in_flight_rw(q, p, inflight);
|
|
+ return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
|
|
}
|
|
|
|
#ifdef CONFIG_FAIL_MAKE_REQUEST
|
|
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
|
|
index ebb626ffb5fa..4bde16fb97d8 100644
|
|
--- a/drivers/acpi/acpi_watchdog.c
|
|
+++ b/drivers/acpi/acpi_watchdog.c
|
|
@@ -12,23 +12,64 @@
|
|
#define pr_fmt(fmt) "ACPI: watchdog: " fmt
|
|
|
|
#include <linux/acpi.h>
|
|
+#include <linux/dmi.h>
|
|
#include <linux/ioport.h>
|
|
#include <linux/platform_device.h>
|
|
|
|
#include "internal.h"
|
|
|
|
+static const struct dmi_system_id acpi_watchdog_skip[] = {
|
|
+ {
|
|
+ /*
|
|
+ * On Lenovo Z50-70 there are two issues with the WDAT
|
|
+ * table. First some of the instructions use RTC SRAM
|
|
+ * to store persistent information. This does not work well
|
|
+ * with Linux RTC driver. Second, more important thing is
|
|
+ * that the instructions do not actually reset the system.
|
|
+ *
|
|
+ * On this particular system iTCO_wdt seems to work just
|
|
+ * fine so we prefer that over WDAT for now.
|
|
+ *
|
|
+ * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
|
|
+ */
|
|
+ .ident = "Lenovo Z50-70",
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
|
|
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
|
|
+ },
|
|
+ },
|
|
+ {}
|
|
+};
|
|
+
|
|
+static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
|
|
+{
|
|
+ const struct acpi_table_wdat *wdat = NULL;
|
|
+ acpi_status status;
|
|
+
|
|
+ if (acpi_disabled)
|
|
+ return NULL;
|
|
+
|
|
+ if (dmi_check_system(acpi_watchdog_skip))
|
|
+ return NULL;
|
|
+
|
|
+ status = acpi_get_table(ACPI_SIG_WDAT, 0,
|
|
+ (struct acpi_table_header **)&wdat);
|
|
+ if (ACPI_FAILURE(status)) {
|
|
+ /* It is fine if there is no WDAT */
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ return wdat;
|
|
+}
|
|
+
|
|
/**
|
|
* Returns true if this system should prefer ACPI based watchdog instead of
|
|
* the native one (which are typically the same hardware).
|
|
*/
|
|
bool acpi_has_watchdog(void)
|
|
{
|
|
- struct acpi_table_header hdr;
|
|
-
|
|
- if (acpi_disabled)
|
|
- return false;
|
|
-
|
|
- return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
|
|
+ return !!acpi_watchdog_get_wdat();
|
|
}
|
|
EXPORT_SYMBOL_GPL(acpi_has_watchdog);
|
|
|
|
@@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
|
|
struct platform_device *pdev;
|
|
struct resource *resources;
|
|
size_t nresources = 0;
|
|
- acpi_status status;
|
|
int i;
|
|
|
|
- status = acpi_get_table(ACPI_SIG_WDAT, 0,
|
|
- (struct acpi_table_header **)&wdat);
|
|
- if (ACPI_FAILURE(status)) {
|
|
+ wdat = acpi_watchdog_get_wdat();
|
|
+ if (!wdat) {
|
|
/* It is fine if there is no WDAT */
|
|
return;
|
|
}
|
|
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
|
|
index 8082871b409a..2ef0ad6a33d6 100644
|
|
--- a/drivers/acpi/sleep.c
|
|
+++ b/drivers/acpi/sleep.c
|
|
@@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
|
|
},
|
|
},
|
|
+ /*
|
|
+ * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
|
|
+ * the Low Power S0 Idle firmware interface (see
|
|
+ * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
|
|
+ */
|
|
+ {
|
|
+ .callback = init_no_lps0,
|
|
+ .ident = "ThinkPad X1 Tablet(2016)",
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
|
|
+ },
|
|
+ },
|
|
{},
|
|
};
|
|
|
|
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
|
|
index 18391d0c0cd7..75eb50041c99 100644
|
|
--- a/drivers/ata/ahci.c
|
|
+++ b/drivers/ata/ahci.c
|
|
@@ -686,7 +686,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
- ahci_stop_engine(ap);
|
|
+ hpriv->stop_engine(ap);
|
|
|
|
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
|
|
deadline, &online, NULL);
|
|
@@ -712,7 +712,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
|
|
bool online;
|
|
int rc;
|
|
|
|
- ahci_stop_engine(ap);
|
|
+ hpriv->stop_engine(ap);
|
|
|
|
/* clear D2H reception area to properly wait for D2H FIS */
|
|
ata_tf_init(link->device, &tf);
|
|
@@ -776,7 +776,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
- ahci_stop_engine(ap);
|
|
+ hpriv->stop_engine(ap);
|
|
|
|
for (i = 0; i < 2; i++) {
|
|
u16 val;
|
|
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
|
|
index 8b61123d2c3c..781b898e5785 100644
|
|
--- a/drivers/ata/ahci.h
|
|
+++ b/drivers/ata/ahci.h
|
|
@@ -361,6 +361,13 @@ struct ahci_host_priv {
|
|
* be overridden anytime before the host is activated.
|
|
*/
|
|
void (*start_engine)(struct ata_port *ap);
|
|
+ /*
|
|
+ * Optional ahci_stop_engine override, if not set this gets set to the
|
|
+ * default ahci_stop_engine during ahci_save_initial_config, this can
|
|
+ * be overridden anytime before the host is activated.
|
|
+ */
|
|
+ int (*stop_engine)(struct ata_port *ap);
|
|
+
|
|
irqreturn_t (*irq_handler)(int irq, void *dev_instance);
|
|
|
|
/* only required for per-port MSI(-X) support */
|
|
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
|
|
index de7128d81e9c..0045dacd814b 100644
|
|
--- a/drivers/ata/ahci_mvebu.c
|
|
+++ b/drivers/ata/ahci_mvebu.c
|
|
@@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
|
|
writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
|
|
}
|
|
|
|
+/**
|
|
+ * ahci_mvebu_stop_engine
|
|
+ *
|
|
+ * @ap: Target ata port
|
|
+ *
|
|
+ * Errata Ref#226 - SATA Disk HOT swap issue when connected through
|
|
+ * Port Multiplier in FIS-based Switching mode.
|
|
+ *
|
|
+ * To avoid the issue, according to design, the bits[11:8, 0] of
|
|
+ * register PxFBS are cleared when Port Command and Status (0x18) bit[0]
|
|
+ * changes its value from 1 to 0, i.e. falling edge of Port
|
|
+ * Command and Status bit[0] sends PULSE that resets PxFBS
|
|
+ * bits[11:8; 0].
|
|
+ *
|
|
+ * This function is used to override function of "ahci_stop_engine"
|
|
+ * from libahci.c by adding the mvebu work around(WA) to save PxFBS
|
|
+ * value before the PxCMD ST write of 0, then restore PxFBS value.
|
|
+ *
|
|
+ * Return: 0 on success; Error code otherwise.
|
|
+ */
|
|
+int ahci_mvebu_stop_engine(struct ata_port *ap)
|
|
+{
|
|
+ void __iomem *port_mmio = ahci_port_base(ap);
|
|
+ u32 tmp, port_fbs;
|
|
+
|
|
+ tmp = readl(port_mmio + PORT_CMD);
|
|
+
|
|
+ /* check if the HBA is idle */
|
|
+ if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
|
|
+ return 0;
|
|
+
|
|
+ /* save the port PxFBS register for later restore */
|
|
+ port_fbs = readl(port_mmio + PORT_FBS);
|
|
+
|
|
+ /* setting HBA to idle */
|
|
+ tmp &= ~PORT_CMD_START;
|
|
+ writel(tmp, port_mmio + PORT_CMD);
|
|
+
|
|
+ /*
|
|
+ * bit #15 PxCMD signal doesn't clear PxFBS,
|
|
+ * restore the PxFBS register right after clearing the PxCMD ST,
|
|
+ * no need to wait for the PxCMD bit #15.
|
|
+ */
|
|
+ writel(port_fbs, port_mmio + PORT_FBS);
|
|
+
|
|
+ /* wait for engine to stop. This could be as long as 500 msec */
|
|
+ tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
|
|
+ PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
|
|
+ if (tmp & PORT_CMD_LIST_ON)
|
|
+ return -EIO;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
#ifdef CONFIG_PM_SLEEP
|
|
static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
|
|
{
|
|
@@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
|
|
if (rc)
|
|
return rc;
|
|
|
|
+ hpriv->stop_engine = ahci_mvebu_stop_engine;
|
|
+
|
|
if (of_device_is_compatible(pdev->dev.of_node,
|
|
"marvell,armada-380-ahci")) {
|
|
dram = mv_mbus_dram_info();
|
|
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
|
|
index b6b0bf76dfc7..ab5ac103bfb8 100644
|
|
--- a/drivers/ata/ahci_qoriq.c
|
|
+++ b/drivers/ata/ahci_qoriq.c
|
|
@@ -94,7 +94,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
- ahci_stop_engine(ap);
|
|
+ hpriv->stop_engine(ap);
|
|
|
|
/*
|
|
* There is a errata on ls1021a Rev1.0 and Rev2.0 which is:
|
|
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
|
|
index c2b5941d9184..ad58da7c9aff 100644
|
|
--- a/drivers/ata/ahci_xgene.c
|
|
+++ b/drivers/ata/ahci_xgene.c
|
|
@@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
|
|
PORT_CMD_ISSUE, 0x0, 1, 100))
|
|
return -EBUSY;
|
|
|
|
- ahci_stop_engine(ap);
|
|
+ hpriv->stop_engine(ap);
|
|
ahci_start_fis_rx(ap);
|
|
|
|
/*
|
|
@@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
|
|
portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
|
|
portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
|
|
|
|
- ahci_stop_engine(ap);
|
|
+ hpriv->stop_engine(ap);
|
|
|
|
rc = xgene_ahci_do_hardreset(link, deadline, &online);
|
|
|
|
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
|
|
index 3e286d86ab42..5ae268b8514e 100644
|
|
--- a/drivers/ata/libahci.c
|
|
+++ b/drivers/ata/libahci.c
|
|
@@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
|
|
if (!hpriv->start_engine)
|
|
hpriv->start_engine = ahci_start_engine;
|
|
|
|
+ if (!hpriv->stop_engine)
|
|
+ hpriv->stop_engine = ahci_stop_engine;
|
|
+
|
|
if (!hpriv->irq_handler)
|
|
hpriv->irq_handler = ahci_single_level_irq_intr;
|
|
}
|
|
@@ -887,9 +890,10 @@ static void ahci_start_port(struct ata_port *ap)
|
|
static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
|
|
{
|
|
int rc;
|
|
+ struct ahci_host_priv *hpriv = ap->host->private_data;
|
|
|
|
/* disable DMA */
|
|
- rc = ahci_stop_engine(ap);
|
|
+ rc = hpriv->stop_engine(ap);
|
|
if (rc) {
|
|
*emsg = "failed to stop engine";
|
|
return rc;
|
|
@@ -1299,7 +1303,7 @@ int ahci_kick_engine(struct ata_port *ap)
|
|
int busy, rc;
|
|
|
|
/* stop engine */
|
|
- rc = ahci_stop_engine(ap);
|
|
+ rc = hpriv->stop_engine(ap);
|
|
if (rc)
|
|
goto out_restart;
|
|
|
|
@@ -1538,7 +1542,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
- ahci_stop_engine(ap);
|
|
+ hpriv->stop_engine(ap);
|
|
|
|
/* clear D2H reception area to properly wait for D2H FIS */
|
|
ata_tf_init(link->device, &tf);
|
|
@@ -2064,14 +2068,14 @@ void ahci_error_handler(struct ata_port *ap)
|
|
|
|
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
|
|
/* restart engine */
|
|
- ahci_stop_engine(ap);
|
|
+ hpriv->stop_engine(ap);
|
|
hpriv->start_engine(ap);
|
|
}
|
|
|
|
sata_pmp_error_handler(ap);
|
|
|
|
if (!ata_dev_enabled(ap->link.device))
|
|
- ahci_stop_engine(ap);
|
|
+ hpriv->stop_engine(ap);
|
|
}
|
|
EXPORT_SYMBOL_GPL(ahci_error_handler);
|
|
|
|
@@ -2118,7 +2122,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
|
|
return;
|
|
|
|
/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
|
|
- rc = ahci_stop_engine(ap);
|
|
+ rc = hpriv->stop_engine(ap);
|
|
if (rc)
|
|
return;
|
|
|
|
@@ -2178,7 +2182,7 @@ static void ahci_enable_fbs(struct ata_port *ap)
|
|
return;
|
|
}
|
|
|
|
- rc = ahci_stop_engine(ap);
|
|
+ rc = hpriv->stop_engine(ap);
|
|
if (rc)
|
|
return;
|
|
|
|
@@ -2211,7 +2215,7 @@ static void ahci_disable_fbs(struct ata_port *ap)
|
|
return;
|
|
}
|
|
|
|
- rc = ahci_stop_engine(ap);
|
|
+ rc = hpriv->stop_engine(ap);
|
|
if (rc)
|
|
return;
|
|
|
|
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
|
|
index ea20e0eb4d5a..711dd91b5e2c 100644
|
|
--- a/drivers/ata/libata-eh.c
|
|
+++ b/drivers/ata/libata-eh.c
|
|
@@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
|
|
{ }
|
|
#endif /* CONFIG_PM */
|
|
|
|
-static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
|
|
- va_list args)
|
|
+static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
|
|
+ const char *fmt, va_list args)
|
|
{
|
|
ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
|
|
ATA_EH_DESC_LEN - ehi->desc_len,
|
|
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
|
|
index aafb8cc03523..e67815b896fc 100644
|
|
--- a/drivers/ata/sata_highbank.c
|
|
+++ b/drivers/ata/sata_highbank.c
|
|
@@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
|
|
int rc;
|
|
int retry = 100;
|
|
|
|
- ahci_stop_engine(ap);
|
|
+ hpriv->stop_engine(ap);
|
|
|
|
/* clear D2H reception area to properly wait for D2H FIS */
|
|
ata_tf_init(link->device, &tf);
|
|
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
|
|
index c381c8e396fc..79d8c84693a1 100644
|
|
--- a/drivers/char/agp/uninorth-agp.c
|
|
+++ b/drivers/char/agp/uninorth-agp.c
|
|
@@ -195,7 +195,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
|
|
return 0;
|
|
}
|
|
|
|
-int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
|
|
+static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
|
|
{
|
|
size_t i;
|
|
u32 *gp;
|
|
@@ -470,7 +470,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
|
|
return 0;
|
|
}
|
|
|
|
-void null_cache_flush(void)
|
|
+static void null_cache_flush(void)
|
|
{
|
|
mb();
|
|
}
|
|
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
|
|
index 16a3d5717f4e..a062f79bc509 100644
|
|
--- a/drivers/clk/clk-mux.c
|
|
+++ b/drivers/clk/clk-mux.c
|
|
@@ -101,10 +101,18 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
|
|
return 0;
|
|
}
|
|
|
|
+static int clk_mux_determine_rate(struct clk_hw *hw,
|
|
+ struct clk_rate_request *req)
|
|
+{
|
|
+ struct clk_mux *mux = to_clk_mux(hw);
|
|
+
|
|
+ return clk_mux_determine_rate_flags(hw, req, mux->flags);
|
|
+}
|
|
+
|
|
const struct clk_ops clk_mux_ops = {
|
|
.get_parent = clk_mux_get_parent,
|
|
.set_parent = clk_mux_set_parent,
|
|
- .determine_rate = __clk_mux_determine_rate,
|
|
+ .determine_rate = clk_mux_determine_rate,
|
|
};
|
|
EXPORT_SYMBOL_GPL(clk_mux_ops);
|
|
|
|
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
|
|
index 4e21f5bcd954..6f4c98ca6e50 100644
|
|
--- a/drivers/clk/clk.c
|
|
+++ b/drivers/clk/clk.c
|
|
@@ -351,9 +351,9 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
|
|
return now <= rate && now > best;
|
|
}
|
|
|
|
-static int
|
|
-clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
|
|
- unsigned long flags)
|
|
+int clk_mux_determine_rate_flags(struct clk_hw *hw,
|
|
+ struct clk_rate_request *req,
|
|
+ unsigned long flags)
|
|
{
|
|
struct clk_core *core = hw->core, *parent, *best_parent = NULL;
|
|
int i, num_parents, ret;
|
|
@@ -413,6 +413,7 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
|
|
|
|
return 0;
|
|
}
|
|
+EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
|
|
|
|
struct clk *__clk_lookup(const char *name)
|
|
{
|
|
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
|
|
index 5e8c18afce9a..41c08fc892b9 100644
|
|
--- a/drivers/clk/imx/clk-imx6ul.c
|
|
+++ b/drivers/clk/imx/clk-imx6ul.c
|
|
@@ -461,7 +461,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
|
|
clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000);
|
|
|
|
/* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
|
|
- clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
|
|
+ clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]);
|
|
clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]);
|
|
clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]);
|
|
clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]);
|
|
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
|
|
index 557ed25b42e3..d175b9545581 100644
|
|
--- a/drivers/clocksource/timer-imx-tpm.c
|
|
+++ b/drivers/clocksource/timer-imx-tpm.c
|
|
@@ -20,6 +20,7 @@
|
|
#define TPM_SC 0x10
|
|
#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3)
|
|
#define TPM_SC_CMOD_DIV_DEFAULT 0x3
|
|
+#define TPM_SC_TOF_MASK (0x1 << 7)
|
|
#define TPM_CNT 0x14
|
|
#define TPM_MOD 0x18
|
|
#define TPM_STATUS 0x1c
|
|
@@ -29,6 +30,7 @@
|
|
#define TPM_C0SC_MODE_SHIFT 2
|
|
#define TPM_C0SC_MODE_MASK 0x3c
|
|
#define TPM_C0SC_MODE_SW_COMPARE 0x4
|
|
+#define TPM_C0SC_CHF_MASK (0x1 << 7)
|
|
#define TPM_C0V 0x24
|
|
|
|
static void __iomem *timer_base;
|
|
@@ -205,9 +207,13 @@ static int __init tpm_timer_init(struct device_node *np)
|
|
* 4) Channel0 disabled
|
|
* 5) DMA transfers disabled
|
|
*/
|
|
+ /* make sure counter is disabled */
|
|
writel(0, timer_base + TPM_SC);
|
|
+ /* TOF is W1C */
|
|
+ writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
|
|
writel(0, timer_base + TPM_CNT);
|
|
- writel(0, timer_base + TPM_C0SC);
|
|
+ /* CHF is W1C */
|
|
+ writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);
|
|
|
|
/* increase per cnt, div 8 by default */
|
|
writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT,
|
|
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
|
|
index b9bd827caa22..1b4d465cc5d9 100644
|
|
--- a/drivers/firmware/efi/libstub/arm64-stub.c
|
|
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
|
|
@@ -97,6 +97,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
|
|
u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
|
|
(phys_seed >> 32) & mask : TEXT_OFFSET;
|
|
|
|
+ /*
|
|
+ * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
|
|
+ * be a multiple of EFI_KIMG_ALIGN, and we must ensure that
|
|
+ * we preserve the misalignment of 'offset' relative to
|
|
+ * EFI_KIMG_ALIGN so that statically allocated objects whose
|
|
+ * alignment exceeds PAGE_SIZE appear correctly aligned in
|
|
+ * memory.
|
|
+ */
|
|
+ offset |= TEXT_OFFSET % EFI_KIMG_ALIGN;
|
|
+
|
|
/*
|
|
* If KASLR is enabled, and we have some randomness available,
|
|
* locate the kernel at a randomized offset in physical memory.
|
|
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
|
|
index 660b3fbade41..8a05efa7edf0 100644
|
|
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
|
|
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
|
|
@@ -716,12 +716,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
|
|
struct timespec64 time;
|
|
|
|
dev = kfd_device_by_id(args->gpu_id);
|
|
- if (dev == NULL)
|
|
- return -EINVAL;
|
|
-
|
|
- /* Reading GPU clock counter from KGD */
|
|
- args->gpu_clock_counter =
|
|
- dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
|
|
+ if (dev)
|
|
+ /* Reading GPU clock counter from KGD */
|
|
+ args->gpu_clock_counter =
|
|
+ dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
|
|
+ else
|
|
+ /* Node without GPU resource */
|
|
+ args->gpu_clock_counter = 0;
|
|
|
|
/* No access to rdtsc. Using raw monotonic time */
|
|
getrawmonotonic64(&time);
|
|
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
|
|
index 39ac15ce4702..9e2ae02f31e0 100644
|
|
--- a/drivers/gpu/drm/drm_dumb_buffers.c
|
|
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
|
|
@@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
|
|
return -EINVAL;
|
|
|
|
/* overflow checks for 32bit size calculations */
|
|
- /* NOTE: DIV_ROUND_UP() can overflow */
|
|
+ if (args->bpp > U32_MAX - 8)
|
|
+ return -EINVAL;
|
|
cpp = DIV_ROUND_UP(args->bpp, 8);
|
|
- if (!cpp || cpp > 0xffffffffU / args->width)
|
|
+ if (cpp > U32_MAX / args->width)
|
|
return -EINVAL;
|
|
stride = cpp * args->width;
|
|
- if (args->height > 0xffffffffU / stride)
|
|
+ if (args->height > U32_MAX / stride)
|
|
return -EINVAL;
|
|
|
|
/* test for wrap-around */
|
|
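The drm_dumb_buffers.c hunk above is pure integer-overflow hardening: every multiplication in the bpp -> cpp -> stride -> size chain is preceded by a division-based bound check so the 32-bit intermediates cannot wrap. The same guard sequence in self-contained C (a sketch with invented names, not the DRM code itself):

#include <stdint.h>

#define U32_MAX ((uint32_t)~0U)

/* 0 on success with *size filled in, -1 if any 32-bit step would overflow */
static int dumb_buffer_size(uint32_t width, uint32_t height, uint32_t bpp,
                            uint32_t *size)
{
        uint32_t cpp, stride;

        if (!width || !height || !bpp)
                return -1;

        /* DIV_ROUND_UP(bpp, 8) itself wraps if bpp > U32_MAX - 8 */
        if (bpp > U32_MAX - 8)
                return -1;
        cpp = (bpp + 7) / 8;

        if (cpp > U32_MAX / width)
                return -1;
        stride = cpp * width;

        if (height > U32_MAX / stride)
                return -1;

        *size = stride * height;
        return 0;
}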
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
|
|
index deaf869374ea..a9a0b56f1fbc 100644
|
|
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
|
|
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
|
|
@@ -740,7 +740,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
|
|
switch (mipi_fmt) {
|
|
case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
|
|
case MIPI_DSI_FMT_RGB666_PACKED:
|
|
- case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
|
|
+ case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
|
|
case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
|
|
default: return CMD_DST_FORMAT_RGB888;
|
|
}
|
|
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
|
|
index c178563fcd4d..456622b46335 100644
|
|
--- a/drivers/gpu/drm/msm/msm_fbdev.c
|
|
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
|
|
@@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
|
|
|
|
if (IS_ERR(fb)) {
|
|
dev_err(dev->dev, "failed to allocate fb\n");
|
|
- ret = PTR_ERR(fb);
|
|
- goto fail;
|
|
+ return PTR_ERR(fb);
|
|
}
|
|
|
|
bo = msm_framebuffer_bo(fb, 0);
|
|
@@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
|
|
|
|
fail_unlock:
|
|
mutex_unlock(&dev->struct_mutex);
|
|
-fail:
|
|
-
|
|
- if (ret) {
|
|
- if (fb)
|
|
- drm_framebuffer_remove(fb);
|
|
- }
|
|
-
|
|
+ drm_framebuffer_remove(fb);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
|
|
index 6e0fb50d0de4..f2df718af370 100644
|
|
--- a/drivers/gpu/drm/msm/msm_gem.c
|
|
+++ b/drivers/gpu/drm/msm/msm_gem.c
|
|
@@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
|
|
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
|
|
|
if (msm_obj->pages) {
|
|
- /* For non-cached buffers, ensure the new pages are clean
|
|
- * because display controller, GPU, etc. are not coherent:
|
|
- */
|
|
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
|
|
- dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
|
|
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
|
|
+ if (msm_obj->sgt) {
|
|
+ /* For non-cached buffers, ensure the new
|
|
+ * pages are clean because display controller,
|
|
+ * GPU, etc. are not coherent:
|
|
+ */
|
|
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
|
|
+ dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
|
|
+ msm_obj->sgt->nents,
|
|
+ DMA_BIDIRECTIONAL);
|
|
|
|
- if (msm_obj->sgt)
|
|
sg_free_table(msm_obj->sgt);
|
|
-
|
|
- kfree(msm_obj->sgt);
|
|
+ kfree(msm_obj->sgt);
|
|
+ }
|
|
|
|
if (use_pages(obj))
|
|
drm_gem_put_pages(obj, msm_obj->pages, true, false);
|
|
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
|
|
index f169348da377..ef3731d2f2e7 100644
|
|
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
|
|
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
|
|
@@ -634,7 +634,7 @@ static int hdmi_audio_config(struct device *dev,
|
|
struct omap_dss_audio *dss_audio)
|
|
{
|
|
struct omap_hdmi *hd = dev_get_drvdata(dev);
|
|
- int ret;
|
|
+ int ret = 0;
|
|
|
|
mutex_lock(&hd->lock);
|
|
|
|
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
|
|
index c3453f3bd603..1359bf50598f 100644
|
|
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
|
|
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
|
|
@@ -926,8 +926,13 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
|
|
{
|
|
const struct hdmi4_features *features;
|
|
struct resource *res;
|
|
+ const struct soc_device_attribute *soc;
|
|
|
|
- features = soc_device_match(hdmi4_soc_devices)->data;
|
|
+ soc = soc_device_match(hdmi4_soc_devices);
|
|
+ if (!soc)
|
|
+ return -ENODEV;
|
|
+
|
|
+ features = soc->data;
|
|
core->cts_swmode = features->cts_swmode;
|
|
core->audio_use_mclk = features->audio_use_mclk;
|
|
|
|
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
|
|
index b3221ca5bcd8..26db0ce7a085 100644
|
|
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
|
|
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
|
|
@@ -660,7 +660,7 @@ static int hdmi_audio_config(struct device *dev,
|
|
struct omap_dss_audio *dss_audio)
|
|
{
|
|
struct omap_hdmi *hd = dev_get_drvdata(dev);
|
|
- int ret;
|
|
+ int ret = 0;
|
|
|
|
mutex_lock(&hd->lock);
|
|
|
|
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
|
|
index aa5ba9ae2191..556335ecb2b7 100644
|
|
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
|
|
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
|
|
@@ -123,6 +123,9 @@ static int omap_connector_get_modes(struct drm_connector *connector)
|
|
if (dssdrv->read_edid) {
|
|
void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
|
|
|
|
+ if (!edid)
|
|
+ return 0;
|
|
+
|
|
if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
|
|
drm_edid_is_valid(edid)) {
|
|
drm_mode_connector_update_edid_property(
|
|
@@ -141,6 +144,9 @@ static int omap_connector_get_modes(struct drm_connector *connector)
|
|
struct drm_display_mode *mode = drm_mode_create(dev);
|
|
struct videomode vm = {0};
|
|
|
|
+ if (!mode)
|
|
+ return 0;
|
|
+
|
|
dssdrv->get_timings(dssdev, &vm);
|
|
|
|
drm_display_mode_from_videomode(&vm, mode);
|
|
@@ -196,6 +202,10 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
|
|
if (!r) {
|
|
/* check if vrefresh is still valid */
|
|
new_mode = drm_mode_duplicate(dev, mode);
|
|
+
|
|
+ if (!new_mode)
|
|
+ return MODE_BAD;
|
|
+
|
|
new_mode->clock = vm.pixelclock / 1000;
|
|
new_mode->vrefresh = 0;
|
|
if (mode->vrefresh == drm_mode_vrefresh(new_mode))
|
|
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
|
|
index fd05f7e9f43f..df05fe53c399 100644
|
|
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
|
|
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
|
|
@@ -389,12 +389,16 @@ int tiler_unpin(struct tiler_block *block)
|
|
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
|
|
uint16_t h, uint16_t align)
|
|
{
|
|
- struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
|
|
+ struct tiler_block *block;
|
|
u32 min_align = 128;
|
|
int ret;
|
|
unsigned long flags;
|
|
u32 slot_bytes;
|
|
|
|
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
|
|
+ if (!block)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
BUG_ON(!validfmt(fmt));
|
|
|
|
/* convert width/height to slots */
|
|
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
|
|
index c10fdfc0930f..1cd39507b634 100644
|
|
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
|
|
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
|
|
@@ -92,7 +92,7 @@ static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset,
|
|
{
|
|
int i;
|
|
unsigned long index;
|
|
- bool area_free;
|
|
+ bool area_free = false;
|
|
unsigned long slots_per_band = PAGE_SIZE / slot_bytes;
|
|
unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0;
|
|
unsigned long curr_bit = bit_offset;
|
|
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
|
|
index 8c7a0ce147a1..eca4c9d97110 100644
|
|
--- a/drivers/hid/Kconfig
|
|
+++ b/drivers/hid/Kconfig
|
|
@@ -436,10 +436,11 @@ config HID_LENOVO
|
|
select NEW_LEDS
|
|
select LEDS_CLASS
|
|
---help---
|
|
- Support for Lenovo devices that are not fully compliant with HID standard.
|
|
+ Support for IBM/Lenovo devices that are not fully compliant with HID standard.
|
|
|
|
- Say Y if you want support for the non-compliant features of the Lenovo
|
|
- Thinkpad standalone keyboards, e.g:
|
|
+ Say Y if you want support for horizontal scrolling of the IBM/Lenovo
|
|
+ Scrollpoint mice or the non-compliant features of the Lenovo Thinkpad
|
|
+ standalone keyboards, e.g:
|
|
- ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint
|
|
configuration)
|
|
- ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys)
|
|
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
|
|
index ff539c0b4637..9e478f03e845 100644
|
|
--- a/drivers/hid/hid-ids.h
|
|
+++ b/drivers/hid/hid-ids.h
|
|
@@ -532,6 +532,13 @@
|
|
#define USB_VENDOR_ID_HUION 0x256c
|
|
#define USB_DEVICE_ID_HUION_TABLET 0x006e
|
|
|
|
+#define USB_VENDOR_ID_IBM 0x04b3
|
|
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100
|
|
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_PRO 0x3103
|
|
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL 0x3105
|
|
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL 0x3108
|
|
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO 0x3109
|
|
+
|
|
#define USB_VENDOR_ID_IDEACOM 0x1cb6
|
|
#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
|
|
#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651
|
|
@@ -664,6 +671,7 @@
|
|
#define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
|
|
#define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047
|
|
#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
|
|
+#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049
|
|
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
|
|
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
|
|
#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
|
|
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
|
|
index 1ac4ff4d57a6..643b6eb54442 100644
|
|
--- a/drivers/hid/hid-lenovo.c
|
|
+++ b/drivers/hid/hid-lenovo.c
|
|
@@ -6,6 +6,17 @@
|
|
*
|
|
* Copyright (c) 2012 Bernhard Seibold
|
|
* Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk>
|
|
+ *
|
|
+ * Linux IBM/Lenovo Scrollpoint mouse driver:
|
|
+ * - IBM Scrollpoint III
|
|
+ * - IBM Scrollpoint Pro
|
|
+ * - IBM Scrollpoint Optical
|
|
+ * - IBM Scrollpoint Optical 800dpi
|
|
+ * - IBM Scrollpoint Optical 800dpi Pro
|
|
+ * - Lenovo Scrollpoint Optical
|
|
+ *
|
|
+ * Copyright (c) 2012 Peter De Wachter <pdewacht@gmail.com>
|
|
+ * Copyright (c) 2018 Peter Ganzhorn <peter.ganzhorn@gmail.com>
|
|
*/
|
|
|
|
/*
|
|
@@ -160,6 +171,17 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
|
|
return 0;
|
|
}
|
|
|
|
+static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev,
|
|
+ struct hid_input *hi, struct hid_field *field,
|
|
+ struct hid_usage *usage, unsigned long **bit, int *max)
|
|
+{
|
|
+ if (usage->hid == HID_GD_Z) {
|
|
+ hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL);
|
|
+ return 1;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int lenovo_input_mapping(struct hid_device *hdev,
|
|
struct hid_input *hi, struct hid_field *field,
|
|
struct hid_usage *usage, unsigned long **bit, int *max)
|
|
@@ -172,6 +194,14 @@ static int lenovo_input_mapping(struct hid_device *hdev,
|
|
case USB_DEVICE_ID_LENOVO_CBTKBD:
|
|
return lenovo_input_mapping_cptkbd(hdev, hi, field,
|
|
usage, bit, max);
|
|
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_III:
|
|
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO:
|
|
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL:
|
|
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL:
|
|
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO:
|
|
+ case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL:
|
|
+ return lenovo_input_mapping_scrollpoint(hdev, hi, field,
|
|
+ usage, bit, max);
|
|
default:
|
|
return 0;
|
|
}
|
|
@@ -883,6 +913,12 @@ static const struct hid_device_id lenovo_devices[] = {
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
|
|
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) },
|
|
{ }
|
|
};
|
|
|
|
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
|
|
index f272cdd9bd55..2623a567ffba 100644
|
|
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
|
|
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
|
|
@@ -418,7 +418,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
|
|
list_del(&device->device_link);
|
|
spin_unlock_irqrestore(&dev->device_list_lock, flags);
|
|
dev_err(dev->devc, "Failed to register ISHTP client device\n");
|
|
- kfree(device);
|
|
+ put_device(&device->dev);
|
|
return NULL;
|
|
}
|
|
|
|
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
|
|
index 4c337585479e..69afd7968d9c 100644
|
|
--- a/drivers/hid/wacom_sys.c
|
|
+++ b/drivers/hid/wacom_sys.c
|
|
@@ -1102,8 +1102,10 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom,
|
|
devres->root = root;
|
|
|
|
error = sysfs_create_group(devres->root, group);
|
|
- if (error)
|
|
+ if (error) {
|
|
+ devres_free(devres);
|
|
return error;
|
|
+ }
|
|
|
|
devres_add(&wacom->hdev->dev, devres);
|
|
|
|
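The wacom_sys.c fix above reflects a general devres rule: an object obtained with devres_alloc() is only handed to the device-resource framework once devres_add() runs, so if the setup step it is supposed to undo fails first, the object must be released by hand with devres_free() or it leaks. The shape of that pattern, with hypothetical foo_* names standing in for the driver specifics:

        struct foo_devres *res;
        int error;

        res = devres_alloc(foo_devres_release, sizeof(*res), GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        error = foo_setup(dev, res);
        if (error) {
                /* not registered yet, so the release callback will never
                 * run for it -- free the container explicitly */
                devres_free(res);
                return error;
        }

        devres_add(dev, res);        /* from here on, cleanup is automatic */
        return 0;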
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
|
|
index 2aa0e83174c5..dae8ac618a52 100644
|
|
--- a/drivers/i2c/busses/i2c-pmcmsp.c
|
|
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
|
|
@@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
|
|
* TODO: We could potentially loop and retry in the case
|
|
* of MSP_TWI_XFER_TIMEOUT.
|
|
*/
|
|
- return -1;
|
|
+ return -EIO;
|
|
}
|
|
|
|
- return 0;
|
|
+ return num;
|
|
}
|
|
|
|
static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
|
|
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
|
|
index 25fcc3c1e32b..4053259bccb8 100644
|
|
--- a/drivers/i2c/busses/i2c-sprd.c
|
|
+++ b/drivers/i2c/busses/i2c-sprd.c
|
|
@@ -86,6 +86,7 @@ struct sprd_i2c {
|
|
u32 count;
|
|
int irq;
|
|
int err;
|
|
+ bool is_suspended;
|
|
};
|
|
|
|
static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count)
|
|
@@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
|
|
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
|
|
int im, ret;
|
|
|
|
+ if (i2c_dev->is_suspended)
|
|
+ return -EBUSY;
|
|
+
|
|
ret = pm_runtime_get_sync(i2c_dev->dev);
|
|
if (ret < 0)
|
|
return ret;
|
|
@@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id)
|
|
struct sprd_i2c *i2c_dev = dev_id;
|
|
struct i2c_msg *msg = i2c_dev->msg;
|
|
bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
|
|
- u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
|
|
u32 i2c_tran;
|
|
|
|
if (msg->flags & I2C_M_RD)
|
|
i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
|
|
else
|
|
- i2c_tran = i2c_count;
|
|
+ i2c_tran = i2c_dev->count;
|
|
|
|
/*
|
|
* If we got one ACK from slave when writing data, and we did not
|
|
@@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id)
|
|
{
|
|
struct sprd_i2c *i2c_dev = dev_id;
|
|
struct i2c_msg *msg = i2c_dev->msg;
|
|
- u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
|
|
bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
|
|
u32 i2c_tran;
|
|
|
|
if (msg->flags & I2C_M_RD)
|
|
i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
|
|
else
|
|
- i2c_tran = i2c_count;
|
|
+ i2c_tran = i2c_dev->count;
|
|
|
|
/*
|
|
* If we did not get one ACK from slave when writing data, then we
|
|
@@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev)
|
|
|
|
static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev)
|
|
{
|
|
+ struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
|
|
+
|
|
+ i2c_lock_adapter(&i2c_dev->adap);
|
|
+ i2c_dev->is_suspended = true;
|
|
+ i2c_unlock_adapter(&i2c_dev->adap);
|
|
+
|
|
return pm_runtime_force_suspend(pdev);
|
|
}
|
|
|
|
static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev)
|
|
{
|
|
+ struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
|
|
+
|
|
+ i2c_lock_adapter(&i2c_dev->adap);
|
|
+ i2c_dev->is_suspended = false;
|
|
+ i2c_unlock_adapter(&i2c_dev->adap);
|
|
+
|
|
return pm_runtime_force_resume(pdev);
|
|
}
|
|
|
|
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
|
|
index e4be86b3de9a..7235c7302bb7 100644
|
|
--- a/drivers/i2c/busses/i2c-viperboard.c
|
|
+++ b/drivers/i2c/busses/i2c-viperboard.c
|
|
@@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
|
|
}
|
|
mutex_unlock(&vb->lock);
|
|
}
|
|
- return 0;
|
|
+ return num;
|
|
error:
|
|
mutex_unlock(&vb->lock);
|
|
return error;
|
|
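The two i2c fixes above (i2c-pmcmsp and i2c-viperboard) enforce the same contract: an i2c_algorithm .master_xfer hook must return the number of messages it completed on success and a negative errno on failure; a bare 0 or -1 confuses callers such as i2c_transfer(), which propagate the value to users. Reduced to a skeleton (foo_i2c_do_one_msg is a placeholder for the bus-specific transfer):

static int foo_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
                        int num)
{
        int i, ret;

        for (i = 0; i < num; i++) {
                ret = foo_i2c_do_one_msg(adap, &msgs[i]);
                if (ret < 0)
                        return ret;        /* negative errno, e.g. -EIO */
        }

        return num;        /* success: number of messages transferred */
}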
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
|
|
index 3726205c8704..7507cc641de3 100644
|
|
--- a/drivers/infiniband/Kconfig
|
|
+++ b/drivers/infiniband/Kconfig
|
|
@@ -60,9 +60,12 @@ config INFINIBAND_ON_DEMAND_PAGING
|
|
pages on demand instead.
|
|
|
|
config INFINIBAND_ADDR_TRANS
|
|
- bool
|
|
+ bool "RDMA/CM"
|
|
depends on INFINIBAND
|
|
default y
|
|
+ ---help---
|
|
+ Support for RDMA communication manager (CM).
|
|
+ This allows for a generic connection abstraction over RDMA.
|
|
|
|
config INFINIBAND_ADDR_TRANS_CONFIGFS
|
|
bool
|
|
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
|
|
index 6c725c435f5d..79843a3ca9dc 100644
|
|
--- a/drivers/infiniband/core/cma.c
|
|
+++ b/drivers/infiniband/core/cma.c
|
|
@@ -420,6 +420,8 @@ struct cma_hdr {
|
|
#define CMA_VERSION 0x00
|
|
|
|
struct cma_req_info {
|
|
+ struct sockaddr_storage listen_addr_storage;
|
|
+ struct sockaddr_storage src_addr_storage;
|
|
struct ib_device *device;
|
|
int port;
|
|
union ib_gid local_gid;
|
|
@@ -898,7 +900,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
|
|
{
|
|
struct ib_qp_attr qp_attr;
|
|
int qp_attr_mask, ret;
|
|
- union ib_gid sgid;
|
|
|
|
mutex_lock(&id_priv->qp_mutex);
|
|
if (!id_priv->id.qp) {
|
|
@@ -921,12 +922,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
|
|
if (ret)
|
|
goto out;
|
|
|
|
- ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
|
|
- rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index,
|
|
- &sgid, NULL);
|
|
- if (ret)
|
|
- goto out;
|
|
-
|
|
BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
|
|
|
|
if (conn_param)
|
|
@@ -1372,11 +1367,11 @@ static bool validate_net_dev(struct net_device *net_dev,
|
|
}
|
|
|
|
static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
|
|
- const struct cma_req_info *req)
|
|
+ struct cma_req_info *req)
|
|
{
|
|
- struct sockaddr_storage listen_addr_storage, src_addr_storage;
|
|
- struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
|
|
- *src_addr = (struct sockaddr *)&src_addr_storage;
|
|
+ struct sockaddr *listen_addr =
|
|
+ (struct sockaddr *)&req->listen_addr_storage;
|
|
+ struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
|
|
struct net_device *net_dev;
|
|
const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
|
|
int err;
|
|
@@ -1391,11 +1386,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
|
|
if (!net_dev)
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
- if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
|
|
- dev_put(net_dev);
|
|
- return ERR_PTR(-EHOSTUNREACH);
|
|
- }
|
|
-
|
|
return net_dev;
|
|
}
|
|
|
|
@@ -1531,15 +1521,51 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
|
|
}
|
|
}
|
|
|
|
+ /*
|
|
+ * Net namespace might be getting deleted while route lookup,
|
|
+ * cm_id lookup is in progress. Therefore, perform netdevice
|
|
+ * validation, cm_id lookup under rcu lock.
|
|
+ * RCU lock along with netdevice state check, synchronizes with
|
|
+ * netdevice migrating to different net namespace and also avoids
|
|
+ * case where net namespace doesn't get deleted while lookup is in
|
|
+ * progress.
|
|
+ * If the device state is not IFF_UP, its properties such as ifindex
|
|
+ * and nd_net cannot be trusted to remain valid without rcu lock.
|
|
+ * net/core/dev.c change_net_namespace() ensures to synchronize with
|
|
+ * ongoing operations on net device after device is closed using
|
|
+ * synchronize_net().
|
|
+ */
|
|
+ rcu_read_lock();
|
|
+ if (*net_dev) {
|
|
+ /*
|
|
+ * If netdevice is down, it is likely that it is administratively
|
|
+ * down or it might be migrating to different namespace.
|
|
+ * In that case avoid further processing, as the net namespace
|
|
+ * or ifindex may change.
|
|
+ */
|
|
+ if (((*net_dev)->flags & IFF_UP) == 0) {
|
|
+ id_priv = ERR_PTR(-EHOSTUNREACH);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ if (!validate_net_dev(*net_dev,
|
|
+ (struct sockaddr *)&req.listen_addr_storage,
|
|
+ (struct sockaddr *)&req.src_addr_storage)) {
|
|
+ id_priv = ERR_PTR(-EHOSTUNREACH);
|
|
+ goto err;
|
|
+ }
|
|
+ }
|
|
+
|
|
bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
|
|
rdma_ps_from_service_id(req.service_id),
|
|
cma_port_from_service_id(req.service_id));
|
|
id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
|
|
+err:
|
|
+ rcu_read_unlock();
|
|
if (IS_ERR(id_priv) && *net_dev) {
|
|
dev_put(*net_dev);
|
|
*net_dev = NULL;
|
|
}
|
|
-
|
|
return id_priv;
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
|
|
index 81528f64061a..cb0fecc958b5 100644
|
|
--- a/drivers/infiniband/core/iwpm_util.c
|
|
+++ b/drivers/infiniband/core/iwpm_util.c
|
|
@@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
|
|
struct sockaddr_storage *mapped_sockaddr,
|
|
u8 nl_client)
|
|
{
|
|
- struct hlist_head *hash_bucket_head;
|
|
+ struct hlist_head *hash_bucket_head = NULL;
|
|
struct iwpm_mapping_info *map_info;
|
|
unsigned long flags;
|
|
int ret = -EINVAL;
|
|
@@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
|
|
}
|
|
}
|
|
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
|
|
+
|
|
+ if (!hash_bucket_head)
|
|
+ kfree(map_info);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
|
|
index cb91245e9163..d8efdc191c27 100644
|
|
--- a/drivers/infiniband/core/mad.c
|
|
+++ b/drivers/infiniband/core/mad.c
|
|
@@ -60,7 +60,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
|
|
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
|
|
|
|
static struct list_head ib_mad_port_list;
|
|
-static u32 ib_mad_client_id = 0;
|
|
+static atomic_t ib_mad_client_id = ATOMIC_INIT(0);
|
|
|
|
/* Port list lock */
|
|
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
|
|
@@ -378,7 +378,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
|
|
}
|
|
|
|
spin_lock_irqsave(&port_priv->reg_lock, flags);
|
|
- mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
|
|
+ mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);
|
|
|
|
/*
|
|
* Make sure MAD registration (if supplied)
|
|
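The mad.c change above exists because ib_register_mad_agent() can run concurrently and "++ib_mad_client_id" is not an atomic read-modify-write, so two agents could be handed the same hi_tid. atomic_inc_return() performs the increment and hands back the new value in one step. The idiom in isolation (a sketch, not the subsystem code):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t client_id = ATOMIC_INIT(0);

static u32 new_client_id(void)
{
        /* increments and returns the post-increment value, so concurrent
         * callers always observe distinct ids */
        return atomic_inc_return(&client_id);
}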
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
|
|
index 5e9f72ea4579..5feb8bbeff18 100644
|
|
--- a/drivers/infiniband/core/uverbs_ioctl.c
|
|
+++ b/drivers/infiniband/core/uverbs_ioctl.c
|
|
@@ -191,6 +191,15 @@ static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *met
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ for (; i < method_spec->num_buckets; i++) {
|
|
+ struct uverbs_attr_spec_hash *attr_spec_bucket =
|
|
+ method_spec->attr_buckets[i];
|
|
+
|
|
+ if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask,
|
|
+ attr_spec_bucket->num_attrs))
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
|
|
index a97055dd4fbd..b5fab55cc275 100644
|
|
--- a/drivers/infiniband/hw/hfi1/affinity.c
|
|
+++ b/drivers/infiniband/hw/hfi1/affinity.c
|
|
@@ -412,7 +412,6 @@ static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
|
|
static int get_irq_affinity(struct hfi1_devdata *dd,
|
|
struct hfi1_msix_entry *msix)
|
|
{
|
|
- int ret;
|
|
cpumask_var_t diff;
|
|
struct hfi1_affinity_node *entry;
|
|
struct cpu_mask_set *set = NULL;
|
|
@@ -424,10 +423,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
|
|
extra[0] = '\0';
|
|
cpumask_clear(&msix->mask);
|
|
|
|
- ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
|
|
- if (!ret)
|
|
- return -ENOMEM;
|
|
-
|
|
entry = node_affinity_lookup(dd->node);
|
|
|
|
switch (msix->type) {
|
|
@@ -458,6 +453,9 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
|
|
* finds its CPU here.
|
|
*/
|
|
if (cpu == -1 && set) {
|
|
+ if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
|
|
+ return -ENOMEM;
|
|
+
|
|
if (cpumask_equal(&set->mask, &set->used)) {
|
|
/*
|
|
* We've used up all the CPUs, bump up the generation
|
|
@@ -469,6 +467,8 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
|
|
cpumask_andnot(diff, &set->mask, &set->used);
|
|
cpu = cpumask_first(diff);
|
|
cpumask_set_cpu(cpu, &set->used);
|
|
+
|
|
+ free_cpumask_var(diff);
|
|
}
|
|
|
|
cpumask_set_cpu(cpu, &msix->mask);
|
|
@@ -482,7 +482,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
|
|
hfi1_setup_sdma_notifier(msix);
|
|
}
|
|
|
|
- free_cpumask_var(diff);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
|
|
index d5c6ff843fc6..918dbd350c71 100644
|
|
--- a/drivers/infiniband/hw/hfi1/init.c
|
|
+++ b/drivers/infiniband/hw/hfi1/init.c
|
|
@@ -88,9 +88,9 @@
|
|
* pio buffers per ctxt, etc.) Zero means use one user context per CPU.
|
|
*/
|
|
int num_user_contexts = -1;
|
|
-module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
|
|
+module_param_named(num_user_contexts, num_user_contexts, int, 0444);
|
|
MODULE_PARM_DESC(
|
|
- num_user_contexts, "Set max number of user contexts to use");
|
|
+ num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");
|
|
|
|
uint krcvqs[RXE_NUM_DATA_VL];
|
|
int krcvqsset;
|
|
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
|
|
index 61927c165b59..4cf11063e0b5 100644
|
|
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
|
|
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
|
|
@@ -390,7 +390,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
|
|
.name = "IB_OPCODE_RC_SEND_ONLY_INV",
|
|
.mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
|
|
| RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
|
|
- | RXE_END_MASK,
|
|
+ | RXE_END_MASK | RXE_START_MASK,
|
|
.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
|
|
.offset = {
|
|
[RXE_BTH] = 0,
|
|
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
|
|
index 44b838ec9420..54cc9cb1e3b7 100644
|
|
--- a/drivers/infiniband/sw/rxe/rxe_req.c
|
|
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
|
|
@@ -728,7 +728,6 @@ int rxe_requester(void *arg)
|
|
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
|
|
|
|
if (ret == -EAGAIN) {
|
|
- kfree_skb(skb);
|
|
rxe_run_task(&qp->req.task, 1);
|
|
goto exit;
|
|
}
|
|
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
|
|
index 01f926fd9029..bd43c1c7a42f 100644
|
|
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
|
|
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
|
|
@@ -742,7 +742,6 @@ static enum resp_states read_reply(struct rxe_qp *qp,
|
|
err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
|
|
if (err) {
|
|
pr_err("Failed sending RDMA reply.\n");
|
|
- kfree_skb(skb);
|
|
return RESPST_ERR_RNR;
|
|
}
|
|
|
|
@@ -955,10 +954,8 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
|
|
}
|
|
|
|
err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
|
|
- if (err) {
|
|
+ if (err)
|
|
pr_err_ratelimited("Failed sending ack\n");
|
|
- kfree_skb(skb);
|
|
- }
|
|
|
|
err1:
|
|
return err;
|
|
@@ -1151,7 +1148,6 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
|
|
if (rc) {
|
|
pr_err("Failed resending result. This flow is not handled - skb ignored\n");
|
|
rxe_drop_ref(qp);
|
|
- kfree_skb(skb_copy);
|
|
rc = RESPST_CLEANUP;
|
|
goto out;
|
|
}
|
|
diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig
|
|
index c74ee9633041..99db8fe5173a 100644
|
|
--- a/drivers/infiniband/ulp/srp/Kconfig
|
|
+++ b/drivers/infiniband/ulp/srp/Kconfig
|
|
@@ -1,6 +1,6 @@
|
|
config INFINIBAND_SRP
|
|
tristate "InfiniBand SCSI RDMA Protocol"
|
|
- depends on SCSI
|
|
+ depends on SCSI && INFINIBAND_ADDR_TRANS
|
|
select SCSI_SRP_ATTRS
|
|
---help---
|
|
Support for the SCSI RDMA Protocol over InfiniBand. This
|
|
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
|
|
index 31ee83d528d9..fb8b7182f05e 100644
|
|
--- a/drivers/infiniband/ulp/srpt/Kconfig
|
|
+++ b/drivers/infiniband/ulp/srpt/Kconfig
|
|
@@ -1,6 +1,6 @@
|
|
config INFINIBAND_SRPT
|
|
tristate "InfiniBand SCSI RDMA Protocol target support"
|
|
- depends on INFINIBAND && TARGET_CORE
|
|
+ depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
|
|
---help---
|
|
|
|
Support for the SCSI RDMA Protocol (SRP) Target driver. The
|
|
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
|
|
index d97a85907ed6..d0c3d275bf9f 100644
|
|
--- a/drivers/input/rmi4/rmi_spi.c
|
|
+++ b/drivers/input/rmi4/rmi_spi.c
|
|
@@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
|
|
if (len > RMI_SPI_XFER_SIZE_LIMIT)
|
|
return -EINVAL;
|
|
|
|
- if (rmi_spi->xfer_buf_size < len)
|
|
- rmi_spi_manage_pools(rmi_spi, len);
|
|
+ if (rmi_spi->xfer_buf_size < len) {
|
|
+ ret = rmi_spi_manage_pools(rmi_spi, len);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ }
|
|
|
|
if (addr == 0)
|
|
/*
|
|
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
|
|
index 429b694405c7..fc149ea64be7 100644
|
|
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
|
|
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
|
|
@@ -275,7 +275,8 @@ struct mxt_data {
|
|
char phys[64]; /* device physical location */
|
|
const struct mxt_platform_data *pdata;
|
|
struct mxt_object *object_table;
|
|
- struct mxt_info info;
|
|
+ struct mxt_info *info;
|
|
+ void *raw_info_block;
|
|
unsigned int irq;
|
|
unsigned int max_x;
|
|
unsigned int max_y;
|
|
@@ -450,12 +451,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
|
|
{
|
|
u8 appmode = data->client->addr;
|
|
u8 bootloader;
|
|
+ u8 family_id = data->info ? data->info->family_id : 0;
|
|
|
|
switch (appmode) {
|
|
case 0x4a:
|
|
case 0x4b:
|
|
/* Chips after 1664S use different scheme */
|
|
- if (retry || data->info.family_id >= 0xa2) {
|
|
+ if (retry || family_id >= 0xa2) {
|
|
bootloader = appmode - 0x24;
|
|
break;
|
|
}
|
|
@@ -682,7 +684,7 @@ mxt_get_object(struct mxt_data *data, u8 type)
|
|
struct mxt_object *object;
|
|
int i;
|
|
|
|
- for (i = 0; i < data->info.object_num; i++) {
|
|
+ for (i = 0; i < data->info->object_num; i++) {
|
|
object = data->object_table + i;
|
|
if (object->type == type)
|
|
return object;
|
|
@@ -1453,12 +1455,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
|
|
data_pos += offset;
|
|
}
|
|
|
|
- if (cfg_info.family_id != data->info.family_id) {
|
|
+ if (cfg_info.family_id != data->info->family_id) {
|
|
dev_err(dev, "Family ID mismatch!\n");
|
|
return -EINVAL;
|
|
}
|
|
|
|
- if (cfg_info.variant_id != data->info.variant_id) {
|
|
+ if (cfg_info.variant_id != data->info->variant_id) {
|
|
dev_err(dev, "Variant ID mismatch!\n");
|
|
return -EINVAL;
|
|
}
|
|
@@ -1503,7 +1505,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
|
|
|
|
/* Malloc memory to store configuration */
|
|
cfg_start_ofs = MXT_OBJECT_START +
|
|
- data->info.object_num * sizeof(struct mxt_object) +
|
|
+ data->info->object_num * sizeof(struct mxt_object) +
|
|
MXT_INFO_CHECKSUM_SIZE;
|
|
config_mem_size = data->mem_size - cfg_start_ofs;
|
|
config_mem = kzalloc(config_mem_size, GFP_KERNEL);
|
|
@@ -1554,20 +1556,6 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
|
|
return ret;
|
|
}
|
|
|
|
-static int mxt_get_info(struct mxt_data *data)
|
|
-{
|
|
- struct i2c_client *client = data->client;
|
|
- struct mxt_info *info = &data->info;
|
|
- int error;
|
|
-
|
|
- /* Read 7-byte info block starting at address 0 */
|
|
- error = __mxt_read_reg(client, 0, sizeof(*info), info);
|
|
- if (error)
|
|
- return error;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
static void mxt_free_input_device(struct mxt_data *data)
|
|
{
|
|
if (data->input_dev) {
|
|
@@ -1582,9 +1570,10 @@ static void mxt_free_object_table(struct mxt_data *data)
|
|
video_unregister_device(&data->dbg.vdev);
|
|
v4l2_device_unregister(&data->dbg.v4l2);
|
|
#endif
|
|
-
|
|
- kfree(data->object_table);
|
|
data->object_table = NULL;
|
|
+ data->info = NULL;
|
|
+ kfree(data->raw_info_block);
|
|
+ data->raw_info_block = NULL;
|
|
kfree(data->msg_buf);
|
|
data->msg_buf = NULL;
|
|
data->T5_address = 0;
|
|
@@ -1600,34 +1589,18 @@ static void mxt_free_object_table(struct mxt_data *data)
|
|
data->max_reportid = 0;
|
|
}
|
|
|
|
-static int mxt_get_object_table(struct mxt_data *data)
|
|
+static int mxt_parse_object_table(struct mxt_data *data,
|
|
+ struct mxt_object *object_table)
|
|
{
|
|
struct i2c_client *client = data->client;
|
|
- size_t table_size;
|
|
- struct mxt_object *object_table;
|
|
- int error;
|
|
int i;
|
|
u8 reportid;
|
|
u16 end_address;
|
|
|
|
- table_size = data->info.object_num * sizeof(struct mxt_object);
|
|
- object_table = kzalloc(table_size, GFP_KERNEL);
|
|
- if (!object_table) {
|
|
- dev_err(&data->client->dev, "Failed to allocate memory\n");
|
|
- return -ENOMEM;
|
|
- }
|
|
-
|
|
- error = __mxt_read_reg(client, MXT_OBJECT_START, table_size,
|
|
- object_table);
|
|
- if (error) {
|
|
- kfree(object_table);
|
|
- return error;
|
|
- }
|
|
-
|
|
/* Valid Report IDs start counting from 1 */
|
|
reportid = 1;
|
|
data->mem_size = 0;
|
|
- for (i = 0; i < data->info.object_num; i++) {
|
|
+ for (i = 0; i < data->info->object_num; i++) {
|
|
struct mxt_object *object = object_table + i;
|
|
u8 min_id, max_id;
|
|
|
|
@@ -1651,8 +1624,8 @@ static int mxt_get_object_table(struct mxt_data *data)
|
|
|
|
switch (object->type) {
|
|
case MXT_GEN_MESSAGE_T5:
|
|
- if (data->info.family_id == 0x80 &&
|
|
- data->info.version < 0x20) {
|
|
+ if (data->info->family_id == 0x80 &&
|
|
+ data->info->version < 0x20) {
|
|
/*
|
|
* On mXT224 firmware versions prior to V2.0
|
|
* read and discard unused CRC byte otherwise
|
|
@@ -1707,24 +1680,102 @@ static int mxt_get_object_table(struct mxt_data *data)
|
|
/* If T44 exists, T5 position has to be directly after */
|
|
if (data->T44_address && (data->T5_address != data->T44_address + 1)) {
|
|
dev_err(&client->dev, "Invalid T44 position\n");
|
|
- error = -EINVAL;
|
|
- goto free_object_table;
|
|
+ return -EINVAL;
|
|
}
|
|
|
|
data->msg_buf = kcalloc(data->max_reportid,
|
|
data->T5_msg_size, GFP_KERNEL);
|
|
- if (!data->msg_buf) {
|
|
- dev_err(&client->dev, "Failed to allocate message buffer\n");
|
|
+ if (!data->msg_buf)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int mxt_read_info_block(struct mxt_data *data)
|
|
+{
|
|
+ struct i2c_client *client = data->client;
|
|
+ int error;
|
|
+ size_t size;
|
|
+ void *id_buf, *buf;
|
|
+ uint8_t num_objects;
|
|
+ u32 calculated_crc;
|
|
+ u8 *crc_ptr;
|
|
+
|
|
+ /* If info block already allocated, free it */
|
|
+ if (data->raw_info_block)
|
|
+ mxt_free_object_table(data);
|
|
+
|
|
+ /* Read 7-byte ID information block starting at address 0 */
|
|
+ size = sizeof(struct mxt_info);
|
|
+ id_buf = kzalloc(size, GFP_KERNEL);
|
|
+ if (!id_buf)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ error = __mxt_read_reg(client, 0, size, id_buf);
|
|
+ if (error)
|
|
+ goto err_free_mem;
|
|
+
|
|
+ /* Resize buffer to give space for rest of info block */
|
|
+ num_objects = ((struct mxt_info *)id_buf)->object_num;
|
|
+ size += (num_objects * sizeof(struct mxt_object))
|
|
+ + MXT_INFO_CHECKSUM_SIZE;
|
|
+
|
|
+ buf = krealloc(id_buf, size, GFP_KERNEL);
|
|
+ if (!buf) {
|
|
error = -ENOMEM;
|
|
- goto free_object_table;
|
|
+ goto err_free_mem;
|
|
+ }
|
|
+ id_buf = buf;
|
|
+
|
|
+ /* Read rest of info block */
|
|
+ error = __mxt_read_reg(client, MXT_OBJECT_START,
|
|
+ size - MXT_OBJECT_START,
|
|
+ id_buf + MXT_OBJECT_START);
|
|
+ if (error)
|
|
+ goto err_free_mem;
|
|
+
|
|
+ /* Extract & calculate checksum */
|
|
+ crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE;
|
|
+ data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16);
|
|
+
|
|
+ calculated_crc = mxt_calculate_crc(id_buf, 0,
|
|
+ size - MXT_INFO_CHECKSUM_SIZE);
|
|
+
|
|
+ /*
|
|
+ * CRC mismatch can be caused by data corruption due to I2C comms
|
|
+ * issue or else device is not using Object Based Protocol (eg i2c-hid)
|
|
+ */
|
|
+ if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) {
|
|
+ dev_err(&client->dev,
|
|
+ "Info Block CRC error calculated=0x%06X read=0x%06X\n",
|
|
+ calculated_crc, data->info_crc);
|
|
+ error = -EIO;
|
|
+ goto err_free_mem;
|
|
+ }
|
|
+
|
|
+ data->raw_info_block = id_buf;
|
|
+ data->info = (struct mxt_info *)id_buf;
|
|
+
|
|
+ dev_info(&client->dev,
|
|
+ "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
|
|
+ data->info->family_id, data->info->variant_id,
|
|
+ data->info->version >> 4, data->info->version & 0xf,
|
|
+ data->info->build, data->info->object_num);
|
|
+
|
|
+ /* Parse object table information */
|
|
+ error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START);
|
|
+ if (error) {
|
|
+ dev_err(&client->dev, "Error %d parsing object table\n", error);
|
|
+ mxt_free_object_table(data);
|
|
+ goto err_free_mem;
|
|
}
|
|
|
|
- data->object_table = object_table;
|
|
+ data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
|
|
|
|
return 0;
|
|
|
|
-free_object_table:
|
|
- mxt_free_object_table(data);
|
|
+err_free_mem:
|
|
+ kfree(id_buf);
|
|
return error;
|
|
}
|
|
|
|
@@ -2039,7 +2090,7 @@ static int mxt_initialize(struct mxt_data *data)
|
|
int error;
|
|
|
|
while (1) {
|
|
- error = mxt_get_info(data);
|
|
+ error = mxt_read_info_block(data);
|
|
if (!error)
|
|
break;
|
|
|
|
@@ -2070,16 +2121,9 @@ static int mxt_initialize(struct mxt_data *data)
|
|
msleep(MXT_FW_RESET_TIME);
|
|
}
|
|
|
|
- /* Get object table information */
|
|
- error = mxt_get_object_table(data);
|
|
- if (error) {
|
|
- dev_err(&client->dev, "Error %d reading object table\n", error);
|
|
- return error;
|
|
- }
|
|
-
|
|
error = mxt_acquire_irq(data);
|
|
if (error)
|
|
- goto err_free_object_table;
|
|
+ return error;
|
|
|
|
error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
|
|
&client->dev, GFP_KERNEL, data,
|
|
@@ -2087,14 +2131,10 @@ static int mxt_initialize(struct mxt_data *data)
|
|
if (error) {
|
|
dev_err(&client->dev, "Failed to invoke firmware loader: %d\n",
|
|
error);
|
|
- goto err_free_object_table;
|
|
+ return error;
|
|
}
|
|
|
|
return 0;
|
|
-
|
|
-err_free_object_table:
|
|
- mxt_free_object_table(data);
|
|
- return error;
|
|
}
|
|
|
|
static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
|
|
@@ -2155,7 +2195,7 @@ static int mxt_init_t7_power_cfg(struct mxt_data *data)
|
|
static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x,
|
|
unsigned int y)
|
|
{
|
|
- struct mxt_info *info = &data->info;
|
|
+ struct mxt_info *info = data->info;
|
|
struct mxt_dbg *dbg = &data->dbg;
|
|
unsigned int ofs, page;
|
|
unsigned int col = 0;
|
|
@@ -2483,7 +2523,7 @@ static const struct video_device mxt_video_device = {
|
|
|
|
static void mxt_debug_init(struct mxt_data *data)
|
|
{
|
|
- struct mxt_info *info = &data->info;
|
|
+ struct mxt_info *info = data->info;
|
|
struct mxt_dbg *dbg = &data->dbg;
|
|
struct mxt_object *object;
|
|
int error;
|
|
@@ -2569,7 +2609,6 @@ static int mxt_configure_objects(struct mxt_data *data,
|
|
const struct firmware *cfg)
|
|
{
|
|
struct device *dev = &data->client->dev;
|
|
- struct mxt_info *info = &data->info;
|
|
int error;
|
|
|
|
error = mxt_init_t7_power_cfg(data);
|
|
@@ -2594,11 +2633,6 @@ static int mxt_configure_objects(struct mxt_data *data,
|
|
|
|
mxt_debug_init(data);
|
|
|
|
- dev_info(dev,
|
|
- "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
|
|
- info->family_id, info->variant_id, info->version >> 4,
|
|
- info->version & 0xf, info->build, info->object_num);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
@@ -2607,7 +2641,7 @@ static ssize_t mxt_fw_version_show(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct mxt_data *data = dev_get_drvdata(dev);
|
|
- struct mxt_info *info = &data->info;
|
|
+ struct mxt_info *info = data->info;
|
|
return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n",
|
|
info->version >> 4, info->version & 0xf, info->build);
|
|
}
|
|
@@ -2617,7 +2651,7 @@ static ssize_t mxt_hw_version_show(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct mxt_data *data = dev_get_drvdata(dev);
|
|
- struct mxt_info *info = &data->info;
|
|
+ struct mxt_info *info = data->info;
|
|
return scnprintf(buf, PAGE_SIZE, "%u.%u\n",
|
|
info->family_id, info->variant_id);
|
|
}
|
|
@@ -2656,7 +2690,7 @@ static ssize_t mxt_object_show(struct device *dev,
|
|
return -ENOMEM;
|
|
|
|
error = 0;
|
|
- for (i = 0; i < data->info.object_num; i++) {
|
|
+ for (i = 0; i < data->info->object_num; i++) {
|
|
object = data->object_table + i;
|
|
|
|
if (!mxt_object_readable(object->type))
|
|
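mxt_read_info_block() above follows a common two-stage read for variable-length device tables: fetch a small fixed header, learn the real table size from it, krealloc() the buffer to that size, read the remainder into the same allocation, and verify a checksum over the whole block before trusting any field. Its skeleton, with hypothetical foo_* structures and a foo_read_regs() placeholder for the bus access (in-kernel context assumed):

struct foo_header { u8 nr_entries; };
struct foo_entry  { u8 type; u16 addr; };

static void *foo_read_table(struct i2c_client *client, size_t *out_size)
{
        size_t size = sizeof(struct foo_header);
        void *buf, *bigger;

        buf = kzalloc(size, GFP_KERNEL);
        if (!buf)
                return NULL;

        /* stage 1: the fixed header says how big the full table is */
        if (foo_read_regs(client, 0, size, buf))
                goto err;

        size += ((struct foo_header *)buf)->nr_entries *
                sizeof(struct foo_entry);

        bigger = krealloc(buf, size, GFP_KERNEL);
        if (!bigger)
                goto err;
        buf = bigger;

        /* stage 2: read the rest; a CRC check over buf would follow here */
        if (foo_read_regs(client, sizeof(struct foo_header),
                          size - sizeof(struct foo_header),
                          buf + sizeof(struct foo_header)))
                goto err;

        *out_size = size;
        return buf;

err:
        kfree(buf);
        return NULL;
}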
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
|
|
index 57c920c1372d..e3dbb6101b4a 100644
|
|
--- a/drivers/iommu/dmar.c
|
|
+++ b/drivers/iommu/dmar.c
|
|
@@ -1342,7 +1342,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
|
|
struct qi_desc desc;
|
|
|
|
if (mask) {
|
|
- BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
|
|
+ BUG_ON(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
|
|
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
|
|
desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
|
|
} else
|
|
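The dmar.c one-liner above matters because VTD_PAGE_SHIFT (12) plus the invalidation mask can reach 32 or more; shifting a plain int by that much is undefined in C, and typical compilers wrap the shift count, so the BUG_ON alignment check never fires for large regions. Promoting the constant to 1ULL keeps the whole expression in 64-bit arithmetic. A standalone illustration (the shift value is just an example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int shift = 36;        /* e.g. VTD_PAGE_SHIFT + mask = 12 + 24 */

        /* well defined: the shift happens in 64-bit arithmetic */
        uint64_t mask64 = (1ULL << shift) - 1;

        /* (1 << shift) with a 32-bit int would be undefined behaviour here;
         * in practice the count wraps and yields a wrong, tiny mask */
        printf("alignment mask for shift %u: 0x%llx\n",
               shift, (unsigned long long)mask64);
        return 0;
}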
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
|
|
index 365a8cc62405..b6a681bce400 100644
|
|
--- a/drivers/net/can/dev.c
|
|
+++ b/drivers/net/can/dev.c
|
|
@@ -604,7 +604,7 @@ void can_bus_off(struct net_device *dev)
|
|
{
|
|
struct can_priv *priv = netdev_priv(dev);
|
|
|
|
- netdev_dbg(dev, "bus-off\n");
|
|
+ netdev_info(dev, "bus-off\n");
|
|
|
|
netif_carrier_off(dev);
|
|
|
|
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
|
|
index c93e5613d4cc..cc658a29cc33 100644
|
|
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
|
|
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
|
|
@@ -310,6 +310,8 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
|
|
|
|
self->ndev->hw_features |= aq_hw_caps->hw_features;
|
|
self->ndev->features = aq_hw_caps->hw_features;
|
|
+ self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
|
|
+ NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
|
|
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
|
|
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
|
|
self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
|
|
index 3e62692af011..fa5b30f547f6 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
|
|
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
|
|
@@ -87,7 +87,7 @@ do { \
|
|
|
|
#define HNAE_AE_REGISTER 0x1
|
|
|
|
-#define RCB_RING_NAME_LEN 16
|
|
+#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)
|
|
|
|
#define HNAE_LOWEST_LATENCY_COAL_PARAM 30
|
|
#define HNAE_LOW_LATENCY_COAL_PARAM 80
|
|
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
|
|
index 8a85217845ae..cf6a245db6d5 100644
|
|
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
|
|
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
|
|
@@ -3413,6 +3413,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
|
|
hw->phy.sfp_setup_needed = false;
|
|
}
|
|
|
|
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
|
|
+ return status;
|
|
+
|
|
/* Reset PHY */
|
|
if (!hw->phy.reset_disable && hw->phy.ops.reset)
|
|
hw->phy.ops.reset(hw);
|
|
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
|
|
index f88ff3f4b661..35d14af235f7 100644
|
|
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
|
|
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
|
|
@@ -277,8 +277,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
|
|
if ((*reg & mask) == val)
|
|
return 0;
|
|
|
|
- if (msleep_interruptible(25))
|
|
- return -ERESTARTSYS;
|
|
+ msleep(25);
|
|
|
|
if (time_after(start_time, wait_until))
|
|
return -ETIMEDOUT;
|
|
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
|
|
index 085338990f49..c5452b445c37 100644
|
|
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
|
|
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
|
|
@@ -115,8 +115,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn)
|
|
|
|
void qed_l2_setup(struct qed_hwfn *p_hwfn)
|
|
{
|
|
- if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
|
|
- p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
|
|
+ if (!QED_IS_L2_PERSONALITY(p_hwfn))
|
|
return;
|
|
|
|
mutex_init(&p_hwfn->p_l2_info->lock);
|
|
@@ -126,8 +125,7 @@ void qed_l2_free(struct qed_hwfn *p_hwfn)
|
|
{
|
|
u32 i;
|
|
|
|
- if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
|
|
- p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
|
|
+ if (!QED_IS_L2_PERSONALITY(p_hwfn))
|
|
return;
|
|
|
|
if (!p_hwfn->p_l2_info)
|
|
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
|
|
index 50b142fad6b8..1900bf7e67d1 100644
|
|
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
|
|
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
|
|
@@ -238,7 +238,7 @@ qede_rdma_get_free_event_node(struct qede_dev *edev)
|
|
}
|
|
|
|
if (!found) {
|
|
- event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
|
|
+ event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
|
|
if (!event_node) {
|
|
DP_NOTICE(edev,
|
|
"qedr: Could not allocate memory for rdma work\n");
|
|
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
|
|
index a3f456b91c99..e9e67c22c8bb 100644
|
|
--- a/drivers/net/phy/marvell.c
|
|
+++ b/drivers/net/phy/marvell.c
|
|
@@ -1409,6 +1409,15 @@ static int m88e1318_set_wol(struct phy_device *phydev,
|
|
if (err < 0)
|
|
return err;
|
|
|
|
+ /* If WOL event happened once, the LED[2] interrupt pin
|
|
+	 * will not be cleared unless we read the interrupt status
|
|
+ * register. If interrupts are in use, the normal interrupt
|
|
+ * handling will clear the WOL event. Clear the WOL event
|
|
+ * before enabling it if !phy_interrupt_is_valid()
|
|
+ */
|
|
+ if (!phy_interrupt_is_valid(phydev))
|
|
+ phy_read(phydev, MII_M1011_IEVENT);
|
|
+
|
|
/* Enable the WOL interrupt */
|
|
temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
|
|
temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
|
|
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
|
|
index 46d6cb1e03bd..8f845de8a8a2 100644
|
|
--- a/drivers/nvme/host/Kconfig
|
|
+++ b/drivers/nvme/host/Kconfig
|
|
@@ -18,7 +18,7 @@ config NVME_FABRICS
|
|
|
|
config NVME_RDMA
|
|
tristate "NVM Express over Fabrics RDMA host driver"
|
|
- depends on INFINIBAND && BLOCK
|
|
+ depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
|
|
select NVME_CORE
|
|
select NVME_FABRICS
|
|
select SG_POOL
|
|
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
|
|
index dd956311a85a..38c128f230e7 100644
|
|
--- a/drivers/nvme/host/core.c
|
|
+++ b/drivers/nvme/host/core.c
|
|
@@ -665,6 +665,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
|
|
ret = PTR_ERR(meta);
|
|
goto out_unmap;
|
|
}
|
|
+ req->cmd_flags |= REQ_INTEGRITY;
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
|
|
index 740aae51e1c6..33d060c524e6 100644
|
|
--- a/drivers/nvme/host/fabrics.c
|
|
+++ b/drivers/nvme/host/fabrics.c
|
|
@@ -587,6 +587,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
|
|
ret = -ENOMEM;
|
|
goto out;
|
|
}
|
|
+ kfree(opts->transport);
|
|
opts->transport = p;
|
|
break;
|
|
case NVMF_OPT_NQN:
|
|
@@ -595,6 +596,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
|
|
ret = -ENOMEM;
|
|
goto out;
|
|
}
|
|
+ kfree(opts->subsysnqn);
|
|
opts->subsysnqn = p;
|
|
nqnlen = strlen(opts->subsysnqn);
|
|
if (nqnlen >= NVMF_NQN_SIZE) {
|
|
@@ -617,6 +619,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
|
|
ret = -ENOMEM;
|
|
goto out;
|
|
}
|
|
+ kfree(opts->traddr);
|
|
opts->traddr = p;
|
|
break;
|
|
case NVMF_OPT_TRSVCID:
|
|
@@ -625,6 +628,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
|
|
ret = -ENOMEM;
|
|
goto out;
|
|
}
|
|
+ kfree(opts->trsvcid);
|
|
opts->trsvcid = p;
|
|
break;
|
|
case NVMF_OPT_QUEUE_SIZE:
|
|
@@ -706,6 +710,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
|
|
ret = -EINVAL;
|
|
goto out;
|
|
}
|
|
+ nvmf_host_put(opts->host);
|
|
opts->host = nvmf_host_add(p);
|
|
kfree(p);
|
|
if (!opts->host) {
|
|
@@ -731,6 +736,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
|
|
ret = -ENOMEM;
|
|
goto out;
|
|
}
|
|
+ kfree(opts->host_traddr);
|
|
opts->host_traddr = p;
|
|
break;
|
|
case NVMF_OPT_HOST_ID:
|
|
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
|
|
index 03e4ab65fe77..48d20c2c1256 100644
|
|
--- a/drivers/nvme/target/Kconfig
|
|
+++ b/drivers/nvme/target/Kconfig
|
|
@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
|
|
|
|
config NVME_TARGET_RDMA
|
|
tristate "NVMe over Fabrics RDMA target support"
|
|
- depends on INFINIBAND
|
|
+ depends on INFINIBAND && INFINIBAND_ADDR_TRANS
|
|
depends on NVME_TARGET
|
|
help
|
|
This enables the NVMe RDMA target support, which allows exporting NVMe
|
|
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
|
|
index dc3033cf3c19..efc317e7669d 100644
|
|
--- a/drivers/pci/dwc/pcie-kirin.c
|
|
+++ b/drivers/pci/dwc/pcie-kirin.c
|
|
@@ -490,7 +490,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
|
|
return ret;
|
|
|
|
kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
|
|
- "reset-gpio", 0);
|
|
+ "reset-gpios", 0);
|
|
if (kirin_pcie->gpio_id_reset < 0)
|
|
return -ENODEV;
|
|
|
|
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
|
|
index 2d3d5ac92c06..81ec9b6805fc 100644
|
|
--- a/drivers/remoteproc/qcom_q6v5_pil.c
|
|
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
|
|
@@ -915,6 +915,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
|
|
dev_err(qproc->dev, "unable to resolve mba region\n");
|
|
return ret;
|
|
}
|
|
+ of_node_put(node);
|
|
|
|
qproc->mba_phys = r.start;
|
|
qproc->mba_size = resource_size(&r);
|
|
@@ -932,6 +933,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
|
|
dev_err(qproc->dev, "unable to resolve mpss region\n");
|
|
return ret;
|
|
}
|
|
+ of_node_put(node);
|
|
|
|
qproc->mpss_phys = qproc->mpss_reloc = r.start;
|
|
qproc->mpss_size = resource_size(&r);
|
|
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
|
|
index e0996fce3963..6a5b5b16145e 100644
|
|
--- a/drivers/rpmsg/rpmsg_char.c
|
|
+++ b/drivers/rpmsg/rpmsg_char.c
|
|
@@ -581,4 +581,6 @@ static void rpmsg_chrdev_exit(void)
|
|
unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
|
|
}
|
|
module_exit(rpmsg_chrdev_exit);
|
|
+
|
|
+MODULE_ALIAS("rpmsg:rpmsg_chrdev");
|
|
MODULE_LICENSE("GPL v2");
|
|
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
|
|
index a851d34c642b..04674ce961f1 100644
|
|
--- a/drivers/s390/net/smsgiucv.c
|
|
+++ b/drivers/s390/net/smsgiucv.c
|
|
@@ -189,7 +189,7 @@ static struct device_driver smsg_driver = {
|
|
|
|
static void __exit smsg_exit(void)
|
|
{
|
|
- cpcmd("SET SMSG IUCV", NULL, 0, NULL);
|
|
+ cpcmd("SET SMSG OFF", NULL, 0, NULL);
|
|
device_unregister(smsg_dev);
|
|
iucv_unregister(&smsg_handler, 1);
|
|
driver_unregister(&smsg_driver);
|
|
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
|
|
index ac879745ef80..18a409bb9e0c 100644
|
|
--- a/drivers/scsi/isci/port_config.c
|
|
+++ b/drivers/scsi/isci/port_config.c
|
|
@@ -291,7 +291,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
|
|
* Note: We have not moved the current phy_index so we will actually
|
|
* compare the startting phy with itself.
|
|
* This is expected and required to add the phy to the port. */
|
|
- while (phy_index < SCI_MAX_PHYS) {
|
|
+ for (; phy_index < SCI_MAX_PHYS; phy_index++) {
|
|
if ((phy_mask & (1 << phy_index)) == 0)
|
|
continue;
|
|
sci_phy_get_sas_address(&ihost->phys[phy_index],
|
|
@@ -311,7 +311,6 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
|
|
&ihost->phys[phy_index]);
|
|
|
|
assigned_phy_mask |= (1 << phy_index);
|
|
- phy_index++;
|
|
}
|
|
|
|
}
|
|
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
index 4bf406df051b..72a919179d06 100644
|
|
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
|
|
@@ -903,7 +903,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
|
|
goto fail_fw_init;
|
|
}
|
|
|
|
- ret = 0;
|
|
+ return 0;
|
|
|
|
fail_fw_init:
|
|
megasas_return_cmd(instance, cmd);
|
|
@@ -913,8 +913,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
|
|
IOCInitMessage, ioc_init_handle);
|
|
fail_get_cmd:
|
|
dev_err(&instance->pdev->dev,
|
|
- "Init cmd return status %s for SCSI host %d\n",
|
|
- ret ? "FAILED" : "SUCCESS", instance->host->host_no);
|
|
+ "Init cmd return status FAILED for SCSI host %d\n",
|
|
+ instance->host->host_no);
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
|
|
index 7404d26895f5..f6542c159ed6 100644
|
|
--- a/drivers/scsi/scsi_transport_iscsi.c
|
|
+++ b/drivers/scsi/scsi_transport_iscsi.c
|
|
@@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
|
|
return nlmsg_multicast(nls, skb, 0, group, gfp);
|
|
}
|
|
|
|
+static int
|
|
+iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
|
|
+{
|
|
+ return nlmsg_unicast(nls, skb, portid);
|
|
+}
|
|
+
|
|
int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
|
|
char *data, uint32_t data_size)
|
|
{
|
|
@@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
|
|
EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
|
|
|
|
static int
|
|
-iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
|
|
- void *payload, int size)
|
|
+iscsi_if_send_reply(u32 portid, int type, void *payload, int size)
|
|
{
|
|
struct sk_buff *skb;
|
|
struct nlmsghdr *nlh;
|
|
int len = nlmsg_total_size(size);
|
|
- int flags = multi ? NLM_F_MULTI : 0;
|
|
- int t = done ? NLMSG_DONE : type;
|
|
|
|
skb = alloc_skb(len, GFP_ATOMIC);
|
|
if (!skb) {
|
|
@@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
|
|
- nlh->nlmsg_flags = flags;
|
|
+ nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
|
|
memcpy(nlmsg_data(nlh), payload, size);
|
|
- return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
|
|
+ return iscsi_unicast_skb(skb, portid);
|
|
}
|
|
|
|
static int
|
|
@@ -3470,6 +3472,7 @@ static int
|
|
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
|
|
{
|
|
int err = 0;
|
|
+ u32 portid;
|
|
struct iscsi_uevent *ev = nlmsg_data(nlh);
|
|
struct iscsi_transport *transport = NULL;
|
|
struct iscsi_internal *priv;
|
|
@@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
|
|
if (!try_module_get(transport->owner))
|
|
return -EINVAL;
|
|
|
|
+ portid = NETLINK_CB(skb).portid;
|
|
+
|
|
switch (nlh->nlmsg_type) {
|
|
case ISCSI_UEVENT_CREATE_SESSION:
|
|
err = iscsi_if_create_session(priv, ep, ev,
|
|
- NETLINK_CB(skb).portid,
|
|
+ portid,
|
|
ev->u.c_session.initial_cmdsn,
|
|
ev->u.c_session.cmds_max,
|
|
ev->u.c_session.queue_depth);
|
|
@@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
|
|
}
|
|
|
|
err = iscsi_if_create_session(priv, ep, ev,
|
|
- NETLINK_CB(skb).portid,
|
|
+ portid,
|
|
ev->u.c_bound_session.initial_cmdsn,
|
|
ev->u.c_bound_session.cmds_max,
|
|
ev->u.c_bound_session.queue_depth);
|
|
@@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
|
|
static void
|
|
iscsi_if_rx(struct sk_buff *skb)
|
|
{
|
|
+ u32 portid = NETLINK_CB(skb).portid;
|
|
+
|
|
mutex_lock(&rx_queue_mutex);
|
|
while (skb->len >= NLMSG_HDRLEN) {
|
|
int err;
|
|
@@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb)
|
|
break;
|
|
if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
|
|
break;
|
|
- err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
|
|
- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
|
|
+ err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
|
|
+ ev, sizeof(*ev));
|
|
} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
|
|
skb_pull(skb, rlen);
|
|
}
|
|
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
|
|
index c44de0b4a995..beb585ddc07d 100644
|
|
--- a/drivers/scsi/storvsc_drv.c
|
|
+++ b/drivers/scsi/storvsc_drv.c
|
|
@@ -1725,11 +1725,14 @@ static int storvsc_probe(struct hv_device *device,
|
|
max_targets = STORVSC_MAX_TARGETS;
|
|
max_channels = STORVSC_MAX_CHANNELS;
|
|
/*
|
|
- * On Windows8 and above, we support sub-channels for storage.
|
|
+ * On Windows8 and above, we support sub-channels for storage
|
|
+ * on SCSI and FC controllers.
|
|
* The number of sub-channels offerred is based on the number of
|
|
* VCPUs in the guest.
|
|
*/
|
|
- max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
|
|
+ if (!dev_is_ide)
|
|
+ max_sub_channels =
|
|
+ (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
|
|
}
|
|
|
|
scsi_driver.can_queue = (max_outstanding_req_per_channel *
|
|
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
|
|
index c374e3b5c678..777e5f1e52d1 100644
|
|
--- a/drivers/scsi/vmw_pvscsi.c
|
|
+++ b/drivers/scsi/vmw_pvscsi.c
|
|
@@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
|
|
break;
|
|
|
|
case BTSTAT_ABORTQUEUE:
|
|
- cmd->result = (DID_ABORT << 16);
|
|
+ cmd->result = (DID_BUS_BUSY << 16);
|
|
break;
|
|
|
|
case BTSTAT_SCSIPARITY:
|
|
diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
|
|
index fe96a8b956fb..f7ed1187518b 100644
|
|
--- a/drivers/soc/bcm/raspberrypi-power.c
|
|
+++ b/drivers/soc/bcm/raspberrypi-power.c
|
|
@@ -45,7 +45,7 @@ struct rpi_power_domains {
|
|
struct rpi_power_domain_packet {
|
|
u32 domain;
|
|
u32 on;
|
|
-} __packet;
|
|
+};
|
|
|
|
/*
|
|
* Asks the firmware to enable or disable power on a specific power
|
|
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
|
|
index 7428091d3f5b..bd00b7cc8b78 100644
|
|
--- a/drivers/spi/spi-bcm2835aux.c
|
|
+++ b/drivers/spi/spi-bcm2835aux.c
|
|
@@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
|
|
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
|
|
irqreturn_t ret = IRQ_NONE;
|
|
|
|
+ /* IRQ may be shared, so return if our interrupts are disabled */
|
|
+ if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
|
|
+ (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
|
|
+ return ret;
|
|
+
|
|
/* check if we have data to read */
|
|
while (bs->rx_len &&
|
|
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
|
|
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
|
|
index 5c9516ae4942..4a001634023e 100644
|
|
--- a/drivers/spi/spi-cadence.c
|
|
+++ b/drivers/spi/spi-cadence.c
|
|
@@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
|
|
|
|
while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
|
|
(xspi->tx_bytes > 0)) {
|
|
+
|
|
+		/* When xspi is in a busy condition, bytes may fail to send,
|
|
+		 * then the SPI controller does not work properly, so add a one byte delay
|
|
+ */
|
|
+ if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
|
|
+ CDNS_SPI_IXR_TXFULL)
|
|
+ usleep_range(10, 20);
|
|
+
|
|
if (xspi->txbuf)
|
|
cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
|
|
else
|
|
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
|
|
index 2770fbd4ce49..52056535f54e 100644
|
|
--- a/drivers/spi/spi-sh-msiof.c
|
|
+++ b/drivers/spi/spi-sh-msiof.c
|
|
@@ -277,6 +277,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
|
|
}
|
|
|
|
k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
|
|
+ brps = min_t(int, brps, 32);
|
|
|
|
scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
|
|
sh_msiof_write(p, TSCR, scr);
|
|
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
|
|
index 0d99b242e82e..6cb933ecc084 100644
|
|
--- a/drivers/target/target_core_pscsi.c
|
|
+++ b/drivers/target/target_core_pscsi.c
|
|
@@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
|
|
bytes = min(bytes, data_len);
|
|
|
|
if (!bio) {
|
|
+new_bio:
|
|
nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
|
|
nr_pages -= nr_vecs;
|
|
/*
|
|
@@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
|
|
* be allocated with pscsi_get_bio() above.
|
|
*/
|
|
bio = NULL;
|
|
+ goto new_bio;
|
|
}
|
|
|
|
data_len -= bytes;
|
|
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
|
|
index 58a5009eacc3..a548c3695797 100644
|
|
--- a/drivers/tee/tee_core.c
|
|
+++ b/drivers/tee/tee_core.c
|
|
@@ -181,6 +181,17 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
|
|
if (IS_ERR(shm))
|
|
return PTR_ERR(shm);
|
|
|
|
+ /*
|
|
+ * Ensure offset + size does not overflow offset
|
|
+ * and does not overflow the size of the referred
|
|
+ * shared memory object.
|
|
+ */
|
|
+ if ((ip.a + ip.b) < ip.a ||
|
|
+ (ip.a + ip.b) > shm->size) {
|
|
+ tee_shm_put(shm);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
params[n].u.memref.shm_offs = ip.a;
|
|
params[n].u.memref.size = ip.b;
|
|
params[n].u.memref.shm = shm;
|
|
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
|
|
index 8a7f24dd9315..0c19fcd56a0d 100644
|
|
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
|
|
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
|
|
@@ -194,6 +194,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
|
|
return -EFAULT;
|
|
}
|
|
|
|
+ priv->priv = obj;
|
|
obj->max_state = p->package.count - 1;
|
|
obj->cdev =
|
|
thermal_cooling_device_register(acpi_device_bid(priv->adev),
|
|
@@ -201,8 +202,6 @@ static int int3403_cdev_add(struct int3403_priv *priv)
|
|
if (IS_ERR(obj->cdev))
|
|
result = PTR_ERR(obj->cdev);
|
|
|
|
- priv->priv = obj;
|
|
-
|
|
kfree(buf.pointer);
|
|
/* TODO: add ACPI notification support */
|
|
|
|
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
|
|
index 5a6dca01a1d0..802388bb42ba 100644
|
|
--- a/drivers/usb/musb/musb_host.c
|
|
+++ b/drivers/usb/musb/musb_host.c
|
|
@@ -2560,8 +2560,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
|
|
{
|
|
struct musb *musb = hcd_to_musb(hcd);
|
|
u8 devctl;
|
|
+ int ret;
|
|
|
|
- musb_port_suspend(musb, true);
|
|
+ ret = musb_port_suspend(musb, true);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
if (!is_host_active(musb))
|
|
return 0;
|
|
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
|
|
index 7bbf01bf4bb0..54d02ed032df 100644
|
|
--- a/drivers/usb/musb/musb_host.h
|
|
+++ b/drivers/usb/musb/musb_host.h
|
|
@@ -92,7 +92,7 @@ extern void musb_host_rx(struct musb *, u8);
|
|
extern void musb_root_disconnect(struct musb *musb);
|
|
extern void musb_host_resume_root_hub(struct musb *musb);
|
|
extern void musb_host_poke_root_hub(struct musb *musb);
|
|
-extern void musb_port_suspend(struct musb *musb, bool do_suspend);
|
|
+extern int musb_port_suspend(struct musb *musb, bool do_suspend);
|
|
extern void musb_port_reset(struct musb *musb, bool do_reset);
|
|
extern void musb_host_finish_resume(struct work_struct *work);
|
|
#else
|
|
@@ -124,7 +124,10 @@ static inline void musb_root_disconnect(struct musb *musb) {}
|
|
static inline void musb_host_resume_root_hub(struct musb *musb) {}
|
|
static inline void musb_host_poll_rh_status(struct musb *musb) {}
|
|
static inline void musb_host_poke_root_hub(struct musb *musb) {}
|
|
-static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {}
|
|
+static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
|
|
static inline void musb_host_finish_resume(struct work_struct *work) {}
|
|
#endif
|
|
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
|
|
index 0b4595439d51..5eca5d2d5e00 100644
|
|
--- a/drivers/usb/musb/musb_virthub.c
|
|
+++ b/drivers/usb/musb/musb_virthub.c
|
|
@@ -73,14 +73,14 @@ void musb_host_finish_resume(struct work_struct *work)
|
|
spin_unlock_irqrestore(&musb->lock, flags);
|
|
}
|
|
|
|
-void musb_port_suspend(struct musb *musb, bool do_suspend)
|
|
+int musb_port_suspend(struct musb *musb, bool do_suspend)
|
|
{
|
|
struct usb_otg *otg = musb->xceiv->otg;
|
|
u8 power;
|
|
void __iomem *mbase = musb->mregs;
|
|
|
|
if (!is_host_active(musb))
|
|
- return;
|
|
+ return 0;
|
|
|
|
/* NOTE: this doesn't necessarily put PHY into low power mode,
|
|
* turning off its clock; that's a function of PHY integration and
|
|
@@ -91,16 +91,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
|
|
if (do_suspend) {
|
|
int retries = 10000;
|
|
|
|
- power &= ~MUSB_POWER_RESUME;
|
|
- power |= MUSB_POWER_SUSPENDM;
|
|
- musb_writeb(mbase, MUSB_POWER, power);
|
|
+ if (power & MUSB_POWER_RESUME)
|
|
+ return -EBUSY;
|
|
|
|
- /* Needed for OPT A tests */
|
|
- power = musb_readb(mbase, MUSB_POWER);
|
|
- while (power & MUSB_POWER_SUSPENDM) {
|
|
+ if (!(power & MUSB_POWER_SUSPENDM)) {
|
|
+ power |= MUSB_POWER_SUSPENDM;
|
|
+ musb_writeb(mbase, MUSB_POWER, power);
|
|
+
|
|
+ /* Needed for OPT A tests */
|
|
power = musb_readb(mbase, MUSB_POWER);
|
|
- if (retries-- < 1)
|
|
- break;
|
|
+ while (power & MUSB_POWER_SUSPENDM) {
|
|
+ power = musb_readb(mbase, MUSB_POWER);
|
|
+ if (retries-- < 1)
|
|
+ break;
|
|
+ }
|
|
}
|
|
|
|
musb_dbg(musb, "Root port suspended, power %02x", power);
|
|
@@ -136,6 +140,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
|
|
schedule_delayed_work(&musb->finish_resume_work,
|
|
msecs_to_jiffies(USB_RESUME_TIMEOUT));
|
|
}
|
|
+ return 0;
|
|
}
|
|
|
|
void musb_port_reset(struct musb *musb, bool do_reset)
|
|
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
|
|
index b57891c1fd31..7afbea512207 100644
|
|
--- a/drivers/usb/typec/ucsi/Makefile
|
|
+++ b/drivers/usb/typec/ucsi/Makefile
|
|
@@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o
|
|
|
|
typec_ucsi-y := ucsi.o
|
|
|
|
-typec_ucsi-$(CONFIG_FTRACE) += trace.o
|
|
+typec_ucsi-$(CONFIG_TRACING) += trace.o
|
|
|
|
obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
|
|
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
|
|
index d2edbc79384a..83243af22d51 100644
|
|
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
|
|
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
|
|
@@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
|
|
{
|
|
struct {
|
|
struct xsd_sockmsg hdr;
|
|
- const char body[16];
|
|
+ char body[16];
|
|
} msg;
|
|
int rc;
|
|
|
|
@@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
|
|
msg.hdr.len = strlen(reply) + 1;
|
|
if (msg.hdr.len > sizeof(msg.body))
|
|
return -E2BIG;
|
|
+ memcpy(&msg.body, reply, msg.hdr.len);
|
|
|
|
mutex_lock(&u->reply_mutex);
|
|
rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
|
|
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
|
|
index 9f715c3edcf9..ccc9c708a860 100644
|
|
--- a/fs/afs/rxrpc.c
|
|
+++ b/fs/afs/rxrpc.c
|
|
@@ -55,6 +55,7 @@ int afs_open_socket(void)
|
|
{
|
|
struct sockaddr_rxrpc srx;
|
|
struct socket *socket;
|
|
+ unsigned int min_level;
|
|
int ret;
|
|
|
|
_enter("");
|
|
@@ -80,6 +81,12 @@ int afs_open_socket(void)
|
|
memset(&srx.transport.sin.sin_addr, 0,
|
|
sizeof(srx.transport.sin.sin_addr));
|
|
|
|
+ min_level = RXRPC_SECURITY_ENCRYPT;
|
|
+ ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
|
|
+ (void *)&min_level, sizeof(min_level));
|
|
+ if (ret < 0)
|
|
+ goto error_2;
|
|
+
|
|
ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
|
|
if (ret < 0)
|
|
goto error_2;
|
|
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
|
|
index e3f6c49e5c4d..24613b4e224c 100644
|
|
--- a/fs/btrfs/scrub.c
|
|
+++ b/fs/btrfs/scrub.c
|
|
@@ -301,6 +301,11 @@ static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
|
|
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
|
|
static void scrub_put_ctx(struct scrub_ctx *sctx);
|
|
|
|
+static inline int scrub_is_page_on_raid56(struct scrub_page *page)
|
|
+{
|
|
+ return page->recover &&
|
|
+ (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
|
|
+}
|
|
|
|
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
|
|
{
|
|
@@ -1323,15 +1328,34 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
|
|
* could happen otherwise that a correct page would be
|
|
* overwritten by a bad one).
|
|
*/
|
|
- for (mirror_index = 0;
|
|
- mirror_index < BTRFS_MAX_MIRRORS &&
|
|
- sblocks_for_recheck[mirror_index].page_count > 0;
|
|
- mirror_index++) {
|
|
+ for (mirror_index = 0; ;mirror_index++) {
|
|
struct scrub_block *sblock_other;
|
|
|
|
if (mirror_index == failed_mirror_index)
|
|
continue;
|
|
- sblock_other = sblocks_for_recheck + mirror_index;
|
|
+
|
|
+ /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
|
|
+ if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
|
|
+ if (mirror_index >= BTRFS_MAX_MIRRORS)
|
|
+ break;
|
|
+ if (!sblocks_for_recheck[mirror_index].page_count)
|
|
+ break;
|
|
+
|
|
+ sblock_other = sblocks_for_recheck + mirror_index;
|
|
+ } else {
|
|
+ struct scrub_recover *r = sblock_bad->pagev[0]->recover;
|
|
+ int max_allowed = r->bbio->num_stripes -
|
|
+ r->bbio->num_tgtdevs;
|
|
+
|
|
+ if (mirror_index >= max_allowed)
|
|
+ break;
|
|
+ if (!sblocks_for_recheck[1].page_count)
|
|
+ break;
|
|
+
|
|
+ ASSERT(failed_mirror_index == 0);
|
|
+ sblock_other = sblocks_for_recheck + 1;
|
|
+ sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
|
|
+ }
|
|
|
|
/* build and submit the bios, check checksums */
|
|
scrub_recheck_block(fs_info, sblock_other, 0);
|
|
@@ -1679,18 +1703,13 @@ static void scrub_bio_wait_endio(struct bio *bio)
|
|
complete(&ret->event);
|
|
}
|
|
|
|
-static inline int scrub_is_page_on_raid56(struct scrub_page *page)
|
|
-{
|
|
- return page->recover &&
|
|
- (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
|
|
-}
|
|
-
|
|
static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
|
|
struct bio *bio,
|
|
struct scrub_page *page)
|
|
{
|
|
struct scrub_bio_ret done;
|
|
int ret;
|
|
+ int mirror_num;
|
|
|
|
init_completion(&done.event);
|
|
done.status = 0;
|
|
@@ -1698,9 +1717,10 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
|
|
bio->bi_private = &done;
|
|
bio->bi_end_io = scrub_bio_wait_endio;
|
|
|
|
+ mirror_num = page->sblock->pagev[0]->mirror_num;
|
|
ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
|
|
page->recover->map_length,
|
|
- page->mirror_num, 0);
|
|
+ mirror_num, 0);
|
|
if (ret)
|
|
return ret;
|
|
|
|
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
|
|
index eef875da7c0b..839327f75e3d 100644
|
|
--- a/fs/cifs/smb2ops.c
|
|
+++ b/fs/cifs/smb2ops.c
|
|
@@ -570,9 +570,15 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
|
|
|
|
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
|
|
|
|
+ /*
|
|
+ * If ea_name is NULL (listxattr) and there are no EAs, return 0 as it's
|
|
+ * not an error. Otherwise, the specified ea_name was not found.
|
|
+ */
|
|
if (!rc)
|
|
rc = move_smb2_ea_to_cifs(ea_data, buf_size, smb2_data,
|
|
SMB2_MAX_EA_BUF, ea_name);
|
|
+ else if (!ea_name && rc == -ENODATA)
|
|
+ rc = 0;
|
|
|
|
kfree(smb2_data);
|
|
return rc;
|
|
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
|
|
index e5e29f8c920b..9d1823efff34 100644
|
|
--- a/fs/ecryptfs/crypto.c
|
|
+++ b/fs/ecryptfs/crypto.c
|
|
@@ -2026,6 +2026,16 @@ int ecryptfs_encrypt_and_encode_filename(
|
|
return rc;
|
|
}
|
|
|
|
+static bool is_dot_dotdot(const char *name, size_t name_size)
|
|
+{
|
|
+ if (name_size == 1 && name[0] == '.')
|
|
+ return true;
|
|
+ else if (name_size == 2 && name[0] == '.' && name[1] == '.')
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
/**
|
|
* ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext
|
|
* @plaintext_name: The plaintext name
|
|
@@ -2050,13 +2060,21 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
|
|
size_t packet_size;
|
|
int rc = 0;
|
|
|
|
- if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
|
|
- && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
|
|
- && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)
|
|
- && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
|
|
- ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) {
|
|
- const char *orig_name = name;
|
|
- size_t orig_name_size = name_size;
|
|
+ if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) &&
|
|
+ !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)) {
|
|
+ if (is_dot_dotdot(name, name_size)) {
|
|
+ rc = ecryptfs_copy_filename(plaintext_name,
|
|
+ plaintext_name_size,
|
|
+ name, name_size);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ if (name_size <= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE ||
|
|
+ strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
|
|
+ ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)) {
|
|
+ rc = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
|
|
name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
|
|
name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
|
|
@@ -2079,12 +2097,9 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
|
|
decoded_name,
|
|
decoded_name_size);
|
|
if (rc) {
|
|
- printk(KERN_INFO "%s: Could not parse tag 70 packet "
|
|
- "from filename; copying through filename "
|
|
- "as-is\n", __func__);
|
|
- rc = ecryptfs_copy_filename(plaintext_name,
|
|
- plaintext_name_size,
|
|
- orig_name, orig_name_size);
|
|
+ ecryptfs_printk(KERN_DEBUG,
|
|
+ "%s: Could not parse tag 70 packet from filename\n",
|
|
+ __func__);
|
|
goto out_free;
|
|
}
|
|
} else {
|
|
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
|
|
index c74ed3ca3372..b76a9853325e 100644
|
|
--- a/fs/ecryptfs/file.c
|
|
+++ b/fs/ecryptfs/file.c
|
|
@@ -82,17 +82,28 @@ ecryptfs_filldir(struct dir_context *ctx, const char *lower_name,
|
|
buf->sb, lower_name,
|
|
lower_namelen);
|
|
if (rc) {
|
|
- printk(KERN_ERR "%s: Error attempting to decode and decrypt "
|
|
- "filename [%s]; rc = [%d]\n", __func__, lower_name,
|
|
- rc);
|
|
- goto out;
|
|
+ if (rc != -EINVAL) {
|
|
+ ecryptfs_printk(KERN_DEBUG,
|
|
+ "%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n",
|
|
+ __func__, lower_name, rc);
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+		/* Mask -EINVAL errors as these are most likely due to a plaintext
|
|
+ * filename present in the lower filesystem despite filename
|
|
+ * encryption being enabled. One unavoidable example would be
|
|
+ * the "lost+found" dentry in the root directory of an Ext4
|
|
+ * filesystem.
|
|
+ */
|
|
+ return 0;
|
|
}
|
|
+
|
|
buf->caller->pos = buf->ctx.pos;
|
|
rc = !dir_emit(buf->caller, name, name_size, ino, d_type);
|
|
kfree(name);
|
|
if (!rc)
|
|
buf->entries_written++;
|
|
-out:
|
|
+
|
|
return rc;
|
|
}
|
|
|
|
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
|
|
index 447a24d77b89..ed4edcd2bc56 100644
|
|
--- a/fs/isofs/inode.c
|
|
+++ b/fs/isofs/inode.c
|
|
@@ -394,7 +394,10 @@ static int parse_options(char *options, struct iso9660_options *popt)
|
|
break;
|
|
#ifdef CONFIG_JOLIET
|
|
case Opt_iocharset:
|
|
+ kfree(popt->iocharset);
|
|
popt->iocharset = match_strdup(&args[0]);
|
|
+ if (!popt->iocharset)
|
|
+ return 0;
|
|
break;
|
|
#endif
|
|
case Opt_map_a:
|
|
diff --git a/fs/namespace.c b/fs/namespace.c
|
|
index 62b17aff1908..1eb3bfd8be5a 100644
|
|
--- a/fs/namespace.c
|
|
+++ b/fs/namespace.c
|
|
@@ -2810,7 +2810,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
|
|
mnt_flags |= MNT_NODIRATIME;
|
|
if (flags & MS_STRICTATIME)
|
|
mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
|
|
- if (flags & SB_RDONLY)
|
|
+ if (flags & MS_RDONLY)
|
|
mnt_flags |= MNT_READONLY;
|
|
|
|
/* The default atime for remount is preservation */
|
|
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
|
|
index 074716293829..d76c81323dc1 100644
|
|
--- a/fs/notify/fsnotify.c
|
|
+++ b/fs/notify/fsnotify.c
|
|
@@ -192,8 +192,9 @@ static int send_to_group(struct inode *to_tell,
|
|
struct fsnotify_iter_info *iter_info)
|
|
{
|
|
struct fsnotify_group *group = NULL;
|
|
- __u32 inode_test_mask = 0;
|
|
- __u32 vfsmount_test_mask = 0;
|
|
+ __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
|
|
+ __u32 marks_mask = 0;
|
|
+ __u32 marks_ignored_mask = 0;
|
|
|
|
if (unlikely(!inode_mark && !vfsmount_mark)) {
|
|
BUG();
|
|
@@ -213,29 +214,25 @@ static int send_to_group(struct inode *to_tell,
|
|
/* does the inode mark tell us to do something? */
|
|
if (inode_mark) {
|
|
group = inode_mark->group;
|
|
- inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
|
|
- inode_test_mask &= inode_mark->mask;
|
|
- inode_test_mask &= ~inode_mark->ignored_mask;
|
|
+ marks_mask |= inode_mark->mask;
|
|
+ marks_ignored_mask |= inode_mark->ignored_mask;
|
|
}
|
|
|
|
/* does the vfsmount_mark tell us to do something? */
|
|
if (vfsmount_mark) {
|
|
- vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
|
|
group = vfsmount_mark->group;
|
|
- vfsmount_test_mask &= vfsmount_mark->mask;
|
|
- vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
|
|
- if (inode_mark)
|
|
- vfsmount_test_mask &= ~inode_mark->ignored_mask;
|
|
+ marks_mask |= vfsmount_mark->mask;
|
|
+ marks_ignored_mask |= vfsmount_mark->ignored_mask;
|
|
}
|
|
|
|
pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
|
|
- " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
|
|
+ " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x"
|
|
" data=%p data_is=%d cookie=%d\n",
|
|
- __func__, group, to_tell, mask, inode_mark,
|
|
- inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
|
|
+ __func__, group, to_tell, mask, inode_mark, vfsmount_mark,
|
|
+ marks_mask, marks_ignored_mask, data,
|
|
data_is, cookie);
|
|
|
|
- if (!inode_test_mask && !vfsmount_test_mask)
|
|
+ if (!(test_mask & marks_mask & ~marks_ignored_mask))
|
|
return 0;
|
|
|
|
return group->ops->handle_event(group, to_tell, inode_mark,
|
|
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
|
|
index ab156e35ec00..1b1283f07941 100644
|
|
--- a/fs/ocfs2/refcounttree.c
|
|
+++ b/fs/ocfs2/refcounttree.c
|
|
@@ -4250,10 +4250,11 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
|
|
static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
|
|
struct dentry *new_dentry, bool preserve)
|
|
{
|
|
- int error;
|
|
+ int error, had_lock;
|
|
struct inode *inode = d_inode(old_dentry);
|
|
struct buffer_head *old_bh = NULL;
|
|
struct inode *new_orphan_inode = NULL;
|
|
+ struct ocfs2_lock_holder oh;
|
|
|
|
if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
|
|
return -EOPNOTSUPP;
|
|
@@ -4295,6 +4296,14 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
|
|
goto out;
|
|
}
|
|
|
|
+ had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1,
|
|
+ &oh);
|
|
+ if (had_lock < 0) {
|
|
+ error = had_lock;
|
|
+ mlog_errno(error);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
/* If the security isn't preserved, we need to re-initialize them. */
|
|
if (!preserve) {
|
|
error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
|
|
@@ -4302,14 +4311,15 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
|
|
if (error)
|
|
mlog_errno(error);
|
|
}
|
|
-out:
|
|
if (!error) {
|
|
error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
|
|
new_dentry);
|
|
if (error)
|
|
mlog_errno(error);
|
|
}
|
|
+ ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock);
|
|
|
|
+out:
|
|
if (new_orphan_inode) {
|
|
/*
|
|
* We need to open_unlock the inode no matter whether we
|
|
diff --git a/fs/proc/base.c b/fs/proc/base.c
|
|
index dd9d4d3a2e39..c5c42f3e33d1 100644
|
|
--- a/fs/proc/base.c
|
|
+++ b/fs/proc/base.c
|
|
@@ -1694,6 +1694,12 @@ void task_dump_owner(struct task_struct *task, mode_t mode,
|
|
kuid_t uid;
|
|
kgid_t gid;
|
|
|
|
+ if (unlikely(task->flags & PF_KTHREAD)) {
|
|
+ *ruid = GLOBAL_ROOT_UID;
|
|
+ *rgid = GLOBAL_ROOT_GID;
|
|
+ return;
|
|
+ }
|
|
+
|
|
/* Default to the tasks effective ownership */
|
|
rcu_read_lock();
|
|
cred = __task_cred(task);
|
|
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
|
|
index d1e82761de81..e64ecb9f2720 100644
|
|
--- a/fs/proc/kcore.c
|
|
+++ b/fs/proc/kcore.c
|
|
@@ -209,25 +209,34 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
|
|
{
|
|
struct list_head *head = (struct list_head *)arg;
|
|
struct kcore_list *ent;
|
|
+ struct page *p;
|
|
+
|
|
+ if (!pfn_valid(pfn))
|
|
+ return 1;
|
|
+
|
|
+ p = pfn_to_page(pfn);
|
|
+ if (!memmap_valid_within(pfn, p, page_zone(p)))
|
|
+ return 1;
|
|
|
|
ent = kmalloc(sizeof(*ent), GFP_KERNEL);
|
|
if (!ent)
|
|
return -ENOMEM;
|
|
- ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
|
|
+ ent->addr = (unsigned long)page_to_virt(p);
|
|
ent->size = nr_pages << PAGE_SHIFT;
|
|
|
|
- /* Sanity check: Can happen in 32bit arch...maybe */
|
|
- if (ent->addr < (unsigned long) __va(0))
|
|
+ if (!virt_addr_valid(ent->addr))
|
|
goto free_out;
|
|
|
|
/* cut not-mapped area. ....from ppc-32 code. */
|
|
if (ULONG_MAX - ent->addr < ent->size)
|
|
ent->size = ULONG_MAX - ent->addr;
|
|
|
|
- /* cut when vmalloc() area is higher than direct-map area */
|
|
- if (VMALLOC_START > (unsigned long)__va(0)) {
|
|
- if (ent->addr > VMALLOC_START)
|
|
- goto free_out;
|
|
+ /*
|
|
+ * We've already checked virt_addr_valid so we know this address
|
|
+ * is a valid pointer, therefore we can check against it to determine
|
|
+ * if we need to trim
|
|
+ */
|
|
+ if (VMALLOC_START > ent->addr) {
|
|
if (VMALLOC_START - ent->addr < ent->size)
|
|
ent->size = VMALLOC_START - ent->addr;
|
|
}
|
|
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
|
|
index 6744bd706ecf..4cd8328e4039 100644
|
|
--- a/fs/proc/task_mmu.c
|
|
+++ b/fs/proc/task_mmu.c
|
|
@@ -1327,9 +1327,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
|
|
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
|
|
else if (is_swap_pmd(pmd)) {
|
|
swp_entry_t entry = pmd_to_swp_entry(pmd);
|
|
+ unsigned long offset = swp_offset(entry);
|
|
|
|
+ offset += (addr & ~PMD_MASK) >> PAGE_SHIFT;
|
|
frame = swp_type(entry) |
|
|
- (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
|
|
+ (offset << MAX_SWAPFILES_SHIFT);
|
|
flags |= PM_SWAP;
|
|
if (pmd_swp_soft_dirty(pmd))
|
|
flags |= PM_SOFT_DIRTY;
|
|
@@ -1349,6 +1351,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
|
|
break;
|
|
if (pm->show_pfn && (flags & PM_PRESENT))
|
|
frame++;
|
|
+ else if (flags & PM_SWAP)
|
|
+ frame += (1 << MAX_SWAPFILES_SHIFT);
|
|
}
|
|
spin_unlock(ptl);
|
|
return err;
|
|
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
|
|
index 86eb33f67618..2f4e79fe7b86 100644
|
|
--- a/include/linux/clk-provider.h
|
|
+++ b/include/linux/clk-provider.h
|
|
@@ -752,6 +752,9 @@ int __clk_mux_determine_rate(struct clk_hw *hw,
|
|
int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
|
|
int __clk_mux_determine_rate_closest(struct clk_hw *hw,
|
|
struct clk_rate_request *req);
|
|
+int clk_mux_determine_rate_flags(struct clk_hw *hw,
|
|
+ struct clk_rate_request *req,
|
|
+ unsigned long flags);
|
|
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
|
|
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
|
|
unsigned long max_rate);
|
|
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
|
|
index 0c0146e7e274..59fbe005f204 100644
|
|
--- a/include/linux/ethtool.h
|
|
+++ b/include/linux/ethtool.h
|
|
@@ -300,6 +300,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
|
|
* fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
|
|
* instead of the latter), any change to them will be overwritten
|
|
* by kernel. Returns a negative error code or zero.
|
|
+ * @get_fecparam: Get the network device Forward Error Correction parameters.
|
|
+ * @set_fecparam: Set the network device Forward Error Correction parameters.
|
|
*
|
|
* All operations are optional (i.e. the function pointer may be set
|
|
* to %NULL) and callers must take this into account. Callers must
|
|
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
|
|
index 5ade8f2a6987..550fa358893a 100644
|
|
--- a/include/linux/genhd.h
|
|
+++ b/include/linux/genhd.h
|
|
@@ -365,7 +365,9 @@ static inline void free_part_stats(struct hd_struct *part)
|
|
part_stat_add(cpu, gendiskp, field, -subnd)
|
|
|
|
void part_in_flight(struct request_queue *q, struct hd_struct *part,
|
|
- unsigned int inflight[2]);
|
|
+ unsigned int inflight[2]);
|
|
+void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
|
|
+ unsigned int inflight[2]);
|
|
void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
|
|
int rw);
|
|
void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
|
|
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
|
|
index 39f0489eb137..b81d458ad4fb 100644
|
|
--- a/include/linux/kvm_host.h
|
|
+++ b/include/linux/kvm_host.h
|
|
@@ -1044,13 +1044,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
|
|
|
|
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
|
|
|
|
-#ifdef CONFIG_S390
|
|
-#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
|
|
-#elif defined(CONFIG_ARM64)
|
|
-#define KVM_MAX_IRQ_ROUTES 4096
|
|
-#else
|
|
-#define KVM_MAX_IRQ_ROUTES 1024
|
|
-#endif
|
|
+#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
|
|
|
|
bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
|
|
int kvm_set_irq_routing(struct kvm *kvm,
|
|
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
|
|
index b5b43f94f311..01b990e4b228 100644
|
|
--- a/include/linux/mtd/map.h
|
|
+++ b/include/linux/mtd/map.h
|
|
@@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd);
|
|
({ \
|
|
int i, ret = 1; \
|
|
for (i = 0; i < map_words(map); i++) { \
|
|
- if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \
|
|
+ if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
|
|
ret = 0; \
|
|
break; \
|
|
} \
|
|
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
|
|
index b1f37a89e368..79b99d653e03 100644
|
|
--- a/include/linux/percpu-rwsem.h
|
|
+++ b/include/linux/percpu-rwsem.h
|
|
@@ -133,7 +133,7 @@ static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
|
|
lock_release(&sem->rw_sem.dep_map, 1, ip);
|
|
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
|
|
if (!read)
|
|
- sem->rw_sem.owner = NULL;
|
|
+ sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN;
|
|
#endif
|
|
}
|
|
|
|
@@ -141,6 +141,10 @@ static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
|
|
bool read, unsigned long ip)
|
|
{
|
|
lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
|
|
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
|
|
+ if (!read)
|
|
+ sem->rw_sem.owner = current;
|
|
+#endif
|
|
}
|
|
|
|
#endif
|
|
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
|
|
index dfa34d803439..c427ffaa4904 100644
|
|
--- a/include/linux/rwsem.h
|
|
+++ b/include/linux/rwsem.h
|
|
@@ -44,6 +44,12 @@ struct rw_semaphore {
|
|
#endif
|
|
};
|
|
|
|
+/*
|
|
+ * Setting bit 0 of the owner field with other non-zero bits will indicate
|
|
+ * that the rwsem is writer-owned with an unknown owner.
|
|
+ */
|
|
+#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L)
|
|
+
|
|
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
|
|
extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
|
|
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
|
|
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
|
index 2c9790b43881..e04919aa8201 100644
|
|
--- a/include/linux/sched.h
|
|
+++ b/include/linux/sched.h
|
|
@@ -113,17 +113,36 @@ struct task_group;
|
|
|
|
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
|
|
|
|
+/*
|
|
+ * Special states are those that do not use the normal wait-loop pattern. See
|
|
+ * the comment with set_special_state().
|
|
+ */
|
|
+#define is_special_task_state(state) \
|
|
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
|
|
+
|
|
#define __set_current_state(state_value) \
|
|
do { \
|
|
+ WARN_ON_ONCE(is_special_task_state(state_value));\
|
|
current->task_state_change = _THIS_IP_; \
|
|
current->state = (state_value); \
|
|
} while (0)
|
|
+
|
|
#define set_current_state(state_value) \
|
|
do { \
|
|
+ WARN_ON_ONCE(is_special_task_state(state_value));\
|
|
current->task_state_change = _THIS_IP_; \
|
|
smp_store_mb(current->state, (state_value)); \
|
|
} while (0)
|
|
|
|
+#define set_special_state(state_value) \
|
|
+ do { \
|
|
+ unsigned long flags; /* may shadow */ \
|
|
+ WARN_ON_ONCE(!is_special_task_state(state_value)); \
|
|
+		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
|
|
+ current->task_state_change = _THIS_IP_; \
|
|
+ current->state = (state_value); \
|
|
+		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
|
|
+ } while (0)
|
|
#else
|
|
/*
|
|
* set_current_state() includes a barrier so that the write of current->state
|
|
@@ -145,8 +164,8 @@ struct task_group;
|
|
*
|
|
* The above is typically ordered against the wakeup, which does:
|
|
*
|
|
- * need_sleep = false;
|
|
- * wake_up_state(p, TASK_UNINTERRUPTIBLE);
|
|
+ * need_sleep = false;
|
|
+ * wake_up_state(p, TASK_UNINTERRUPTIBLE);
|
|
*
|
|
* Where wake_up_state() (and all other wakeup primitives) imply enough
|
|
* barriers to order the store of the variable against wakeup.
|
|
@@ -155,12 +174,33 @@ struct task_group;
|
|
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
|
|
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
|
|
*
|
|
- * This is obviously fine, since they both store the exact same value.
|
|
+ * However, with slightly different timing the wakeup TASK_RUNNING store can
|
|
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
|
|
+ * a problem either because that will result in one extra go around the loop
|
|
+ * and our @cond test will save the day.
|
|
*
|
|
* Also see the comments of try_to_wake_up().
|
|
*/
|
|
-#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
|
|
-#define set_current_state(state_value) smp_store_mb(current->state, (state_value))
|
|
+#define __set_current_state(state_value) \
|
|
+ current->state = (state_value)
|
|
+
|
|
+#define set_current_state(state_value) \
|
|
+ smp_store_mb(current->state, (state_value))
|
|
+
|
|
+/*
|
|
+ * set_special_state() should be used for those states when the blocking task
|
|
+ * can not use the regular condition based wait-loop. In that case we must
|
|
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
|
|
+ * will not collide with our state change.
|
|
+ */
|
|
+#define set_special_state(state_value) \
|
|
+ do { \
|
|
+ unsigned long flags; /* may shadow */ \
|
|
+		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
|
|
+ current->state = (state_value); \
|
|
+		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
|
|
+ } while (0)
|
|
+
|
|
#endif
|
|
|
|
/* Task command name length: */
|
|
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
|
|
index 0aa4548fb492..fbf86ecd149d 100644
|
|
--- a/include/linux/sched/signal.h
|
|
+++ b/include/linux/sched/signal.h
|
|
@@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void)
|
|
{
|
|
spin_lock_irq(¤t->sighand->siglock);
|
|
if (current->jobctl & JOBCTL_STOP_DEQUEUED)
|
|
- __set_current_state(TASK_STOPPED);
|
|
+ set_special_state(TASK_STOPPED);
|
|
spin_unlock_irq(¤t->sighand->siglock);
|
|
|
|
schedule();
|
|
diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
|
|
index e8f0f852968f..c0c5c5b73dc0 100644
|
|
--- a/include/linux/stringhash.h
|
|
+++ b/include/linux/stringhash.h
|
|
@@ -50,9 +50,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash)
|
|
* losing bits). This also has the property (wanted by the dcache)
|
|
* that the msbits make a good hash table index.
|
|
*/
|
|
-static inline unsigned long end_name_hash(unsigned long hash)
|
|
+static inline unsigned int end_name_hash(unsigned long hash)
|
|
{
|
|
- return __hash_32((unsigned int)hash);
|
|
+ return hash_long(hash, 32);
|
|
}
|
|
|
|
/*
|
|
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
|
|
index cb979ad90401..b86c4c367004 100644
|
|
--- a/include/soc/bcm2835/raspberrypi-firmware.h
|
|
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
|
|
@@ -125,13 +125,13 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
|
|
static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
|
|
void *data, size_t len)
|
|
{
|
|
- return 0;
|
|
+ return -ENOSYS;
|
|
}
|
|
|
|
static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
|
|
void *data, size_t tag_size)
|
|
{
|
|
- return 0;
|
|
+ return -ENOSYS;
|
|
}
|
|
|
|
static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
|
|
diff --git a/init/main.c b/init/main.c
|
|
index 2d355a61dfc5..0d88f37febcb 100644
|
|
--- a/init/main.c
|
|
+++ b/init/main.c
|
|
@@ -974,6 +974,13 @@ __setup("rodata=", set_debug_rodata);
|
|
static void mark_readonly(void)
|
|
{
|
|
if (rodata_enabled) {
|
|
+ /*
|
|
+ * load_module() results in W+X mappings, which are cleaned up
|
|
+ * with call_rcu_sched(). Let's make sure that queued work is
|
|
+ * flushed so that we don't hit false positives looking for
|
|
+ * insecure pages which are W+X.
|
|
+ */
|
|
+ rcu_barrier_sched();
|
|
mark_rodata_ro();
|
|
rodata_test();
|
|
} else
|
|
diff --git a/kernel/kthread.c b/kernel/kthread.c
|
|
index 1c19edf82427..1ef8f3a5b072 100644
|
|
--- a/kernel/kthread.c
|
|
+++ b/kernel/kthread.c
|
|
@@ -169,12 +169,13 @@ void *kthread_probe_data(struct task_struct *task)
|
|
|
|
static void __kthread_parkme(struct kthread *self)
|
|
{
|
|
- __set_current_state(TASK_PARKED);
|
|
- while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
|
|
+ for (;;) {
|
|
+ set_current_state(TASK_PARKED);
|
|
+ if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
|
|
+ break;
|
|
if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
|
|
complete(&self->parked);
|
|
schedule();
|
|
- __set_current_state(TASK_PARKED);
|
|
}
|
|
clear_bit(KTHREAD_IS_PARKED, &self->flags);
|
|
__set_current_state(TASK_RUNNING);
|
|
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
|
|
index e795908f3607..a90336779375 100644
|
|
--- a/kernel/locking/rwsem-xadd.c
|
|
+++ b/kernel/locking/rwsem-xadd.c
|
|
@@ -352,16 +352,15 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
|
|
struct task_struct *owner;
|
|
bool ret = true;
|
|
|
|
+ BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));
|
|
+
|
|
if (need_resched())
|
|
return false;
|
|
|
|
rcu_read_lock();
|
|
owner = READ_ONCE(sem->owner);
|
|
- if (!rwsem_owner_is_writer(owner)) {
|
|
- /*
|
|
- * Don't spin if the rwsem is readers owned.
|
|
- */
|
|
- ret = !rwsem_owner_is_reader(owner);
|
|
+ if (!owner || !is_rwsem_owner_spinnable(owner)) {
|
|
+ ret = !owner; /* !owner is spinnable */
|
|
goto done;
|
|
}
|
|
|
|
@@ -382,11 +381,11 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
|
|
{
|
|
struct task_struct *owner = READ_ONCE(sem->owner);
|
|
|
|
- if (!rwsem_owner_is_writer(owner))
|
|
- goto out;
|
|
+ if (!is_rwsem_owner_spinnable(owner))
|
|
+ return false;
|
|
|
|
rcu_read_lock();
|
|
- while (sem->owner == owner) {
|
|
+ while (owner && (READ_ONCE(sem->owner) == owner)) {
|
|
/*
|
|
* Ensure we emit the owner->on_cpu, dereference _after_
|
|
* checking sem->owner still matches owner, if that fails,
|
|
@@ -408,12 +407,12 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
|
|
cpu_relax();
|
|
}
|
|
rcu_read_unlock();
|
|
-out:
|
|
+
|
|
/*
|
|
* If there is a new owner or the owner is not set, we continue
|
|
* spinning.
|
|
*/
|
|
- return !rwsem_owner_is_reader(READ_ONCE(sem->owner));
|
|
+ return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
|
|
}
|
|
|
|
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
|
|
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
|
|
index a6c76a4832b4..22bd01a7dcaa 100644
|
|
--- a/kernel/locking/rwsem.c
|
|
+++ b/kernel/locking/rwsem.c
|
|
@@ -201,5 +201,3 @@ void up_read_non_owner(struct rw_semaphore *sem)
|
|
EXPORT_SYMBOL(up_read_non_owner);
|
|
|
|
#endif
|
|
-
|
|
-
|
|
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
|
|
index a883b8f1fdc6..410ee7b9ac2c 100644
|
|
--- a/kernel/locking/rwsem.h
|
|
+++ b/kernel/locking/rwsem.h
|
|
@@ -1,20 +1,24 @@
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* The owner field of the rw_semaphore structure will be set to
|
|
- * RWSEM_READ_OWNED when a reader grabs the lock. A writer will clear
|
|
+ * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear
|
|
* the owner field when it unlocks. A reader, on the other hand, will
|
|
* not touch the owner field when it unlocks.
|
|
*
|
|
- * In essence, the owner field now has the following 3 states:
|
|
+ * In essence, the owner field now has the following 4 states:
|
|
* 1) 0
|
|
* - lock is free or the owner hasn't set the field yet
|
|
* 2) RWSEM_READER_OWNED
|
|
* - lock is currently or previously owned by readers (lock is free
|
|
* or not set by owner yet)
|
|
- * 3) Other non-zero value
|
|
- * - a writer owns the lock
|
|
+ * 3) RWSEM_ANONYMOUSLY_OWNED bit set with some other bits set as well
|
|
+ * - lock is owned by an anonymous writer, so spinning on the lock
|
|
+ * owner should be disabled.
|
|
+ * 4) Other non-zero value
|
|
+ * - a writer owns the lock and other writers can spin on the lock owner.
|
|
*/
|
|
-#define RWSEM_READER_OWNED ((struct task_struct *)1UL)
|
|
+#define RWSEM_ANONYMOUSLY_OWNED (1UL << 0)
|
|
+#define RWSEM_READER_OWNED ((struct task_struct *)RWSEM_ANONYMOUSLY_OWNED)
|
|
|
|
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
|
|
/*
|
|
@@ -45,14 +49,22 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
|
|
WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
|
|
}
|
|
|
|
-static inline bool rwsem_owner_is_writer(struct task_struct *owner)
|
|
+/*
|
|
+ * Return true if a rwsem waiter can spin on the rwsem's owner
|
|
+ * and steal the lock, i.e. the lock is not anonymously owned.
|
|
+ * N.B. !owner is considered spinnable.
|
|
+ */
|
|
+static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
|
|
{
|
|
- return owner && owner != RWSEM_READER_OWNED;
|
|
+ return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
|
|
}
|
|
|
|
-static inline bool rwsem_owner_is_reader(struct task_struct *owner)
|
|
+/*
|
|
+ * Return true if rwsem is owned by an anonymous writer or readers.
|
|
+ */
|
|
+static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
|
|
{
|
|
- return owner == RWSEM_READER_OWNED;
|
|
+ return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
|
|
}
|
|
#else
|
|
static inline void rwsem_set_owner(struct rw_semaphore *sem)
|
|
diff --git a/kernel/module.c b/kernel/module.c
|
|
index 690c0651c40f..321b0b1f87e7 100644
|
|
--- a/kernel/module.c
|
|
+++ b/kernel/module.c
|
|
@@ -3506,6 +3506,11 @@ static noinline int do_init_module(struct module *mod)
|
|
* walking this with preempt disabled. In all the failure paths, we
|
|
* call synchronize_sched(), but we don't want to slow down the success
|
|
* path, so use actual RCU here.
|
|
+ * Note that module_alloc() on most architectures creates W+X page
|
|
+ * mappings which won't be cleaned up until do_free_init() runs. Any
|
|
+ * code such as mark_rodata_ro() which depends on those mappings to
|
|
+ * be cleaned up needs to sync with the queued work - ie
|
|
+ * rcu_barrier_sched()
|
|
*/
|
|
call_rcu_sched(&freeinit->rcu, do_free_init);
|
|
mutex_unlock(&module_mutex);
|
|
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
|
index 8cf36b30a006..f287dcbe8cb2 100644
|
|
--- a/kernel/sched/core.c
|
|
+++ b/kernel/sched/core.c
|
|
@@ -3374,23 +3374,8 @@ static void __sched notrace __schedule(bool preempt)
|
|
|
|
void __noreturn do_task_dead(void)
|
|
{
|
|
- /*
|
|
- * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
|
|
- * when the following two conditions become true.
|
|
- * - There is race condition of mmap_sem (It is acquired by
|
|
- * exit_mm()), and
|
|
- * - SMI occurs before setting TASK_RUNINNG.
|
|
- * (or hypervisor of virtual machine switches to other guest)
|
|
- * As a result, we may become TASK_RUNNING after becoming TASK_DEAD
|
|
- *
|
|
- * To avoid it, we have to wait for releasing tsk->pi_lock which
|
|
- * is held by try_to_wake_up()
|
|
- */
|
|
-	raw_spin_lock_irq(&current->pi_lock);
|
|
-	raw_spin_unlock_irq(&current->pi_lock);
|
|
-
|
|
/* Causes final put_task_struct in finish_task_switch(): */
|
|
- __set_current_state(TASK_DEAD);
|
|
+ set_special_state(TASK_DEAD);
|
|
|
|
/* Tell freezer to ignore us: */
|
|
current->flags |= PF_NOFREEZE;
|
|
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
|
|
index 4ae5c1ea90e2..501f17c642ab 100644
|
|
--- a/kernel/sched/deadline.c
|
|
+++ b/kernel/sched/deadline.c
|
|
@@ -1084,7 +1084,7 @@ extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
|
|
* should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
|
|
* So, overflow is not an issue here.
|
|
*/
|
|
-u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
|
|
+static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
|
|
{
|
|
u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
|
|
u64 u_act;
|
|
@@ -2655,8 +2655,6 @@ bool dl_cpu_busy(unsigned int cpu)
|
|
#endif
|
|
|
|
#ifdef CONFIG_SCHED_DEBUG
|
|
-extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
|
|
-
|
|
void print_dl_stats(struct seq_file *m, int cpu)
|
|
{
|
|
print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
|
|
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
|
|
index 113eaeb6c0f8..bba2217652ff 100644
|
|
--- a/kernel/sched/rt.c
|
|
+++ b/kernel/sched/rt.c
|
|
@@ -2689,8 +2689,6 @@ int sched_rr_handler(struct ctl_table *table, int write,
|
|
}
|
|
|
|
#ifdef CONFIG_SCHED_DEBUG
|
|
-extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
|
|
-
|
|
void print_rt_stats(struct seq_file *m, int cpu)
|
|
{
|
|
rt_rq_iter_t iter;
|
|
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
|
|
index 307c35d33660..b29376169f3f 100644
|
|
--- a/kernel/sched/sched.h
|
|
+++ b/kernel/sched/sched.h
|
|
@@ -1969,8 +1969,9 @@ extern bool sched_debug_enabled;
|
|
extern void print_cfs_stats(struct seq_file *m, int cpu);
|
|
extern void print_rt_stats(struct seq_file *m, int cpu);
|
|
extern void print_dl_stats(struct seq_file *m, int cpu);
|
|
-extern void
|
|
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
|
|
+extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
|
|
+extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
|
|
+extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
|
|
#ifdef CONFIG_NUMA_BALANCING
|
|
extern void
|
|
show_numa_stats(struct task_struct *p, struct seq_file *m);
|
|
diff --git a/kernel/signal.c b/kernel/signal.c
|
|
index 6895f6bb98a7..4439ba9dc5d9 100644
|
|
--- a/kernel/signal.c
|
|
+++ b/kernel/signal.c
|
|
@@ -1828,14 +1828,27 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
|
|
return;
|
|
}
|
|
|
|
+ set_special_state(TASK_TRACED);
|
|
+
|
|
/*
|
|
* We're committing to trapping. TRACED should be visible before
|
|
* TRAPPING is cleared; otherwise, the tracer might fail do_wait().
|
|
* Also, transition to TRACED and updates to ->jobctl should be
|
|
* atomic with respect to siglock and should be done after the arch
|
|
* hook as siglock is released and regrabbed across it.
|
|
+ *
|
|
+ * TRACER TRACEE
|
|
+ *
|
|
+ * ptrace_attach()
|
|
+ * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
|
|
+ * do_wait()
|
|
+ * set_current_state() smp_wmb();
|
|
+ * ptrace_do_wait()
|
|
+ * wait_task_stopped()
|
|
+ * task_stopped_code()
|
|
+ * [L] task_is_traced() [S] task_clear_jobctl_trapping();
|
|
*/
|
|
- set_current_state(TASK_TRACED);
|
|
+ smp_wmb();
|
|
|
|
current->last_siginfo = info;
|
|
current->exit_code = exit_code;
|
|
@@ -2043,7 +2056,7 @@ static bool do_signal_stop(int signr)
|
|
if (task_participate_group_stop(current))
|
|
notify = CLD_STOPPED;
|
|
|
|
- __set_current_state(TASK_STOPPED);
|
|
+ set_special_state(TASK_STOPPED);
|
|
spin_unlock_irq(¤t->sighand->siglock);
|
|
|
|
/*
|
|
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
|
|
index b7591261652d..64c0291b579c 100644
|
|
--- a/kernel/stop_machine.c
|
|
+++ b/kernel/stop_machine.c
|
|
@@ -21,6 +21,7 @@
|
|
#include <linux/smpboot.h>
|
|
#include <linux/atomic.h>
|
|
#include <linux/nmi.h>
|
|
+#include <linux/sched/wake_q.h>
|
|
|
|
/*
|
|
* Structure to determine completion condition and record errors. May
|
|
@@ -65,27 +66,31 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
|
|
}
|
|
|
|
static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
|
|
- struct cpu_stop_work *work)
|
|
+ struct cpu_stop_work *work,
|
|
+ struct wake_q_head *wakeq)
|
|
{
|
|
list_add_tail(&work->list, &stopper->works);
|
|
- wake_up_process(stopper->thread);
|
|
+ wake_q_add(wakeq, stopper->thread);
|
|
}
|
|
|
|
/* queue @work to @stopper. if offline, @work is completed immediately */
|
|
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
|
|
{
|
|
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
|
|
+ DEFINE_WAKE_Q(wakeq);
|
|
unsigned long flags;
|
|
bool enabled;
|
|
|
|
spin_lock_irqsave(&stopper->lock, flags);
|
|
enabled = stopper->enabled;
|
|
if (enabled)
|
|
- __cpu_stop_queue_work(stopper, work);
|
|
+ __cpu_stop_queue_work(stopper, work, &wakeq);
|
|
else if (work->done)
|
|
cpu_stop_signal_done(work->done);
|
|
spin_unlock_irqrestore(&stopper->lock, flags);
|
|
|
|
+ wake_up_q(&wakeq);
|
|
+
|
|
return enabled;
|
|
}
|
|
|
|
@@ -229,6 +234,7 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
|
|
{
|
|
struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
|
|
struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
|
|
+ DEFINE_WAKE_Q(wakeq);
|
|
int err;
|
|
retry:
|
|
spin_lock_irq(&stopper1->lock);
|
|
@@ -252,8 +258,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
|
|
goto unlock;
|
|
|
|
err = 0;
|
|
- __cpu_stop_queue_work(stopper1, work1);
|
|
- __cpu_stop_queue_work(stopper2, work2);
|
|
+ __cpu_stop_queue_work(stopper1, work1, &wakeq);
|
|
+ __cpu_stop_queue_work(stopper2, work2, &wakeq);
|
|
unlock:
|
|
spin_unlock(&stopper2->lock);
|
|
spin_unlock_irq(&stopper1->lock);
|
|
@@ -263,6 +269,9 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
|
|
cpu_relax();
|
|
goto retry;
|
|
}
|
|
+
|
|
+ wake_up_q(&wakeq);
|
|
+
|
|
return err;
|
|
}
|
|
/**
|
|
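The stop_machine changes are a textbook use of the wake_q API: record the tasks to wake while the per-CPU stopper lock is held, and only call wake_up_q() after the lock is dropped, so the wakeup (which can take scheduler locks) never nests inside the spinlock. A minimal kernel-context sketch of the pattern; "struct dispatcher" and its fields are hypothetical stand-ins for the stopper:

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/sched/wake_q.h>
    #include <linux/spinlock.h>

    struct dispatcher {                 /* hypothetical, for illustration */
        spinlock_t          lock;
        struct list_head    works;
        struct task_struct  *thread;
    };

    static void queue_work_and_wake(struct dispatcher *d, struct list_head *work)
    {
        DEFINE_WAKE_Q(wakeq);
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        list_add_tail(work, &d->works);
        wake_q_add(&wakeq, d->thread);      /* only records the task */
        spin_unlock_irqrestore(&d->lock, flags);

        wake_up_q(&wakeq);                  /* real wakeup, lock already dropped */
    }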
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
|
|
index 546cd481a2ca..942d9342b63b 100644
|
|
--- a/mm/memcontrol.c
|
|
+++ b/mm/memcontrol.c
|
|
@@ -2205,7 +2205,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
|
|
{
|
|
struct memcg_kmem_cache_create_work *cw;
|
|
|
|
- cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
|
|
+ cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
|
|
if (!cw)
|
|
return;
|
|
|
|
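The memcontrol one-liner combines GFP_NOWAIT (the caller cannot sleep) with __GFP_NOWARN (failure is expected and handled, so the allocation-failure warning adds nothing). A hedged sketch of the same idiom for an optional, best-effort allocation; the structure and function names are illustrative:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct deferred_create {            /* illustrative only */
        struct work_struct work;
    };

    static void create_fn(struct work_struct *work)
    {
        kfree(container_of(work, struct deferred_create, work));
    }

    static void maybe_schedule_create(void)
    {
        struct deferred_create *cw;

        /* Atomic context; failure only means the optional work is skipped. */
        cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
        if (!cw)
            return;     /* drop silently, a later call can retry */

        INIT_WORK(&cw->work, create_fn);
        schedule_work(&cw->work);
    }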
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
|
|
index 125b49c166a4..f0caff3139ed 100644
|
|
--- a/net/ipv4/tcp_input.c
|
|
+++ b/net/ipv4/tcp_input.c
|
|
@@ -647,7 +647,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
|
|
sk->sk_rcvbuf = rcvbuf;
|
|
|
|
/* Make the window clamp follow along. */
|
|
- tp->window_clamp = rcvwin;
|
|
+ tp->window_clamp = tcp_win_from_space(rcvbuf);
|
|
}
|
|
}
|
|
tp->rcvq_space.space = copied;
|
|
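The tcp_input.c fix makes the window clamp track what the new buffer can actually advertise rather than the raw rcvwin estimate. A hedged, userspace approximation of tcp_win_from_space() (the real helper depends on the tcp_adv_win_scale sysctl and has changed across kernel versions):

    #include <stdio.h>

    /*
     * Approximation: a non-positive scale advertises 1/2^-scale of the
     * buffer; a positive scale reserves 1/2^scale of it for overhead.
     */
    static int win_from_space(int space, int adv_win_scale)
    {
        return adv_win_scale <= 0 ?
            space >> (-adv_win_scale) :
            space - (space >> adv_win_scale);
    }

    int main(void)
    {
        int rcvbuf = 6291456;       /* a 6 MiB receive buffer */

        /* With scale 1, half of the buffer is advertisable as window. */
        printf("window = %d\n", win_from_space(rcvbuf, 1));
        return 0;
    }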
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
|
|
index 6acb2eecd986..c764c2a77d94 100644
|
|
--- a/net/ipv6/netfilter/Kconfig
|
|
+++ b/net/ipv6/netfilter/Kconfig
|
|
@@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6
|
|
fields such as the source, destination, flowlabel, hop-limit and
|
|
the packet mark.
|
|
|
|
+if NF_NAT_IPV6
|
|
+
|
|
+config NFT_CHAIN_NAT_IPV6
|
|
+ tristate "IPv6 nf_tables nat chain support"
|
|
+ help
|
|
+ This option enables the "nat" chain for IPv6 in nf_tables. This
|
|
+ chain type is used to perform Network Address Translation (NAT)
|
|
+ packet transformations such as the source, destination address and
|
|
+ source and destination ports.
|
|
+
|
|
+config NFT_MASQ_IPV6
|
|
+ tristate "IPv6 masquerade support for nf_tables"
|
|
+ depends on NFT_MASQ
|
|
+ select NF_NAT_MASQUERADE_IPV6
|
|
+ help
|
|
+ This is the expression that provides IPv6 masquerading support for
|
|
+ nf_tables.
|
|
+
|
|
+config NFT_REDIR_IPV6
|
|
+ tristate "IPv6 redirect support for nf_tables"
|
|
+ depends on NFT_REDIR
|
|
+ select NF_NAT_REDIRECT
|
|
+ help
|
|
+ This is the expression that provides IPv6 redirect support for
|
|
+ nf_tables.
|
|
+
|
|
+endif # NF_NAT_IPV6
|
|
+
|
|
config NFT_REJECT_IPV6
|
|
select NF_REJECT_IPV6
|
|
default NFT_REJECT
|
|
@@ -99,39 +127,12 @@ config NF_NAT_IPV6
|
|
|
|
if NF_NAT_IPV6
|
|
|
|
-config NFT_CHAIN_NAT_IPV6
|
|
- depends on NF_TABLES_IPV6
|
|
- tristate "IPv6 nf_tables nat chain support"
|
|
- help
|
|
- This option enables the "nat" chain for IPv6 in nf_tables. This
|
|
- chain type is used to perform Network Address Translation (NAT)
|
|
- packet transformations such as the source, destination address and
|
|
- source and destination ports.
|
|
-
|
|
config NF_NAT_MASQUERADE_IPV6
|
|
tristate "IPv6 masquerade support"
|
|
help
|
|
This is the kernel functionality to provide NAT in the masquerade
|
|
flavour (automatic source address selection) for IPv6.
|
|
|
|
-config NFT_MASQ_IPV6
|
|
- tristate "IPv6 masquerade support for nf_tables"
|
|
- depends on NF_TABLES_IPV6
|
|
- depends on NFT_MASQ
|
|
- select NF_NAT_MASQUERADE_IPV6
|
|
- help
|
|
- This is the expression that provides IPv4 masquerading support for
|
|
- nf_tables.
|
|
-
|
|
-config NFT_REDIR_IPV6
|
|
- tristate "IPv6 redirect support for nf_tables"
|
|
- depends on NF_TABLES_IPV6
|
|
- depends on NFT_REDIR
|
|
- select NF_NAT_REDIRECT
|
|
- help
|
|
- This is the expression that provides IPv4 redirect support for
|
|
- nf_tables.
|
|
-
|
|
endif # NF_NAT_IPV6
|
|
|
|
config IP6_NF_IPTABLES
|
|
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
|
|
index bef516ec47f9..197947a07f83 100644
|
|
--- a/net/mac80211/agg-tx.c
|
|
+++ b/net/mac80211/agg-tx.c
|
|
@@ -8,6 +8,7 @@
|
|
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
|
|
* Copyright 2007-2010, Intel Corporation
|
|
* Copyright(c) 2015-2017 Intel Deutschland GmbH
|
|
+ * Copyright (C) 2018 Intel Corporation
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
@@ -987,6 +988,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
|
|
|
|
sta->ampdu_mlme.addba_req_num[tid] = 0;
|
|
|
|
+ tid_tx->timeout =
|
|
+ le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);
|
|
+
|
|
if (tid_tx->timeout) {
|
|
mod_timer(&tid_tx->session_timer,
|
|
TU_TO_EXP_TIME(tid_tx->timeout));
|
|
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
|
|
index 9115cc52ce83..052dbd4fa366 100644
|
|
--- a/net/mac80211/mlme.c
|
|
+++ b/net/mac80211/mlme.c
|
|
@@ -35,6 +35,7 @@
|
|
#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
|
|
#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
|
|
#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
|
|
+#define IEEE80211_AUTH_TIMEOUT_SAE (HZ * 2)
|
|
#define IEEE80211_AUTH_MAX_TRIES 3
|
|
#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
|
|
#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
|
|
@@ -3798,16 +3799,19 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
|
|
tx_flags);
|
|
|
|
if (tx_flags == 0) {
|
|
- auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
|
|
- auth_data->timeout_started = true;
|
|
- run_again(sdata, auth_data->timeout);
|
|
+ if (auth_data->algorithm == WLAN_AUTH_SAE)
|
|
+ auth_data->timeout = jiffies +
|
|
+ IEEE80211_AUTH_TIMEOUT_SAE;
|
|
+ else
|
|
+ auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
|
|
} else {
|
|
auth_data->timeout =
|
|
round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
|
|
- auth_data->timeout_started = true;
|
|
- run_again(sdata, auth_data->timeout);
|
|
}
|
|
|
|
+ auth_data->timeout_started = true;
|
|
+ run_again(sdata, auth_data->timeout);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -3878,8 +3882,15 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
|
|
ifmgd->status_received = false;
|
|
if (ifmgd->auth_data && ieee80211_is_auth(fc)) {
|
|
if (status_acked) {
|
|
- ifmgd->auth_data->timeout =
|
|
- jiffies + IEEE80211_AUTH_TIMEOUT_SHORT;
|
|
+ if (ifmgd->auth_data->algorithm ==
|
|
+ WLAN_AUTH_SAE)
|
|
+ ifmgd->auth_data->timeout =
|
|
+ jiffies +
|
|
+ IEEE80211_AUTH_TIMEOUT_SAE;
|
|
+ else
|
|
+ ifmgd->auth_data->timeout =
|
|
+ jiffies +
|
|
+ IEEE80211_AUTH_TIMEOUT_SHORT;
|
|
run_again(sdata, ifmgd->auth_data->timeout);
|
|
} else {
|
|
ifmgd->auth_data->timeout = jiffies - 1;
|
|
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
|
|
index 73429841f115..ccb65f18df5d 100644
|
|
--- a/net/mac80211/tx.c
|
|
+++ b/net/mac80211/tx.c
|
|
@@ -4,6 +4,7 @@
|
|
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
|
|
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
|
|
* Copyright 2013-2014 Intel Mobile Communications GmbH
|
|
+ * Copyright (C) 2018 Intel Corporation
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
@@ -1138,7 +1139,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
|
|
}
|
|
|
|
/* reset session timer */
|
|
- if (reset_agg_timer && tid_tx->timeout)
|
|
+ if (reset_agg_timer)
|
|
tid_tx->last_tx = jiffies;
|
|
|
|
return queued;
|
|
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
|
|
index 689e9c0570ba..cf30c440f7a7 100644
|
|
--- a/net/netfilter/nf_tables_api.c
|
|
+++ b/net/netfilter/nf_tables_api.c
|
|
@@ -4977,7 +4977,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
|
|
struct nft_base_chain *basechain;
|
|
|
|
if (nft_trans_chain_name(trans))
|
|
- strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
|
|
+ swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
|
|
|
|
if (!nft_is_base_chain(trans->ctx.chain))
|
|
return;
|
|
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
|
|
index 80fb6f63e768..6e721c449c4b 100644
|
|
--- a/net/rds/ib_cm.c
|
|
+++ b/net/rds/ib_cm.c
|
|
@@ -546,7 +546,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
|
|
rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
|
|
ic->i_send_cq, ic->i_recv_cq);
|
|
|
|
- return ret;
|
|
+ goto out;
|
|
|
|
sends_out:
|
|
vfree(ic->i_sends);
|
|
@@ -571,6 +571,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
|
|
ic->i_send_cq = NULL;
|
|
rds_ibdev_out:
|
|
rds_ib_remove_conn(rds_ibdev, conn);
|
|
+out:
|
|
rds_ib_dev_put(rds_ibdev);
|
|
|
|
return ret;
|
|
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
|
|
index 7c1cb08874d5..2a32f60652d8 100644
|
|
--- a/net/rxrpc/af_rxrpc.c
|
|
+++ b/net/rxrpc/af_rxrpc.c
|
|
@@ -302,7 +302,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
|
|
memset(&cp, 0, sizeof(cp));
|
|
cp.local = rx->local;
|
|
cp.key = key;
|
|
- cp.security_level = 0;
|
|
+ cp.security_level = rx->min_sec_level;
|
|
cp.exclusive = false;
|
|
cp.service_id = srx->srx_service;
|
|
call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
|
|
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
|
|
index 38b99db30e54..2af42c7d5b82 100644
|
|
--- a/net/rxrpc/local_object.c
|
|
+++ b/net/rxrpc/local_object.c
|
|
@@ -133,22 +133,49 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
|
|
}
|
|
}
|
|
|
|
- /* we want to receive ICMP errors */
|
|
- opt = 1;
|
|
- ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
|
|
- (char *) &opt, sizeof(opt));
|
|
- if (ret < 0) {
|
|
- _debug("setsockopt failed");
|
|
- goto error;
|
|
- }
|
|
+ switch (local->srx.transport.family) {
|
|
+ case AF_INET:
|
|
+ /* we want to receive ICMP errors */
|
|
+ opt = 1;
|
|
+ ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
|
|
+ (char *) &opt, sizeof(opt));
|
|
+ if (ret < 0) {
|
|
+ _debug("setsockopt failed");
|
|
+ goto error;
|
|
+ }
|
|
|
|
- /* we want to set the don't fragment bit */
|
|
- opt = IP_PMTUDISC_DO;
|
|
- ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
|
|
- (char *) &opt, sizeof(opt));
|
|
- if (ret < 0) {
|
|
- _debug("setsockopt failed");
|
|
- goto error;
|
|
+ /* we want to set the don't fragment bit */
|
|
+ opt = IP_PMTUDISC_DO;
|
|
+ ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
|
|
+ (char *) &opt, sizeof(opt));
|
|
+ if (ret < 0) {
|
|
+ _debug("setsockopt failed");
|
|
+ goto error;
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case AF_INET6:
|
|
+ /* we want to receive ICMP errors */
|
|
+ opt = 1;
|
|
+ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
|
|
+ (char *) &opt, sizeof(opt));
|
|
+ if (ret < 0) {
|
|
+ _debug("setsockopt failed");
|
|
+ goto error;
|
|
+ }
|
|
+
|
|
+ /* we want to set the don't fragment bit */
|
|
+ opt = IPV6_PMTUDISC_DO;
|
|
+ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
|
|
+ (char *) &opt, sizeof(opt));
|
|
+ if (ret < 0) {
|
|
+ _debug("setsockopt failed");
|
|
+ goto error;
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ BUG();
|
|
}
|
|
|
|
/* set the socket up */
|
|
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
|
|
index 59949d61f20d..6e749497009e 100644
|
|
--- a/net/sched/act_skbedit.c
|
|
+++ b/net/sched/act_skbedit.c
|
|
@@ -121,7 +121,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
|
|
return 0;
|
|
|
|
if (!flags) {
|
|
- tcf_idr_release(*a, bind);
|
|
+ if (exists)
|
|
+ tcf_idr_release(*a, bind);
|
|
return -EINVAL;
|
|
}
|
|
|
|
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
|
|
index f9c289e05707..654a81238406 100644
|
|
--- a/net/smc/af_smc.c
|
|
+++ b/net/smc/af_smc.c
|
|
@@ -1264,8 +1264,11 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
|
|
|
|
smc = smc_sk(sk);
|
|
lock_sock(sk);
|
|
- if (sk->sk_state != SMC_ACTIVE)
|
|
+ if (sk->sk_state != SMC_ACTIVE) {
|
|
+ release_sock(sk);
|
|
goto out;
|
|
+ }
|
|
+ release_sock(sk);
|
|
if (smc->use_fallback)
|
|
rc = kernel_sendpage(smc->clcsock, page, offset,
|
|
size, flags);
|
|
@@ -1273,7 +1276,6 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
|
|
rc = sock_no_sendpage(sock, page, offset, size, flags);
|
|
|
|
out:
|
|
- release_sock(sk);
|
|
return rc;
|
|
}
|
|
|
|
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
|
|
index 0fcfb3916dcf..254ddc2c3914 100644
|
|
--- a/net/tipc/monitor.c
|
|
+++ b/net/tipc/monitor.c
|
|
@@ -768,7 +768,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
|
|
|
|
ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
|
|
if (ret || !mon)
|
|
- return -EINVAL;
|
|
+ return 0;
|
|
|
|
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
|
|
NLM_F_MULTI, TIPC_NL_MON_GET);
|
|
diff --git a/net/tipc/node.c b/net/tipc/node.c
|
|
index f6c5743c170e..42e9bdcc4bb6 100644
|
|
--- a/net/tipc/node.c
|
|
+++ b/net/tipc/node.c
|
|
@@ -1831,6 +1831,7 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
|
|
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
|
|
{
|
|
struct net *net = genl_info_net(info);
|
|
+ struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
|
|
struct tipc_nl_msg msg;
|
|
char *name;
|
|
int err;
|
|
@@ -1838,9 +1839,19 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
|
|
msg.portid = info->snd_portid;
|
|
msg.seq = info->snd_seq;
|
|
|
|
- if (!info->attrs[TIPC_NLA_LINK_NAME])
|
|
+ if (!info->attrs[TIPC_NLA_LINK])
|
|
return -EINVAL;
|
|
- name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
|
|
+
|
|
+ err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
|
|
+ info->attrs[TIPC_NLA_LINK],
|
|
+ tipc_nl_link_policy, info->extack);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ if (!attrs[TIPC_NLA_LINK_NAME])
|
|
+ return -EINVAL;
|
|
+
|
|
+ name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
|
|
|
|
msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
|
|
if (!msg.skb)
|
|
@@ -2113,8 +2124,8 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
|
|
struct net *net = sock_net(skb->sk);
|
|
u32 prev_bearer = cb->args[0];
|
|
struct tipc_nl_msg msg;
|
|
+ int bearer_id;
|
|
int err;
|
|
- int i;
|
|
|
|
if (prev_bearer == MAX_BEARERS)
|
|
return 0;
|
|
@@ -2124,16 +2135,13 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
|
|
msg.seq = cb->nlh->nlmsg_seq;
|
|
|
|
rtnl_lock();
|
|
- for (i = prev_bearer; i < MAX_BEARERS; i++) {
|
|
- prev_bearer = i;
|
|
- err = __tipc_nl_add_monitor(net, &msg, prev_bearer);
|
|
+ for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
|
|
+ err = __tipc_nl_add_monitor(net, &msg, bearer_id);
|
|
if (err)
|
|
- goto out;
|
|
+ break;
|
|
}
|
|
-
|
|
-out:
|
|
rtnl_unlock();
|
|
- cb->args[0] = prev_bearer;
|
|
+ cb->args[0] = bearer_id;
|
|
|
|
return skb->len;
|
|
}
|
|
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
|
|
index 8c7063e1aa46..0b9b014b4bb6 100644
|
|
--- a/sound/soc/codecs/msm8916-wcd-analog.c
|
|
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
|
|
@@ -1184,7 +1184,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
|
|
return irq;
|
|
}
|
|
|
|
- ret = devm_request_irq(dev, irq, pm8916_mbhc_switch_irq_handler,
|
|
+ ret = devm_request_threaded_irq(dev, irq, NULL,
|
|
+ pm8916_mbhc_switch_irq_handler,
|
|
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
|
|
IRQF_ONESHOT,
|
|
"mbhc switch irq", priv);
|
|
@@ -1198,7 +1199,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
|
|
return irq;
|
|
}
|
|
|
|
- ret = devm_request_irq(dev, irq, mbhc_btn_press_irq_handler,
|
|
+ ret = devm_request_threaded_irq(dev, irq, NULL,
|
|
+ mbhc_btn_press_irq_handler,
|
|
IRQF_TRIGGER_RISING |
|
|
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
|
|
"mbhc btn press irq", priv);
|
|
@@ -1211,7 +1213,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
|
|
return irq;
|
|
}
|
|
|
|
- ret = devm_request_irq(dev, irq, mbhc_btn_release_irq_handler,
|
|
+ ret = devm_request_threaded_irq(dev, irq, NULL,
|
|
+ mbhc_btn_release_irq_handler,
|
|
IRQF_TRIGGER_RISING |
|
|
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
|
|
"mbhc btn release irq", priv);
|
|
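The msm8916 change replaces devm_request_irq() with devm_request_threaded_irq() and a NULL primary handler: the MBHC handlers sleep (they talk to the codec over a slow bus), so they must run in a thread, and IRQF_ONESHOT keeps the line masked until that thread returns. A hedged sketch of the pattern; the function and resource names are illustrative:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t jack_irq_thread(int irq, void *data)
    {
        /* Process context: sleeping bus access is fine here. */
        return IRQ_HANDLED;
    }

    static int example_probe(struct platform_device *pdev)
    {
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
            return irq;

        /* NULL hardirq handler + IRQF_ONESHOT => purely threaded handling. */
        return devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                         jack_irq_thread,
                                         IRQF_TRIGGER_RISING |
                                         IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                         "example jack irq", NULL);
    }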
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
|
|
index d7956ababd11..e52e68b56238 100644
|
|
--- a/sound/soc/codecs/rt5514.c
|
|
+++ b/sound/soc/codecs/rt5514.c
|
|
@@ -89,6 +89,7 @@ static const struct reg_default rt5514_reg[] = {
|
|
{RT5514_PLL3_CALIB_CTRL5, 0x40220012},
|
|
{RT5514_DELAY_BUF_CTRL1, 0x7fff006a},
|
|
{RT5514_DELAY_BUF_CTRL3, 0x00000000},
|
|
+ {RT5514_ASRC_IN_CTRL1, 0x00000003},
|
|
{RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
|
|
{RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
|
|
{RT5514_DOWNFILTER0_CTRL3, 0x10000362},
|
|
@@ -181,6 +182,7 @@ static bool rt5514_readable_register(struct device *dev, unsigned int reg)
|
|
case RT5514_PLL3_CALIB_CTRL5:
|
|
case RT5514_DELAY_BUF_CTRL1:
|
|
case RT5514_DELAY_BUF_CTRL3:
|
|
+ case RT5514_ASRC_IN_CTRL1:
|
|
case RT5514_DOWNFILTER0_CTRL1:
|
|
case RT5514_DOWNFILTER0_CTRL2:
|
|
case RT5514_DOWNFILTER0_CTRL3:
|
|
@@ -238,6 +240,7 @@ static bool rt5514_i2c_readable_register(struct device *dev,
|
|
case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5:
|
|
case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1:
|
|
case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3:
|
|
+ case RT5514_DSP_MAPPING | RT5514_ASRC_IN_CTRL1:
|
|
case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1:
|
|
case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2:
|
|
case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3:
|
|
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
|
|
index e5049fbfc4f1..30cdad2eab7f 100644
|
|
--- a/sound/soc/soc-topology.c
|
|
+++ b/sound/soc/soc-topology.c
|
|
@@ -510,7 +510,7 @@ static void remove_widget(struct snd_soc_component *comp,
|
|
*/
|
|
if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) {
|
|
/* enumerated widget mixer */
|
|
- for (i = 0; i < w->num_kcontrols; i++) {
|
|
+ for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) {
|
|
struct snd_kcontrol *kcontrol = w->kcontrols[i];
|
|
struct soc_enum *se =
|
|
(struct soc_enum *)kcontrol->private_value;
|
|
@@ -528,7 +528,7 @@ static void remove_widget(struct snd_soc_component *comp,
|
|
kfree(w->kcontrol_news);
|
|
} else {
|
|
/* volume mixer or bytes controls */
|
|
- for (i = 0; i < w->num_kcontrols; i++) {
|
|
+ for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) {
|
|
struct snd_kcontrol *kcontrol = w->kcontrols[i];
|
|
|
|
if (dobj->widget.kcontrol_type
|
|
@@ -2571,7 +2571,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
|
|
|
|
/* match index */
|
|
if (dobj->index != index &&
|
|
- dobj->index != SND_SOC_TPLG_INDEX_ALL)
|
|
+ index != SND_SOC_TPLG_INDEX_ALL)
|
|
continue;
|
|
|
|
switch (dobj->type) {
|
|
diff --git a/tools/net/bpf_dbg.c b/tools/net/bpf_dbg.c
|
|
index 4f254bcc4423..61b9aa5d6415 100644
|
|
--- a/tools/net/bpf_dbg.c
|
|
+++ b/tools/net/bpf_dbg.c
|
|
@@ -1063,7 +1063,7 @@ static int cmd_load_pcap(char *file)
|
|
|
|
static int cmd_load(char *arg)
|
|
{
|
|
- char *subcmd, *cont, *tmp = strdup(arg);
|
|
+ char *subcmd, *cont = NULL, *tmp = strdup(arg);
|
|
int ret = CMD_OK;
|
|
|
|
subcmd = strtok_r(tmp, " ", &cont);
|
|
@@ -1073,7 +1073,10 @@ static int cmd_load(char *arg)
|
|
bpf_reset();
|
|
bpf_reset_breakpoints();
|
|
|
|
- ret = cmd_load_bpf(cont);
|
|
+ if (!cont)
|
|
+ ret = CMD_ERR;
|
|
+ else
|
|
+ ret = cmd_load_bpf(cont);
|
|
} else if (matches(subcmd, "pcap") == 0) {
|
|
ret = cmd_load_pcap(cont);
|
|
} else {
|
|
diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
|
|
index b3e32b010ab1..c2c01f84df75 100644
|
|
--- a/tools/objtool/arch/x86/include/asm/insn.h
|
|
+++ b/tools/objtool/arch/x86/include/asm/insn.h
|
|
@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
|
|
return insn_offset_displacement(insn) + insn->displacement.nbytes;
|
|
}
|
|
|
|
+#define POP_SS_OPCODE 0x1f
|
|
+#define MOV_SREG_OPCODE 0x8e
|
|
+
|
|
+/*
|
|
+ * Intel SDM Vol.3A 6.8.3 states;
|
|
+ * "Any single-step trap that would be delivered following the MOV to SS
|
|
+ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
|
|
+ * suppressed."
|
|
+ * This function returns true if @insn is MOV SS or POP SS. On these
|
|
+ * instructions, single stepping is suppressed.
|
|
+ */
|
|
+static inline int insn_masking_exception(struct insn *insn)
|
|
+{
|
|
+ return insn->opcode.bytes[0] == POP_SS_OPCODE ||
|
|
+ (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
|
|
+ X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
|
|
+}
|
|
+
|
|
#endif /* _ASM_X86_INSN_H */
|
|
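A hedged example of how a consumer of the new insn_masking_exception() helper might use it (this is not code from the patch): anything that relies on the single-step #DB firing right after the target instruction should refuse to step over MOV SS / POP SS, because that trap is suppressed.

    #include <linux/types.h>
    #include <asm/insn.h>

    /* Sketch: true if a single-step trap after @addr cannot be relied on. */
    static bool must_not_single_step(const void *addr, int is_64bit)
    {
        struct insn insn;

        insn_init(&insn, addr, MAX_INSN_SIZE, is_64bit);
        insn_get_opcode(&insn);

        /* The #DB following MOV to SS / POP to SS is suppressed by the CPU. */
        return insn_masking_exception(&insn);
    }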
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
|
|
index 4e8dd5fd45fd..ec40e47aa198 100644
|
|
--- a/tools/perf/util/symbol.c
|
|
+++ b/tools/perf/util/symbol.c
|
|
@@ -2093,16 +2093,14 @@ static bool symbol__read_kptr_restrict(void)
|
|
|
|
int symbol__annotation_init(void)
|
|
{
|
|
+ if (symbol_conf.init_annotation)
|
|
+ return 0;
|
|
+
|
|
if (symbol_conf.initialized) {
|
|
pr_err("Annotation needs to be init before symbol__init()\n");
|
|
return -1;
|
|
}
|
|
|
|
- if (symbol_conf.init_annotation) {
|
|
- pr_warning("Annotation being initialized multiple times\n");
|
|
- return 0;
|
|
- }
|
|
-
|
|
symbol_conf.priv_size += sizeof(struct annotation);
|
|
symbol_conf.init_annotation = true;
|
|
return 0;
|
|
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
|
|
new file mode 100644
|
|
index 000000000000..c193dce611a2
|
|
--- /dev/null
|
|
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
|
|
@@ -0,0 +1,44 @@
|
|
+#!/bin/sh
|
|
+# description: event trigger - test multiple actions on hist trigger
|
|
+
|
|
+
|
|
+do_reset() {
|
|
+ reset_trigger
|
|
+ echo > set_event
|
|
+ clear_trace
|
|
+}
|
|
+
|
|
+fail() { #msg
|
|
+ do_reset
|
|
+ echo $1
|
|
+ exit_fail
|
|
+}
|
|
+
|
|
+if [ ! -f set_event ]; then
|
|
+ echo "event tracing is not supported"
|
|
+ exit_unsupported
|
|
+fi
|
|
+
|
|
+if [ ! -f synthetic_events ]; then
|
|
+ echo "synthetic event is not supported"
|
|
+ exit_unsupported
|
|
+fi
|
|
+
|
|
+clear_synthetic_events
|
|
+reset_tracer
|
|
+do_reset
|
|
+
|
|
+echo "Test multiple actions on hist trigger"
|
|
+echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
|
|
+TRIGGER1=events/sched/sched_wakeup/trigger
|
|
+TRIGGER2=events/sched/sched_switch/trigger
|
|
+
|
|
+echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="cyclictest"' > $TRIGGER1
|
|
+echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0 if next_comm=="cyclictest"' >> $TRIGGER2
|
|
+echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,next_pid) if next_comm=="cyclictest"' >> $TRIGGER2
|
|
+echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,prev_pid) if next_comm=="cyclictest"' >> $TRIGGER2
|
|
+echo 'hist:keys=next_pid if next_comm=="cyclictest"' >> $TRIGGER2
|
|
+
|
|
+do_reset
|
|
+
|
|
+exit 0
|
|
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
|
|
index aa6e2d7f6a1f..903980921d9e 100644
|
|
--- a/tools/testing/selftests/x86/Makefile
|
|
+++ b/tools/testing/selftests/x86/Makefile
|
|
@@ -11,7 +11,7 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
|
|
|
|
TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
|
|
check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \
|
|
- protection_keys test_vdso test_vsyscall
|
|
+ protection_keys test_vdso test_vsyscall mov_ss_trap
|
|
TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
|
|
test_FCMOV test_FCOMI test_FISTTP \
|
|
vdso_restorer
|
|
diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c
|
|
new file mode 100644
|
|
index 000000000000..3c3a022654f3
|
|
--- /dev/null
|
|
+++ b/tools/testing/selftests/x86/mov_ss_trap.c
|
|
@@ -0,0 +1,285 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+/*
|
|
+ * mov_ss_trap.c: Exercise the bizarre side effects of a watchpoint on MOV SS
|
|
+ *
|
|
+ * This does MOV SS from a watchpointed address followed by various
|
|
+ * types of kernel entries. A MOV SS that hits a watchpoint will queue
|
|
+ * up a #DB trap but will not actually deliver that trap. The trap
|
|
+ * will be delivered after the next instruction instead. The CPU's logic
|
|
+ * seems to be:
|
|
+ *
|
|
+ * - Any fault: drop the pending #DB trap.
|
|
+ * - INT $N, INT3, INTO, SYSCALL, SYSENTER: enter the kernel and then
|
|
+ * deliver #DB.
|
|
+ * - ICEBP: enter the kernel but do not deliver the watchpoint trap
|
|
+ * - breakpoint: only one #DB is delivered (phew!)
|
|
+ *
|
|
+ * There are plenty of ways for a kernel to handle this incorrectly. This
|
|
+ * test tries to exercise all the cases.
|
|
+ *
|
|
+ * This should mostly cover CVE-2018-1087 and CVE-2018-8897.
|
|
+ */
|
|
+#define _GNU_SOURCE
|
|
+
|
|
+#include <stdlib.h>
|
|
+#include <sys/ptrace.h>
|
|
+#include <sys/types.h>
|
|
+#include <sys/wait.h>
|
|
+#include <sys/user.h>
|
|
+#include <sys/syscall.h>
|
|
+#include <unistd.h>
|
|
+#include <errno.h>
|
|
+#include <stddef.h>
|
|
+#include <stdio.h>
|
|
+#include <err.h>
|
|
+#include <string.h>
|
|
+#include <setjmp.h>
|
|
+#include <sys/prctl.h>
|
|
+
|
|
+#define X86_EFLAGS_RF (1UL << 16)
|
|
+
|
|
+#if __x86_64__
|
|
+# define REG_IP REG_RIP
|
|
+#else
|
|
+# define REG_IP REG_EIP
|
|
+#endif
|
|
+
|
|
+unsigned short ss;
|
|
+extern unsigned char breakpoint_insn[];
|
|
+sigjmp_buf jmpbuf;
|
|
+static unsigned char altstack_data[SIGSTKSZ];
|
|
+
|
|
+static void enable_watchpoint(void)
|
|
+{
|
|
+ pid_t parent = getpid();
|
|
+ int status;
|
|
+
|
|
+ pid_t child = fork();
|
|
+ if (child < 0)
|
|
+ err(1, "fork");
|
|
+
|
|
+ if (child) {
|
|
+ if (waitpid(child, &status, 0) != child)
|
|
+ err(1, "waitpid for child");
|
|
+ } else {
|
|
+ unsigned long dr0, dr1, dr7;
|
|
+
|
|
+ dr0 = (unsigned long)&ss;
|
|
+ dr1 = (unsigned long)breakpoint_insn;
|
|
+ dr7 = ((1UL << 1) | /* G0 */
|
|
+ (3UL << 16) | /* RW0 = read or write */
|
|
+ (1UL << 18) | /* LEN0 = 2 bytes */
|
|
+ (1UL << 3)); /* G1, RW1 = insn */
|
|
+
|
|
+ if (ptrace(PTRACE_ATTACH, parent, NULL, NULL) != 0)
|
|
+ err(1, "PTRACE_ATTACH");
|
|
+
|
|
+ if (waitpid(parent, &status, 0) != parent)
|
|
+ err(1, "waitpid for child");
|
|
+
|
|
+ if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[0]), dr0) != 0)
|
|
+ err(1, "PTRACE_POKEUSER DR0");
|
|
+
|
|
+ if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[1]), dr1) != 0)
|
|
+ err(1, "PTRACE_POKEUSER DR1");
|
|
+
|
|
+ if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[7]), dr7) != 0)
|
|
+ err(1, "PTRACE_POKEUSER DR7");
|
|
+
|
|
+ printf("\tDR0 = %lx, DR1 = %lx, DR7 = %lx\n", dr0, dr1, dr7);
|
|
+
|
|
+ if (ptrace(PTRACE_DETACH, parent, NULL, NULL) != 0)
|
|
+ err(1, "PTRACE_DETACH");
|
|
+
|
|
+ exit(0);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
|
|
+ int flags)
|
|
+{
|
|
+ struct sigaction sa;
|
|
+ memset(&sa, 0, sizeof(sa));
|
|
+ sa.sa_sigaction = handler;
|
|
+ sa.sa_flags = SA_SIGINFO | flags;
|
|
+ sigemptyset(&sa.sa_mask);
|
|
+ if (sigaction(sig, &sa, 0))
|
|
+ err(1, "sigaction");
|
|
+}
|
|
+
|
|
+static char const * const signames[] = {
|
|
+ [SIGSEGV] = "SIGSEGV",
|
|
+ [SIGBUS] = "SIGBUS",
|
|
+ [SIGTRAP] = "SIGTRAP",
|
|
+ [SIGILL] = "SIGILL",
|
|
+};
|
|
+
|
|
+static void sigtrap(int sig, siginfo_t *si, void *ctx_void)
|
|
+{
|
|
+ ucontext_t *ctx = ctx_void;
|
|
+
|
|
+ printf("\tGot SIGTRAP with RIP=%lx, EFLAGS.RF=%d\n",
|
|
+ (unsigned long)ctx->uc_mcontext.gregs[REG_IP],
|
|
+ !!(ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_RF));
|
|
+}
|
|
+
|
|
+static void handle_and_return(int sig, siginfo_t *si, void *ctx_void)
|
|
+{
|
|
+ ucontext_t *ctx = ctx_void;
|
|
+
|
|
+ printf("\tGot %s with RIP=%lx\n", signames[sig],
|
|
+ (unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
|
|
+}
|
|
+
|
|
+static void handle_and_longjmp(int sig, siginfo_t *si, void *ctx_void)
|
|
+{
|
|
+ ucontext_t *ctx = ctx_void;
|
|
+
|
|
+ printf("\tGot %s with RIP=%lx\n", signames[sig],
|
|
+ (unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
|
|
+
|
|
+ siglongjmp(jmpbuf, 1);
|
|
+}
|
|
+
|
|
+int main()
|
|
+{
|
|
+ unsigned long nr;
|
|
+
|
|
+ asm volatile ("mov %%ss, %[ss]" : [ss] "=m" (ss));
|
|
+ printf("\tSS = 0x%hx, &SS = 0x%p\n", ss, &ss);
|
|
+
|
|
+ if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) == 0)
|
|
+ printf("\tPR_SET_PTRACER_ANY succeeded\n");
|
|
+
|
|
+ printf("\tSet up a watchpoint\n");
|
|
+ sethandler(SIGTRAP, sigtrap, 0);
|
|
+ enable_watchpoint();
|
|
+
|
|
+ printf("[RUN]\tRead from watched memory (should get SIGTRAP)\n");
|
|
+ asm volatile ("mov %[ss], %[tmp]" : [tmp] "=r" (nr) : [ss] "m" (ss));
|
|
+
|
|
+ printf("[RUN]\tMOV SS; INT3\n");
|
|
+ asm volatile ("mov %[ss], %%ss; int3" :: [ss] "m" (ss));
|
|
+
|
|
+ printf("[RUN]\tMOV SS; INT 3\n");
|
|
+ asm volatile ("mov %[ss], %%ss; .byte 0xcd, 0x3" :: [ss] "m" (ss));
|
|
+
|
|
+ printf("[RUN]\tMOV SS; CS CS INT3\n");
|
|
+ asm volatile ("mov %[ss], %%ss; .byte 0x2e, 0x2e; int3" :: [ss] "m" (ss));
|
|
+
|
|
+ printf("[RUN]\tMOV SS; CSx14 INT3\n");
|
|
+ asm volatile ("mov %[ss], %%ss; .fill 14,1,0x2e; int3" :: [ss] "m" (ss));
|
|
+
|
|
+ printf("[RUN]\tMOV SS; INT 4\n");
|
|
+ sethandler(SIGSEGV, handle_and_return, SA_RESETHAND);
|
|
+ asm volatile ("mov %[ss], %%ss; int $4" :: [ss] "m" (ss));
|
|
+
|
|
+#ifdef __i386__
|
|
+ printf("[RUN]\tMOV SS; INTO\n");
|
|
+ sethandler(SIGSEGV, handle_and_return, SA_RESETHAND);
|
|
+ nr = -1;
|
|
+ asm volatile ("add $1, %[tmp]; mov %[ss], %%ss; into"
|
|
+ : [tmp] "+r" (nr) : [ss] "m" (ss));
|
|
+#endif
|
|
+
|
|
+ if (sigsetjmp(jmpbuf, 1) == 0) {
|
|
+ printf("[RUN]\tMOV SS; ICEBP\n");
|
|
+
|
|
+ /* Some emulators (e.g. QEMU TCG) don't emulate ICEBP. */
|
|
+ sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND);
|
|
+
|
|
+ asm volatile ("mov %[ss], %%ss; .byte 0xf1" :: [ss] "m" (ss));
|
|
+ }
|
|
+
|
|
+ if (sigsetjmp(jmpbuf, 1) == 0) {
|
|
+ printf("[RUN]\tMOV SS; CLI\n");
|
|
+ sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
|
|
+ asm volatile ("mov %[ss], %%ss; cli" :: [ss] "m" (ss));
|
|
+ }
|
|
+
|
|
+ if (sigsetjmp(jmpbuf, 1) == 0) {
|
|
+ printf("[RUN]\tMOV SS; #PF\n");
|
|
+ sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
|
|
+ asm volatile ("mov %[ss], %%ss; mov (-1), %[tmp]"
|
|
+ : [tmp] "=r" (nr) : [ss] "m" (ss));
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * INT $1: if #DB has DPL=3 and there isn't special handling,
|
|
+ * then the kernel will die.
|
|
+ */
|
|
+ if (sigsetjmp(jmpbuf, 1) == 0) {
|
|
+ printf("[RUN]\tMOV SS; INT 1\n");
|
|
+ sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
|
|
+ asm volatile ("mov %[ss], %%ss; int $1" :: [ss] "m" (ss));
|
|
+ }
|
|
+
|
|
+#ifdef __x86_64__
|
|
+ /*
|
|
+ * In principle, we should test 32-bit SYSCALL as well, but
|
|
+ * the calling convention is so unpredictable that it's
|
|
+ * not obviously worth the effort.
|
|
+ */
|
|
+ if (sigsetjmp(jmpbuf, 1) == 0) {
|
|
+ printf("[RUN]\tMOV SS; SYSCALL\n");
|
|
+ sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND);
|
|
+ nr = SYS_getpid;
|
|
+ /*
|
|
+ * Toggle the high bit of RSP to make it noncanonical to
|
|
+ * strengthen this test on non-SMAP systems.
|
|
+ */
|
|
+ asm volatile ("btc $63, %%rsp\n\t"
|
|
+ "mov %[ss], %%ss; syscall\n\t"
|
|
+ "btc $63, %%rsp"
|
|
+ : "+a" (nr) : [ss] "m" (ss)
|
|
+ : "rcx"
|
|
+#ifdef __x86_64__
|
|
+ , "r11"
|
|
+#endif
|
|
+ );
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ printf("[RUN]\tMOV SS; breakpointed NOP\n");
|
|
+ asm volatile ("mov %[ss], %%ss; breakpoint_insn: nop" :: [ss] "m" (ss));
|
|
+
|
|
+ /*
|
|
+ * Invoking SYSENTER directly breaks all the rules. Just handle
|
|
+ * the SIGSEGV.
|
|
+ */
|
|
+ if (sigsetjmp(jmpbuf, 1) == 0) {
|
|
+ printf("[RUN]\tMOV SS; SYSENTER\n");
|
|
+ stack_t stack = {
|
|
+ .ss_sp = altstack_data,
|
|
+ .ss_size = SIGSTKSZ,
|
|
+ };
|
|
+ if (sigaltstack(&stack, NULL) != 0)
|
|
+ err(1, "sigaltstack");
|
|
+ sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK);
|
|
+ nr = SYS_getpid;
|
|
+ asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr)
|
|
+ : [ss] "m" (ss) : "flags", "rcx"
|
|
+#ifdef __x86_64__
|
|
+ , "r11"
|
|
+#endif
|
|
+ );
|
|
+
|
|
+ /* We're unreachable here. SYSENTER forgets RIP. */
|
|
+ }
|
|
+
|
|
+ if (sigsetjmp(jmpbuf, 1) == 0) {
|
|
+ printf("[RUN]\tMOV SS; INT $0x80\n");
|
|
+ sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
|
|
+ nr = 20; /* compat getpid */
|
|
+ asm volatile ("mov %[ss], %%ss; int $0x80"
|
|
+ : "+a" (nr) : [ss] "m" (ss)
|
|
+ : "flags"
|
|
+#ifdef __x86_64__
|
|
+ , "r8", "r9", "r10", "r11"
|
|
+#endif
|
|
+ );
|
|
+ }
|
|
+
|
|
+ printf("[OK]\tI aten't dead\n");
|
|
+ return 0;
|
|
+}
|
|
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
|
|
index 9c0325e1ea68..50f7e9272481 100644
|
|
--- a/tools/testing/selftests/x86/mpx-mini-test.c
|
|
+++ b/tools/testing/selftests/x86/mpx-mini-test.c
|
|
@@ -368,6 +368,11 @@ static int expected_bnd_index = -1;
|
|
uint64_t shadow_plb[NR_MPX_BOUNDS_REGISTERS][2]; /* shadow MPX bound registers */
|
|
unsigned long shadow_map[NR_MPX_BOUNDS_REGISTERS];
|
|
|
|
+/* Failed address bound checks: */
|
|
+#ifndef SEGV_BNDERR
|
|
+# define SEGV_BNDERR 3
|
|
+#endif
|
|
+
|
|
/*
|
|
* The kernel is supposed to provide some information about the bounds
|
|
* exception in the siginfo. It should match what we have in the bounds
|
|
@@ -419,8 +424,6 @@ void handler(int signum, siginfo_t *si, void *vucontext)
|
|
br_count++;
|
|
dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count);
|
|
|
|
-#define SEGV_BNDERR 3 /* failed address bound checks */
|
|
-
|
|
dprintf2("Saw a #BR! status 0x%jx at %016lx br_reason: %jx\n",
|
|
status, ip, br_reason);
|
|
dprintf2("si_signo: %d\n", si->si_signo);
|
|
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
|
|
index f15aa5a76fe3..460b4bdf4c1e 100644
|
|
--- a/tools/testing/selftests/x86/protection_keys.c
|
|
+++ b/tools/testing/selftests/x86/protection_keys.c
|
|
@@ -72,10 +72,9 @@ extern void abort_hooks(void);
|
|
test_nr, iteration_nr); \
|
|
dprintf0("errno at assert: %d", errno); \
|
|
abort_hooks(); \
|
|
- assert(condition); \
|
|
+ exit(__LINE__); \
|
|
} \
|
|
} while (0)
|
|
-#define raw_assert(cond) assert(cond)
|
|
|
|
void cat_into_file(char *str, char *file)
|
|
{
|
|
@@ -87,12 +86,17 @@ void cat_into_file(char *str, char *file)
|
|
* these need to be raw because they are called under
|
|
* pkey_assert()
|
|
*/
|
|
- raw_assert(fd >= 0);
|
|
+ if (fd < 0) {
|
|
+ fprintf(stderr, "error opening '%s'\n", str);
|
|
+ perror("error: ");
|
|
+ exit(__LINE__);
|
|
+ }
|
|
+
|
|
ret = write(fd, str, strlen(str));
|
|
if (ret != strlen(str)) {
|
|
perror("write to file failed");
|
|
fprintf(stderr, "filename: '%s' str: '%s'\n", file, str);
|
|
- raw_assert(0);
|
|
+ exit(__LINE__);
|
|
}
|
|
close(fd);
|
|
}
|
|
@@ -191,26 +195,30 @@ void lots_o_noops_around_write(int *write_to_me)
|
|
#ifdef __i386__
|
|
|
|
#ifndef SYS_mprotect_key
|
|
-# define SYS_mprotect_key 380
|
|
+# define SYS_mprotect_key 380
|
|
#endif
|
|
+
|
|
#ifndef SYS_pkey_alloc
|
|
-# define SYS_pkey_alloc 381
|
|
-# define SYS_pkey_free 382
|
|
+# define SYS_pkey_alloc 381
|
|
+# define SYS_pkey_free 382
|
|
#endif
|
|
-#define REG_IP_IDX REG_EIP
|
|
-#define si_pkey_offset 0x14
|
|
+
|
|
+#define REG_IP_IDX REG_EIP
|
|
+#define si_pkey_offset 0x14
|
|
|
|
#else
|
|
|
|
#ifndef SYS_mprotect_key
|
|
-# define SYS_mprotect_key 329
|
|
+# define SYS_mprotect_key 329
|
|
#endif
|
|
+
|
|
#ifndef SYS_pkey_alloc
|
|
-# define SYS_pkey_alloc 330
|
|
-# define SYS_pkey_free 331
|
|
+# define SYS_pkey_alloc 330
|
|
+# define SYS_pkey_free 331
|
|
#endif
|
|
-#define REG_IP_IDX REG_RIP
|
|
-#define si_pkey_offset 0x20
|
|
+
|
|
+#define REG_IP_IDX REG_RIP
|
|
+#define si_pkey_offset 0x20
|
|
|
|
#endif
|
|
|
|
@@ -225,8 +233,14 @@ void dump_mem(void *dumpme, int len_bytes)
|
|
}
|
|
}
|
|
|
|
-#define SEGV_BNDERR 3 /* failed address bound checks */
|
|
-#define SEGV_PKUERR 4
|
|
+/* Failed address bound checks: */
|
|
+#ifndef SEGV_BNDERR
|
|
+# define SEGV_BNDERR 3
|
|
+#endif
|
|
+
|
|
+#ifndef SEGV_PKUERR
|
|
+# define SEGV_PKUERR 4
|
|
+#endif
|
|
|
|
static char *si_code_str(int si_code)
|
|
{
|
|
@@ -289,13 +303,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
|
|
dump_mem(pkru_ptr - 128, 256);
|
|
pkey_assert(*pkru_ptr);
|
|
|
|
- si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
|
|
- dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
|
|
- dump_mem(si_pkey_ptr - 8, 24);
|
|
- siginfo_pkey = *si_pkey_ptr;
|
|
- pkey_assert(siginfo_pkey < NR_PKEYS);
|
|
- last_si_pkey = siginfo_pkey;
|
|
-
|
|
if ((si->si_code == SEGV_MAPERR) ||
|
|
(si->si_code == SEGV_ACCERR) ||
|
|
(si->si_code == SEGV_BNDERR)) {
|
|
@@ -303,6 +310,13 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
|
|
exit(4);
|
|
}
|
|
|
|
+ si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
|
|
+ dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
|
|
+ dump_mem((u8 *)si_pkey_ptr - 8, 24);
|
|
+ siginfo_pkey = *si_pkey_ptr;
|
|
+ pkey_assert(siginfo_pkey < NR_PKEYS);
|
|
+ last_si_pkey = siginfo_pkey;
|
|
+
|
|
dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
|
|
/* need __rdpkru() version so we do not do shadow_pkru checking */
|
|
dprintf1("signal pkru from pkru: %08x\n", __rdpkru());
|
|
@@ -311,22 +325,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
|
|
dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n");
|
|
pkru_faults++;
|
|
dprintf1("<<<<==================================================\n");
|
|
- return;
|
|
- if (trapno == 14) {
|
|
- fprintf(stderr,
|
|
- "ERROR: In signal handler, page fault, trapno = %d, ip = %016lx\n",
|
|
- trapno, ip);
|
|
- fprintf(stderr, "si_addr %p\n", si->si_addr);
|
|
- fprintf(stderr, "REG_ERR: %lx\n",
|
|
- (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
|
|
- exit(1);
|
|
- } else {
|
|
- fprintf(stderr, "unexpected trap %d! at 0x%lx\n", trapno, ip);
|
|
- fprintf(stderr, "si_addr %p\n", si->si_addr);
|
|
- fprintf(stderr, "REG_ERR: %lx\n",
|
|
- (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
|
|
- exit(2);
|
|
- }
|
|
dprint_in_signal = 0;
|
|
}
|
|
|
|
@@ -393,10 +391,15 @@ pid_t fork_lazy_child(void)
|
|
return forkret;
|
|
}
|
|
|
|
-#define PKEY_DISABLE_ACCESS 0x1
|
|
-#define PKEY_DISABLE_WRITE 0x2
|
|
+#ifndef PKEY_DISABLE_ACCESS
|
|
+# define PKEY_DISABLE_ACCESS 0x1
|
|
+#endif
|
|
+
|
|
+#ifndef PKEY_DISABLE_WRITE
|
|
+# define PKEY_DISABLE_WRITE 0x2
|
|
+#endif
|
|
|
|
-u32 pkey_get(int pkey, unsigned long flags)
|
|
+static u32 hw_pkey_get(int pkey, unsigned long flags)
|
|
{
|
|
u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
|
|
u32 pkru = __rdpkru();
|
|
@@ -418,7 +421,7 @@ u32 pkey_get(int pkey, unsigned long flags)
|
|
return masked_pkru;
|
|
}
|
|
|
|
-int pkey_set(int pkey, unsigned long rights, unsigned long flags)
|
|
+static int hw_pkey_set(int pkey, unsigned long rights, unsigned long flags)
|
|
{
|
|
u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
|
|
u32 old_pkru = __rdpkru();
|
|
@@ -452,15 +455,15 @@ void pkey_disable_set(int pkey, int flags)
|
|
pkey, flags);
|
|
pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
|
|
|
|
- pkey_rights = pkey_get(pkey, syscall_flags);
|
|
+ pkey_rights = hw_pkey_get(pkey, syscall_flags);
|
|
|
|
- dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
|
|
+ dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
|
|
pkey, pkey, pkey_rights);
|
|
pkey_assert(pkey_rights >= 0);
|
|
|
|
pkey_rights |= flags;
|
|
|
|
- ret = pkey_set(pkey, pkey_rights, syscall_flags);
|
|
+ ret = hw_pkey_set(pkey, pkey_rights, syscall_flags);
|
|
assert(!ret);
|
|
/*pkru and flags have the same format */
|
|
shadow_pkru |= flags << (pkey * 2);
|
|
@@ -468,8 +471,8 @@ void pkey_disable_set(int pkey, int flags)
|
|
|
|
pkey_assert(ret >= 0);
|
|
|
|
- pkey_rights = pkey_get(pkey, syscall_flags);
|
|
- dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
|
|
+ pkey_rights = hw_pkey_get(pkey, syscall_flags);
|
|
+ dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
|
|
pkey, pkey, pkey_rights);
|
|
|
|
dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru());
|
|
@@ -483,24 +486,24 @@ void pkey_disable_clear(int pkey, int flags)
|
|
{
|
|
unsigned long syscall_flags = 0;
|
|
int ret;
|
|
- int pkey_rights = pkey_get(pkey, syscall_flags);
|
|
+ int pkey_rights = hw_pkey_get(pkey, syscall_flags);
|
|
u32 orig_pkru = rdpkru();
|
|
|
|
pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
|
|
|
|
- dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
|
|
+ dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
|
|
pkey, pkey, pkey_rights);
|
|
pkey_assert(pkey_rights >= 0);
|
|
|
|
pkey_rights |= flags;
|
|
|
|
- ret = pkey_set(pkey, pkey_rights, 0);
|
|
+ ret = hw_pkey_set(pkey, pkey_rights, 0);
|
|
/* pkru and flags have the same format */
|
|
shadow_pkru &= ~(flags << (pkey * 2));
|
|
pkey_assert(ret >= 0);
|
|
|
|
- pkey_rights = pkey_get(pkey, syscall_flags);
|
|
- dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
|
|
+ pkey_rights = hw_pkey_get(pkey, syscall_flags);
|
|
+ dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
|
|
pkey, pkey, pkey_rights);
|
|
|
|
dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru());
|
|
@@ -674,10 +677,12 @@ int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
|
|
struct pkey_malloc_record {
|
|
void *ptr;
|
|
long size;
|
|
+ int prot;
|
|
};
|
|
struct pkey_malloc_record *pkey_malloc_records;
|
|
+struct pkey_malloc_record *pkey_last_malloc_record;
|
|
long nr_pkey_malloc_records;
|
|
-void record_pkey_malloc(void *ptr, long size)
|
|
+void record_pkey_malloc(void *ptr, long size, int prot)
|
|
{
|
|
long i;
|
|
struct pkey_malloc_record *rec = NULL;
|
|
@@ -709,6 +714,8 @@ void record_pkey_malloc(void *ptr, long size)
|
|
(int)(rec - pkey_malloc_records), rec, ptr, size);
|
|
rec->ptr = ptr;
|
|
rec->size = size;
|
|
+ rec->prot = prot;
|
|
+ pkey_last_malloc_record = rec;
|
|
nr_pkey_malloc_records++;
|
|
}
|
|
|
|
@@ -753,7 +760,7 @@ void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey)
|
|
pkey_assert(ptr != (void *)-1);
|
|
ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey);
|
|
pkey_assert(!ret);
|
|
- record_pkey_malloc(ptr, size);
|
|
+ record_pkey_malloc(ptr, size, prot);
|
|
rdpkru();
|
|
|
|
dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr);
|
|
@@ -774,7 +781,7 @@ void *malloc_pkey_anon_huge(long size, int prot, u16 pkey)
|
|
size = ALIGN_UP(size, HPAGE_SIZE * 2);
|
|
ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
|
|
pkey_assert(ptr != (void *)-1);
|
|
- record_pkey_malloc(ptr, size);
|
|
+ record_pkey_malloc(ptr, size, prot);
|
|
mprotect_pkey(ptr, size, prot, pkey);
|
|
|
|
dprintf1("unaligned ptr: %p\n", ptr);
|
|
@@ -847,7 +854,7 @@ void *malloc_pkey_hugetlb(long size, int prot, u16 pkey)
|
|
pkey_assert(ptr != (void *)-1);
|
|
mprotect_pkey(ptr, size, prot, pkey);
|
|
|
|
- record_pkey_malloc(ptr, size);
|
|
+ record_pkey_malloc(ptr, size, prot);
|
|
|
|
dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr);
|
|
return ptr;
|
|
@@ -869,7 +876,7 @@ void *malloc_pkey_mmap_dax(long size, int prot, u16 pkey)
|
|
|
|
mprotect_pkey(ptr, size, prot, pkey);
|
|
|
|
- record_pkey_malloc(ptr, size);
|
|
+ record_pkey_malloc(ptr, size, prot);
|
|
|
|
dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr);
|
|
close(fd);
|
|
@@ -918,13 +925,21 @@ void *malloc_pkey(long size, int prot, u16 pkey)
|
|
}
|
|
|
|
int last_pkru_faults;
|
|
+#define UNKNOWN_PKEY -2
|
|
void expected_pk_fault(int pkey)
|
|
{
|
|
dprintf2("%s(): last_pkru_faults: %d pkru_faults: %d\n",
|
|
__func__, last_pkru_faults, pkru_faults);
|
|
dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey);
|
|
pkey_assert(last_pkru_faults + 1 == pkru_faults);
|
|
- pkey_assert(last_si_pkey == pkey);
|
|
+
|
|
+ /*
|
|
+ * For exec-only memory, we do not know the pkey in
|
|
+ * advance, so skip this check.
|
|
+ */
|
|
+ if (pkey != UNKNOWN_PKEY)
|
|
+ pkey_assert(last_si_pkey == pkey);
|
|
+
|
|
/*
|
|
* The signal handler should have cleared out PKRU to let the
|
|
* test program continue. We now have to restore it.
|
|
@@ -939,10 +954,11 @@ void expected_pk_fault(int pkey)
|
|
last_si_pkey = -1;
|
|
}
|
|
|
|
-void do_not_expect_pk_fault(void)
|
|
-{
|
|
- pkey_assert(last_pkru_faults == pkru_faults);
|
|
-}
|
|
+#define do_not_expect_pk_fault(msg) do { \
|
|
+ if (last_pkru_faults != pkru_faults) \
|
|
+ dprintf0("unexpected PK fault: %s\n", msg); \
|
|
+ pkey_assert(last_pkru_faults == pkru_faults); \
|
|
+} while (0)
|
|
|
|
int test_fds[10] = { -1 };
|
|
int nr_test_fds;
|
|
@@ -1151,12 +1167,15 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
|
|
pkey_assert(i < NR_PKEYS*2);
|
|
|
|
/*
|
|
- * There are 16 pkeys supported in hardware. One is taken
|
|
- * up for the default (0) and another can be taken up by
|
|
- * an execute-only mapping. Ensure that we can allocate
|
|
- * at least 14 (16-2).
|
|
+ * There are 16 pkeys supported in hardware. Three are
|
|
+ * allocated by the time we get here:
|
|
+ * 1. The default key (0)
|
|
+ * 2. One possibly consumed by an execute-only mapping.
|
|
+ * 3. One allocated by the test code and passed in via
|
|
+ * 'pkey' to this function.
|
|
+ * Ensure that we can allocate at least another 13 (16-3).
|
|
*/
|
|
- pkey_assert(i >= NR_PKEYS-2);
|
|
+ pkey_assert(i >= NR_PKEYS-3);
|
|
|
|
for (i = 0; i < nr_allocated_pkeys; i++) {
|
|
err = sys_pkey_free(allocated_pkeys[i]);
|
|
@@ -1165,6 +1184,35 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
|
|
}
|
|
}
|
|
|
|
+/*
|
|
+ * pkey 0 is special. It is allocated by default, so you do not
|
|
+ * have to call pkey_alloc() to use it first. Make sure that it
|
|
+ * is usable.
|
|
+ */
|
|
+void test_mprotect_with_pkey_0(int *ptr, u16 pkey)
|
|
+{
|
|
+ long size;
|
|
+ int prot;
|
|
+
|
|
+ assert(pkey_last_malloc_record);
|
|
+ size = pkey_last_malloc_record->size;
|
|
+ /*
|
|
+ * This is a bit of a hack. But mprotect() requires
|
|
+ * huge-page-aligned sizes when operating on hugetlbfs.
|
|
+ * So, make sure that we use something that's a multiple
|
|
+ * of a huge page when we can.
|
|
+ */
|
|
+ if (size >= HPAGE_SIZE)
|
|
+ size = HPAGE_SIZE;
|
|
+ prot = pkey_last_malloc_record->prot;
|
|
+
|
|
+ /* Use pkey 0 */
|
|
+ mprotect_pkey(ptr, size, prot, 0);
|
|
+
|
|
+ /* Make sure that we can set it back to the original pkey. */
|
|
+ mprotect_pkey(ptr, size, prot, pkey);
|
|
+}
|
|
+
|
|
void test_ptrace_of_child(int *ptr, u16 pkey)
|
|
{
|
|
__attribute__((__unused__)) int peek_result;
|
|
@@ -1228,7 +1276,7 @@ void test_ptrace_of_child(int *ptr, u16 pkey)
|
|
pkey_assert(ret != -1);
|
|
/* Now access from the current task, and expect NO exception: */
|
|
peek_result = read_ptr(plain_ptr);
|
|
- do_not_expect_pk_fault();
|
|
+ do_not_expect_pk_fault("read plain pointer after ptrace");
|
|
|
|
ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0);
|
|
pkey_assert(ret != -1);
|
|
@@ -1241,12 +1289,9 @@ void test_ptrace_of_child(int *ptr, u16 pkey)
|
|
free(plain_ptr_unaligned);
|
|
}
|
|
|
|
-void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
|
|
+void *get_pointer_to_instructions(void)
|
|
{
|
|
void *p1;
|
|
- int scratch;
|
|
- int ptr_contents;
|
|
- int ret;
|
|
|
|
p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE);
|
|
dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write);
|
|
@@ -1256,7 +1301,23 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
|
|
/* Point 'p1' at the *second* page of the function: */
|
|
p1 += PAGE_SIZE;
|
|
|
|
+ /*
|
|
+ * Try to ensure we fault this in on next touch to ensure
|
|
+ * we get an instruction fault as opposed to a data one
|
|
+ */
|
|
madvise(p1, PAGE_SIZE, MADV_DONTNEED);
|
|
+
|
|
+ return p1;
|
|
+}
|
|
+
|
|
+void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
|
|
+{
|
|
+ void *p1;
|
|
+ int scratch;
|
|
+ int ptr_contents;
|
|
+ int ret;
|
|
+
|
|
+ p1 = get_pointer_to_instructions();
|
|
lots_o_noops_around_write(&scratch);
|
|
ptr_contents = read_ptr(p1);
|
|
dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
|
|
@@ -1272,12 +1333,55 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
|
|
*/
|
|
madvise(p1, PAGE_SIZE, MADV_DONTNEED);
|
|
lots_o_noops_around_write(&scratch);
|
|
- do_not_expect_pk_fault();
|
|
+ do_not_expect_pk_fault("executing on PROT_EXEC memory");
|
|
ptr_contents = read_ptr(p1);
|
|
dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
|
|
expected_pk_fault(pkey);
|
|
}
|
|
|
|
+void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
|
|
+{
|
|
+ void *p1;
|
|
+ int scratch;
|
|
+ int ptr_contents;
|
|
+ int ret;
|
|
+
|
|
+ dprintf1("%s() start\n", __func__);
|
|
+
|
|
+ p1 = get_pointer_to_instructions();
|
|
+ lots_o_noops_around_write(&scratch);
|
|
+ ptr_contents = read_ptr(p1);
|
|
+ dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
|
|
+
|
|
+ /* Use a *normal* mprotect(), not mprotect_pkey(): */
|
|
+ ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
|
|
+ pkey_assert(!ret);
|
|
+
|
|
+ dprintf2("pkru: %x\n", rdpkru());
|
|
+
|
|
+ /* Make sure this is an *instruction* fault */
|
|
+ madvise(p1, PAGE_SIZE, MADV_DONTNEED);
|
|
+ lots_o_noops_around_write(&scratch);
|
|
+ do_not_expect_pk_fault("executing on PROT_EXEC memory");
|
|
+ ptr_contents = read_ptr(p1);
|
|
+ dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
|
|
+ expected_pk_fault(UNKNOWN_PKEY);
|
|
+
|
|
+ /*
|
|
+ * Put the memory back to non-PROT_EXEC. Should clear the
|
|
+ * exec-only pkey off the VMA and allow it to be readable
|
|
+ * again. Go to PROT_NONE first to check for a kernel bug
|
|
+ * that did not clear the pkey when doing PROT_NONE.
|
|
+ */
|
|
+ ret = mprotect(p1, PAGE_SIZE, PROT_NONE);
|
|
+ pkey_assert(!ret);
|
|
+
|
|
+ ret = mprotect(p1, PAGE_SIZE, PROT_READ|PROT_EXEC);
|
|
+ pkey_assert(!ret);
|
|
+ ptr_contents = read_ptr(p1);
|
|
+ do_not_expect_pk_fault("plain read on recently PROT_EXEC area");
|
|
+}
|
|
+
|
|
void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey)
|
|
{
|
|
int size = PAGE_SIZE;
|
|
@@ -1302,6 +1406,8 @@ void (*pkey_tests[])(int *ptr, u16 pkey) = {
|
|
test_kernel_gup_of_access_disabled_region,
|
|
test_kernel_gup_write_to_write_disabled_region,
|
|
test_executing_on_unreadable_memory,
|
|
+ test_implicit_mprotect_exec_only_memory,
|
|
+ test_mprotect_with_pkey_0,
|
|
test_ptrace_of_child,
|
|
test_pkey_syscalls_on_non_allocated_pkey,
|
|
test_pkey_syscalls_bad_args,
|
|
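The hw_pkey_get()/hw_pkey_set() helpers that the selftest renames operate on PKRU, which holds two bits per protection key (access-disable and write-disable), so key N lives in bits 2N and 2N+1. A small self-contained sketch of that bit layout:

    #include <assert.h>

    #define PKEY_DISABLE_ACCESS 0x1
    #define PKEY_DISABLE_WRITE  0x2

    /* Read the two rights bits for @pkey out of a PKRU image. */
    static unsigned int pkru_get_rights(unsigned int pkru, int pkey)
    {
        return (pkru >> (pkey * 2)) & 0x3;
    }

    /* Return a PKRU image with @pkey's rights replaced by @rights. */
    static unsigned int pkru_set_rights(unsigned int pkru, int pkey,
                                        unsigned int rights)
    {
        unsigned int shift = pkey * 2;

        return (pkru & ~(0x3u << shift)) | ((rights & 0x3u) << shift);
    }

    int main(void)
    {
        unsigned int pkru = 0;

        pkru = pkru_set_rights(pkru, 5, PKEY_DISABLE_WRITE);
        assert(pkru_get_rights(pkru, 5) == PKEY_DISABLE_WRITE);
        assert(pkru_get_rights(pkru, 0) == 0);
        return 0;
    }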
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
|
|
index b3d4a10f09a1..af003268bf3e 100644
|
|
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
|
|
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
|
|
@@ -14,6 +14,8 @@
|
|
#include <linux/irqchip/arm-gic.h>
|
|
#include <linux/kvm.h>
|
|
#include <linux/kvm_host.h>
|
|
+#include <linux/nospec.h>
|
|
+
|
|
#include <kvm/iodev.h>
|
|
#include <kvm/arm_vgic.h>
|
|
|
|
@@ -320,6 +322,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
|
|
|
|
if (n > vgic_v3_max_apr_idx(vcpu))
|
|
return 0;
|
|
+
|
|
+ n = array_index_nospec(n, 4);
|
|
+
|
|
/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
|
|
return vgicv3->vgic_ap1r[n];
|
|
}
|
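The vgic hunk is the standard Spectre-v1 hardening pattern: bounds-check first, then clamp the index with array_index_nospec() so a mispredicted branch cannot be used to index out of bounds under speculation. A kernel-context sketch of the same pattern with illustrative names:

    #include <linux/nospec.h>
    #include <linux/types.h>

    #define TABLE_SIZE 4

    /* Return table[n], or 0 for out-of-range n, without a Spectre-v1 gadget. */
    static u32 read_table_entry(const u32 *table, unsigned int n)
    {
        if (n >= TABLE_SIZE)
            return 0;

        /* Clamp n under speculation too, not just architecturally. */
        n = array_index_nospec(n, TABLE_SIZE);

        return table[n];
    }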