Mirror of https://github.com/Fishwaldo/build.git (synced 2025-03-22 06:41:23 +00:00)
diff --git a/MAINTAINERS b/MAINTAINERS
index 233f83464814..d826f1b9eb02 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10289,9 +10289,11 @@ S: Maintained
F: drivers/net/ethernet/dlink/sundance.c

SUPERH
+M: Yoshinori Sato <ysato@users.sourceforge.jp>
+M: Rich Felker <dalias@libc.org>
L: linux-sh@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-sh/list/
-S: Orphan
+S: Maintained
F: Documentation/sh/
F: arch/sh/
F: drivers/sh/
diff --git a/Makefile b/Makefile
index e7a2958eb771..802be10c40c5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 2
+SUBLEVEL = 3
EXTRAVERSION =
NAME = Blurry Fish Butt

diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index a633be3defda..cd316021d6ce 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -303,16 +303,6 @@
gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
};

- reg_usb2_1_vbus: v5-vbus1 {
- compatible = "regulator-fixed";
- regulator-name = "v5.0-vbus1";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- enable-active-high;
- regulator-always-on;
- gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
- };
-
reg_sata0: pwr-sata0 {
compatible = "regulator-fixed";
regulator-name = "pwr_en_sata0";
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 131614f28e75..569026e8f96c 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -86,10 +86,12 @@
macb0: ethernet@f8020000 {
phy-mode = "rmii";
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;

phy0: ethernet-phy@1 {
interrupt-parent = <&pioE>;
- interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
reg = <1>;
};
};
@@ -152,6 +154,10 @@
atmel,pins =
<AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
};
+ pinctrl_macb0_phy_irq: macb0_phy_irq_0 {
+ atmel,pins =
+ <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index 2d4a33100af6..4e98cda97403 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -160,8 +160,15 @@
};

macb0: ethernet@f8020000 {
+ pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
phy-mode = "rmii";
status = "okay";
+
+ ethernet-phy@1 {
+ reg = <0x1>;
+ interrupt-parent = <&pioE>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ };
};

mmc1: mmc@fc000000 {
@@ -193,6 +200,10 @@

pinctrl@fc06a000 {
board {
+ pinctrl_macb0_phy_irq: macb0_phy_irq {
+ atmel,pins =
+ <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
pinctrl_mmc0_cd: mmc0_cd {
atmel,pins =
<AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 36387b11451d..80f6c786a37e 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -122,6 +122,7 @@
interrupt-parent = <&gpio5>;
interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
ref-clock-frequency = <26000000>;
+ tcxo-clock-frequency = <26000000>;
};
};

diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 5cf76a1c5c75..41e80e7f20be 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -130,6 +130,16 @@
};
};

+&gpio8 {
+ /* TI trees use GPIO instead of msecure, see also muxing */
+ p234 {
+ gpio-hog;
+ gpios = <10 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "gpio8_234/msecure";
+ };
+};
+
&omap5_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
@@ -213,6 +223,13 @@
>;
};

+ /* TI trees use GPIO mode; msecure mode does not work reliably? */
+ palmas_msecure_pins: palmas_msecure_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x180, PIN_OUTPUT | MUX_MODE6) /* gpio8_234 */
+ >;
+ };
+
usbhost_pins: pinmux_usbhost_pins {
pinctrl-single,pins = <
0x84 (PIN_INPUT | MUX_MODE0) /* usbb2_hsic_strobe */
@@ -278,6 +295,12 @@
&usbhost_wkup_pins
>;

+ palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+ >;
+ };
+
usbhost_wkup_pins: pinmux_usbhost_wkup_pins {
pinctrl-single,pins = <
0x1A (PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */
@@ -345,6 +368,8 @@
interrupt-controller;
#interrupt-cells = <2>;
ti,system-power-controller;
+ pinctrl-names = "default";
+ pinctrl-0 = <&palmas_sys_nirq_pins &palmas_msecure_pins>;

extcon_usb3: palmas_usb {
compatible = "ti,palmas-usb-vid";
@@ -358,6 +383,14 @@
#clock-cells = <0>;
};

+ rtc {
+ compatible = "ti,palmas-rtc";
+ interrupt-parent = <&palmas>;
+ interrupts = <8 IRQ_TYPE_NONE>;
+ ti,backup-battery-chargeable;
+ ti,backup-battery-charge-high-current;
+ };
+
palmas_pmic {
compatible = "ti,palmas-pmic";
interrupt-parent = <&palmas>;
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 2193637b9cd2..3daf8d5d7878 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1342,7 +1342,7 @@
dbgu: serial@fc069000 {
compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xfc069000 0x200>;
- interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
+ interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dbgu>;
clocks = <&dbgu_clk>;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index d0c743853318..27a333eb8987 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -127,22 +127,14 @@
};
mmcsd_default_mode: mmcsd_default {
mmcsd_default_cfg1 {
- /* MCCLK */
- pins = "GPIO8_B10";
- ste,output = <0>;
- };
- mmcsd_default_cfg2 {
- /* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
- pins = "GPIO10_C11", "GPIO15_A12",
- "GPIO16_C13", "GPIO23_D15";
- ste,output = <1>;
- };
- mmcsd_default_cfg3 {
- /* MCCMD, MCDAT3-0, MCMSFBCLK */
- pins = "GPIO9_A10", "GPIO11_B11",
- "GPIO12_A11", "GPIO13_C12",
- "GPIO14_B12", "GPIO24_C15";
- ste,input = <1>;
+ /*
+ * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2
+ * MCCMD, MCDAT3-0, MCMSFBCLK
+ */
+ pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11",
+ "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12",
+ "GPIO16_C13", "GPIO23_D15", "GPIO24_C15";
+ ste,output = <2>;
};
};
};
@@ -802,10 +794,21 @@
clock-names = "mclk", "apb_pclk";
interrupt-parent = <&vica>;
interrupts = <22>;
- max-frequency = <48000000>;
+ max-frequency = <400000>;
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
+ full-pwr-cycle;
+ /*
+ * The STw4811 circuit used with the Nomadik strictly
+ * requires that all of these signal direction pins be
+ * routed and used for its 4-bit levelshifter.
+ */
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-dir-dat31;
+ st,sig-dir-cmd;
+ st,sig-pin-fbclk;
pinctrl-names = "default";
pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
vmmc-supply = <&vmmc_regulator>;
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
index 2dc6da70ae59..d7ed252708c5 100644
--- a/arch/arm/common/icst.c
+++ b/arch/arm/common/icst.c
@@ -16,7 +16,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
-
+#include <asm/div64.h>
#include <asm/hardware/icst.h>

/*
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);

unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
{
- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+ u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
+ u32 divisor = (vco.r + 2) * p->s2div[vco.s];
+
+ do_div(dividend, divisor);
+ return (unsigned long)dividend;
}

EXPORT_SYMBOL(icst_hz);
@@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq)

if (f > p->vco_min && f <= p->vco_max)
break;
+ i++;
} while (i < 8);

if (i >= 8)
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
|
|
index eafd120b53f1..1b9f0520dea9 100644
|
|
--- a/arch/arm/mach-omap2/sleep34xx.S
|
|
+++ b/arch/arm/mach-omap2/sleep34xx.S
|
|
@@ -86,13 +86,18 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
|
|
stmfd sp!, {lr} @ save registers on stack
|
|
/* Setup so that we will disable and enable l2 */
|
|
mov r1, #0x1
|
|
- adrl r2, l2dis_3630 @ may be too distant for plain adr
|
|
- str r1, [r2]
|
|
+ adrl r3, l2dis_3630_offset @ may be too distant for plain adr
|
|
+ ldr r2, [r3] @ value for offset
|
|
+ str r1, [r2, r3] @ write to l2dis_3630
|
|
ldmfd sp!, {pc} @ restore regs and return
|
|
ENDPROC(enable_omap3630_toggle_l2_on_restore)
|
|
|
|
- .text
|
|
-/* Function to call rom code to save secure ram context */
|
|
+/*
|
|
+ * Function to call rom code to save secure ram context. This gets
|
|
+ * relocated to SRAM, so it can be all in .data section. Otherwise
|
|
+ * we need to initialize api_params separately.
|
|
+ */
|
|
+ .data
|
|
.align 3
|
|
ENTRY(save_secure_ram_context)
|
|
stmfd sp!, {r4 - r11, lr} @ save registers on stack
|
|
@@ -126,6 +131,8 @@ ENDPROC(save_secure_ram_context)
|
|
ENTRY(save_secure_ram_context_sz)
|
|
.word . - save_secure_ram_context
|
|
|
|
+ .text
|
|
+
|
|
/*
|
|
* ======================
|
|
* == Idle entry point ==
|
|
@@ -289,12 +296,6 @@ wait_sdrc_ready:
|
|
bic r5, r5, #0x40
|
|
str r5, [r4]
|
|
|
|
-/*
|
|
- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
|
|
- * base instead.
|
|
- * Be careful not to clobber r7 when maintaing this code.
|
|
- */
|
|
-
|
|
is_dll_in_lock_mode:
|
|
/* Is dll in lock mode? */
|
|
ldr r4, sdrc_dlla_ctrl
|
|
@@ -302,11 +303,7 @@ is_dll_in_lock_mode:
|
|
tst r5, #0x4
|
|
bne exit_nonoff_modes @ Return if locked
|
|
/* wait till dll locks */
|
|
- adr r7, kick_counter
|
|
wait_dll_lock_timed:
|
|
- ldr r4, wait_dll_lock_counter
|
|
- add r4, r4, #1
|
|
- str r4, [r7, #wait_dll_lock_counter - kick_counter]
|
|
ldr r4, sdrc_dlla_status
|
|
/* Wait 20uS for lock */
|
|
mov r6, #8
|
|
@@ -330,9 +327,6 @@ kick_dll:
|
|
orr r6, r6, #(1<<3) @ enable dll
|
|
str r6, [r4]
|
|
dsb
|
|
- ldr r4, kick_counter
|
|
- add r4, r4, #1
|
|
- str r4, [r7] @ kick_counter
|
|
b wait_dll_lock_timed
|
|
|
|
exit_nonoff_modes:
|
|
@@ -360,15 +354,6 @@ sdrc_dlla_status:
|
|
.word SDRC_DLLA_STATUS_V
|
|
sdrc_dlla_ctrl:
|
|
.word SDRC_DLLA_CTRL_V
|
|
- /*
|
|
- * When exporting to userspace while the counters are in SRAM,
|
|
- * these 2 words need to be at the end to facilitate retrival!
|
|
- */
|
|
-kick_counter:
|
|
- .word 0
|
|
-wait_dll_lock_counter:
|
|
- .word 0
|
|
-
|
|
ENTRY(omap3_do_wfi_sz)
|
|
.word . - omap3_do_wfi
|
|
|
|
@@ -437,7 +422,9 @@ ENTRY(omap3_restore)
|
|
cmp r2, #0x0 @ Check if target power state was OFF or RET
|
|
bne logic_l1_restore
|
|
|
|
- ldr r0, l2dis_3630
|
|
+ adr r1, l2dis_3630_offset @ address for offset
|
|
+ ldr r0, [r1] @ value for offset
|
|
+ ldr r0, [r1, r0] @ value at l2dis_3630
|
|
cmp r0, #0x1 @ should we disable L2 on 3630?
|
|
bne skipl2dis
|
|
mrc p15, 0, r0, c1, c0, 1
|
|
@@ -449,12 +436,14 @@ skipl2dis:
|
|
and r1, #0x700
|
|
cmp r1, #0x300
|
|
beq l2_inv_gp
|
|
+ adr r0, l2_inv_api_params_offset
|
|
+ ldr r3, [r0]
|
|
+ add r3, r3, r0 @ r3 points to dummy parameters
|
|
mov r0, #40 @ set service ID for PPA
|
|
mov r12, r0 @ copy secure Service ID in r12
|
|
mov r1, #0 @ set task id for ROM code in r1
|
|
mov r2, #4 @ set some flags in r2, r6
|
|
mov r6, #0xff
|
|
- adr r3, l2_inv_api_params @ r3 points to dummy parameters
|
|
dsb @ data write barrier
|
|
dmb @ data memory barrier
|
|
smc #1 @ call SMI monitor (smi #1)
|
|
@@ -488,8 +477,8 @@ skipl2dis:
|
|
b logic_l1_restore
|
|
|
|
.align
|
|
-l2_inv_api_params:
|
|
- .word 0x1, 0x00
|
|
+l2_inv_api_params_offset:
|
|
+ .long l2_inv_api_params - .
|
|
l2_inv_gp:
|
|
/* Execute smi to invalidate L2 cache */
|
|
mov r12, #0x1 @ set up to invalidate L2
|
|
@@ -506,7 +495,9 @@ l2_inv_gp:
|
|
mov r12, #0x2
|
|
smc #0 @ Call SMI monitor (smieq)
|
|
logic_l1_restore:
|
|
- ldr r1, l2dis_3630
|
|
+ adr r0, l2dis_3630_offset @ adress for offset
|
|
+ ldr r1, [r0] @ value for offset
|
|
+ ldr r1, [r0, r1] @ value at l2dis_3630
|
|
cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
|
|
bne skipl2reen
|
|
mrc p15, 0, r1, c1, c0, 1
|
|
@@ -535,9 +526,17 @@ control_stat:
|
|
.word CONTROL_STAT
|
|
control_mem_rta:
|
|
.word CONTROL_MEM_RTA_CTRL
|
|
+l2dis_3630_offset:
|
|
+ .long l2dis_3630 - .
|
|
+
|
|
+ .data
|
|
l2dis_3630:
|
|
.word 0
|
|
|
|
+ .data
|
|
+l2_inv_api_params:
|
|
+ .word 0x1, 0x00
|
|
+
|
|
/*
|
|
* Internal functions
|
|
*/
|
|
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
|
|
index 9b09d85d811a..c7a3b4aab4b5 100644
|
|
--- a/arch/arm/mach-omap2/sleep44xx.S
|
|
+++ b/arch/arm/mach-omap2/sleep44xx.S
|
|
@@ -29,12 +29,6 @@
|
|
dsb
|
|
.endm
|
|
|
|
-ppa_zero_params:
|
|
- .word 0x0
|
|
-
|
|
-ppa_por_params:
|
|
- .word 1, 0
|
|
-
|
|
#ifdef CONFIG_ARCH_OMAP4
|
|
|
|
/*
|
|
@@ -266,7 +260,9 @@ ENTRY(omap4_cpu_resume)
|
|
beq skip_ns_smp_enable
|
|
ppa_actrl_retry:
|
|
mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
|
|
- adr r3, ppa_zero_params @ Pointer to parameters
|
|
+ adr r1, ppa_zero_params_offset
|
|
+ ldr r3, [r1]
|
|
+ add r3, r3, r1 @ Pointer to ppa_zero_params
|
|
mov r1, #0x0 @ Process ID
|
|
mov r2, #0x4 @ Flag
|
|
mov r6, #0xff
|
|
@@ -303,7 +299,9 @@ skip_ns_smp_enable:
|
|
ldr r0, =OMAP4_PPA_L2_POR_INDEX
|
|
ldr r1, =OMAP44XX_SAR_RAM_BASE
|
|
ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
|
|
- adr r3, ppa_por_params
|
|
+ adr r1, ppa_por_params_offset
|
|
+ ldr r3, [r1]
|
|
+ add r3, r3, r1 @ Pointer to ppa_por_params
|
|
str r4, [r3, #0x04]
|
|
mov r1, #0x0 @ Process ID
|
|
mov r2, #0x4 @ Flag
|
|
@@ -328,6 +326,8 @@ skip_l2en:
|
|
#endif
|
|
|
|
b cpu_resume @ Jump to generic resume
|
|
+ppa_por_params_offset:
|
|
+ .long ppa_por_params - .
|
|
ENDPROC(omap4_cpu_resume)
|
|
#endif /* CONFIG_ARCH_OMAP4 */
|
|
|
|
@@ -380,4 +380,13 @@ ENTRY(omap_do_wfi)
|
|
nop
|
|
|
|
ldmfd sp!, {pc}
|
|
+ppa_zero_params_offset:
|
|
+ .long ppa_zero_params - .
|
|
ENDPROC(omap_do_wfi)
|
|
+
|
|
+ .data
|
|
+ppa_zero_params:
|
|
+ .word 0
|
|
+
|
|
+ppa_por_params:
|
|
+ .word 1, 0
|
|
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
|
|
index 7963aa4b5d28..354144e33218 100644
|
|
--- a/arch/arm64/mm/dma-mapping.c
|
|
+++ b/arch/arm64/mm/dma-mapping.c
|
|
@@ -933,6 +933,10 @@ static int __init __iommu_dma_init(void)
|
|
ret = register_iommu_dma_ops_notifier(&platform_bus_type);
|
|
if (!ret)
|
|
ret = register_iommu_dma_ops_notifier(&amba_bustype);
|
|
+
|
|
+ /* handle devices queued before this arch_initcall */
|
|
+ if (!ret)
|
|
+ __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
|
|
return ret;
|
|
}
|
|
arch_initcall(__iommu_dma_init);
|
|
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
|
|
index 3571c7309c5e..cf6240741134 100644
|
|
--- a/arch/arm64/mm/pageattr.c
|
|
+++ b/arch/arm64/mm/pageattr.c
|
|
@@ -57,6 +57,9 @@ static int change_memory_common(unsigned long addr, int numpages,
|
|
if (end < MODULES_VADDR || end >= MODULES_END)
|
|
return -EINVAL;
|
|
|
|
+ if (!numpages)
|
|
+ return 0;
|
|
+
|
|
data.set_mask = set_mask;
|
|
data.clear_mask = clear_mask;
|
|
|
|
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
|
|
index 0392112a5d70..a5ecef7188ba 100644
|
|
--- a/arch/m32r/kernel/setup.c
|
|
+++ b/arch/m32r/kernel/setup.c
|
|
@@ -81,7 +81,10 @@ static struct resource code_resource = {
|
|
};
|
|
|
|
unsigned long memory_start;
|
|
+EXPORT_SYMBOL(memory_start);
|
|
+
|
|
unsigned long memory_end;
|
|
+EXPORT_SYMBOL(memory_end);
|
|
|
|
void __init setup_arch(char **);
|
|
int get_cpuinfo(char *);
|
|
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
|
|
index c5eb86f3d452..867c39b45df6 100644
|
|
--- a/arch/powerpc/include/asm/eeh.h
|
|
+++ b/arch/powerpc/include/asm/eeh.h
|
|
@@ -81,6 +81,7 @@ struct pci_dn;
|
|
#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
|
|
#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
|
|
#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
|
|
+#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
|
|
|
|
struct eeh_pe {
|
|
int type; /* PE type: PHB/Bus/Device */
|
|
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
|
|
index 8d14feb40f12..f69ecaa7ce33 100644
|
|
--- a/arch/powerpc/kernel/eeh_driver.c
|
|
+++ b/arch/powerpc/kernel/eeh_driver.c
|
|
@@ -564,6 +564,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
|
|
*/
|
|
eeh_pe_state_mark(pe, EEH_PE_KEEP);
|
|
if (bus) {
|
|
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
|
|
pci_lock_rescan_remove();
|
|
pcibios_remove_pci_devices(bus);
|
|
pci_unlock_rescan_remove();
|
|
@@ -803,6 +804,7 @@ perm_error:
|
|
* the their PCI config any more.
|
|
*/
|
|
if (frozen_bus) {
|
|
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
|
|
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
|
|
|
|
pci_lock_rescan_remove();
|
|
@@ -886,6 +888,7 @@ static void eeh_handle_special_event(void)
|
|
continue;
|
|
|
|
/* Notify all devices to be down */
|
|
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
|
|
bus = eeh_pe_bus_get(phb_pe);
|
|
eeh_pe_dev_traverse(pe,
|
|
eeh_report_failure, NULL);
|
|
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
|
|
index 8654cb166c19..98f81800e00c 100644
|
|
--- a/arch/powerpc/kernel/eeh_pe.c
|
|
+++ b/arch/powerpc/kernel/eeh_pe.c
|
|
@@ -883,32 +883,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
|
|
const char *eeh_pe_loc_get(struct eeh_pe *pe)
|
|
{
|
|
struct pci_bus *bus = eeh_pe_bus_get(pe);
|
|
- struct device_node *dn = pci_bus_to_OF_node(bus);
|
|
+ struct device_node *dn;
|
|
const char *loc = NULL;
|
|
|
|
- if (!dn)
|
|
- goto out;
|
|
+ while (bus) {
|
|
+ dn = pci_bus_to_OF_node(bus);
|
|
+ if (!dn) {
|
|
+ bus = bus->parent;
|
|
+ continue;
|
|
+ }
|
|
|
|
- /* PHB PE or root PE ? */
|
|
- if (pci_is_root_bus(bus)) {
|
|
- loc = of_get_property(dn, "ibm,loc-code", NULL);
|
|
- if (!loc)
|
|
+ if (pci_is_root_bus(bus))
|
|
loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
|
|
+ else
|
|
+ loc = of_get_property(dn, "ibm,slot-location-code",
|
|
+ NULL);
|
|
+
|
|
if (loc)
|
|
- goto out;
|
|
+ return loc;
|
|
|
|
- /* Check the root port */
|
|
- dn = dn->child;
|
|
- if (!dn)
|
|
- goto out;
|
|
+ bus = bus->parent;
|
|
}
|
|
|
|
- loc = of_get_property(dn, "ibm,loc-code", NULL);
|
|
- if (!loc)
|
|
- loc = of_get_property(dn, "ibm,slot-location-code", NULL);
|
|
-
|
|
-out:
|
|
- return loc ? loc : "N/A";
|
|
+ return "N/A";
|
|
}
|
|
|
|
/**
|
|
@@ -931,7 +928,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
|
|
bus = pe->phb->bus;
|
|
} else if (pe->type & EEH_PE_BUS ||
|
|
pe->type & EEH_PE_DEVICE) {
|
|
- if (pe->bus) {
|
|
+ if (pe->state & EEH_PE_PRI_BUS) {
|
|
bus = pe->bus;
|
|
goto out;
|
|
}
|
|
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
|
|
index 3c6badcd53ef..e57cc383e5da 100644
|
|
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
|
|
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
|
|
@@ -2153,7 +2153,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
|
|
|
/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
|
|
2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
|
|
- rlwimi r5, r4, 1, DAWRX_WT
|
|
+ rlwimi r5, r4, 2, DAWRX_WT
|
|
clrrdi r4, r4, 3
|
|
std r4, VCPU_DAWR(r3)
|
|
std r5, VCPU_DAWRX(r3)
|
|
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
|
|
index 6fd2405c7f4a..a3b182dcb823 100644
|
|
--- a/arch/powerpc/kvm/powerpc.c
|
|
+++ b/arch/powerpc/kvm/powerpc.c
|
|
@@ -919,21 +919,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
|
|
r = -ENXIO;
|
|
break;
|
|
}
|
|
- vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
|
|
+ val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
|
|
break;
|
|
case KVM_REG_PPC_VSCR:
|
|
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
|
|
r = -ENXIO;
|
|
break;
|
|
}
|
|
- vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
|
|
+ val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
|
|
break;
|
|
case KVM_REG_PPC_VRSAVE:
|
|
- if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
|
|
- r = -ENXIO;
|
|
- break;
|
|
- }
|
|
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
|
|
+ val = get_reg_val(reg->id, vcpu->arch.vrsave);
|
|
break;
|
|
#endif /* CONFIG_ALTIVEC */
|
|
default:
|
|
@@ -974,17 +970,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
|
|
r = -ENXIO;
|
|
break;
|
|
}
|
|
- val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
|
|
+ vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
|
|
break;
|
|
case KVM_REG_PPC_VSCR:
|
|
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
|
|
r = -ENXIO;
|
|
break;
|
|
}
|
|
- val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
|
|
+ vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
|
|
break;
|
|
case KVM_REG_PPC_VRSAVE:
|
|
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
|
|
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
|
|
+ r = -ENXIO;
|
|
+ break;
|
|
+ }
|
|
+ vcpu->arch.vrsave = set_reg_val(reg->id, val);
|
|
break;
|
|
#endif /* CONFIG_ALTIVEC */
|
|
default:
|
|
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
|
|
index e1c90725522a..2ba602591a20 100644
|
|
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
|
|
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
|
|
@@ -444,9 +444,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
|
|
* PCI devices of the PE are expected to be removed prior
|
|
* to PE reset.
|
|
*/
|
|
- if (!edev->pe->bus)
|
|
+ if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
|
|
edev->pe->bus = pci_find_bus(hose->global_number,
|
|
pdn->busno);
|
|
+ if (edev->pe->bus)
|
|
+ edev->pe->state |= EEH_PE_PRI_BUS;
|
|
+ }
|
|
|
|
/*
|
|
* Enable EEH explicitly so that we will do EEH check
|
|
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
index 414fd1a00fda..e40d0714679e 100644
|
|
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
@@ -3034,6 +3034,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
|
|
|
|
static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
|
|
.dma_dev_setup = pnv_pci_dma_dev_setup,
|
|
+ .dma_bus_setup = pnv_pci_dma_bus_setup,
|
|
#ifdef CONFIG_PCI_MSI
|
|
.setup_msi_irqs = pnv_setup_msi_irqs,
|
|
.teardown_msi_irqs = pnv_teardown_msi_irqs,
|
|
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
|
|
index f2dd77234240..ad8c3f4a5e0b 100644
|
|
--- a/arch/powerpc/platforms/powernv/pci.c
|
|
+++ b/arch/powerpc/platforms/powernv/pci.c
|
|
@@ -601,6 +601,9 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
|
|
u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
|
|
long i;
|
|
|
|
+ if (proto_tce & TCE_PCI_WRITE)
|
|
+ proto_tce |= TCE_PCI_READ;
|
|
+
|
|
for (i = 0; i < npages; i++) {
|
|
unsigned long newtce = proto_tce |
|
|
((rpn + i) << tbl->it_page_shift);
|
|
@@ -622,6 +625,9 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
|
|
|
|
BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
|
|
|
|
+ if (newtce & TCE_PCI_WRITE)
|
|
+ newtce |= TCE_PCI_READ;
|
|
+
|
|
oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
|
|
*hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
|
|
*direction = iommu_tce_direction(oldtce);
|
|
@@ -762,6 +768,26 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
|
|
phb->dma_dev_setup(phb, pdev);
|
|
}
|
|
|
|
+void pnv_pci_dma_bus_setup(struct pci_bus *bus)
|
|
+{
|
|
+ struct pci_controller *hose = bus->sysdata;
|
|
+ struct pnv_phb *phb = hose->private_data;
|
|
+ struct pnv_ioda_pe *pe;
|
|
+
|
|
+ list_for_each_entry(pe, &phb->ioda.pe_list, list) {
|
|
+ if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
|
|
+ continue;
|
|
+
|
|
+ if (!pe->pbus)
|
|
+ continue;
|
|
+
|
|
+ if (bus->number == ((pe->rid >> 8) & 0xFF)) {
|
|
+ pe->pbus = bus;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
void pnv_pci_shutdown(void)
|
|
{
|
|
struct pci_controller *hose;
|
|
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
|
|
index c8ff50e90766..36a99feab7d8 100644
|
|
--- a/arch/powerpc/platforms/powernv/pci.h
|
|
+++ b/arch/powerpc/platforms/powernv/pci.h
|
|
@@ -235,6 +235,7 @@ extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
|
|
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
|
|
|
|
extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
|
|
+extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
|
|
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
|
|
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
|
|
|
|
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
|
|
index a471cadb9630..79c91853e50e 100644
|
|
--- a/arch/x86/include/asm/pgtable_types.h
|
|
+++ b/arch/x86/include/asm/pgtable_types.h
|
|
@@ -363,20 +363,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
|
|
}
|
|
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
|
|
{
|
|
+ pgprotval_t val = pgprot_val(pgprot);
|
|
pgprot_t new;
|
|
- unsigned long val;
|
|
|
|
- val = pgprot_val(pgprot);
|
|
pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
|
|
((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
|
|
return new;
|
|
}
|
|
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
|
|
{
|
|
+ pgprotval_t val = pgprot_val(pgprot);
|
|
pgprot_t new;
|
|
- unsigned long val;
|
|
|
|
- val = pgprot_val(pgprot);
|
|
pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
|
|
((val & _PAGE_PAT_LARGE) >>
|
|
(_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
|
|
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
|
|
index 982ce34f4a9b..27f89c79a44b 100644
|
|
--- a/arch/x86/lib/copy_user_64.S
|
|
+++ b/arch/x86/lib/copy_user_64.S
|
|
@@ -232,17 +232,31 @@ ENDPROC(copy_user_enhanced_fast_string)
|
|
|
|
/*
|
|
* copy_user_nocache - Uncached memory copy with exception handling
|
|
- * This will force destination/source out of cache for more performance.
|
|
+ * This will force destination out of cache for more performance.
|
|
+ *
|
|
+ * Note: Cached memory copy is used when destination or size is not
|
|
+ * naturally aligned. That is:
|
|
+ * - Require 8-byte alignment when size is 8 bytes or larger.
|
|
+ * - Require 4-byte alignment when size is 4 bytes.
|
|
*/
|
|
ENTRY(__copy_user_nocache)
|
|
ASM_STAC
|
|
+
|
|
+ /* If size is less than 8 bytes, go to 4-byte copy */
|
|
cmpl $8,%edx
|
|
- jb 20f /* less then 8 bytes, go to byte copy loop */
|
|
+ jb .L_4b_nocache_copy_entry
|
|
+
|
|
+ /* If destination is not 8-byte aligned, "cache" copy to align it */
|
|
ALIGN_DESTINATION
|
|
+
|
|
+ /* Set 4x8-byte copy count and remainder */
|
|
movl %edx,%ecx
|
|
andl $63,%edx
|
|
shrl $6,%ecx
|
|
- jz 17f
|
|
+ jz .L_8b_nocache_copy_entry /* jump if count is 0 */
|
|
+
|
|
+ /* Perform 4x8-byte nocache loop-copy */
|
|
+.L_4x8b_nocache_copy_loop:
|
|
1: movq (%rsi),%r8
|
|
2: movq 1*8(%rsi),%r9
|
|
3: movq 2*8(%rsi),%r10
|
|
@@ -262,60 +276,106 @@ ENTRY(__copy_user_nocache)
|
|
leaq 64(%rsi),%rsi
|
|
leaq 64(%rdi),%rdi
|
|
decl %ecx
|
|
- jnz 1b
|
|
-17: movl %edx,%ecx
|
|
+ jnz .L_4x8b_nocache_copy_loop
|
|
+
|
|
+ /* Set 8-byte copy count and remainder */
|
|
+.L_8b_nocache_copy_entry:
|
|
+ movl %edx,%ecx
|
|
andl $7,%edx
|
|
shrl $3,%ecx
|
|
- jz 20f
|
|
-18: movq (%rsi),%r8
|
|
-19: movnti %r8,(%rdi)
|
|
+ jz .L_4b_nocache_copy_entry /* jump if count is 0 */
|
|
+
|
|
+ /* Perform 8-byte nocache loop-copy */
|
|
+.L_8b_nocache_copy_loop:
|
|
+20: movq (%rsi),%r8
|
|
+21: movnti %r8,(%rdi)
|
|
leaq 8(%rsi),%rsi
|
|
leaq 8(%rdi),%rdi
|
|
decl %ecx
|
|
- jnz 18b
|
|
-20: andl %edx,%edx
|
|
- jz 23f
|
|
+ jnz .L_8b_nocache_copy_loop
|
|
+
|
|
+ /* If no byte left, we're done */
|
|
+.L_4b_nocache_copy_entry:
|
|
+ andl %edx,%edx
|
|
+ jz .L_finish_copy
|
|
+
|
|
+ /* If destination is not 4-byte aligned, go to byte copy: */
|
|
+ movl %edi,%ecx
|
|
+ andl $3,%ecx
|
|
+ jnz .L_1b_cache_copy_entry
|
|
+
|
|
+ /* Set 4-byte copy count (1 or 0) and remainder */
|
|
movl %edx,%ecx
|
|
-21: movb (%rsi),%al
|
|
-22: movb %al,(%rdi)
|
|
+ andl $3,%edx
|
|
+ shrl $2,%ecx
|
|
+ jz .L_1b_cache_copy_entry /* jump if count is 0 */
|
|
+
|
|
+ /* Perform 4-byte nocache copy: */
|
|
+30: movl (%rsi),%r8d
|
|
+31: movnti %r8d,(%rdi)
|
|
+ leaq 4(%rsi),%rsi
|
|
+ leaq 4(%rdi),%rdi
|
|
+
|
|
+ /* If no bytes left, we're done: */
|
|
+ andl %edx,%edx
|
|
+ jz .L_finish_copy
|
|
+
|
|
+ /* Perform byte "cache" loop-copy for the remainder */
|
|
+.L_1b_cache_copy_entry:
|
|
+ movl %edx,%ecx
|
|
+.L_1b_cache_copy_loop:
|
|
+40: movb (%rsi),%al
|
|
+41: movb %al,(%rdi)
|
|
incq %rsi
|
|
incq %rdi
|
|
decl %ecx
|
|
- jnz 21b
|
|
-23: xorl %eax,%eax
|
|
+ jnz .L_1b_cache_copy_loop
|
|
+
|
|
+ /* Finished copying; fence the prior stores */
|
|
+.L_finish_copy:
|
|
+ xorl %eax,%eax
|
|
ASM_CLAC
|
|
sfence
|
|
ret
|
|
|
|
.section .fixup,"ax"
|
|
-30: shll $6,%ecx
|
|
+.L_fixup_4x8b_copy:
|
|
+ shll $6,%ecx
|
|
addl %ecx,%edx
|
|
- jmp 60f
|
|
-40: lea (%rdx,%rcx,8),%rdx
|
|
- jmp 60f
|
|
-50: movl %ecx,%edx
|
|
-60: sfence
|
|
+ jmp .L_fixup_handle_tail
|
|
+.L_fixup_8b_copy:
|
|
+ lea (%rdx,%rcx,8),%rdx
|
|
+ jmp .L_fixup_handle_tail
|
|
+.L_fixup_4b_copy:
|
|
+ lea (%rdx,%rcx,4),%rdx
|
|
+ jmp .L_fixup_handle_tail
|
|
+.L_fixup_1b_copy:
|
|
+ movl %ecx,%edx
|
|
+.L_fixup_handle_tail:
|
|
+ sfence
|
|
jmp copy_user_handle_tail
|
|
.previous
|
|
|
|
- _ASM_EXTABLE(1b,30b)
|
|
- _ASM_EXTABLE(2b,30b)
|
|
- _ASM_EXTABLE(3b,30b)
|
|
- _ASM_EXTABLE(4b,30b)
|
|
- _ASM_EXTABLE(5b,30b)
|
|
- _ASM_EXTABLE(6b,30b)
|
|
- _ASM_EXTABLE(7b,30b)
|
|
- _ASM_EXTABLE(8b,30b)
|
|
- _ASM_EXTABLE(9b,30b)
|
|
- _ASM_EXTABLE(10b,30b)
|
|
- _ASM_EXTABLE(11b,30b)
|
|
- _ASM_EXTABLE(12b,30b)
|
|
- _ASM_EXTABLE(13b,30b)
|
|
- _ASM_EXTABLE(14b,30b)
|
|
- _ASM_EXTABLE(15b,30b)
|
|
- _ASM_EXTABLE(16b,30b)
|
|
- _ASM_EXTABLE(18b,40b)
|
|
- _ASM_EXTABLE(19b,40b)
|
|
- _ASM_EXTABLE(21b,50b)
|
|
- _ASM_EXTABLE(22b,50b)
|
|
+ _ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
|
|
+ _ASM_EXTABLE(20b,.L_fixup_8b_copy)
|
|
+ _ASM_EXTABLE(21b,.L_fixup_8b_copy)
|
|
+ _ASM_EXTABLE(30b,.L_fixup_4b_copy)
|
|
+ _ASM_EXTABLE(31b,.L_fixup_4b_copy)
|
|
+ _ASM_EXTABLE(40b,.L_fixup_1b_copy)
|
|
+ _ASM_EXTABLE(41b,.L_fixup_1b_copy)
|
|
ENDPROC(__copy_user_nocache)
|
|
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
|
|
index eef44d9a3f77..e830c71a1323 100644
|
|
--- a/arch/x86/mm/fault.c
|
|
+++ b/arch/x86/mm/fault.c
|
|
@@ -287,6 +287,9 @@ static noinline int vmalloc_fault(unsigned long address)
|
|
if (!pmd_k)
|
|
return -1;
|
|
|
|
+ if (pmd_huge(*pmd_k))
|
|
+ return 0;
|
|
+
|
|
pte_k = pte_offset_kernel(pmd_k, address);
|
|
if (!pte_present(*pte_k))
|
|
return -1;
|
|
@@ -360,8 +363,6 @@ void vmalloc_sync_all(void)
|
|
* 64-bit:
|
|
*
|
|
* Handle a fault on the vmalloc area
|
|
- *
|
|
- * This assumes no large pages in there.
|
|
*/
|
|
static noinline int vmalloc_fault(unsigned long address)
|
|
{
|
|
@@ -403,17 +404,23 @@ static noinline int vmalloc_fault(unsigned long address)
|
|
if (pud_none(*pud_ref))
|
|
return -1;
|
|
|
|
- if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
|
|
+ if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
|
|
BUG();
|
|
|
|
+ if (pud_huge(*pud))
|
|
+ return 0;
|
|
+
|
|
pmd = pmd_offset(pud, address);
|
|
pmd_ref = pmd_offset(pud_ref, address);
|
|
if (pmd_none(*pmd_ref))
|
|
return -1;
|
|
|
|
- if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
|
|
+ if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
|
|
BUG();
|
|
|
|
+ if (pmd_huge(*pmd))
|
|
+ return 0;
|
|
+
|
|
pte_ref = pte_offset_kernel(pmd_ref, address);
|
|
if (!pte_present(*pte_ref))
|
|
return -1;
|
|
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
|
|
index a3137a4feed1..db20ee9a413a 100644
|
|
--- a/arch/x86/mm/pageattr.c
|
|
+++ b/arch/x86/mm/pageattr.c
|
|
@@ -33,7 +33,7 @@ struct cpa_data {
|
|
pgd_t *pgd;
|
|
pgprot_t mask_set;
|
|
pgprot_t mask_clr;
|
|
- int numpages;
|
|
+ unsigned long numpages;
|
|
int flags;
|
|
unsigned long pfn;
|
|
unsigned force_split : 1;
|
|
@@ -1345,7 +1345,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
|
|
* CPA operation. Either a large page has been
|
|
* preserved or a single page update happened.
|
|
*/
|
|
- BUG_ON(cpa->numpages > numpages);
|
|
+ BUG_ON(cpa->numpages > numpages || !cpa->numpages);
|
|
numpages -= cpa->numpages;
|
|
if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
|
|
cpa->curpage++;
|
|
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
|
|
index 52f708bcf77f..d50c701b19d6 100644
|
|
--- a/drivers/hwspinlock/hwspinlock_core.c
|
|
+++ b/drivers/hwspinlock/hwspinlock_core.c
|
|
@@ -313,6 +313,10 @@ int of_hwspin_lock_get_id(struct device_node *np, int index)
|
|
hwlock = radix_tree_deref_slot(slot);
|
|
if (unlikely(!hwlock))
|
|
continue;
|
|
+ if (radix_tree_is_indirect_ptr(hwlock)) {
|
|
+ slot = radix_tree_iter_retry(&iter);
|
|
+ continue;
|
|
+ }
|
|
|
|
if (hwlock->bank->dev->of_node == args.np) {
|
|
ret = 0;
|
|
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
|
|
index 969428dd6329..16cc5c691a55 100644
|
|
--- a/drivers/iio/accel/Kconfig
|
|
+++ b/drivers/iio/accel/Kconfig
|
|
@@ -173,6 +173,7 @@ config STK8312
|
|
config STK8BA50
|
|
tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
|
|
depends on I2C
|
|
+ depends on IIO_TRIGGER
|
|
help
|
|
Say yes here to get support for the Sensortek STK8BA50 3-axis
|
|
accelerometer.
|
|
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
|
|
index 7868c744fd4b..1e7aded53117 100644
|
|
--- a/drivers/iio/adc/Kconfig
|
|
+++ b/drivers/iio/adc/Kconfig
|
|
@@ -372,6 +372,7 @@ config TWL6030_GPADC
|
|
config VF610_ADC
|
|
tristate "Freescale vf610 ADC driver"
|
|
depends on OF
|
|
+ depends on HAS_IOMEM
|
|
select IIO_BUFFER
|
|
select IIO_TRIGGERED_BUFFER
|
|
help
|
|
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
|
|
index 942320e32753..c1e05532d437 100644
|
|
--- a/drivers/iio/adc/ti_am335x_adc.c
|
|
+++ b/drivers/iio/adc/ti_am335x_adc.c
|
|
@@ -289,7 +289,7 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
|
|
goto error_kfifo_free;
|
|
|
|
indio_dev->setup_ops = setup_ops;
|
|
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
|
|
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
|
|
index 43d14588448d..b4dde8315210 100644
|
|
--- a/drivers/iio/dac/mcp4725.c
|
|
+++ b/drivers/iio/dac/mcp4725.c
|
|
@@ -300,6 +300,7 @@ static int mcp4725_probe(struct i2c_client *client,
|
|
data->client = client;
|
|
|
|
indio_dev->dev.parent = &client->dev;
|
|
+ indio_dev->name = id->name;
|
|
indio_dev->info = &mcp4725_info;
|
|
indio_dev->channels = &mcp4725_channel;
|
|
indio_dev->num_channels = 1;
|
|
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
|
|
index cb32b593f1c5..36607d52fee0 100644
|
|
--- a/drivers/iio/imu/adis_buffer.c
|
|
+++ b/drivers/iio/imu/adis_buffer.c
|
|
@@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
|
|
return -ENOMEM;
|
|
|
|
rx = adis->buffer;
|
|
- tx = rx + indio_dev->scan_bytes;
|
|
+ tx = rx + scan_count;
|
|
|
|
spi_message_init(&adis->msg);
|
|
|
|
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
|
|
index c8bad3cf891d..217e9306aa0f 100644
|
|
--- a/drivers/iio/inkern.c
|
|
+++ b/drivers/iio/inkern.c
|
|
@@ -351,6 +351,8 @@ EXPORT_SYMBOL_GPL(iio_channel_get);
|
|
|
|
void iio_channel_release(struct iio_channel *channel)
|
|
{
|
|
+ if (!channel)
|
|
+ return;
|
|
iio_device_put(channel->indio_dev);
|
|
kfree(channel);
|
|
}
|
|
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
|
|
index 60537ec0c923..53201d99a16c 100644
|
|
--- a/drivers/iio/light/acpi-als.c
|
|
+++ b/drivers/iio/light/acpi-als.c
|
|
@@ -54,7 +54,9 @@ static const struct iio_chan_spec acpi_als_channels[] = {
|
|
.realbits = 32,
|
|
.storagebits = 32,
|
|
},
|
|
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
|
|
+ /* _RAW is here for backward ABI compatibility */
|
|
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
|
|
+ BIT(IIO_CHAN_INFO_PROCESSED),
|
|
},
|
|
};
|
|
|
|
@@ -152,7 +154,7 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev,
|
|
s32 temp_val;
|
|
int ret;
|
|
|
|
- if (mask != IIO_CHAN_INFO_RAW)
|
|
+ if ((mask != IIO_CHAN_INFO_PROCESSED) && (mask != IIO_CHAN_INFO_RAW))
|
|
return -EINVAL;
|
|
|
|
/* we support only illumination (_ALI) so far. */
|
|
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
|
|
index 809a961b9a7f..6bf89d8f3741 100644
|
|
--- a/drivers/iio/light/ltr501.c
|
|
+++ b/drivers/iio/light/ltr501.c
|
|
@@ -180,7 +180,7 @@ static const struct ltr501_samp_table ltr501_ps_samp_table[] = {
|
|
{500000, 2000000}
|
|
};
|
|
|
|
-static unsigned int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
|
|
+static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
|
|
int len, int val, int val2)
|
|
{
|
|
int i, freq;
|
|
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
|
|
index f5ecd6e19f5d..a0d7deeac62f 100644
|
|
--- a/drivers/iio/pressure/mpl115.c
|
|
+++ b/drivers/iio/pressure/mpl115.c
|
|
@@ -117,7 +117,7 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
|
|
*val = ret >> 6;
|
|
return IIO_VAL_INT;
|
|
case IIO_CHAN_INFO_OFFSET:
|
|
- *val = 605;
|
|
+ *val = -605;
|
|
*val2 = 750000;
|
|
return IIO_VAL_INT_PLUS_MICRO;
|
|
case IIO_CHAN_INFO_SCALE:
|
|
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
|
|
index 537ebb0e193a..78f93cf68840 100644
|
|
--- a/drivers/input/mouse/elantech.c
|
|
+++ b/drivers/input/mouse/elantech.c
|
|
@@ -1222,7 +1222,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
|
|
input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
|
|
ETP_WMAX_V2, 0, 0);
|
|
}
|
|
- input_mt_init_slots(dev, 2, 0);
|
|
+ input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT);
|
|
input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
|
|
input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
|
|
break;
|
|
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
|
|
index e272f06258ce..a3f0f5a47490 100644
|
|
--- a/drivers/input/mouse/vmmouse.c
|
|
+++ b/drivers/input/mouse/vmmouse.c
|
|
@@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
|
|
priv->abs_dev = abs_dev;
|
|
psmouse->private = priv;
|
|
|
|
- input_set_capability(rel_dev, EV_REL, REL_WHEEL);
|
|
-
|
|
/* Set up and register absolute device */
|
|
snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
|
|
psmouse->ps2dev.serio->phys);
|
|
@@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
|
|
abs_dev->id.version = psmouse->model;
|
|
abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
|
|
|
|
- error = input_register_device(priv->abs_dev);
|
|
- if (error)
|
|
- goto init_fail;
|
|
-
|
|
/* Set absolute device capabilities */
|
|
input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
|
|
input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
|
|
@@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
|
|
input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
|
|
input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
|
|
|
|
+ error = input_register_device(priv->abs_dev);
|
|
+ if (error)
|
|
+ goto init_fail;
|
|
+
|
|
+ /* Add wheel capability to the relative device */
|
|
+ input_set_capability(rel_dev, EV_REL, REL_WHEEL);
|
|
+
|
|
psmouse->protocol_handler = vmmouse_process_byte;
|
|
psmouse->disconnect = vmmouse_disconnect;
|
|
psmouse->reconnect = vmmouse_reconnect;
|
|
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
|
|
index c11556563ef0..68f5f4a0f1e7 100644
|
|
--- a/drivers/input/serio/i8042-x86ia64io.h
|
|
+++ b/drivers/input/serio/i8042-x86ia64io.h
|
|
@@ -258,6 +258,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
|
|
},
|
|
},
|
|
{
|
|
+ /* Fujitsu Lifebook U745 */
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
|
|
+ },
|
|
+ },
|
|
+ {
|
|
/* Fujitsu T70H */
|
|
.matches = {
|
|
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
|
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
|
|
index 8b2be1e7714f..fc836f523afa 100644
|
|
--- a/drivers/iommu/amd_iommu.c
|
|
+++ b/drivers/iommu/amd_iommu.c
|
|
@@ -1905,7 +1905,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
|
|
/* Update device table */
|
|
set_dte_entry(dev_data->devid, domain, ats);
|
|
if (alias != dev_data->devid)
|
|
- set_dte_entry(dev_data->devid, domain, ats);
|
|
+ set_dte_entry(alias, domain, ats);
|
|
|
|
device_flush_dte(dev_data);
|
|
}
|
|
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
|
|
index 80e3c176008e..55a19e49205b 100644
|
|
--- a/drivers/iommu/dmar.c
|
|
+++ b/drivers/iommu/dmar.c
|
|
@@ -1347,7 +1347,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
|
|
|
|
raw_spin_lock_irqsave(&iommu->register_lock, flags);
|
|
|
|
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
|
|
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
|
|
if (!(sts & DMA_GSTS_QIES))
|
|
goto end;
|
|
|
|
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
|
|
index ac7387686ddc..986a53e3eb96 100644
|
|
--- a/drivers/iommu/intel-iommu.c
|
|
+++ b/drivers/iommu/intel-iommu.c
|
|
@@ -1489,7 +1489,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
|
|
{
|
|
struct pci_dev *pdev;
|
|
|
|
- if (dev_is_pci(info->dev))
|
|
+ if (!dev_is_pci(info->dev))
|
|
return;
|
|
|
|
pdev = to_pci_dev(info->dev);
|
|
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
|
|
index 50464833d0b8..d9939fa9b588 100644
|
|
--- a/drivers/iommu/intel-svm.c
|
|
+++ b/drivers/iommu/intel-svm.c
|
|
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
|
|
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
|
{
|
|
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
|
|
+ struct intel_svm_dev *sdev;
|
|
|
|
+ /* This might end up being called from exit_mmap(), *before* the page
|
|
+ * tables are cleared. And __mmu_notifier_release() will delete us from
|
|
+ * the list of notifiers so that our invalidate_range() callback doesn't
|
|
+ * get called when the page tables are cleared. So we need to protect
|
|
+ * against hardware accessing those page tables.
|
|
+ *
|
|
+ * We do it by clearing the entry in the PASID table and then flushing
|
|
+ * the IOTLB and the PASID table caches. This might upset hardware;
|
|
+ * perhaps we'll want to point the PASID to a dummy PGD (like the zero
|
|
+ * page) so that we end up taking a fault that the hardware really
|
|
+ * *has* to handle gracefully without affecting other processes.
|
|
+ */
|
|
svm->iommu->pasid_table[svm->pasid].val = 0;
|
|
+ wmb();
|
|
+
|
|
+ rcu_read_lock();
|
|
+ list_for_each_entry_rcu(sdev, &svm->devs, list) {
|
|
+ intel_flush_pasid_dev(svm, sdev, svm->pasid);
|
|
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
|
|
+ }
|
|
+ rcu_read_unlock();
|
|
|
|
- /* There's no need to do any flush because we can't get here if there
|
|
- * are any devices left anyway. */
|
|
- WARN_ON(!list_empty(&svm->devs));
|
|
}
|
|
|
|
static const struct mmu_notifier_ops intel_mmuops = {
|
|
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
|
|
goto out;
|
|
}
|
|
iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
|
|
- mm = NULL;
|
|
} else
|
|
iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
|
|
wmb();
|
|
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
|
|
kfree_rcu(sdev, rcu);
|
|
|
|
if (list_empty(&svm->devs)) {
|
|
- mmu_notifier_unregister(&svm->notifier, svm->mm);
|
|
|
|
idr_remove(&svm->iommu->pasid_idr, svm->pasid);
|
|
if (svm->mm)
|
|
- mmput(svm->mm);
|
|
+ mmu_notifier_unregister(&svm->notifier, svm->mm);
|
|
+
|
|
/* We mandate that no page faults may be outstanding
|
|
* for the PASID when intel_svm_unbind_mm() is called.
|
|
* If that is not obeyed, subtle errors will happen.
|
|
@@ -507,6 +524,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
|
|
struct intel_svm *svm = NULL;
|
|
int head, tail, handled = 0;
|
|
|
|
+ /* Clear PPR bit before reading head/tail registers, to
|
|
+ * ensure that we get a new interrupt if needed. */
|
|
+ writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
|
|
+
|
|
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
|
|
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
|
|
while (head != tail) {
|
|
@@ -551,6 +572,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
|
|
* any faults on kernel addresses. */
|
|
if (!svm->mm)
|
|
goto bad_req;
|
|
+ /* If the mm is already defunct, don't handle faults. */
|
|
+ if (!atomic_inc_not_zero(&svm->mm->mm_users))
|
|
+ goto bad_req;
|
|
down_read(&svm->mm->mmap_sem);
|
|
vma = find_extend_vma(svm->mm, address);
|
|
if (!vma || address < vma->vm_start)
|
|
@@ -567,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
|
|
result = QI_RESP_SUCCESS;
|
|
invalid:
|
|
up_read(&svm->mm->mmap_sem);
|
|
+ mmput(svm->mm);
|
|
bad_req:
|
|
/* Accounting for major/minor faults? */
|
|
rcu_read_lock();
|
|
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
|
|
index 1fae1881648c..e9b241b1c9dd 100644
|
|
--- a/drivers/iommu/intel_irq_remapping.c
|
|
+++ b/drivers/iommu/intel_irq_remapping.c
|
|
@@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
|
|
|
|
raw_spin_lock_irqsave(&iommu->register_lock, flags);
|
|
|
|
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
|
|
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
|
|
if (!(sts & DMA_GSTS_IRES))
|
|
goto end;
|
|
|
|
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
|
|
index 0955b2cb10fe..62120c38d56b 100644
|
|
--- a/drivers/nvdimm/namespace_devs.c
|
|
+++ b/drivers/nvdimm/namespace_devs.c
|
|
@@ -77,6 +77,59 @@ static bool is_namespace_io(struct device *dev)
|
|
return dev ? dev->type == &namespace_io_device_type : false;
|
|
}
|
|
|
|
+static int is_uuid_busy(struct device *dev, void *data)
|
|
+{
|
|
+ u8 *uuid1 = data, *uuid2 = NULL;
|
|
+
|
|
+ if (is_namespace_pmem(dev)) {
|
|
+ struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
|
|
+
|
|
+ uuid2 = nspm->uuid;
|
|
+ } else if (is_namespace_blk(dev)) {
|
|
+ struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
|
|
+
|
|
+ uuid2 = nsblk->uuid;
|
|
+ } else if (is_nd_btt(dev)) {
|
|
+ struct nd_btt *nd_btt = to_nd_btt(dev);
|
|
+
|
|
+ uuid2 = nd_btt->uuid;
|
|
+ } else if (is_nd_pfn(dev)) {
|
|
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
|
|
+
|
|
+ uuid2 = nd_pfn->uuid;
|
|
+ }
|
|
+
|
|
+ if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
|
|
+ return -EBUSY;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int is_namespace_uuid_busy(struct device *dev, void *data)
|
|
+{
|
|
+ if (is_nd_pmem(dev) || is_nd_blk(dev))
|
|
+ return device_for_each_child(dev, data, is_uuid_busy);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * nd_is_uuid_unique - verify that no other namespace has @uuid
|
|
+ * @dev: any device on a nvdimm_bus
|
|
+ * @uuid: uuid to check
|
|
+ */
|
|
+bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
|
|
+{
|
|
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
|
|
+
|
|
+ if (!nvdimm_bus)
|
|
+ return false;
|
|
+ WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
|
|
+ if (device_for_each_child(&nvdimm_bus->dev, uuid,
|
|
+ is_namespace_uuid_busy) != 0)
|
|
+ return false;
|
|
+ return true;
|
|
+}
|
|
+
|
|
bool pmem_should_map_pages(struct device *dev)
|
|
{
|
|
struct nd_region *nd_region = to_nd_region(dev->parent);
|
|
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
|
|
index 529f3f02e7b2..9521696c9385 100644
|
|
--- a/drivers/nvdimm/region_devs.c
|
|
+++ b/drivers/nvdimm/region_devs.c
|
|
@@ -134,62 +134,6 @@ int nd_region_to_nstype(struct nd_region *nd_region)
|
|
}
|
|
EXPORT_SYMBOL(nd_region_to_nstype);
|
|
|
|
-static int is_uuid_busy(struct device *dev, void *data)
|
|
-{
|
|
- struct nd_region *nd_region = to_nd_region(dev->parent);
|
|
- u8 *uuid = data;
|
|
-
|
|
- switch (nd_region_to_nstype(nd_region)) {
|
|
- case ND_DEVICE_NAMESPACE_PMEM: {
|
|
- struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
|
|
-
|
|
- if (!nspm->uuid)
|
|
- break;
|
|
- if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
|
|
- return -EBUSY;
|
|
- break;
|
|
- }
|
|
- case ND_DEVICE_NAMESPACE_BLK: {
|
|
- struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
|
|
-
|
|
- if (!nsblk->uuid)
|
|
- break;
|
|
- if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) == 0)
|
|
- return -EBUSY;
|
|
- break;
|
|
- }
|
|
- default:
|
|
- break;
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int is_namespace_uuid_busy(struct device *dev, void *data)
|
|
-{
|
|
- if (is_nd_pmem(dev) || is_nd_blk(dev))
|
|
- return device_for_each_child(dev, data, is_uuid_busy);
|
|
- return 0;
|
|
-}
|
|
-
|
|
-/**
|
|
- * nd_is_uuid_unique - verify that no other namespace has @uuid
|
|
- * @dev: any device on a nvdimm_bus
|
|
- * @uuid: uuid to check
|
|
- */
|
|
-bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
|
|
-{
|
|
- struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
|
|
-
|
|
- if (!nvdimm_bus)
|
|
- return false;
|
|
- WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
|
|
- if (device_for_each_child(&nvdimm_bus->dev, uuid,
|
|
- is_namespace_uuid_busy) != 0)
|
|
- return false;
|
|
- return true;
|
|
-}
|
|
-
|
|
static ssize_t size_show(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
|
|
index 3a707dd14238..f96065a81d1e 100644
|
|
--- a/drivers/phy/phy-twl4030-usb.c
|
|
+++ b/drivers/phy/phy-twl4030-usb.c
|
|
@@ -715,6 +715,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
|
|
pm_runtime_use_autosuspend(&pdev->dev);
|
|
pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
|
|
pm_runtime_enable(&pdev->dev);
|
|
+ pm_runtime_get_sync(&pdev->dev);
|
|
|
|
/* Our job is to use irqs and status from the power module
|
|
* to keep the transceiver disabled when nothing's connected.
|
|
@@ -750,6 +751,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
|
|
struct twl4030_usb *twl = platform_get_drvdata(pdev);
|
|
int val;
|
|
|
|
+ usb_remove_phy(&twl->phy);
|
|
pm_runtime_get_sync(twl->dev);
|
|
cancel_delayed_work(&twl->id_workaround_work);
|
|
device_remove_file(twl->dev, &dev_attr_vbus);
|
|
@@ -757,6 +759,13 @@ static int twl4030_usb_remove(struct platform_device *pdev)
|
|
/* set transceiver mode to power on defaults */
|
|
twl4030_usb_set_mode(twl, -1);
|
|
|
|
+ /* idle ulpi before powering off */
|
|
+ if (cable_present(twl->linkstat))
|
|
+ pm_runtime_put_noidle(twl->dev);
|
|
+ pm_runtime_mark_last_busy(twl->dev);
|
|
+ pm_runtime_put_sync_suspend(twl->dev);
|
|
+ pm_runtime_disable(twl->dev);
|
|
+
|
|
/* autogate 60MHz ULPI clock,
|
|
* clear dpll clock request for i2c access,
|
|
* disable 32KHz
|
|
@@ -771,11 +780,6 @@ static int twl4030_usb_remove(struct platform_device *pdev)
|
|
/* disable complete OTG block */
|
|
twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
|
|
|
|
- if (cable_present(twl->linkstat))
|
|
- pm_runtime_put_noidle(twl->dev);
|
|
- pm_runtime_mark_last_busy(twl->dev);
|
|
- pm_runtime_put(twl->dev);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
|
|
index 02bc5a6343c3..aa454241489c 100644
|
|
--- a/drivers/platform/x86/intel_scu_ipcutil.c
|
|
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
|
|
@@ -49,7 +49,7 @@ struct scu_ipc_data {
|
|
|
|
static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
|
|
{
|
|
- int count = data->count;
|
|
+ unsigned int count = data->count;
|
|
|
|
if (count == 0 || count == 3 || count > 4)
|
|
return -EINVAL;
|
|
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
|
|
index 361358134315..93880ed6291c 100644
|
|
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
|
|
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
|
|
@@ -562,7 +562,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
|
|
/*
|
|
* Command Lock contention
|
|
*/
|
|
- err = SCSI_DH_RETRY;
|
|
+ err = SCSI_DH_IMM_RETRY;
|
|
break;
|
|
default:
|
|
break;
|
|
@@ -612,6 +612,8 @@ retry:
|
|
err = mode_select_handle_sense(sdev, h->sense);
|
|
if (err == SCSI_DH_RETRY && retry_cnt--)
|
|
goto retry;
|
|
+ if (err == SCSI_DH_IMM_RETRY)
|
|
+ goto retry;
|
|
}
|
|
if (err == SCSI_DH_OK) {
|
|
h->state = RDAC_STATE_ACTIVE;
|
|
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
|
|
index 2c1160c7ec92..da2e068ee47d 100644
|
|
--- a/drivers/scsi/scsi_devinfo.c
|
|
+++ b/drivers/scsi/scsi_devinfo.c
|
|
@@ -205,6 +205,7 @@ static struct {
|
|
{"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
|
|
{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
|
|
{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
|
|
+ {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
|
|
{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
|
|
{"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
|
|
{"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
|
|
@@ -227,6 +228,7 @@ static struct {
|
|
{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
|
|
{"Promise", "", NULL, BLIST_SPARSELUN},
|
|
{"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
|
|
+ {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
|
|
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
|
|
{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
|
|
{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
|
|
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 21930c9ac9cd..c8115b4fe474 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1192,16 +1192,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
 void scsi_remove_target(struct device *dev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
-	struct scsi_target *starget;
+	struct scsi_target *starget, *last_target = NULL;
 	unsigned long flags;
 
 restart:
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_for_each_entry(starget, &shost->__targets, siblings) {
-		if (starget->state == STARGET_DEL)
+		if (starget->state == STARGET_DEL ||
+		    starget == last_target)
 			continue;
 		if (starget->dev.parent == dev || &starget->dev == dev) {
 			kref_get(&starget->reap_ref);
+			last_target = starget;
 			spin_unlock_irqrestore(shost->host_lock, flags);
 			__scsi_remove_target(starget);
 			scsi_target_reap(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4e08d1cd704d..84fa4c46eaa6 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3268,8 +3268,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
 	int ret = 0;
 
-	if (!sdkp)
-		return 0;	/* this can happen */
+	if (!sdkp)	/* E.g.: runtime suspend following sd_remove() */
+		return 0;
 
 	if (sdkp->WCE && sdkp->media_present) {
 		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
@@ -3308,6 +3308,9 @@ static int sd_resume(struct device *dev)
 {
 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
 
+	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
+		return 0;
+
 	if (!sdkp->device->manage_start_stop)
 		return 0;
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 503ab8b46c0b..5e820674432c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1261,7 +1261,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	}
 
 	sfp->mmap_called = 1;
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_private_data = sfp;
 	vma->vm_ops = &sg_mmap_vm_ops;
 	return 0;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 8bd54a64efd6..64c867405ad4 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev)
 {
 	struct scsi_cd *cd = dev_get_drvdata(dev);
 
+	if (!cd)	/* E.g.: runtime suspend following sr_remove() */
+		return 0;
+
 	if (cd->media_present)
 		return -EBUSY;
 	else
@@ -985,6 +988,7 @@ static int sr_remove(struct device *dev)
 	scsi_autopm_get_device(cd->device);
 
 	del_gendisk(cd->disk);
+	dev_set_drvdata(dev, NULL);
 
 	mutex_lock(&sr_ref_mutex);
 	kref_put(&cd->kref, sr_kref_release);
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index aa5ab6c80ed4..41ef099b7aa6 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -142,7 +142,9 @@ static void __speakup_paste_selection(struct work_struct *work)
 	struct tty_ldisc *ld;
 	DECLARE_WAITQUEUE(wait, current);
 
-	ld = tty_ldisc_ref_wait(tty);
+	ld = tty_ldisc_ref(tty);
+	if (!ld)
+		goto tty_unref;
 	tty_buffer_lock_exclusive(&vc->port);
 
 	add_wait_queue(&vc->paste_wait, &wait);
@@ -162,6 +164,7 @@ static void __speakup_paste_selection(struct work_struct *work)
 
 	tty_buffer_unlock_exclusive(&vc->port);
 	tty_ldisc_deref(ld);
+tty_unref:
 	tty_kref_put(tty);
 }
 
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 255204cc43e6..b4bfd706ac94 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1593,7 +1593,8 @@ static int lio_tpg_check_prot_fabric_only(
 }
 
 /*
- * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
+ * Called with spin_lock_irq(struct se_portal_group->session_lock) held
+ * or not held.
  *
  * Also, this function calls iscsit_inc_session_usage_count() on the
  * struct iscsi_session in question.
@@ -1601,19 +1602,32 @@ static int lio_tpg_check_prot_fabric_only(
 static int lio_tpg_shutdown_session(struct se_session *se_sess)
 {
 	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+	struct se_portal_group *se_tpg = se_sess->se_tpg;
+	bool local_lock = false;
+
+	if (!spin_is_locked(&se_tpg->session_lock)) {
+		spin_lock_irq(&se_tpg->session_lock);
+		local_lock = true;
+	}
 
 	spin_lock(&sess->conn_lock);
 	if (atomic_read(&sess->session_fall_back_to_erl0) ||
 	    atomic_read(&sess->session_logout) ||
 	    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
 		spin_unlock(&sess->conn_lock);
+		if (local_lock)
+			spin_unlock_irq(&sess->conn_lock);
 		return 0;
 	}
 	atomic_set(&sess->session_reinstatement, 1);
 	spin_unlock(&sess->conn_lock);
 
 	iscsit_stop_time2retain_timer(sess);
+	spin_unlock_irq(&se_tpg->session_lock);
+
 	iscsit_stop_session(sess, 1, 1);
+	if (!local_lock)
+		spin_lock_irq(&se_tpg->session_lock);
 
 	return 1;
 }
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index a45660f62db5..78e983677339 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -681,7 +681,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
 /* this is called once with whichever end is closed last */
 static void pty_unix98_shutdown(struct tty_struct *tty)
 {
-	devpts_kill_index(tty->driver_data, tty->index);
+	struct inode *ptmx_inode;
+
+	if (tty->driver->subtype == PTY_TYPE_MASTER)
+		ptmx_inode = tty->driver_data;
+	else
+		ptmx_inode = tty->link->driver_data;
+	devpts_kill_index(ptmx_inode, tty->index);
+	devpts_del_ref(ptmx_inode);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -773,6 +780,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
 	tty->driver_data = inode;
 
+	/*
+	 * In the case where all references to ptmx inode are dropped and we
+	 * still have /dev/tty opened pointing to the master/slave pair (ptmx
+	 * is closed/released before /dev/tty), we must make sure that the inode
+	 * is still valid when we call the final pty_unix98_shutdown, thus we
+	 * hold an additional reference to the ptmx inode. For the same /dev/tty
+	 * last close case, we also need to make sure the super_block isn't
+	 * destroyed (devpts instance unmounted), before /dev/tty is closed and
+	 * on its release devpts_kill_index is called.
+	 */
+	devpts_add_ref(inode);
+
 	tty_add_file(tty, filp);
 
 	slave_inode = devpts_pty_new(inode,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 4097f3f65b3b..7cd6f9a90542 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1379,6 +1379,9 @@ ce4100_serial_setup(struct serial_private *priv,
 #define PCI_DEVICE_ID_INTEL_BSW_UART1	0x228a
 #define PCI_DEVICE_ID_INTEL_BSW_UART2	0x228c
 
+#define PCI_DEVICE_ID_INTEL_BDW_UART1	0x9ce3
+#define PCI_DEVICE_ID_INTEL_BDW_UART2	0x9ce4
+
 #define BYT_PRV_CLK			0x800
 #define BYT_PRV_CLK_EN			(1 << 0)
 #define BYT_PRV_CLK_M_VAL_SHIFT		1
@@ -1461,11 +1464,13 @@ byt_serial_setup(struct serial_private *priv,
 	switch (pdev->device) {
 	case PCI_DEVICE_ID_INTEL_BYT_UART1:
 	case PCI_DEVICE_ID_INTEL_BSW_UART1:
+	case PCI_DEVICE_ID_INTEL_BDW_UART1:
 		rx_param->src_id = 3;
 		tx_param->dst_id = 2;
 		break;
 	case PCI_DEVICE_ID_INTEL_BYT_UART2:
 	case PCI_DEVICE_ID_INTEL_BSW_UART2:
+	case PCI_DEVICE_ID_INTEL_BDW_UART2:
 		rx_param->src_id = 5;
 		tx_param->dst_id = 4;
 		break;
@@ -1936,6 +1941,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
 #define PCIE_VENDOR_ID_WCH		0x1c00
 #define PCIE_DEVICE_ID_WCH_CH382_2S1P	0x3250
 #define PCIE_DEVICE_ID_WCH_CH384_4S	0x3470
+#define PCIE_DEVICE_ID_WCH_CH382_2S	0x3253
 
 #define PCI_VENDOR_ID_PERICOM			0x12D8
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7951	0x7951
@@ -2062,6 +2068,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
 		.subdevice	= PCI_ANY_ID,
 		.setup		= byt_serial_setup,
 	},
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_BDW_UART1,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.setup		= byt_serial_setup,
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_BDW_UART2,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.setup		= byt_serial_setup,
+	},
 	/*
 	 * ITE
 	 */
@@ -2618,6 +2638,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
 		.subdevice	= PCI_ANY_ID,
 		.setup		= pci_wch_ch353_setup,
 	},
+	/* WCH CH382 2S card (16850 clone) */
+	{
+		.vendor         = PCIE_VENDOR_ID_WCH,
+		.device         = PCIE_DEVICE_ID_WCH_CH382_2S,
+		.subvendor      = PCI_ANY_ID,
+		.subdevice      = PCI_ANY_ID,
+		.setup          = pci_wch_ch38x_setup,
+	},
 	/* WCH CH382 2S1P card (16850 clone) */
 	{
 		.vendor         = PCIE_VENDOR_ID_WCH,
@@ -2936,6 +2964,7 @@ enum pci_board_num_t {
 	pbn_fintek_4,
 	pbn_fintek_8,
 	pbn_fintek_12,
+	pbn_wch382_2,
 	pbn_wch384_4,
 	pbn_pericom_PI7C9X7951,
 	pbn_pericom_PI7C9X7952,
@@ -3756,6 +3785,13 @@ static struct pciserial_board pci_boards[] = {
 		.base_baud	= 115200,
 		.first_offset	= 0x40,
 	},
+	[pbn_wch382_2] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 2,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+		.first_offset	= 0xC0,
+	},
 	[pbn_wch384_4] = {
 		.flags		= FL_BASE0,
 		.num_ports	= 4,
@@ -5506,6 +5542,16 @@ static struct pci_device_id serial_pci_tbl[] = {
 		PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
 		pbn_byt },
 
+	/* Intel Broadwell */
+	{	PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_BDW_UART1,
+		PCI_ANY_ID,  PCI_ANY_ID,
+		PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+		pbn_byt },
+	{	PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_BDW_UART2,
+		PCI_ANY_ID,  PCI_ANY_ID,
+		PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+		pbn_byt },
+
 	/*
 	 * Intel Quark x1000
 	 */
@@ -5545,6 +5591,10 @@ static struct pci_device_id serial_pci_tbl[] = {
 		PCI_ANY_ID, PCI_ANY_ID,
 		0, 0, pbn_b0_bt_2_115200 },
 
+	{ PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0, pbn_wch382_2 },
+
 	{ PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
 		PCI_ANY_ID, PCI_ANY_ID,
 		0, 0, pbn_wch384_4 },
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 9d4c84f7485f..24280d9a05e9 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1343,7 +1343,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
 
 /* Enable or disable the rs485 support */
 static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
 {
 	struct uart_omap_port *up = to_uart_omap_port(port);
 	unsigned int mode;
@@ -1356,8 +1356,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
 	up->ier = 0;
 	serial_out(up, UART_IER, 0);
 
+	/* Clamp the delays to [0, 100ms] */
+	rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+	rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
+
 	/* store new config */
-	port->rs485 = *rs485conf;
+	port->rs485 = *rs485;
 
 	/*
 	 * Just as a precaution, only allow rs485
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index eeaa6c6bd540..db0f0831b94f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2192,10 +2192,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		}
 	/* Fast path - was this the last TRB in the TD for this URB? */
 	} else if (event_trb == td->last_trb) {
-		if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
-			return finish_td(xhci, td, event_trb, event, ep,
-					 status, false);
-
 		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
 			td->urb->actual_length =
 				td->urb->transfer_buffer_length -
@@ -2247,12 +2243,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			td->urb->actual_length +=
 				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
-		if (trb_comp_code == COMP_SHORT_TX) {
-			xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
-			td->urb_length_set = true;
-			return 0;
-		}
 	}
 
 	return finish_td(xhci, td, event_trb, event, ep, status, false);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dca0a4692f08..776d59c32bc5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1549,7 +1549,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 				"HW died, freeing TD.");
 		urb_priv = urb->hcpriv;
-		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+		for (i = urb_priv->td_cnt;
+		     i < urb_priv->length && xhci->devs[urb->dev->slot_id];
+		     i++) {
 			td = urb_priv->td[i];
 			if (!list_empty(&td->td_list))
 				list_del_init(&td->td_list);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
|
|
index d453d62ab0c6..e2f659dc5745 100644
|
|
--- a/fs/btrfs/backref.c
|
|
+++ b/fs/btrfs/backref.c
|
|
@@ -1417,7 +1417,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
|
|
read_extent_buffer(eb, dest + bytes_left,
|
|
name_off, name_len);
|
|
if (eb != eb_in) {
|
|
- btrfs_tree_read_unlock_blocking(eb);
|
|
+ if (!path->skip_locking)
|
|
+ btrfs_tree_read_unlock_blocking(eb);
|
|
free_extent_buffer(eb);
|
|
}
|
|
ret = btrfs_find_item(fs_root, path, parent, 0,
|
|
@@ -1437,9 +1438,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
|
|
eb = path->nodes[0];
|
|
/* make sure we can use eb after releasing the path */
|
|
if (eb != eb_in) {
|
|
- atomic_inc(&eb->refs);
|
|
- btrfs_tree_read_lock(eb);
|
|
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
|
|
+ if (!path->skip_locking)
|
|
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
|
|
+ path->nodes[0] = NULL;
|
|
+ path->locks[0] = 0;
|
|
}
|
|
btrfs_release_path(path);
|
|
iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
|
|
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
|
|
index e0941fbb913c..02b934d0ee65 100644
|
|
--- a/fs/btrfs/delayed-inode.c
|
|
+++ b/fs/btrfs/delayed-inode.c
|
|
@@ -1694,7 +1694,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
|
|
*
|
|
*/
|
|
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
|
|
- struct list_head *ins_list)
|
|
+ struct list_head *ins_list, bool *emitted)
|
|
{
|
|
struct btrfs_dir_item *di;
|
|
struct btrfs_delayed_item *curr, *next;
|
|
@@ -1738,6 +1738,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
|
|
|
|
if (over)
|
|
return 1;
|
|
+ *emitted = true;
|
|
}
|
|
return 0;
|
|
}
|
|
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
|
|
index f70119f25421..0167853c84ae 100644
|
|
--- a/fs/btrfs/delayed-inode.h
|
|
+++ b/fs/btrfs/delayed-inode.h
|
|
@@ -144,7 +144,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
|
|
int btrfs_should_delete_dir_index(struct list_head *del_list,
|
|
u64 index);
|
|
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
|
|
- struct list_head *ins_list);
|
|
+ struct list_head *ins_list, bool *emitted);
|
|
|
|
/* for init */
|
|
int __init btrfs_delayed_inode_init(void);
|
|
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
|
|
index 974be09e7556..0ddca6734494 100644
|
|
--- a/fs/btrfs/disk-io.c
|
|
+++ b/fs/btrfs/disk-io.c
|
|
@@ -1762,7 +1762,6 @@ static int cleaner_kthread(void *arg)
|
|
int again;
|
|
struct btrfs_trans_handle *trans;
|
|
|
|
- set_freezable();
|
|
do {
|
|
again = 0;
|
|
|
|
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
|
|
index a70c5790f8f5..54b5f0de623b 100644
|
|
--- a/fs/btrfs/inode.c
|
|
+++ b/fs/btrfs/inode.c
|
|
@@ -5741,6 +5741,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
|
|
char *name_ptr;
|
|
int name_len;
|
|
int is_curr = 0; /* ctx->pos points to the current index? */
|
|
+ bool emitted;
|
|
|
|
/* FIXME, use a real flag for deciding about the key type */
|
|
if (root->fs_info->tree_root == root)
|
|
@@ -5769,6 +5770,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
|
|
if (ret < 0)
|
|
goto err;
|
|
|
|
+ emitted = false;
|
|
while (1) {
|
|
leaf = path->nodes[0];
|
|
slot = path->slots[0];
|
|
@@ -5848,6 +5850,7 @@ skip:
|
|
|
|
if (over)
|
|
goto nopos;
|
|
+ emitted = true;
|
|
di_len = btrfs_dir_name_len(leaf, di) +
|
|
btrfs_dir_data_len(leaf, di) + sizeof(*di);
|
|
di_cur += di_len;
|
|
@@ -5860,11 +5863,20 @@ next:
|
|
if (key_type == BTRFS_DIR_INDEX_KEY) {
|
|
if (is_curr)
|
|
ctx->pos++;
|
|
- ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
|
|
+ ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
|
|
if (ret)
|
|
goto nopos;
|
|
}
|
|
|
|
+ /*
|
|
+ * If we haven't emitted any dir entry, we must not touch ctx->pos as
|
|
+ * it was was set to the termination value in previous call. We assume
|
|
+ * that "." and ".." were emitted if we reach this point and set the
|
|
+ * termination value as well for an empty directory.
|
|
+ */
|
|
+ if (ctx->pos > 2 && !emitted)
|
|
+ goto nopos;
|
|
+
|
|
/* Reached end of directory/root. Bump pos past the last item. */
|
|
ctx->pos++;
|
|
|
|
@@ -7985,6 +7997,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
|
|
|
|
kfree(dip);
|
|
|
|
+ dio_bio->bi_error = bio->bi_error;
|
|
dio_end_io(dio_bio, bio->bi_error);
|
|
|
|
if (io_bio->end_io)
|
|
@@ -8030,6 +8043,7 @@ out_test:
|
|
|
|
kfree(dip);
|
|
|
|
+ dio_bio->bi_error = bio->bi_error;
|
|
dio_end_io(dio_bio, bio->bi_error);
|
|
bio_put(bio);
|
|
}
|
|
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
|
|
index da94138eb85e..08fd3f0f34fd 100644
|
|
--- a/fs/btrfs/ioctl.c
|
|
+++ b/fs/btrfs/ioctl.c
|
|
@@ -2782,24 +2782,29 @@ out:
|
|
static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
|
|
{
|
|
struct page *page;
|
|
- struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
|
|
|
|
page = grab_cache_page(inode->i_mapping, index);
|
|
if (!page)
|
|
- return NULL;
|
|
+ return ERR_PTR(-ENOMEM);
|
|
|
|
if (!PageUptodate(page)) {
|
|
- if (extent_read_full_page_nolock(tree, page, btrfs_get_extent,
|
|
- 0))
|
|
- return NULL;
|
|
+ int ret;
|
|
+
|
|
+ ret = btrfs_readpage(NULL, page);
|
|
+ if (ret)
|
|
+ return ERR_PTR(ret);
|
|
lock_page(page);
|
|
if (!PageUptodate(page)) {
|
|
unlock_page(page);
|
|
page_cache_release(page);
|
|
- return NULL;
|
|
+ return ERR_PTR(-EIO);
|
|
+ }
|
|
+ if (page->mapping != inode->i_mapping) {
|
|
+ unlock_page(page);
|
|
+ page_cache_release(page);
|
|
+ return ERR_PTR(-EAGAIN);
|
|
}
|
|
}
|
|
- unlock_page(page);
|
|
|
|
return page;
|
|
}
|
|
@@ -2811,17 +2816,31 @@ static int gather_extent_pages(struct inode *inode, struct page **pages,
|
|
pgoff_t index = off >> PAGE_CACHE_SHIFT;
|
|
|
|
for (i = 0; i < num_pages; i++) {
|
|
+again:
|
|
pages[i] = extent_same_get_page(inode, index + i);
|
|
- if (!pages[i])
|
|
- return -ENOMEM;
|
|
+ if (IS_ERR(pages[i])) {
|
|
+ int err = PTR_ERR(pages[i]);
|
|
+
|
|
+ if (err == -EAGAIN)
|
|
+ goto again;
|
|
+ pages[i] = NULL;
|
|
+ return err;
|
|
+ }
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
-static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
|
|
+static int lock_extent_range(struct inode *inode, u64 off, u64 len,
|
|
+ bool retry_range_locking)
|
|
{
|
|
- /* do any pending delalloc/csum calc on src, one way or
|
|
- another, and lock file content */
|
|
+ /*
|
|
+ * Do any pending delalloc/csum calculations on inode, one way or
|
|
+ * another, and lock file content.
|
|
+ * The locking order is:
|
|
+ *
|
|
+ * 1) pages
|
|
+ * 2) range in the inode's io tree
|
|
+ */
|
|
while (1) {
|
|
struct btrfs_ordered_extent *ordered;
|
|
lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
|
|
@@ -2839,8 +2858,11 @@ static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
|
|
unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
|
|
if (ordered)
|
|
btrfs_put_ordered_extent(ordered);
|
|
+ if (!retry_range_locking)
|
|
+ return -EAGAIN;
|
|
btrfs_wait_ordered_range(inode, off, len);
|
|
}
|
|
+ return 0;
|
|
}
|
|
|
|
static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
|
|
@@ -2865,15 +2887,24 @@ static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
|
|
unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
|
|
}
|
|
|
|
-static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
|
|
- struct inode *inode2, u64 loff2, u64 len)
|
|
+static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
|
|
+ struct inode *inode2, u64 loff2, u64 len,
|
|
+ bool retry_range_locking)
|
|
{
|
|
+ int ret;
|
|
+
|
|
if (inode1 < inode2) {
|
|
swap(inode1, inode2);
|
|
swap(loff1, loff2);
|
|
}
|
|
- lock_extent_range(inode1, loff1, len);
|
|
- lock_extent_range(inode2, loff2, len);
|
|
+ ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
|
|
+ if (ret)
|
|
+ unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
|
|
+ loff1 + len - 1);
|
|
+ return ret;
|
|
}
|
|
|
|
struct cmp_pages {
|
|
@@ -2889,11 +2920,15 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
|
|
|
|
for (i = 0; i < cmp->num_pages; i++) {
|
|
pg = cmp->src_pages[i];
|
|
- if (pg)
|
|
+ if (pg) {
|
|
+ unlock_page(pg);
|
|
page_cache_release(pg);
|
|
+ }
|
|
pg = cmp->dst_pages[i];
|
|
- if (pg)
|
|
+ if (pg) {
|
|
+ unlock_page(pg);
|
|
page_cache_release(pg);
|
|
+ }
|
|
}
|
|
kfree(cmp->src_pages);
|
|
kfree(cmp->dst_pages);
|
|
@@ -2954,6 +2989,8 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
|
|
|
|
src_page = cmp->src_pages[i];
|
|
dst_page = cmp->dst_pages[i];
|
|
+ ASSERT(PageLocked(src_page));
|
|
+ ASSERT(PageLocked(dst_page));
|
|
|
|
addr = kmap_atomic(src_page);
|
|
dst_addr = kmap_atomic(dst_page);
|
|
@@ -3066,14 +3103,46 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
|
|
goto out_unlock;
|
|
}
|
|
|
|
+again:
|
|
ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
|
|
if (ret)
|
|
goto out_unlock;
|
|
|
|
if (same_inode)
|
|
- lock_extent_range(src, same_lock_start, same_lock_len);
|
|
+ ret = lock_extent_range(src, same_lock_start, same_lock_len,
|
|
+ false);
|
|
else
|
|
- btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
|
|
+ ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
|
|
+ false);
|
|
+ /*
|
|
+ * If one of the inodes has dirty pages in the respective range or
|
|
+ * ordered extents, we need to flush dellaloc and wait for all ordered
|
|
+ * extents in the range. We must unlock the pages and the ranges in the
|
|
+ * io trees to avoid deadlocks when flushing delalloc (requires locking
|
|
+ * pages) and when waiting for ordered extents to complete (they require
|
|
+ * range locking).
|
|
+ */
|
|
+ if (ret == -EAGAIN) {
|
|
+ /*
|
|
+ * Ranges in the io trees already unlocked. Now unlock all
|
|
+ * pages before waiting for all IO to complete.
|
|
+ */
|
|
+ btrfs_cmp_data_free(&cmp);
|
|
+ if (same_inode) {
|
|
+ btrfs_wait_ordered_range(src, same_lock_start,
|
|
+ same_lock_len);
|
|
+ } else {
|
|
+ btrfs_wait_ordered_range(src, loff, len);
|
|
+ btrfs_wait_ordered_range(dst, dst_loff, len);
|
|
+ }
|
|
+ goto again;
|
|
+ }
|
|
+ ASSERT(ret == 0);
|
|
+ if (WARN_ON(ret)) {
|
|
+ /* ranges in the io trees already unlocked */
|
|
+ btrfs_cmp_data_free(&cmp);
|
|
+ return ret;
|
|
+ }
|
|
|
|
/* pass original length for comparison so we stay within i_size */
|
|
ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
|
|
@@ -3895,9 +3964,15 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
|
|
u64 lock_start = min_t(u64, off, destoff);
|
|
u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
|
|
|
|
- lock_extent_range(src, lock_start, lock_len);
|
|
+ ret = lock_extent_range(src, lock_start, lock_len, true);
|
|
} else {
|
|
- btrfs_double_extent_lock(src, off, inode, destoff, len);
|
|
+ ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
|
|
+ true);
|
|
+ }
|
|
+ ASSERT(ret == 0);
|
|
+ if (WARN_ON(ret)) {
|
|
+ /* ranges in the io trees already unlocked */
|
|
+ goto out_unlock;
|
|
}
|
|
|
|
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
|
|
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
|
|
index a23399e8e3ab..9e084477d320 100644
|
|
--- a/fs/btrfs/volumes.c
|
|
+++ b/fs/btrfs/volumes.c
|
|
@@ -1257,6 +1257,15 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
|
|
int ret;
|
|
int slot;
|
|
struct extent_buffer *l;
|
|
+ u64 min_search_start;
|
|
+
|
|
+ /*
|
|
+ * We don't want to overwrite the superblock on the drive nor any area
|
|
+ * used by the boot loader (grub for example), so we make sure to start
|
|
+ * at an offset of at least 1MB.
|
|
+ */
|
|
+ min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
|
|
+ search_start = max(search_start, min_search_start);
|
|
|
|
path = btrfs_alloc_path();
|
|
if (!path)
|
|
@@ -1397,18 +1406,9 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
|
|
struct btrfs_device *device, u64 num_bytes,
|
|
u64 *start, u64 *len)
|
|
{
|
|
- struct btrfs_root *root = device->dev_root;
|
|
- u64 search_start;
|
|
-
|
|
/* FIXME use last free of some kind */
|
|
-
|
|
- /*
|
|
- * we don't want to overwrite the superblock on the drive,
|
|
- * so we make sure to start at an offset of at least 1MB
|
|
- */
|
|
- search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
|
|
return find_free_dev_extent_start(trans->transaction, device,
|
|
- num_bytes, search_start, start, len);
|
|
+ num_bytes, 0, start, len);
|
|
}
|
|
|
|
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
|
|
@@ -6512,6 +6512,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
|
|
goto out_short_read;
|
|
|
|
num_stripes = btrfs_chunk_num_stripes(sb, chunk);
|
|
+ if (!num_stripes) {
|
|
+ printk(KERN_ERR
|
|
+ "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
|
|
+ num_stripes, cur_offset);
|
|
+ ret = -EIO;
|
|
+ break;
|
|
+ }
|
|
+
|
|
len = btrfs_chunk_item_size(num_stripes);
|
|
if (cur_offset + len > array_size)
|
|
goto out_short_read;
|
|
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
|
|
index 7febcf2475c5..50b268483302 100644
|
|
--- a/fs/cifs/cifs_debug.c
|
|
+++ b/fs/cifs/cifs_debug.c
|
|
@@ -50,7 +50,7 @@ void cifs_vfs_err(const char *fmt, ...)
|
|
vaf.fmt = fmt;
|
|
vaf.va = &args;
|
|
|
|
- pr_err("CIFS VFS: %pV", &vaf);
|
|
+ pr_err_ratelimited("CIFS VFS: %pV", &vaf);
|
|
|
|
va_end(args);
|
|
}
|
|
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
|
|
index f40fbaca1b2a..66cf0f9fff89 100644
|
|
--- a/fs/cifs/cifs_debug.h
|
|
+++ b/fs/cifs/cifs_debug.h
|
|
@@ -51,14 +51,13 @@ __printf(1, 2) void cifs_vfs_err(const char *fmt, ...);
|
|
/* information message: e.g., configuration, major event */
|
|
#define cifs_dbg(type, fmt, ...) \
|
|
do { \
|
|
- if (type == FYI) { \
|
|
- if (cifsFYI & CIFS_INFO) { \
|
|
- pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__); \
|
|
- } \
|
|
+ if (type == FYI && cifsFYI & CIFS_INFO) { \
|
|
+ pr_debug_ratelimited("%s: " \
|
|
+ fmt, __FILE__, ##__VA_ARGS__); \
|
|
} else if (type == VFS) { \
|
|
cifs_vfs_err(fmt, ##__VA_ARGS__); \
|
|
} else if (type == NOISY && type != 0) { \
|
|
- pr_debug(fmt, ##__VA_ARGS__); \
|
|
+ pr_debug_ratelimited(fmt, ##__VA_ARGS__); \
|
|
} \
|
|
} while (0)
|
|
|
|
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
|
|
index afa09fce8151..e682b36a210f 100644
|
|
--- a/fs/cifs/cifsencrypt.c
|
|
+++ b/fs/cifs/cifsencrypt.c
|
|
@@ -714,7 +714,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
|
|
|
|
ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
|
|
if (!ses->auth_key.response) {
|
|
- rc = ENOMEM;
|
|
+ rc = -ENOMEM;
|
|
ses->auth_key.len = 0;
|
|
goto setup_ntlmv2_rsp_ret;
|
|
}
|
|
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
|
|
index ecb0803bdb0e..3c194ff0d2f0 100644
|
|
--- a/fs/cifs/connect.c
|
|
+++ b/fs/cifs/connect.c
|
|
@@ -368,7 +368,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
|
|
server->session_key.response = NULL;
|
|
server->session_key.len = 0;
|
|
server->lstrp = jiffies;
|
|
- mutex_unlock(&server->srv_mutex);
|
|
|
|
/* mark submitted MIDs for retry and issue callback */
|
|
INIT_LIST_HEAD(&retry_list);
|
|
@@ -381,6 +380,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
|
|
list_move(&mid_entry->qhead, &retry_list);
|
|
}
|
|
spin_unlock(&GlobalMid_Lock);
|
|
+ mutex_unlock(&server->srv_mutex);
|
|
|
|
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
|
|
list_for_each_safe(tmp, tmp2, &retry_list) {
|
|
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
|
|
index 0557c45e9c33..b30a4a6d98a0 100644
|
|
--- a/fs/cifs/readdir.c
|
|
+++ b/fs/cifs/readdir.c
|
|
@@ -847,6 +847,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
|
|
* if buggy server returns . and .. late do we want to
|
|
* check for that here?
|
|
*/
|
|
+ *tmp_buf = 0;
|
|
rc = cifs_filldir(current_entry, file, ctx,
|
|
tmp_buf, max_len);
|
|
if (rc) {
|
|
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
|
|
index 2a24c524fb9a..87abe8ed074c 100644
|
|
--- a/fs/cifs/transport.c
|
|
+++ b/fs/cifs/transport.c
|
|
@@ -576,14 +576,16 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
|
|
cifs_in_send_dec(server);
|
|
cifs_save_when_sent(mid);
|
|
|
|
- if (rc < 0)
|
|
+ if (rc < 0) {
|
|
server->sequence_number -= 2;
|
|
+ cifs_delete_mid(mid);
|
|
+ }
|
|
+
|
|
mutex_unlock(&server->srv_mutex);
|
|
|
|
if (rc == 0)
|
|
return 0;
|
|
|
|
- cifs_delete_mid(mid);
|
|
add_credits_and_wake_if(server, credits, optype);
|
|
return rc;
|
|
}
|
|
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
|
|
index c35ffdc12bba..706de324f2a6 100644
|
|
--- a/fs/devpts/inode.c
|
|
+++ b/fs/devpts/inode.c
|
|
@@ -575,6 +575,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
|
|
mutex_unlock(&allocated_ptys_lock);
|
|
}
|
|
|
|
+/*
|
|
+ * pty code needs to hold extra references in case of last /dev/tty close
|
|
+ */
|
|
+
|
|
+void devpts_add_ref(struct inode *ptmx_inode)
|
|
+{
|
|
+ struct super_block *sb = pts_sb_from_inode(ptmx_inode);
|
|
+
|
|
+ atomic_inc(&sb->s_active);
|
|
+ ihold(ptmx_inode);
|
|
+}
|
|
+
|
|
+void devpts_del_ref(struct inode *ptmx_inode)
|
|
+{
|
|
+ struct super_block *sb = pts_sb_from_inode(ptmx_inode);
|
|
+
|
|
+ iput(ptmx_inode);
|
|
+ deactivate_super(sb);
|
|
+}
|
|
+
|
|
/**
|
|
* devpts_pty_new -- create a new inode in /dev/pts/
|
|
* @ptmx_inode: inode of the master
|
|
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
|
|
index ec0668a60678..fe1f50fe764f 100644
|
|
--- a/fs/ext4/balloc.c
|
|
+++ b/fs/ext4/balloc.c
|
|
@@ -191,7 +191,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
|
|
/* If checksum is bad mark all blocks used to prevent allocation
|
|
* essentially implementing a per-group read-only flag. */
|
|
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
|
|
- ext4_error(sb, "Checksum bad for group %u", block_group);
|
|
grp = ext4_get_group_info(sb, block_group);
|
|
if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
|
|
percpu_counter_sub(&sbi->s_freeclusters_counter,
|
|
@@ -442,14 +441,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
|
|
}
|
|
ext4_lock_group(sb, block_group);
|
|
if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
|
|
-
|
|
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
|
|
set_bitmap_uptodate(bh);
|
|
set_buffer_uptodate(bh);
|
|
ext4_unlock_group(sb, block_group);
|
|
unlock_buffer(bh);
|
|
- if (err)
|
|
+ if (err) {
|
|
+ ext4_error(sb, "Failed to init block bitmap for group "
|
|
+ "%u: %d", block_group, err);
|
|
goto out;
|
|
+ }
|
|
goto verify;
|
|
}
|
|
ext4_unlock_group(sb, block_group);
|
|
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
|
|
index 1b8024d26f65..53f2b98a69f3 100644
|
|
--- a/fs/ext4/ialloc.c
|
|
+++ b/fs/ext4/ialloc.c
|
|
@@ -76,7 +76,6 @@ static int ext4_init_inode_bitmap(struct super_block *sb,
|
|
/* If checksum is bad mark all blocks and inodes use to prevent
|
|
* allocation, essentially implementing a per-group read-only flag. */
|
|
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
|
|
- ext4_error(sb, "Checksum bad for group %u", block_group);
|
|
grp = ext4_get_group_info(sb, block_group);
|
|
if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
|
|
percpu_counter_sub(&sbi->s_freeclusters_counter,
|
|
@@ -191,8 +190,11 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
|
|
set_buffer_verified(bh);
|
|
ext4_unlock_group(sb, block_group);
|
|
unlock_buffer(bh);
|
|
- if (err)
|
|
+ if (err) {
|
|
+ ext4_error(sb, "Failed to init inode bitmap for group "
|
|
+ "%u: %d", block_group, err);
|
|
goto out;
|
|
+ }
|
|
return bh;
|
|
}
|
|
ext4_unlock_group(sb, block_group);
|
|
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
|
|
index fb6f11709ae6..e032a0423e35 100644
|
|
--- a/fs/ext4/move_extent.c
|
|
+++ b/fs/ext4/move_extent.c
|
|
@@ -265,11 +265,12 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
|
|
ext4_lblk_t orig_blk_offset, donor_blk_offset;
|
|
unsigned long blocksize = orig_inode->i_sb->s_blocksize;
|
|
unsigned int tmp_data_size, data_size, replaced_size;
|
|
- int err2, jblocks, retries = 0;
|
|
+ int i, err2, jblocks, retries = 0;
|
|
int replaced_count = 0;
|
|
int from = data_offset_in_page << orig_inode->i_blkbits;
|
|
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
|
|
struct super_block *sb = orig_inode->i_sb;
|
|
+ struct buffer_head *bh = NULL;
|
|
|
|
/*
|
|
* It needs twice the amount of ordinary journal buffers because
|
|
@@ -380,8 +381,16 @@ data_copy:
|
|
}
|
|
/* Perform all necessary steps similar write_begin()/write_end()
|
|
* but keeping in mind that i_size will not change */
|
|
- *err = __block_write_begin(pagep[0], from, replaced_size,
|
|
- ext4_get_block);
|
|
+ if (!page_has_buffers(pagep[0]))
|
|
+ create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
|
|
+ bh = page_buffers(pagep[0]);
|
|
+ for (i = 0; i < data_offset_in_page; i++)
|
|
+ bh = bh->b_this_page;
|
|
+ for (i = 0; i < block_len_in_page; i++) {
|
|
+ *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
|
|
+ if (*err < 0)
|
|
+ break;
|
|
+ }
|
|
if (!*err)
|
|
*err = block_commit_write(pagep[0], from, from + replaced_size);
|
|
|
|
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
|
|
index ad62d7acc315..34038e3598d5 100644
|
|
--- a/fs/ext4/resize.c
|
|
+++ b/fs/ext4/resize.c
|
|
@@ -198,7 +198,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
|
|
if (flex_gd == NULL)
|
|
goto out3;
|
|
|
|
- if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
|
|
+ if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
|
|
goto out2;
|
|
flex_gd->count = flexbg_size;
|
|
|
|
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
|
|
index de4bdfac0cec..595ebdb41846 100644
|
|
--- a/fs/hugetlbfs/inode.c
|
|
+++ b/fs/hugetlbfs/inode.c
|
|
@@ -463,6 +463,7 @@ hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
|
|
*/
|
|
vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
|
|
unsigned long v_offset;
|
|
+ unsigned long v_end;
|
|
|
|
/*
|
|
* Can the expression below overflow on 32-bit arches?
|
|
@@ -475,15 +476,17 @@ hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
|
|
else
|
|
v_offset = 0;
|
|
|
|
- if (end) {
|
|
- end = ((end - start) << PAGE_SHIFT) +
|
|
- vma->vm_start + v_offset;
|
|
- if (end > vma->vm_end)
|
|
- end = vma->vm_end;
|
|
- } else
|
|
- end = vma->vm_end;
|
|
+ if (!end)
|
|
+ v_end = vma->vm_end;
|
|
+ else {
|
|
+ v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
|
|
+ + vma->vm_start;
|
|
+ if (v_end > vma->vm_end)
|
|
+ v_end = vma->vm_end;
|
|
+ }
|
|
|
|
- unmap_hugepage_range(vma, vma->vm_start + v_offset, end, NULL);
|
|
+ unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
|
|
+ NULL);
|
|
}
|
|
}
|
|
|
|
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
index 03516c80855a..2a2e2d8ddee5 100644
|
|
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
@@ -145,7 +145,7 @@ static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
|
|
return false;
|
|
for (i = 0; i < m1->fh_versions_cnt; i++) {
|
|
bool found_fh = false;
|
|
- for (j = 0; j < m2->fh_versions_cnt; i++) {
|
|
+ for (j = 0; j < m2->fh_versions_cnt; j++) {
|
|
if (nfs_compare_fh(&m1->fh_versions[i],
|
|
&m2->fh_versions[j]) == 0) {
|
|
found_fh = true;
|
|
@@ -1859,11 +1859,9 @@ ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
|
|
start = xdr_reserve_space(xdr, 4);
|
|
BUG_ON(!start);
|
|
|
|
- if (ff_layout_encode_ioerr(flo, xdr, args))
|
|
- goto out;
|
|
-
|
|
+ ff_layout_encode_ioerr(flo, xdr, args);
|
|
ff_layout_encode_iostats(flo, xdr, args);
|
|
-out:
|
|
+
|
|
*start = cpu_to_be32((xdr->p - start - 1) * 4);
|
|
dprintk("%s: Return\n", __func__);
|
|
}
|
|
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
|
|
index c7e8b87da5b2..3e2071a177fd 100644
|
|
--- a/fs/nfs/inode.c
|
|
+++ b/fs/nfs/inode.c
|
|
@@ -1641,6 +1641,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|
unsigned long invalid = 0;
|
|
unsigned long now = jiffies;
|
|
unsigned long save_cache_validity;
|
|
+ bool cache_revalidated = true;
|
|
|
|
dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
|
|
__func__, inode->i_sb->s_id, inode->i_ino,
|
|
@@ -1702,22 +1703,28 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|
nfs_force_lookup_revalidate(inode);
|
|
inode->i_version = fattr->change_attr;
|
|
}
|
|
- } else
|
|
+ } else {
|
|
nfsi->cache_validity |= save_cache_validity;
|
|
+ cache_revalidated = false;
|
|
+ }
|
|
|
|
if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
|
|
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
|
|
- } else if (server->caps & NFS_CAP_MTIME)
|
|
+ } else if (server->caps & NFS_CAP_MTIME) {
|
|
nfsi->cache_validity |= save_cache_validity &
|
|
(NFS_INO_INVALID_ATTR
|
|
| NFS_INO_REVAL_FORCED);
|
|
+ cache_revalidated = false;
|
|
+ }
|
|
|
|
if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
|
|
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
|
|
- } else if (server->caps & NFS_CAP_CTIME)
|
|
+ } else if (server->caps & NFS_CAP_CTIME) {
|
|
nfsi->cache_validity |= save_cache_validity &
|
|
(NFS_INO_INVALID_ATTR
|
|
| NFS_INO_REVAL_FORCED);
|
|
+ cache_revalidated = false;
|
|
+ }
|
|
|
|
/* Check if our cached file size is stale */
|
|
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
|
|
@@ -1737,19 +1744,23 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|
(long long)cur_isize,
|
|
(long long)new_isize);
|
|
}
|
|
- } else
|
|
+ } else {
|
|
nfsi->cache_validity |= save_cache_validity &
|
|
(NFS_INO_INVALID_ATTR
|
|
| NFS_INO_REVAL_PAGECACHE
|
|
| NFS_INO_REVAL_FORCED);
|
|
+ cache_revalidated = false;
|
|
+ }
|
|
|
|
|
|
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
|
|
memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
|
|
- else if (server->caps & NFS_CAP_ATIME)
|
|
+ else if (server->caps & NFS_CAP_ATIME) {
|
|
nfsi->cache_validity |= save_cache_validity &
|
|
(NFS_INO_INVALID_ATIME
|
|
| NFS_INO_REVAL_FORCED);
|
|
+ cache_revalidated = false;
|
|
+ }
|
|
|
|
if (fattr->valid & NFS_ATTR_FATTR_MODE) {
|
|
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
|
|
@@ -1758,36 +1769,42 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|
inode->i_mode = newmode;
|
|
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
|
|
}
|
|
- } else if (server->caps & NFS_CAP_MODE)
|
|
+ } else if (server->caps & NFS_CAP_MODE) {
|
|
nfsi->cache_validity |= save_cache_validity &
|
|
(NFS_INO_INVALID_ATTR
|
|
| NFS_INO_INVALID_ACCESS
|
|
| NFS_INO_INVALID_ACL
|
|
| NFS_INO_REVAL_FORCED);
|
|
+ cache_revalidated = false;
|
|
+ }
|
|
|
|
if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
|
|
if (!uid_eq(inode->i_uid, fattr->uid)) {
|
|
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
|
|
inode->i_uid = fattr->uid;
|
|
}
|
|
- } else if (server->caps & NFS_CAP_OWNER)
|
|
+ } else if (server->caps & NFS_CAP_OWNER) {
|
|
nfsi->cache_validity |= save_cache_validity &
|
|
(NFS_INO_INVALID_ATTR
|
|
| NFS_INO_INVALID_ACCESS
|
|
| NFS_INO_INVALID_ACL
|
|
| NFS_INO_REVAL_FORCED);
|
|
+ cache_revalidated = false;
|
|
+ }
|
|
|
|
if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
|
|
if (!gid_eq(inode->i_gid, fattr->gid)) {
|
|
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
|
|
inode->i_gid = fattr->gid;
|
|
}
|
|
- } else if (server->caps & NFS_CAP_OWNER_GROUP)
|
|
+ } else if (server->caps & NFS_CAP_OWNER_GROUP) {
|
|
nfsi->cache_validity |= save_cache_validity &
|
|
(NFS_INO_INVALID_ATTR
|
|
| NFS_INO_INVALID_ACCESS
|
|
| NFS_INO_INVALID_ACL
|
|
| NFS_INO_REVAL_FORCED);
|
|
+ cache_revalidated = false;
|
|
+ }
|
|
|
|
if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
|
|
if (inode->i_nlink != fattr->nlink) {
|
|
@@ -1796,19 +1813,22 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|
invalid |= NFS_INO_INVALID_DATA;
|
|
set_nlink(inode, fattr->nlink);
|
|
}
|
|
- } else if (server->caps & NFS_CAP_NLINK)
|
|
+ } else if (server->caps & NFS_CAP_NLINK) {
|
|
nfsi->cache_validity |= save_cache_validity &
|
|
(NFS_INO_INVALID_ATTR
|
|
| NFS_INO_REVAL_FORCED);
|
|
+ cache_revalidated = false;
|
|
+ }
|
|
|
|
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
|
|
/*
|
|
* report the blocks in 512byte units
|
|
*/
|
|
inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
|
|
- }
|
|
- if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
|
|
+ } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
|
|
inode->i_blocks = fattr->du.nfs2.blocks;
|
|
+ else
|
|
+ cache_revalidated = false;
|
|
|
|
/* Update attrtimeo value if we're out of the unstable period */
|
|
if (invalid & NFS_INO_INVALID_ATTR) {
|
|
@@ -1818,9 +1838,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|
/* Set barrier to be more recent than all outstanding updates */
|
|
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
|
|
} else {
|
|
- if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
|
|
- if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
|
|
- nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
|
|
+ if (cache_revalidated) {
|
|
+ if (!time_in_range_open(now, nfsi->attrtimeo_timestamp,
|
|
+ nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
|
|
+ nfsi->attrtimeo <<= 1;
|
|
+ if (nfsi->attrtimeo > NFS_MAXATTRTIMEO(inode))
|
|
+ nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
|
|
+ }
|
|
nfsi->attrtimeo_timestamp = now;
|
|
}
|
|
/* Set the barrier to be more recent than this fattr */
|
|
@@ -1829,7 +1853,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|
|
}
|
|
|
|
/* Don't declare attrcache up to date if there were no attrs! */
|
|
- if (fattr->valid != 0)
|
|
+ if (cache_revalidated)
|
|
invalid &= ~NFS_INO_INVALID_ATTR;
|
|
|
|
/* Don't invalidate the data if we were to blame */
|
|
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
|
|
index 343b0f1f15b1..f496ed721d27 100644
|
|
--- a/fs/nfs/nfs4proc.c
|
|
+++ b/fs/nfs/nfs4proc.c
|
|
@@ -1385,6 +1385,7 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
|
|
* Protect the call to nfs4_state_set_mode_locked and
|
|
* serialise the stateid update
|
|
*/
|
|
+ spin_lock(&state->owner->so_lock);
|
|
write_seqlock(&state->seqlock);
|
|
if (deleg_stateid != NULL) {
|
|
nfs4_stateid_copy(&state->stateid, deleg_stateid);
|
|
@@ -1393,7 +1394,6 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
|
|
if (open_stateid != NULL)
|
|
nfs_set_open_stateid_locked(state, open_stateid, fmode);
|
|
write_sequnlock(&state->seqlock);
|
|
- spin_lock(&state->owner->so_lock);
|
|
update_open_stateflags(state, fmode);
|
|
spin_unlock(&state->owner->so_lock);
|
|
}
|
|
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
|
|
index 0a8983492d91..eff6319d5037 100644
|
|
--- a/fs/overlayfs/copy_up.c
|
|
+++ b/fs/overlayfs/copy_up.c
|
|
@@ -22,9 +22,9 @@
|
|
|
|
int ovl_copy_xattr(struct dentry *old, struct dentry *new)
|
|
{
|
|
- ssize_t list_size, size;
|
|
- char *buf, *name, *value;
|
|
- int error;
|
|
+ ssize_t list_size, size, value_size = 0;
|
|
+ char *buf, *name, *value = NULL;
|
|
+ int uninitialized_var(error);
|
|
|
|
if (!old->d_inode->i_op->getxattr ||
|
|
!new->d_inode->i_op->getxattr)
|
|
@@ -41,29 +41,40 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
|
|
if (!buf)
|
|
return -ENOMEM;
|
|
|
|
- error = -ENOMEM;
|
|
- value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
|
|
- if (!value)
|
|
- goto out;
|
|
-
|
|
list_size = vfs_listxattr(old, buf, list_size);
|
|
if (list_size <= 0) {
|
|
error = list_size;
|
|
- goto out_free_value;
|
|
+ goto out;
|
|
}
|
|
|
|
for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
|
|
- size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
|
|
- if (size <= 0) {
|
|
+retry:
|
|
+ size = vfs_getxattr(old, name, value, value_size);
|
|
+ if (size == -ERANGE)
|
|
+ size = vfs_getxattr(old, name, NULL, 0);
|
|
+
|
|
+ if (size < 0) {
|
|
error = size;
|
|
- goto out_free_value;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (size > value_size) {
|
|
+ void *new;
|
|
+
|
|
+ new = krealloc(value, size, GFP_KERNEL);
|
|
+ if (!new) {
|
|
+ error = -ENOMEM;
|
|
+ break;
|
|
+ }
|
|
+ value = new;
|
|
+ value_size = size;
|
|
+ goto retry;
|
|
}
|
|
+
|
|
error = vfs_setxattr(new, name, value, size, 0);
|
|
if (error)
|
|
- goto out_free_value;
|
|
+ break;
|
|
}
|
|
-
|
|
-out_free_value:
|
|
kfree(value);
|
|
out:
|
|
kfree(buf);
|
|
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
|
|
index 4060ffde8722..b29036aa8d7c 100644
|
|
--- a/fs/overlayfs/inode.c
|
|
+++ b/fs/overlayfs/inode.c
|
|
@@ -42,6 +42,19 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
|
|
int err;
|
|
struct dentry *upperdentry;
|
|
|
|
+ /*
|
|
+ * Check for permissions before trying to copy-up. This is redundant
|
|
+ * since it will be rechecked later by ->setattr() on upper dentry. But
|
|
+ * without this, copy-up can be triggered by just about anybody.
|
|
+ *
|
|
+ * We don't initialize inode->size, which just means that
|
|
+ * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
|
|
+ * check for a swapfile (which this won't be anyway).
|
|
+ */
|
|
+ err = inode_change_ok(dentry->d_inode, attr);
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
err = ovl_want_write(dentry);
|
|
if (err)
|
|
goto out;
|
|
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
|
|
index 70e9af551600..adcb1398c481 100644
|
|
--- a/fs/overlayfs/readdir.c
|
|
+++ b/fs/overlayfs/readdir.c
|
|
@@ -571,7 +571,8 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
|
|
(int) PTR_ERR(dentry));
|
|
continue;
|
|
}
|
|
- ovl_cleanup(upper->d_inode, dentry);
|
|
+ if (dentry->d_inode)
|
|
+ ovl_cleanup(upper->d_inode, dentry);
|
|
dput(dentry);
|
|
}
|
|
mutex_unlock(&upper->d_inode->i_mutex);
|
|
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
|
|
index e38ee0fed24a..f42c9407fbad 100644
|
|
--- a/fs/overlayfs/super.c
|
|
+++ b/fs/overlayfs/super.c
|
|
@@ -9,6 +9,7 @@
|
|
|
|
#include <linux/fs.h>
|
|
#include <linux/namei.h>
|
|
+#include <linux/pagemap.h>
|
|
#include <linux/xattr.h>
|
|
#include <linux/security.h>
|
|
#include <linux/mount.h>
|
|
@@ -910,6 +911,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
|
|
}
|
|
|
|
sb->s_stack_depth = 0;
|
|
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
|
|
if (ufs->config.upperdir) {
|
|
if (!ufs->config.workdir) {
|
|
pr_err("overlayfs: missing 'workdir'\n");
|
|
@@ -1053,6 +1055,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
|
|
|
|
root_dentry->d_fsdata = oe;
|
|
|
|
+ ovl_copyattr(ovl_dentry_real(root_dentry)->d_inode,
|
|
+ root_dentry->d_inode);
|
|
+
|
|
sb->s_magic = OVERLAYFS_SUPER_MAGIC;
|
|
sb->s_op = &ovl_super_operations;
|
|
sb->s_root = root_dentry;
|
|
diff --git a/fs/proc/array.c b/fs/proc/array.c
|
|
index d73291f5f0fc..b6c00ce0e29e 100644
|
|
--- a/fs/proc/array.c
|
|
+++ b/fs/proc/array.c
|
|
@@ -395,7 +395,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
|
|
|
|
state = *get_task_state(task);
|
|
vsize = eip = esp = 0;
|
|
- permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
|
|
+ permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
|
|
mm = get_task_mm(task);
|
|
if (mm) {
|
|
vsize = task_vsize(mm);
|
|
diff --git a/fs/proc/base.c b/fs/proc/base.c
|
|
index 4bd5d3118acd..b7de324bec11 100644
|
|
--- a/fs/proc/base.c
|
|
+++ b/fs/proc/base.c
|
|
@@ -403,7 +403,7 @@ static const struct file_operations proc_pid_cmdline_ops = {
|
|
static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
|
|
struct pid *pid, struct task_struct *task)
|
|
{
|
|
- struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
|
|
+ struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
|
|
if (mm && !IS_ERR(mm)) {
|
|
unsigned int nwords = 0;
|
|
do {
|
|
@@ -430,7 +430,8 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
|
|
|
|
wchan = get_wchan(task);
|
|
|
|
- if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
|
|
+ if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)
|
|
+ && !lookup_symbol_name(wchan, symname))
|
|
seq_printf(m, "%s", symname);
|
|
else
|
|
seq_putc(m, '0');
|
|
@@ -444,7 +445,7 @@ static int lock_trace(struct task_struct *task)
|
|
int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
|
|
if (err)
|
|
return err;
|
|
- if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
|
|
+ if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
|
|
mutex_unlock(&task->signal->cred_guard_mutex);
|
|
return -EPERM;
|
|
}
|
|
@@ -697,7 +698,7 @@ static int proc_fd_access_allowed(struct inode *inode)
|
|
*/
|
|
task = get_proc_task(inode);
|
|
if (task) {
|
|
- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
|
|
+ allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
|
|
put_task_struct(task);
|
|
}
|
|
return allowed;
|
|
@@ -732,7 +733,7 @@ static bool has_pid_permissions(struct pid_namespace *pid,
|
|
return true;
|
|
if (in_group_p(pid->pid_gid))
|
|
return true;
|
|
- return ptrace_may_access(task, PTRACE_MODE_READ);
|
|
+ return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
|
|
}
|
|
|
|
|
|
@@ -809,7 +810,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
|
|
struct mm_struct *mm = ERR_PTR(-ESRCH);
|
|
|
|
if (task) {
|
|
- mm = mm_access(task, mode);
|
|
+ mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
|
|
put_task_struct(task);
|
|
|
|
if (!IS_ERR_OR_NULL(mm)) {
|
|
@@ -1856,7 +1857,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
|
|
if (!task)
|
|
goto out_notask;
|
|
|
|
- mm = mm_access(task, PTRACE_MODE_READ);
|
|
+ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
|
|
if (IS_ERR_OR_NULL(mm))
|
|
goto out;
|
|
|
|
@@ -2007,7 +2008,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
|
|
goto out;
|
|
|
|
result = -EACCES;
|
|
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
|
|
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
|
|
goto out_put_task;
|
|
|
|
result = -ENOENT;
|
|
@@ -2060,7 +2061,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
|
|
goto out;
|
|
|
|
ret = -EACCES;
|
|
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
|
|
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
|
|
goto out_put_task;
|
|
|
|
ret = 0;
|
|
@@ -2530,7 +2531,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
|
|
if (result)
|
|
return result;
|
|
|
|
- if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
|
|
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
|
|
result = -EACCES;
|
|
goto out_unlock;
|
|
}
|
|
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
|
|
index f6e8354b8cea..1b0ea4a5d89e 100644
|
|
--- a/fs/proc/namespaces.c
|
|
+++ b/fs/proc/namespaces.c
|
|
@@ -42,7 +42,7 @@ static const char *proc_ns_follow_link(struct dentry *dentry, void **cookie)
|
|
if (!task)
|
|
return error;
|
|
|
|
- if (ptrace_may_access(task, PTRACE_MODE_READ)) {
|
|
+ if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
|
|
error = ns_get_path(&ns_path, task, ns_ops);
|
|
if (!error)
|
|
nd_jump_link(&ns_path);
|
|
@@ -63,7 +63,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
|
|
if (!task)
|
|
return res;
|
|
|
|
- if (ptrace_may_access(task, PTRACE_MODE_READ)) {
|
|
+ if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
|
|
res = ns_get_name(name, sizeof(name), task, ns_ops);
|
|
if (res >= 0)
|
|
res = readlink_copy(buffer, buflen, name);
|
|
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
|
|
index 187b3b5f242e..09cd3edde08a 100644
|
|
--- a/fs/proc/task_mmu.c
|
|
+++ b/fs/proc/task_mmu.c
|
|
@@ -1473,18 +1473,19 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
|
|
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
|
|
unsigned long addr, unsigned long end, struct mm_walk *walk)
|
|
{
|
|
+ pte_t huge_pte = huge_ptep_get(pte);
|
|
struct numa_maps *md;
|
|
struct page *page;
|
|
|
|
- if (!pte_present(*pte))
|
|
+ if (!pte_present(huge_pte))
|
|
return 0;
|
|
|
|
- page = pte_page(*pte);
|
|
+ page = pte_page(huge_pte);
|
|
if (!page)
|
|
return 0;
|
|
|
|
md = walk->private;
|
|
- gather_stats(page, md, pte_dirty(*pte), 1);
|
|
+ gather_stats(page, md, pte_dirty(huge_pte), 1);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/fs/timerfd.c b/fs/timerfd.c
|
|
index b94fa6c3c6eb..053818dd6c18 100644
|
|
--- a/fs/timerfd.c
|
|
+++ b/fs/timerfd.c
|
|
@@ -153,7 +153,7 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
|
|
if (isalarm(ctx))
|
|
remaining = alarm_expires_remaining(&ctx->t.alarm);
|
|
else
|
|
- remaining = hrtimer_expires_remaining(&ctx->t.tmr);
|
|
+ remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
|
|
|
|
return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
|
|
}
|
|
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
|
|
index 8d0b3ade0ff0..566df9b5a6cb 100644
|
|
--- a/fs/udf/inode.c
|
|
+++ b/fs/udf/inode.c
|
|
@@ -2047,14 +2047,29 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos,
|
|
epos->offset += adsize;
|
|
}
|
|
|
|
+/*
|
|
+ * Only 1 indirect extent in a row really makes sense but allow upto 16 in case
|
|
+ * someone does some weird stuff.
|
|
+ */
|
|
+#define UDF_MAX_INDIR_EXTS 16
|
|
+
|
|
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
|
|
struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
|
|
{
|
|
int8_t etype;
|
|
+ unsigned int indirections = 0;
|
|
|
|
while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
|
|
(EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
|
|
int block;
|
|
+
|
|
+ if (++indirections > UDF_MAX_INDIR_EXTS) {
|
|
+ udf_err(inode->i_sb,
|
|
+ "too many indirect extents in inode %lu\n",
|
|
+ inode->i_ino);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
epos->block = *eloc;
|
|
epos->offset = sizeof(struct allocExtDesc);
|
|
brelse(epos->bh);
|
|
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
|
|
index ab478e62baae..e788a05aab83 100644
|
|
--- a/fs/udf/unicode.c
|
|
+++ b/fs/udf/unicode.c
|
|
@@ -128,11 +128,15 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
|
|
if (c < 0x80U)
|
|
utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
|
|
else if (c < 0x800U) {
|
|
+ if (utf_o->u_len > (UDF_NAME_LEN - 4))
|
|
+ break;
|
|
utf_o->u_name[utf_o->u_len++] =
|
|
(uint8_t)(0xc0 | (c >> 6));
|
|
utf_o->u_name[utf_o->u_len++] =
|
|
(uint8_t)(0x80 | (c & 0x3f));
|
|
} else {
|
|
+ if (utf_o->u_len > (UDF_NAME_LEN - 5))
|
|
+ break;
|
|
utf_o->u_name[utf_o->u_len++] =
|
|
(uint8_t)(0xe0 | (c >> 12));
|
|
utf_o->u_name[utf_o->u_len++] =
|
|
@@ -173,17 +177,22 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
|
|
static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
|
|
{
|
|
unsigned c, i, max_val, utf_char;
|
|
- int utf_cnt, u_len;
|
|
+ int utf_cnt, u_len, u_ch;
|
|
|
|
memset(ocu, 0, sizeof(dstring) * length);
|
|
ocu[0] = 8;
|
|
max_val = 0xffU;
|
|
+ u_ch = 1;
|
|
|
|
try_again:
|
|
u_len = 0U;
|
|
utf_char = 0U;
|
|
utf_cnt = 0U;
|
|
for (i = 0U; i < utf->u_len; i++) {
|
|
+ /* Name didn't fit? */
|
|
+ if (u_len + 1 + u_ch >= length)
|
|
+ return 0;
|
|
+
|
|
c = (uint8_t)utf->u_name[i];
|
|
|
|
/* Complete a multi-byte UTF-8 character */
|
|
@@ -225,6 +234,7 @@ try_again:
|
|
if (max_val == 0xffU) {
|
|
max_val = 0xffffU;
|
|
ocu[0] = (uint8_t)0x10U;
|
|
+ u_ch = 2;
|
|
goto try_again;
|
|
}
|
|
goto error_out;
|
|
@@ -277,7 +287,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
|
|
c = (c << 8) | ocu[i++];
|
|
|
|
len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
|
|
- UDF_NAME_LEN - utf_o->u_len);
|
|
+ UDF_NAME_LEN - 2 - utf_o->u_len);
|
|
/* Valid character? */
|
|
if (len >= 0)
|
|
utf_o->u_len += len;
|
|
@@ -295,15 +305,19 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
|
|
int len;
|
|
unsigned i, max_val;
|
|
uint16_t uni_char;
|
|
- int u_len;
|
|
+ int u_len, u_ch;
|
|
|
|
memset(ocu, 0, sizeof(dstring) * length);
|
|
ocu[0] = 8;
|
|
max_val = 0xffU;
|
|
+ u_ch = 1;
|
|
|
|
try_again:
|
|
u_len = 0U;
|
|
for (i = 0U; i < uni->u_len; i++) {
|
|
+ /* Name didn't fit? */
|
|
+ if (u_len + 1 + u_ch >= length)
|
|
+ return 0;
|
|
len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
|
|
if (!len)
|
|
continue;
|
|
@@ -316,6 +330,7 @@ try_again:
|
|
if (uni_char > max_val) {
|
|
max_val = 0xffffU;
|
|
ocu[0] = (uint8_t)0x10U;
|
|
+ u_ch = 2;
|
|
goto try_again;
|
|
}
|
|
|
|
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
|
|
index 8774498ce0ff..e2536bb1c760 100644
|
|
--- a/fs/xfs/libxfs/xfs_format.h
|
|
+++ b/fs/xfs/libxfs/xfs_format.h
|
|
@@ -786,7 +786,7 @@ typedef struct xfs_agfl {
|
|
__be64 agfl_lsn;
|
|
__be32 agfl_crc;
|
|
__be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
|
|
-} xfs_agfl_t;
|
|
+} __attribute__((packed)) xfs_agfl_t;
|
|
|
|
#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
|
|
|
|
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
|
|
index 268c00f4f83a..65485cfc4ade 100644
|
|
--- a/fs/xfs/libxfs/xfs_inode_buf.c
|
|
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
|
|
@@ -62,11 +62,12 @@ xfs_inobp_check(
|
|
* has not had the inode cores stamped into it. Hence for readahead, the buffer
|
|
* may be potentially invalid.
|
|
*
|
|
- * If the readahead buffer is invalid, we don't want to mark it with an error,
|
|
- * but we do want to clear the DONE status of the buffer so that a followup read
|
|
- * will re-read it from disk. This will ensure that we don't get an unnecessary
|
|
- * warnings during log recovery and we don't get unnecssary panics on debug
|
|
- * kernels.
|
|
+ * If the readahead buffer is invalid, we need to mark it with an error and
|
|
+ * clear the DONE status of the buffer so that a followup read will re-read it
|
|
+ * from disk. We don't report the error otherwise to avoid warnings during log
|
|
+ * recovery and we don't get unnecssary panics on debug kernels. We use EIO here
|
|
+ * because all we want to do is say readahead failed; there is no-one to report
|
|
+ * the error to, so this will distinguish it from a non-ra verifier failure.
|
|
*/
|
|
static void
|
|
xfs_inode_buf_verify(
|
|
@@ -93,6 +94,7 @@ xfs_inode_buf_verify(
|
|
XFS_RANDOM_ITOBP_INOTOBP))) {
|
|
if (readahead) {
|
|
bp->b_flags &= ~XBF_DONE;
|
|
+ xfs_buf_ioerror(bp, -EIO);
|
|
return;
|
|
}
|
|
|
|
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
|
|
index 3243cdf97f33..39090fc56f09 100644
|
|
--- a/fs/xfs/xfs_buf.c
|
|
+++ b/fs/xfs/xfs_buf.c
|
|
@@ -604,6 +604,13 @@ found:
|
|
}
|
|
}
|
|
|
|
+ /*
|
|
+ * Clear b_error if this is a lookup from a caller that doesn't expect
|
|
+ * valid data to be found in the buffer.
|
|
+ */
|
|
+ if (!(flags & XBF_READ))
|
|
+ xfs_buf_ioerror(bp, 0);
|
|
+
|
|
XFS_STATS_INC(target->bt_mount, xb_get);
|
|
trace_xfs_buf_get(bp, flags, _RET_IP_);
|
|
return bp;
|
|
@@ -1520,6 +1527,16 @@ xfs_wait_buftarg(
|
|
LIST_HEAD(dispose);
|
|
int loop = 0;
|
|
|
|
+ /*
|
|
+ * We need to flush the buffer workqueue to ensure that all IO
|
|
+ * completion processing is 100% done. Just waiting on buffer locks is
|
|
+ * not sufficient for async IO as the reference count held over IO is
|
|
+ * not released until after the buffer lock is dropped. Hence we need to
|
|
+ * ensure here that all reference counts have been dropped before we
|
|
+ * start walking the LRU list.
|
|
+ */
|
|
+ drain_workqueue(btp->bt_mount->m_buf_workqueue);
|
|
+
|
|
/* loop until there is nothing left on the lru list. */
|
|
while (list_lru_count(&btp->bt_lru)) {
|
|
list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
|
|
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
|
|
index aa67339b9537..4f18fd92ca13 100644
|
|
--- a/fs/xfs/xfs_trans_ail.c
|
|
+++ b/fs/xfs/xfs_trans_ail.c
|
|
@@ -497,7 +497,6 @@ xfsaild(
|
|
long tout = 0; /* milliseconds */
|
|
|
|
current->flags |= PF_MEMALLOC;
|
|
- set_freezable();
|
|
|
|
while (!kthread_should_stop()) {
|
|
if (tout && tout <= 20)
|
|
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
|
|
index 4dac1036594f..6fc9a6dd5ed2 100644
|
|
--- a/include/linux/compiler.h
|
|
+++ b/include/linux/compiler.h
|
|
@@ -144,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
|
|
*/
|
|
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
|
|
#define __trace_if(cond) \
|
|
- if (__builtin_constant_p((cond)) ? !!(cond) : \
|
|
+ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
|
|
({ \
|
|
int ______r; \
|
|
static struct ftrace_branch_data \
|
|
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
|
|
index 251a2090a554..e0ee0b3000b2 100644
|
|
--- a/include/linux/devpts_fs.h
|
|
+++ b/include/linux/devpts_fs.h
|
|
@@ -19,6 +19,8 @@
|
|
|
|
int devpts_new_index(struct inode *ptmx_inode);
|
|
void devpts_kill_index(struct inode *ptmx_inode, int idx);
|
|
+void devpts_add_ref(struct inode *ptmx_inode);
|
|
+void devpts_del_ref(struct inode *ptmx_inode);
|
|
/* mknod in devpts */
|
|
struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
|
|
void *priv);
|
|
@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
|
|
/* Dummy stubs in the no-pty case */
|
|
static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
|
|
static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
|
|
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
|
|
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
|
|
static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
|
|
dev_t device, int index, void *priv)
|
|
{
|
|
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
|
|
index 821273ca4873..2d9b650047a5 100644
|
|
--- a/include/linux/intel-iommu.h
|
|
+++ b/include/linux/intel-iommu.h
|
|
@@ -235,6 +235,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
|
|
/* low 64 bit */
|
|
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
|
|
|
|
+/* PRS_REG */
|
|
+#define DMA_PRS_PPR ((u32)1)
|
|
+
|
|
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
|
|
do { \
|
|
cycles_t start_time = get_cycles(); \
|
|
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
|
|
index 061265f92876..504c98a278d4 100644
|
|
--- a/include/linux/ptrace.h
|
|
+++ b/include/linux/ptrace.h
|
|
@@ -57,7 +57,29 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
|
|
#define PTRACE_MODE_READ 0x01
|
|
#define PTRACE_MODE_ATTACH 0x02
|
|
#define PTRACE_MODE_NOAUDIT 0x04
|
|
-/* Returns true on success, false on denial. */
|
|
+#define PTRACE_MODE_FSCREDS 0x08
|
|
+#define PTRACE_MODE_REALCREDS 0x10
|
|
+
|
|
+/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
|
|
+#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
|
|
+#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
|
|
+#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
|
|
+#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
|
|
+
|
|
+/**
|
|
+ * ptrace_may_access - check whether the caller is permitted to access
|
|
+ * a target task.
|
|
+ * @task: target task
|
|
+ * @mode: selects type of access and caller credentials
|
|
+ *
|
|
+ * Returns true on success, false on denial.
|
|
+ *
|
|
+ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
|
|
+ * be set in @mode to specify whether the access was requested through
|
|
+ * a filesystem syscall (should use effective capabilities and fsuid
|
|
+ * of the caller) or through an explicit syscall such as
|
|
+ * process_vm_writev or ptrace (and should use the real credentials).
|
|
+ */
|
|
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
|
|
|
|
static inline int ptrace_reparented(struct task_struct *child)
|
|
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
|
|
index 33170dbd9db4..5d5174b59802 100644
|
|
--- a/include/linux/radix-tree.h
|
|
+++ b/include/linux/radix-tree.h
|
|
@@ -370,12 +370,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
|
|
struct radix_tree_iter *iter, unsigned flags);
|
|
|
|
/**
|
|
+ * radix_tree_iter_retry - retry this chunk of the iteration
|
|
+ * @iter: iterator state
|
|
+ *
|
|
+ * If we iterate over a tree protected only by the RCU lock, a race
|
|
+ * against deletion or creation may result in seeing a slot for which
|
|
+ * radix_tree_deref_retry() returns true. If so, call this function
|
|
+ * and continue the iteration.
|
|
+ */
|
|
+static inline __must_check
|
|
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
|
|
+{
|
|
+ iter->next_index = iter->index;
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/**
|
|
* radix_tree_chunk_size - get current chunk size
|
|
*
|
|
* @iter: pointer to radix tree iterator
|
|
* Returns: current chunk size
|
|
*/
|
|
-static __always_inline unsigned
|
|
+static __always_inline long
|
|
radix_tree_chunk_size(struct radix_tree_iter *iter)
|
|
{
|
|
return iter->next_index - iter->index;
|
|
@@ -409,9 +425,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
|
|
return slot + offset + 1;
|
|
}
|
|
} else {
|
|
- unsigned size = radix_tree_chunk_size(iter) - 1;
|
|
+ long size = radix_tree_chunk_size(iter);
|
|
|
|
- while (size--) {
|
|
+ while (--size > 0) {
|
|
slot++;
|
|
iter->index++;
|
|
if (likely(*slot))
|
|
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
|
|
index 29446aeef36e..ddda2ac3446e 100644
|
|
--- a/include/linux/rmap.h
|
|
+++ b/include/linux/rmap.h
|
|
@@ -108,20 +108,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
|
|
__put_anon_vma(anon_vma);
|
|
}
|
|
|
|
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
|
|
-{
|
|
- struct anon_vma *anon_vma = vma->anon_vma;
|
|
- if (anon_vma)
|
|
- down_write(&anon_vma->root->rwsem);
|
|
-}
|
|
-
|
|
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
|
|
-{
|
|
- struct anon_vma *anon_vma = vma->anon_vma;
|
|
- if (anon_vma)
|
|
- up_write(&anon_vma->root->rwsem);
|
|
-}
|
|
-
|
|
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
|
|
{
|
|
down_write(&anon_vma->root->rwsem);
|
|
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
|
|
index 696a339c592c..03c7efb60c91 100644
|
|
--- a/include/linux/tracepoint.h
|
|
+++ b/include/linux/tracepoint.h
|
|
@@ -14,8 +14,10 @@
|
|
* See the file COPYING for more details.
|
|
*/
|
|
|
|
+#include <linux/smp.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/types.h>
|
|
+#include <linux/cpumask.h>
|
|
#include <linux/rcupdate.h>
|
|
#include <linux/static_key.h>
|
|
|
|
@@ -146,6 +148,9 @@ extern void syscall_unregfunc(void);
|
|
void *it_func; \
|
|
void *__data; \
|
|
\
|
|
+ if (!cpu_online(raw_smp_processor_id())) \
|
|
+ return; \
|
|
+ \
|
|
if (!(cond)) \
|
|
return; \
|
|
prercu; \
|
|
diff --git a/ipc/shm.c b/ipc/shm.c
|
|
index 41787276e141..3174634ca4e5 100644
|
|
--- a/ipc/shm.c
|
|
+++ b/ipc/shm.c
|
|
@@ -156,11 +156,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
|
|
struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
|
|
|
|
/*
|
|
- * We raced in the idr lookup or with shm_destroy(). Either way, the
|
|
- * ID is busted.
|
|
+ * Callers of shm_lock() must validate the status of the returned ipc
|
|
+ * object pointer (as returned by ipc_lock()), and error out as
|
|
+ * appropriate.
|
|
*/
|
|
- WARN_ON(IS_ERR(ipcp));
|
|
-
|
|
+ if (IS_ERR(ipcp))
|
|
+ return (void *)ipcp;
|
|
return container_of(ipcp, struct shmid_kernel, shm_perm);
|
|
}
|
|
|
|
@@ -186,18 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
|
|
}
|
|
|
|
|
|
-/* This is called by fork, once for every shm attach. */
|
|
-static void shm_open(struct vm_area_struct *vma)
|
|
+static int __shm_open(struct vm_area_struct *vma)
|
|
{
|
|
struct file *file = vma->vm_file;
|
|
struct shm_file_data *sfd = shm_file_data(file);
|
|
struct shmid_kernel *shp;
|
|
|
|
shp = shm_lock(sfd->ns, sfd->id);
|
|
+
|
|
+ if (IS_ERR(shp))
|
|
+ return PTR_ERR(shp);
|
|
+
|
|
shp->shm_atim = get_seconds();
|
|
shp->shm_lprid = task_tgid_vnr(current);
|
|
shp->shm_nattch++;
|
|
shm_unlock(shp);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* This is called by fork, once for every shm attach. */
|
|
+static void shm_open(struct vm_area_struct *vma)
|
|
+{
|
|
+ int err = __shm_open(vma);
|
|
+ /*
|
|
+ * We raced in the idr lookup or with shm_destroy().
|
|
+ * Either way, the ID is busted.
|
|
+ */
|
|
+ WARN_ON_ONCE(err);
|
|
}
|
|
|
|
/*
|
|
@@ -260,6 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
|
|
down_write(&shm_ids(ns).rwsem);
|
|
/* remove from the list of attaches of the shm segment */
|
|
shp = shm_lock(ns, sfd->id);
|
|
+
|
|
+ /*
|
|
+ * We raced in the idr lookup or with shm_destroy().
|
|
+ * Either way, the ID is busted.
|
|
+ */
|
|
+ if (WARN_ON_ONCE(IS_ERR(shp)))
|
|
+ goto done; /* no-op */
|
|
+
|
|
shp->shm_lprid = task_tgid_vnr(current);
|
|
shp->shm_dtim = get_seconds();
|
|
shp->shm_nattch--;
|
|
@@ -267,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
|
|
shm_destroy(ns, shp);
|
|
else
|
|
shm_unlock(shp);
|
|
+done:
|
|
up_write(&shm_ids(ns).rwsem);
|
|
}
|
|
|
|
@@ -388,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
|
|
struct shm_file_data *sfd = shm_file_data(file);
|
|
int ret;
|
|
|
|
+ /*
|
|
+ * In case of remap_file_pages() emulation, the file can represent
|
|
+ * removed IPC ID: propogate shm_lock() error to caller.
|
|
+ */
|
|
+ ret =__shm_open(vma);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
ret = sfd->file->f_op->mmap(sfd->file, vma);
|
|
- if (ret != 0)
|
|
+ if (ret) {
|
|
+ shm_close(vma);
|
|
return ret;
|
|
+ }
|
|
sfd->vm_ops = vma->vm_ops;
|
|
#ifdef CONFIG_MMU
|
|
WARN_ON(!sfd->vm_ops->fault);
|
|
#endif
|
|
vma->vm_ops = &shm_vm_ops;
|
|
- shm_open(vma);
|
|
-
|
|
- return ret;
|
|
+ return 0;
|
|
}
|
|
|
|
static int shm_release(struct inode *ino, struct file *file)
|
|
diff --git a/kernel/events/core.c b/kernel/events/core.c
|
|
index cfc227ccfceb..1087bbeb152b 100644
|
|
--- a/kernel/events/core.c
|
|
+++ b/kernel/events/core.c
|
|
@@ -3434,7 +3434,7 @@ find_lively_task_by_vpid(pid_t vpid)
|
|
|
|
/* Reuse ptrace permission checks for now. */
|
|
err = -EACCES;
|
|
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
|
|
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
|
|
goto errout;
|
|
|
|
return task;
|
|
diff --git a/kernel/futex.c b/kernel/futex.c
|
|
index 684d7549825a..461c72b2dac2 100644
|
|
--- a/kernel/futex.c
|
|
+++ b/kernel/futex.c
|
|
@@ -2755,6 +2755,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
|
if (q.pi_state && (q.pi_state->owner != current)) {
|
|
spin_lock(q.lock_ptr);
|
|
ret = fixup_pi_state_owner(uaddr2, &q, current);
|
|
+ /*
|
|
+ * Drop the reference to the pi state which
|
|
+ * the requeue_pi() code acquired for us.
|
|
+ */
|
|
+ free_pi_state(q.pi_state);
|
|
spin_unlock(q.lock_ptr);
|
|
}
|
|
} else {
|
|
@@ -2881,7 +2886,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
|
|
}
|
|
|
|
ret = -EPERM;
|
|
- if (!ptrace_may_access(p, PTRACE_MODE_READ))
|
|
+ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
|
|
goto err_unlock;
|
|
|
|
head = p->robust_list;
|
|
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
|
|
index 55c8c9349cfe..4ae3232e7a28 100644
|
|
--- a/kernel/futex_compat.c
|
|
+++ b/kernel/futex_compat.c
|
|
@@ -155,7 +155,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
|
|
}
|
|
|
|
ret = -EPERM;
|
|
- if (!ptrace_may_access(p, PTRACE_MODE_READ))
|
|
+ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
|
|
goto err_unlock;
|
|
|
|
head = p->compat_robust_list;
|
|
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
|
|
index 0aa69ea1d8fd..3a47fa998fe0 100644
|
|
--- a/kernel/kcmp.c
|
|
+++ b/kernel/kcmp.c
|
|
@@ -122,8 +122,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
|
|
&task2->signal->cred_guard_mutex);
|
|
if (ret)
|
|
goto err;
|
|
- if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
|
|
- !ptrace_may_access(task2, PTRACE_MODE_READ)) {
|
|
+ if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
|
|
+ !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
|
|
ret = -EPERM;
|
|
goto err_unlock;
|
|
}
|
|
diff --git a/kernel/memremap.c b/kernel/memremap.c
|
|
index 7658d32c5c78..7a4e473cea4d 100644
|
|
--- a/kernel/memremap.c
|
|
+++ b/kernel/memremap.c
|
|
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(memunmap);
|
|
|
|
static void devm_memremap_release(struct device *dev, void *res)
|
|
{
|
|
- memunmap(res);
|
|
+ memunmap(*(void **)res);
|
|
}
|
|
|
|
static int devm_memremap_match(struct device *dev, void *res, void *match_data)
|
|
diff --git a/kernel/module.c b/kernel/module.c
|
|
index 38c7bd5583ff..14833e6d5e37 100644
|
|
--- a/kernel/module.c
|
|
+++ b/kernel/module.c
|
|
@@ -3515,7 +3515,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
|
|
|
|
/* Module is ready to execute: parsing args may do that. */
|
|
after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
|
|
- -32768, 32767, NULL,
|
|
+ -32768, 32767, mod,
|
|
unknown_module_param_cb);
|
|
if (IS_ERR(after_dashes)) {
|
|
err = PTR_ERR(after_dashes);
|
|
@@ -3646,6 +3646,11 @@ static inline int is_arm_mapping_symbol(const char *str)
|
|
&& (str[2] == '\0' || str[2] == '.');
|
|
}
|
|
|
|
+static const char *symname(struct module *mod, unsigned int symnum)
|
|
+{
|
|
+ return mod->strtab + mod->symtab[symnum].st_name;
|
|
+}
|
|
+
|
|
static const char *get_ksymbol(struct module *mod,
|
|
unsigned long addr,
|
|
unsigned long *size,
|
|
@@ -3668,15 +3673,15 @@ static const char *get_ksymbol(struct module *mod,
|
|
|
|
/* We ignore unnamed symbols: they're uninformative
|
|
* and inserted at a whim. */
|
|
+ if (*symname(mod, i) == '\0'
|
|
+ || is_arm_mapping_symbol(symname(mod, i)))
|
|
+ continue;
|
|
+
|
|
if (mod->symtab[i].st_value <= addr
|
|
- && mod->symtab[i].st_value > mod->symtab[best].st_value
|
|
- && *(mod->strtab + mod->symtab[i].st_name) != '\0'
|
|
- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
|
|
+ && mod->symtab[i].st_value > mod->symtab[best].st_value)
|
|
best = i;
|
|
if (mod->symtab[i].st_value > addr
|
|
- && mod->symtab[i].st_value < nextval
|
|
- && *(mod->strtab + mod->symtab[i].st_name) != '\0'
|
|
- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
|
|
+ && mod->symtab[i].st_value < nextval)
|
|
nextval = mod->symtab[i].st_value;
|
|
}
|
|
|
|
@@ -3687,7 +3692,7 @@ static const char *get_ksymbol(struct module *mod,
|
|
*size = nextval - mod->symtab[best].st_value;
|
|
if (offset)
|
|
*offset = addr - mod->symtab[best].st_value;
|
|
- return mod->strtab + mod->symtab[best].st_name;
|
|
+ return symname(mod, best);
|
|
}
|
|
|
|
/* For kallsyms to ask for address resolution. NULL means not found. Careful
|
|
@@ -3782,8 +3787,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
|
|
if (symnum < mod->num_symtab) {
|
|
*value = mod->symtab[symnum].st_value;
|
|
*type = mod->symtab[symnum].st_info;
|
|
- strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
|
|
- KSYM_NAME_LEN);
|
|
+ strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN);
|
|
strlcpy(module_name, mod->name, MODULE_NAME_LEN);
|
|
*exported = is_exported(name, *value, mod);
|
|
preempt_enable();
|
|
@@ -3800,7 +3804,7 @@ static unsigned long mod_find_symname(struct module *mod, const char *name)
|
|
unsigned int i;
|
|
|
|
for (i = 0; i < mod->num_symtab; i++)
|
|
- if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
|
|
+ if (strcmp(name, symname(mod, i)) == 0 &&
|
|
mod->symtab[i].st_info != 'U')
|
|
return mod->symtab[i].st_value;
|
|
return 0;
|
|
@@ -3844,7 +3848,7 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
|
|
if (mod->state == MODULE_STATE_UNFORMED)
|
|
continue;
|
|
for (i = 0; i < mod->num_symtab; i++) {
|
|
- ret = fn(data, mod->strtab + mod->symtab[i].st_name,
|
|
+ ret = fn(data, symname(mod, i),
|
|
mod, mod->symtab[i].st_value);
|
|
if (ret != 0)
|
|
return ret;
|
|
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
|
|
index b760bae64cf1..3189e51db7e8 100644
|
|
--- a/kernel/ptrace.c
|
|
+++ b/kernel/ptrace.c
|
|
@@ -219,6 +219,14 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
|
|
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
|
|
{
|
|
const struct cred *cred = current_cred(), *tcred;
|
|
+ int dumpable = 0;
|
|
+ kuid_t caller_uid;
|
|
+ kgid_t caller_gid;
|
|
+
|
|
+ if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
|
|
+ WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
|
|
+ return -EPERM;
|
|
+ }
|
|
|
|
/* May we inspect the given task?
|
|
* This check is used both for attaching with ptrace
|
|
@@ -228,18 +236,33 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
|
|
* because setting up the necessary parent/child relationship
|
|
* or halting the specified task is impossible.
|
|
*/
|
|
- int dumpable = 0;
|
|
+
|
|
/* Don't let security modules deny introspection */
|
|
if (same_thread_group(task, current))
|
|
return 0;
|
|
rcu_read_lock();
|
|
+ if (mode & PTRACE_MODE_FSCREDS) {
|
|
+ caller_uid = cred->fsuid;
|
|
+ caller_gid = cred->fsgid;
|
|
+ } else {
|
|
+ /*
|
|
+ * Using the euid would make more sense here, but something
|
|
+ * in userland might rely on the old behavior, and this
|
|
+ * shouldn't be a security problem since
|
|
+ * PTRACE_MODE_REALCREDS implies that the caller explicitly
|
|
+ * used a syscall that requests access to another process
|
|
+ * (and not a filesystem syscall to procfs).
|
|
+ */
|
|
+ caller_uid = cred->uid;
|
|
+ caller_gid = cred->gid;
|
|
+ }
|
|
tcred = __task_cred(task);
|
|
- if (uid_eq(cred->uid, tcred->euid) &&
|
|
- uid_eq(cred->uid, tcred->suid) &&
|
|
- uid_eq(cred->uid, tcred->uid) &&
|
|
- gid_eq(cred->gid, tcred->egid) &&
|
|
- gid_eq(cred->gid, tcred->sgid) &&
|
|
- gid_eq(cred->gid, tcred->gid))
|
|
+ if (uid_eq(caller_uid, tcred->euid) &&
|
|
+ uid_eq(caller_uid, tcred->suid) &&
|
|
+ uid_eq(caller_uid, tcred->uid) &&
|
|
+ gid_eq(caller_gid, tcred->egid) &&
|
|
+ gid_eq(caller_gid, tcred->sgid) &&
|
|
+ gid_eq(caller_gid, tcred->gid))
|
|
goto ok;
|
|
if (ptrace_has_cap(tcred->user_ns, mode))
|
|
goto ok;
|
|
@@ -306,7 +329,7 @@ static int ptrace_attach(struct task_struct *task, long request,
|
|
goto out;
|
|
|
|
task_lock(task);
|
|
- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
|
|
+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
|
|
task_unlock(task);
|
|
if (retval)
|
|
goto unlock_creds;
|
|
diff --git a/kernel/sys.c b/kernel/sys.c
|
|
index 6af9212ab5aa..78947de6f969 100644
|
|
--- a/kernel/sys.c
|
|
+++ b/kernel/sys.c
|
|
@@ -1853,11 +1853,13 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
|
|
user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
|
|
}
|
|
|
|
- if (prctl_map.exe_fd != (u32)-1)
|
|
+ if (prctl_map.exe_fd != (u32)-1) {
|
|
error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
|
|
- down_read(&mm->mmap_sem);
|
|
- if (error)
|
|
- goto out;
|
|
+ if (error)
|
|
+ return error;
|
|
+ }
|
|
+
|
|
+ down_write(&mm->mmap_sem);
|
|
|
|
/*
|
|
* We don't validate if these members are pointing to
|
|
@@ -1894,10 +1896,8 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
|
|
if (prctl_map.auxv_size)
|
|
memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
|
|
|
|
- error = 0;
|
|
-out:
|
|
- up_read(&mm->mmap_sem);
|
|
- return error;
|
|
+ up_write(&mm->mmap_sem);
|
|
+ return 0;
|
|
}
|
|
#endif /* CONFIG_CHECKPOINT_RESTORE */
|
|
|
|
@@ -1963,7 +1963,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
|
|
|
|
error = -EINVAL;
|
|
|
|
- down_read(&mm->mmap_sem);
|
|
+ down_write(&mm->mmap_sem);
|
|
vma = find_vma(mm, addr);
|
|
|
|
prctl_map.start_code = mm->start_code;
|
|
@@ -2056,7 +2056,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
|
|
|
|
error = 0;
|
|
out:
|
|
- up_read(&mm->mmap_sem);
|
|
+ up_write(&mm->mmap_sem);
|
|
return error;
|
|
}
|
|
|
|
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
|
|
index 8d262b467573..1d5c7204ddc9 100644
|
|
--- a/kernel/time/itimer.c
|
|
+++ b/kernel/time/itimer.c
|
|
@@ -26,7 +26,7 @@
|
|
*/
|
|
static struct timeval itimer_get_remtime(struct hrtimer *timer)
|
|
{
|
|
- ktime_t rem = hrtimer_get_remaining(timer);
|
|
+ ktime_t rem = __hrtimer_get_remaining(timer, true);
|
|
|
|
/*
|
|
* Racy but safe: if the itimer expires after the above
|
|
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
|
|
index 31d11ac9fa47..f2826c35e918 100644
|
|
--- a/kernel/time/posix-timers.c
|
|
+++ b/kernel/time/posix-timers.c
|
|
@@ -760,7 +760,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
|
|
(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
|
|
timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
|
|
|
|
- remaining = ktime_sub(hrtimer_get_expires(timer), now);
|
|
+ remaining = __hrtimer_expires_remaining_adjusted(timer, now);
|
|
/* Return 0 only, when the timer is expired and not pending */
|
|
if (remaining.tv64 <= 0) {
|
|
/*
|
|
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
|
|
index d34bd24c2c84..4a1515f4b452 100644
|
|
--- a/lib/dma-debug.c
|
|
+++ b/lib/dma-debug.c
|
|
@@ -1181,7 +1181,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end
|
|
|
|
static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
|
|
{
|
|
- if (overlap(addr, len, _text, _etext) ||
|
|
+ if (overlap(addr, len, _stext, _etext) ||
|
|
overlap(addr, len, __start_rodata, __end_rodata))
|
|
err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
|
|
}
|
|
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
|
|
index 6745c6230db3..c30d07e99dba 100644
|
|
--- a/lib/dump_stack.c
|
|
+++ b/lib/dump_stack.c
|
|
@@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1);
|
|
|
|
asmlinkage __visible void dump_stack(void)
|
|
{
|
|
+ unsigned long flags;
|
|
int was_locked;
|
|
int old;
|
|
int cpu;
|
|
@@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void)
|
|
* Permit this cpu to perform nested stack dumps while serialising
|
|
* against other CPUs
|
|
*/
|
|
- preempt_disable();
|
|
-
|
|
retry:
|
|
+ local_irq_save(flags);
|
|
cpu = smp_processor_id();
|
|
old = atomic_cmpxchg(&dump_lock, -1, cpu);
|
|
if (old == -1) {
|
|
@@ -43,6 +43,7 @@ retry:
|
|
} else if (old == cpu) {
|
|
was_locked = 1;
|
|
} else {
|
|
+ local_irq_restore(flags);
|
|
cpu_relax();
|
|
goto retry;
|
|
}
|
|
@@ -52,7 +53,7 @@ retry:
|
|
if (!was_locked)
|
|
atomic_set(&dump_lock, -1);
|
|
|
|
- preempt_enable();
|
|
+ local_irq_restore(flags);
|
|
}
|
|
#else
|
|
asmlinkage __visible void dump_stack(void)
|
|
diff --git a/lib/klist.c b/lib/klist.c
|
|
index d74cf7a29afd..0507fa5d84c5 100644
|
|
--- a/lib/klist.c
|
|
+++ b/lib/klist.c
|
|
@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
|
|
struct klist_node *n)
|
|
{
|
|
i->i_klist = k;
|
|
- i->i_cur = n;
|
|
- if (n)
|
|
- kref_get(&n->n_ref);
|
|
+ i->i_cur = NULL;
|
|
+ if (n && kref_get_unless_zero(&n->n_ref))
|
|
+ i->i_cur = n;
|
|
}
|
|
EXPORT_SYMBOL_GPL(klist_iter_init_node);
|
|
|
|
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
|
|
index fcf5d98574ce..6b79e9026e24 100644
|
|
--- a/lib/radix-tree.c
|
|
+++ b/lib/radix-tree.c
|
|
@@ -1019,9 +1019,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
|
|
return 0;
|
|
|
|
radix_tree_for_each_slot(slot, root, &iter, first_index) {
|
|
- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
|
|
+ results[ret] = rcu_dereference_raw(*slot);
|
|
if (!results[ret])
|
|
continue;
|
|
+ if (radix_tree_is_indirect_ptr(results[ret])) {
|
|
+ slot = radix_tree_iter_retry(&iter);
|
|
+ continue;
|
|
+ }
|
|
if (++ret == max_items)
|
|
break;
|
|
}
|
|
@@ -1098,9 +1102,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
|
|
return 0;
|
|
|
|
radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
|
|
- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
|
|
+ results[ret] = rcu_dereference_raw(*slot);
|
|
if (!results[ret])
|
|
continue;
|
|
+ if (radix_tree_is_indirect_ptr(results[ret])) {
|
|
+ slot = radix_tree_iter_retry(&iter);
|
|
+ continue;
|
|
+ }
|
|
if (++ret == max_items)
|
|
break;
|
|
}
|
|
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
|
|
index 5939f63d90cd..5c88204b6f1f 100644
|
|
--- a/lib/string_helpers.c
|
|
+++ b/lib/string_helpers.c
|
|
@@ -43,50 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
|
|
[STRING_UNITS_10] = 1000,
|
|
[STRING_UNITS_2] = 1024,
|
|
};
|
|
- int i, j;
|
|
- u32 remainder = 0, sf_cap, exp;
|
|
+ static const unsigned int rounding[] = { 500, 50, 5 };
|
|
+ int i = 0, j;
|
|
+ u32 remainder = 0, sf_cap;
|
|
char tmp[8];
|
|
const char *unit;
|
|
|
|
tmp[0] = '\0';
|
|
- i = 0;
|
|
- if (!size)
|
|
+
|
|
+ if (blk_size == 0)
|
|
+ size = 0;
|
|
+ if (size == 0)
|
|
goto out;
|
|
|
|
- while (blk_size >= divisor[units]) {
|
|
- remainder = do_div(blk_size, divisor[units]);
|
|
+ /* This is Napier's algorithm. Reduce the original block size to
|
|
+ *
|
|
+ * coefficient * divisor[units]^i
|
|
+ *
|
|
+ * we do the reduction so both coefficients are just under 32 bits so
|
|
+ * that multiplying them together won't overflow 64 bits and we keep
|
|
+ * as much precision as possible in the numbers.
|
|
+ *
|
|
+ * Note: it's safe to throw away the remainders here because all the
|
|
+ * precision is in the coefficients.
|
|
+ */
|
|
+ while (blk_size >> 32) {
|
|
+ do_div(blk_size, divisor[units]);
|
|
i++;
|
|
}
|
|
|
|
- exp = divisor[units] / (u32)blk_size;
|
|
- /*
|
|
- * size must be strictly greater than exp here to ensure that remainder
|
|
- * is greater than divisor[units] coming out of the if below.
|
|
- */
|
|
- if (size > exp) {
|
|
- remainder = do_div(size, divisor[units]);
|
|
- remainder *= blk_size;
|
|
+ while (size >> 32) {
|
|
+ do_div(size, divisor[units]);
|
|
i++;
|
|
- } else {
|
|
- remainder *= size;
|
|
}
|
|
|
|
+ /* now perform the actual multiplication keeping i as the sum of the
|
|
+ * two logarithms */
|
|
size *= blk_size;
|
|
- size += remainder / divisor[units];
|
|
- remainder %= divisor[units];
|
|
|
|
+ /* and logarithmically reduce it until it's just under the divisor */
|
|
while (size >= divisor[units]) {
|
|
remainder = do_div(size, divisor[units]);
|
|
i++;
|
|
}
|
|
|
|
+ /* work out in j how many digits of precision we need from the
|
|
+ * remainder */
|
|
sf_cap = size;
|
|
for (j = 0; sf_cap*10 < 1000; j++)
|
|
sf_cap *= 10;
|
|
|
|
- if (j) {
|
|
+ if (units == STRING_UNITS_2) {
|
|
+ /* express the remainder as a decimal. It's currently the
|
|
+ * numerator of a fraction whose denominator is
|
|
+ * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
|
|
remainder *= 1000;
|
|
- remainder /= divisor[units];
|
|
+ remainder >>= 10;
|
|
+ }
|
|
+
|
|
+ /* add a 5 to the digit below what will be printed to ensure
|
|
+ * an arithmetical round up and carry it through to size */
|
|
+ remainder += rounding[j];
|
|
+ if (remainder >= 1000) {
|
|
+ remainder -= 1000;
|
|
+ size += 1;
|
|
+ }
|
|
+
|
|
+ if (j) {
|
|
snprintf(tmp, sizeof(tmp), ".%03u", remainder);
|
|
tmp[j+1] = '\0';
|
|
}
|
|
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
|
|
index fc10620967c7..ee6acd279953 100644
|
|
--- a/mm/memcontrol.c
|
|
+++ b/mm/memcontrol.c
|
|
@@ -3522,16 +3522,17 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
|
|
swap_buffers:
|
|
/* Swap primary and spare array */
|
|
thresholds->spare = thresholds->primary;
|
|
- /* If all events are unregistered, free the spare array */
|
|
- if (!new) {
|
|
- kfree(thresholds->spare);
|
|
- thresholds->spare = NULL;
|
|
- }
|
|
|
|
rcu_assign_pointer(thresholds->primary, new);
|
|
|
|
/* To be sure that nobody uses thresholds */
|
|
synchronize_rcu();
|
|
+
|
|
+ /* If all events are unregistered, free the spare array */
|
|
+ if (!new) {
|
|
+ kfree(thresholds->spare);
|
|
+ thresholds->spare = NULL;
|
|
+ }
|
|
unlock:
|
|
mutex_unlock(&memcg->thresholds_lock);
|
|
}
|
|
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
|
|
index 8424b64711ac..750b7893ee3a 100644
|
|
--- a/mm/memory-failure.c
|
|
+++ b/mm/memory-failure.c
|
|
@@ -1572,7 +1572,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
|
|
* Did it turn free?
|
|
*/
|
|
ret = __get_any_page(page, pfn, 0);
|
|
- if (!PageLRU(page)) {
|
|
+ if (ret == 1 && !PageLRU(page)) {
|
|
/* Drop page reference which is from __get_any_page() */
|
|
put_hwpoison_page(page);
|
|
pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
|
|
diff --git a/mm/mlock.c b/mm/mlock.c
|
|
index 339d9e0949b6..d6006b146fea 100644
|
|
--- a/mm/mlock.c
|
|
+++ b/mm/mlock.c
|
|
@@ -172,7 +172,7 @@ static void __munlock_isolation_failed(struct page *page)
|
|
*/
|
|
unsigned int munlock_vma_page(struct page *page)
|
|
{
|
|
- unsigned int nr_pages;
|
|
+ int nr_pages;
|
|
struct zone *zone = page_zone(page);
|
|
|
|
/* For try_to_munlock() and to serialize with page migration */
|
|
diff --git a/mm/mmap.c b/mm/mmap.c
|
|
index 2ce04a649f6b..455772a05e54 100644
|
|
--- a/mm/mmap.c
|
|
+++ b/mm/mmap.c
|
|
@@ -441,12 +441,16 @@ static void validate_mm(struct mm_struct *mm)
|
|
struct vm_area_struct *vma = mm->mmap;
|
|
|
|
while (vma) {
|
|
+ struct anon_vma *anon_vma = vma->anon_vma;
|
|
struct anon_vma_chain *avc;
|
|
|
|
- vma_lock_anon_vma(vma);
|
|
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
|
|
- anon_vma_interval_tree_verify(avc);
|
|
- vma_unlock_anon_vma(vma);
|
|
+ if (anon_vma) {
|
|
+ anon_vma_lock_read(anon_vma);
|
|
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
|
|
+ anon_vma_interval_tree_verify(avc);
|
|
+ anon_vma_unlock_read(anon_vma);
|
|
+ }
|
|
+
|
|
highest_address = vma->vm_end;
|
|
vma = vma->vm_next;
|
|
i++;
|
|
@@ -2147,32 +2151,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
|
|
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
|
|
{
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
- int error;
|
|
+ int error = 0;
|
|
|
|
if (!(vma->vm_flags & VM_GROWSUP))
|
|
return -EFAULT;
|
|
|
|
- /*
|
|
- * We must make sure the anon_vma is allocated
|
|
- * so that the anon_vma locking is not a noop.
|
|
- */
|
|
+ /* Guard against wrapping around to address 0. */
|
|
+ if (address < PAGE_ALIGN(address+4))
|
|
+ address = PAGE_ALIGN(address+4);
|
|
+ else
|
|
+ return -ENOMEM;
|
|
+
|
|
+ /* We must make sure the anon_vma is allocated. */
|
|
if (unlikely(anon_vma_prepare(vma)))
|
|
return -ENOMEM;
|
|
- vma_lock_anon_vma(vma);
|
|
|
|
/*
|
|
* vma->vm_start/vm_end cannot change under us because the caller
|
|
* is required to hold the mmap_sem in read mode. We need the
|
|
* anon_vma lock to serialize against concurrent expand_stacks.
|
|
- * Also guard against wrapping around to address 0.
|
|
*/
|
|
- if (address < PAGE_ALIGN(address+4))
|
|
- address = PAGE_ALIGN(address+4);
|
|
- else {
|
|
- vma_unlock_anon_vma(vma);
|
|
- return -ENOMEM;
|
|
- }
|
|
- error = 0;
|
|
+ anon_vma_lock_write(vma->anon_vma);
|
|
|
|
/* Somebody else might have raced and expanded it already */
|
|
if (address > vma->vm_end) {
|
|
@@ -2190,7 +2189,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
|
|
* updates, but we only hold a shared mmap_sem
|
|
* lock here, so we need to protect against
|
|
* concurrent vma expansions.
|
|
- * vma_lock_anon_vma() doesn't help here, as
|
|
+ * anon_vma_lock_write() doesn't help here, as
|
|
* we don't guarantee that all growable vmas
|
|
* in a mm share the same root anon vma.
|
|
* So, we reuse mm->page_table_lock to guard
|
|
@@ -2214,7 +2213,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
|
|
}
|
|
}
|
|
}
|
|
- vma_unlock_anon_vma(vma);
|
|
+ anon_vma_unlock_write(vma->anon_vma);
|
|
khugepaged_enter_vma_merge(vma, vma->vm_flags);
|
|
validate_mm(mm);
|
|
return error;
|
|
@@ -2230,25 +2229,21 @@ int expand_downwards(struct vm_area_struct *vma,
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
int error;
|
|
|
|
- /*
|
|
- * We must make sure the anon_vma is allocated
|
|
- * so that the anon_vma locking is not a noop.
|
|
- */
|
|
- if (unlikely(anon_vma_prepare(vma)))
|
|
- return -ENOMEM;
|
|
-
|
|
address &= PAGE_MASK;
|
|
error = security_mmap_addr(address);
|
|
if (error)
|
|
return error;
|
|
|
|
- vma_lock_anon_vma(vma);
|
|
+ /* We must make sure the anon_vma is allocated. */
|
|
+ if (unlikely(anon_vma_prepare(vma)))
|
|
+ return -ENOMEM;
|
|
|
|
/*
|
|
* vma->vm_start/vm_end cannot change under us because the caller
|
|
* is required to hold the mmap_sem in read mode. We need the
|
|
* anon_vma lock to serialize against concurrent expand_stacks.
|
|
*/
|
|
+ anon_vma_lock_write(vma->anon_vma);
|
|
|
|
/* Somebody else might have raced and expanded it already */
|
|
if (address < vma->vm_start) {
|
|
@@ -2266,7 +2261,7 @@ int expand_downwards(struct vm_area_struct *vma,
|
|
* updates, but we only hold a shared mmap_sem
|
|
* lock here, so we need to protect against
|
|
* concurrent vma expansions.
|
|
- * vma_lock_anon_vma() doesn't help here, as
|
|
+ * anon_vma_lock_write() doesn't help here, as
|
|
* we don't guarantee that all growable vmas
|
|
* in a mm share the same root anon vma.
|
|
* So, we reuse mm->page_table_lock to guard
|
|
@@ -2288,7 +2283,7 @@ int expand_downwards(struct vm_area_struct *vma,
|
|
}
|
|
}
|
|
}
|
|
- vma_unlock_anon_vma(vma);
|
|
+ anon_vma_unlock_write(vma->anon_vma);
|
|
khugepaged_enter_vma_merge(vma, vma->vm_flags);
|
|
validate_mm(mm);
|
|
return error;
|
|
@@ -2673,12 +2668,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
|
|
if (!vma || !(vma->vm_flags & VM_SHARED))
|
|
goto out;
|
|
|
|
- if (start < vma->vm_start || start + size > vma->vm_end)
|
|
+ if (start < vma->vm_start)
|
|
goto out;
|
|
|
|
- if (pgoff == linear_page_index(vma, start)) {
|
|
- ret = 0;
|
|
- goto out;
|
|
+ if (start + size > vma->vm_end) {
|
|
+ struct vm_area_struct *next;
|
|
+
|
|
+ for (next = vma->vm_next; next; next = next->vm_next) {
|
|
+ /* hole between vmas ? */
|
|
+ if (next->vm_start != next->vm_prev->vm_end)
|
|
+ goto out;
|
|
+
|
|
+ if (next->vm_file != vma->vm_file)
|
|
+ goto out;
|
|
+
|
|
+ if (next->vm_flags != vma->vm_flags)
|
|
+ goto out;
|
|
+
|
|
+ if (start + size <= next->vm_end)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (!next)
|
|
+ goto out;
|
|
}
|
|
|
|
prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
|
|
@@ -2688,9 +2700,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
|
|
flags &= MAP_NONBLOCK;
|
|
flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
|
|
if (vma->vm_flags & VM_LOCKED) {
|
|
+ struct vm_area_struct *tmp;
|
|
flags |= MAP_LOCKED;
|
|
+
|
|
/* drop PG_Mlocked flag for over-mapped range */
|
|
- munlock_vma_pages_range(vma, start, start + size);
|
|
+ for (tmp = vma; tmp->vm_start >= start + size;
|
|
+ tmp = tmp->vm_next) {
|
|
+ munlock_vma_pages_range(tmp,
|
|
+ max(tmp->vm_start, start),
|
|
+ min(tmp->vm_end, start + size));
|
|
+ }
|
|
}
|
|
|
|
file = get_file(vma->vm_file);
|
|
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
|
|
index 7d3db0247983..1ba58213ad65 100644
|
|
--- a/mm/pgtable-generic.c
|
|
+++ b/mm/pgtable-generic.c
|
|
@@ -210,7 +210,9 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
|
|
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
|
|
VM_BUG_ON(pmd_trans_huge(*pmdp));
|
|
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
|
|
- flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
|
|
+
|
|
+ /* collapse entails shooting down ptes not pmd */
|
|
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
|
|
return pmd;
|
|
}
|
|
#endif
|
|
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
|
|
index e88d071648c2..5d453e58ddbf 100644
|
|
--- a/mm/process_vm_access.c
|
|
+++ b/mm/process_vm_access.c
|
|
@@ -194,7 +194,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
|
|
goto free_proc_pages;
|
|
}
|
|
|
|
- mm = mm_access(task, PTRACE_MODE_ATTACH);
|
|
+ mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
|
|
if (!mm || IS_ERR(mm)) {
|
|
rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
|
|
/*
|
|
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
|
|
index 2ffaf6a79499..027c9ef8a263 100644
|
|
--- a/net/sunrpc/xprtsock.c
|
|
+++ b/net/sunrpc/xprtsock.c
|
|
@@ -398,7 +398,6 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
|
|
if (unlikely(!sock))
|
|
return -ENOTSOCK;
|
|
|
|
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags);
|
|
if (base != 0) {
|
|
addr = NULL;
|
|
addrlen = 0;
|
|
@@ -442,7 +441,6 @@ static void xs_nospace_callback(struct rpc_task *task)
|
|
struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
|
|
|
|
transport->inet->sk_write_pending--;
|
|
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
|
|
}
|
|
|
|
/**
|
|
@@ -467,20 +465,11 @@ static int xs_nospace(struct rpc_task *task)
|
|
|
|
/* Don't race with disconnect */
|
|
if (xprt_connected(xprt)) {
|
|
- if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) {
|
|
- /*
|
|
- * Notify TCP that we're limited by the application
|
|
- * window size
|
|
- */
|
|
- set_bit(SOCK_NOSPACE, &transport->sock->flags);
|
|
- sk->sk_write_pending++;
|
|
- /* ...and wait for more buffer space */
|
|
- xprt_wait_for_buffer_space(task, xs_nospace_callback);
|
|
- }
|
|
- } else {
|
|
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
|
|
+ /* wait for more buffer space */
|
|
+ sk->sk_write_pending++;
|
|
+ xprt_wait_for_buffer_space(task, xs_nospace_callback);
|
|
+ } else
|
|
ret = -ENOTCONN;
|
|
- }
|
|
|
|
spin_unlock_bh(&xprt->transport_lock);
|
|
|
|
@@ -616,9 +605,6 @@ process_status:
|
|
case -EAGAIN:
|
|
status = xs_nospace(task);
|
|
break;
|
|
- default:
|
|
- dprintk("RPC: sendmsg returned unrecognized error %d\n",
|
|
- -status);
|
|
case -ENETUNREACH:
|
|
case -ENOBUFS:
|
|
case -EPIPE:
|
|
@@ -626,7 +612,10 @@ process_status:
|
|
case -EPERM:
|
|
/* When the server has died, an ICMP port unreachable message
|
|
* prompts ECONNREFUSED. */
|
|
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
|
|
+ break;
|
|
+ default:
|
|
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
|
|
+ -status);
|
|
}
|
|
|
|
return status;
|
|
@@ -706,16 +695,16 @@ static int xs_tcp_send_request(struct rpc_task *task)
|
|
case -EAGAIN:
|
|
status = xs_nospace(task);
|
|
break;
|
|
- default:
|
|
- dprintk("RPC: sendmsg returned unrecognized error %d\n",
|
|
- -status);
|
|
case -ECONNRESET:
|
|
case -ECONNREFUSED:
|
|
case -ENOTCONN:
|
|
case -EADDRINUSE:
|
|
case -ENOBUFS:
|
|
case -EPIPE:
|
|
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
|
|
+ break;
|
|
+ default:
|
|
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
|
|
+ -status);
|
|
}
|
|
|
|
return status;
|
|
@@ -1609,19 +1598,23 @@ static void xs_tcp_state_change(struct sock *sk)
|
|
|
|
static void xs_write_space(struct sock *sk)
|
|
{
|
|
- struct socket *sock;
|
|
+ struct socket_wq *wq;
|
|
struct rpc_xprt *xprt;
|
|
|
|
- if (unlikely(!(sock = sk->sk_socket)))
|
|
+ if (!sk->sk_socket)
|
|
return;
|
|
- clear_bit(SOCK_NOSPACE, &sock->flags);
|
|
+ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
|
|
|
|
if (unlikely(!(xprt = xprt_from_sock(sk))))
|
|
return;
|
|
- if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0)
|
|
- return;
|
|
+ rcu_read_lock();
|
|
+ wq = rcu_dereference(sk->sk_wq);
|
|
+ if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
|
|
+ goto out;
|
|
|
|
xprt_write_space(xprt);
|
|
+out:
|
|
+ rcu_read_unlock();
|
|
}
|
|
|
|
/**
|
|
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
|
|
index 23e78dcd12bf..38b64f487315 100755
|
|
--- a/scripts/bloat-o-meter
|
|
+++ b/scripts/bloat-o-meter
|
|
@@ -58,8 +58,8 @@ for name in common:
|
|
delta.sort()
|
|
delta.reverse()
|
|
|
|
-print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
|
|
- (add, remove, grow, shrink, up, -down, up-down)
|
|
-print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")
|
|
+print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
|
|
+ (add, remove, grow, shrink, up, -down, up-down))
|
|
+print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
|
|
for d, n in delta:
|
|
- if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)
|
|
+ if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
|
|
diff --git a/security/commoncap.c b/security/commoncap.c
|
|
index 1832cf701c3d..48071ed7c445 100644
|
|
--- a/security/commoncap.c
|
|
+++ b/security/commoncap.c
|
|
@@ -137,12 +137,17 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
|
|
{
|
|
int ret = 0;
|
|
const struct cred *cred, *child_cred;
|
|
+ const kernel_cap_t *caller_caps;
|
|
|
|
rcu_read_lock();
|
|
cred = current_cred();
|
|
child_cred = __task_cred(child);
|
|
+ if (mode & PTRACE_MODE_FSCREDS)
|
|
+ caller_caps = &cred->cap_effective;
|
|
+ else
|
|
+ caller_caps = &cred->cap_permitted;
|
|
if (cred->user_ns == child_cred->user_ns &&
|
|
- cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
|
|
+ cap_issubset(child_cred->cap_permitted, *caller_caps))
|
|
goto out;
|
|
if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
|
|
goto out;
|
|
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
|
|
index a8b27cdc2844..4ba64fd49759 100644
|
|
--- a/sound/core/pcm_native.c
|
|
+++ b/sound/core/pcm_native.c
|
|
@@ -74,6 +74,18 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
|
|
static DEFINE_RWLOCK(snd_pcm_link_rwlock);
|
|
static DECLARE_RWSEM(snd_pcm_link_rwsem);
|
|
|
|
+/* Writer in rwsem may block readers even during its waiting in queue,
|
|
+ * and this may lead to a deadlock when the code path takes read sem
|
|
+ * twice (e.g. one in snd_pcm_action_nonatomic() and another in
|
|
+ * snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
|
|
+ * spin until it gets the lock.
|
|
+ */
|
|
+static inline void down_write_nonblock(struct rw_semaphore *lock)
|
|
+{
|
|
+ while (!down_write_trylock(lock))
|
|
+ cond_resched();
|
|
+}
|
|
+
|
|
/**
|
|
* snd_pcm_stream_lock - Lock the PCM stream
|
|
* @substream: PCM substream
|
|
@@ -1813,7 +1825,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
|
|
res = -ENOMEM;
|
|
goto _nolock;
|
|
}
|
|
- down_write(&snd_pcm_link_rwsem);
|
|
+ down_write_nonblock(&snd_pcm_link_rwsem);
|
|
write_lock_irq(&snd_pcm_link_rwlock);
|
|
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
|
|
substream->runtime->status->state != substream1->runtime->status->state ||
|
|
@@ -1860,7 +1872,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
|
|
struct snd_pcm_substream *s;
|
|
int res = 0;
|
|
|
|
- down_write(&snd_pcm_link_rwsem);
|
|
+ down_write_nonblock(&snd_pcm_link_rwsem);
|
|
write_lock_irq(&snd_pcm_link_rwlock);
|
|
if (!snd_pcm_stream_linked(substream)) {
|
|
res = -EALREADY;
|
|
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
|
|
index 801076687bb1..c850345c43b5 100644
|
|
--- a/sound/core/seq/seq_memory.c
|
|
+++ b/sound/core/seq/seq_memory.c
|
|
@@ -383,15 +383,20 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
|
|
|
|
if (snd_BUG_ON(!pool))
|
|
return -EINVAL;
|
|
- if (pool->ptr) /* should be atomic? */
|
|
- return 0;
|
|
|
|
- pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
|
|
- if (!pool->ptr)
|
|
+ cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
|
|
+ if (!cellptr)
|
|
return -ENOMEM;
|
|
|
|
/* add new cells to the free cell list */
|
|
spin_lock_irqsave(&pool->lock, flags);
|
|
+ if (pool->ptr) {
|
|
+ spin_unlock_irqrestore(&pool->lock, flags);
|
|
+ vfree(cellptr);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ pool->ptr = cellptr;
|
|
pool->free = NULL;
|
|
|
|
for (cell = 0; cell < pool->size; cell++) {
|
|
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
|
|
index 921fb2bd8fad..fe686ee41c6d 100644
|
|
--- a/sound/core/seq/seq_ports.c
|
|
+++ b/sound/core/seq/seq_ports.c
|
|
@@ -535,19 +535,22 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
|
|
bool is_src, bool ack)
|
|
{
|
|
struct snd_seq_port_subs_info *grp;
|
|
+ struct list_head *list;
|
|
+ bool empty;
|
|
|
|
grp = is_src ? &port->c_src : &port->c_dest;
|
|
+ list = is_src ? &subs->src_list : &subs->dest_list;
|
|
down_write(&grp->list_mutex);
|
|
write_lock_irq(&grp->list_lock);
|
|
- if (is_src)
|
|
- list_del(&subs->src_list);
|
|
- else
|
|
- list_del(&subs->dest_list);
|
|
+ empty = list_empty(list);
|
|
+ if (!empty)
|
|
+ list_del_init(list);
|
|
grp->exclusive = 0;
|
|
write_unlock_irq(&grp->list_lock);
|
|
up_write(&grp->list_mutex);
|
|
|
|
- unsubscribe_port(client, port, grp, &subs->info, ack);
|
|
+ if (!empty)
|
|
+ unsubscribe_port(client, port, grp, &subs->info, ack);
|
|
}
|
|
|
|
/* connect two ports */
|
|
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
|
|
index 02a86ba5ba22..2c13298e80b7 100644
|
|
--- a/sound/pci/hda/hda_intel.c
|
|
+++ b/sound/pci/hda/hda_intel.c
|
|
@@ -2143,10 +2143,10 @@ static void azx_remove(struct pci_dev *pci)
|
|
struct hda_intel *hda;
|
|
|
|
if (card) {
|
|
- /* flush the pending probing work */
|
|
+ /* cancel the pending probing work */
|
|
chip = card->private_data;
|
|
hda = container_of(chip, struct hda_intel, chip);
|
|
- flush_work(&hda->probe_work);
|
|
+ cancel_work_sync(&hda->probe_work);
|
|
|
|
snd_card_free(card);
|
|
}
|
|
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
|
|
index 2a912df6771b..68276f35e323 100644
|
|
--- a/tools/lib/traceevent/event-parse.c
|
|
+++ b/tools/lib/traceevent/event-parse.c
|
|
@@ -4968,13 +4968,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
|
|
sizeof(long) != 8) {
|
|
char *p;
|
|
|
|
- ls = 2;
|
|
/* make %l into %ll */
|
|
- p = strchr(format, 'l');
|
|
- if (p)
|
|
+ if (ls == 1 && (p = strchr(format, 'l')))
|
|
memmove(p+1, p, strlen(p)+1);
|
|
else if (strcmp(format, "%p") == 0)
|
|
strcpy(format, "0x%llx");
|
|
+ ls = 2;
|
|
}
|
|
switch (ls) {
|
|
case -2:
|
|
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
|
|
index 6fc8cd753e1a..b48e87693aa5 100644
|
|
--- a/tools/perf/util/parse-events.c
|
|
+++ b/tools/perf/util/parse-events.c
|
|
@@ -399,6 +399,9 @@ static void tracepoint_error(struct parse_events_error *e, int err,
|
|
{
|
|
char help[BUFSIZ];
|
|
|
|
+ if (!e)
|
|
+ return;
|
|
+
|
|
/*
|
|
* We get error directly from syscall errno ( > 0),
|
|
* or from encoded pointer's error ( < 0).
|
|
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
|
|
index c35ffdd360fe..468de95bc8bb 100644
|
|
--- a/tools/perf/util/session.c
|
|
+++ b/tools/perf/util/session.c
|
|
@@ -972,7 +972,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
|
|
|
|
machine = machines__find(machines, pid);
|
|
if (!machine)
|
|
- machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
|
|
+ machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
|
|
return machine;
|
|
}
|
|
|
|
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
|
|
index 69bca185c471..ea6064696fe4 100644
|
|
--- a/virt/kvm/arm/arch_timer.c
|
|
+++ b/virt/kvm/arm/arch_timer.c
|
|
@@ -143,7 +143,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
|
|
* Check if there was a change in the timer state (should we raise or lower
|
|
* the line level to the GIC).
|
|
*/
|
|
-static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
|
|
+static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
|
|
{
|
|
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
|
|
|
|
@@ -154,10 +154,12 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
|
|
* until we call this function from kvm_timer_flush_hwstate.
|
|
*/
|
|
if (!vgic_initialized(vcpu->kvm))
|
|
- return;
|
|
+ return -ENODEV;
|
|
|
|
if (kvm_timer_should_fire(vcpu) != timer->irq.level)
|
|
kvm_timer_update_irq(vcpu, !timer->irq.level);
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
/*
|
|
@@ -218,7 +220,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
|
|
bool phys_active;
|
|
int ret;
|
|
|
|
- kvm_timer_update_state(vcpu);
|
|
+ if (kvm_timer_update_state(vcpu))
|
|
+ return;
|
|
|
|
/*
|
|
* If we enter the guest with the virtual input level to the VGIC
|