mirror of
https://github.com/Fishwaldo/build.git
synced 2025-03-25 08:11:45 +00:00
12008 lines
384 KiB
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 3b2f2dd82225..3c6fc2e08d04 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -44,6 +44,8 @@ stable kernels.

| Implementor | Component | Erratum ID | Kconfig |
+----------------+-----------------+-----------------+-----------------------------+
+| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 |
+| | | | |
| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
| ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 0de6f6145cc6..7ba8cd567f84 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -38,6 +38,9 @@ Procedure for submitting patches to the -stable tree
- If the patch covers files in net/ or drivers/net please follow netdev stable
submission guidelines as described in
:ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
+ after first checking the stable networking queue at
+ https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive=
+ to ensure the requested patch is not already queued up.
- Security patches should not be handled (solely) by the -stable review
process but should follow the procedures in
:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
diff --git a/Makefile b/Makefile
index 72e27c379eaf..3b1c6cff6700 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 30
+SUBLEVEL = 31
EXTRAVERSION =
NAME = "People's Front"

diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index ac69f307dcfe..74953e76a57d 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -420,6 +420,14 @@ config ARC_HAS_ACCL_REGS
(also referred to as r58:r59). These can also be used by gcc as GPR so
kernel needs to save/restore per process

+config ARC_IRQ_NO_AUTOSAVE
+ bool "Disable hardware autosave regfile on interrupts"
+ default n
+ help
+ On HS cores, taken interrupt auto saves the regfile on stack.
+ This is programmable and can be optionally disabled in which case
+ software INTERRUPT_PROLOGUE/EPILGUE do the needed work
+
endif # ISA_ARCV2

endmenu # "ARC CPU Configuration"
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 49bfbd879caa..bdbdaef902eb 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -151,6 +151,14 @@ struct bcr_isa_arcv2 {
#endif
};

+struct bcr_uarch_build_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:8, prod:8, maj:8, min:8;
+#else
+ unsigned int min:8, maj:8, prod:8, pad:8;
+#endif
+};
+
struct bcr_mpy {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 309f4e6721b3..225e7df2d8ed 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -17,6 +17,33 @@
;
; Now manually save: r12, sp, fp, gp, r25

+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+ st.as r9, [sp, -10] ; save r9 in it's final stack slot
+ sub sp, sp, 12 ; skip JLI, LDI, EI
+
+ PUSH lp_count
+ PUSHAX lp_start
+ PUSHAX lp_end
+ PUSH blink
+
+ PUSH r11
+ PUSH r10
+
+ sub sp, sp, 4 ; skip r9
+
+ PUSH r8
+ PUSH r7
+ PUSH r6
+ PUSH r5
+ PUSH r4
+ PUSH r3
+ PUSH r2
+ PUSH r1
+ PUSH r0
+.endif
+#endif
+
#ifdef CONFIG_ARC_HAS_ACCL_REGS
PUSH r59
PUSH r58
@@ -86,6 +113,33 @@
POP r59
#endif

+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+ POP r0
+ POP r1
+ POP r2
+ POP r3
+ POP r4
+ POP r5
+ POP r6
+ POP r7
+ POP r8
+ POP r9
+ POP r10
+ POP r11
+
+ POP blink
+ POPAX lp_end
+ POPAX lp_start
+
+ POP r9
+ mov lp_count, r9
+
+ add sp, sp, 12 ; skip JLI, LDI, EI
+ ld.as r9, [sp, -10] ; reload r9 which got clobbered
+.endif
+#endif
+
.endm

/*------------------------------------------------------------------------*/
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index c9173c02081c..eabc3efa6c6d 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");

return n;
}
@@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");

return n;
}
@@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
" .previous \n"
: "+r"(d_char), "+r"(res)
: "i"(0)
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");

return res;
}
@@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
" .previous \n"
: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
: "g"(-EFAULT), "r"(count)
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");

return res;
}
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cc558a25b8fa..562089d62d9d 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -209,7 +209,9 @@ restore_regs:
;####### Return from Intr #######

debug_marker_l1:
- bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+ ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+ btst r0, STATUS_DE_BIT ; Z flag set if bit clear
+ bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set

.Lisr_ret_fast_path:
; Handle special case #1: (Entry via Exception, Return via IRQ)
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 067ea362fb3e..cf18b3e5a934 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -49,11 +49,13 @@ void arc_init_IRQ(void)

*(unsigned int *)&ictrl = 0;

+#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */
ictrl.save_blink = 1;
ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */
ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */
ictrl.save_idx_regs = 1; /* JLI, LDI, EI */
+#endif

WRITE_AUX(AUX_IRQ_CTRL, ictrl);

diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 62a30e58441c..3320ca2fe20f 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -196,13 +196,29 @@ static void read_arc_build_cfg_regs(void)
cpu->bpu.num_pred = 2048 << bpu.pte;

if (cpu->core.family >= 0x54) {
- unsigned int exec_ctrl;

- READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
- cpu->extn.dual_enb = !(exec_ctrl & 1);
+ struct bcr_uarch_build_arcv2 uarch;

- /* dual issue always present for this core */
- cpu->extn.dual = 1;
+ /*
+ * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
+ * dual issue only (HS4x). But next uarch rev (1:0)
+ * allows it be configured for single issue (HS3x)
+ * Ensure we fiddle with dual issue only on HS4x
+ */
+ READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
+
+ if (uarch.prod == 4) {
+ unsigned int exec_ctrl;
+
+ /* dual issue hardware always present */
+ cpu->extn.dual = 1;
+
+ READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+
+ /* dual issue hardware enabled ? */
+ cpu->extn.dual_enb = !(exec_ctrl & 1);
+
+ }
}
}

diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index d61044dd8b58..ea14b0bf3116 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -25,15 +25,11 @@
#endif

#ifdef CONFIG_ARC_HAS_LL64
-# define PREFETCH_READ(RX) prefetch [RX, 56]
-# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
# define ZOLSHFT 5
# define ZOLAND 0x1F
#else
-# define PREFETCH_READ(RX) prefetch [RX, 28]
-# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
# define LOADX(DST,RX) ld.ab DST, [RX, 4]
# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
# define ZOLSHFT 4
@@ -41,8 +37,6 @@
#endif

ENTRY_CFI(memcpy)
- prefetch [r1] ; Prefetch the read location
- prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy32_64bytes
;; LOOP START
LOADX (r6, r1)
- PREFETCH_READ (r1)
- PREFETCH_WRITE (r3)
LOADX (r8, r1)
LOADX (r10, r1)
LOADX (r4, r1)
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_1
;; LOOP START
ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location

SHIFT_1 (r7, r6, 24)
or r7, r7, r5
@@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_2
;; LOOP START
ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location

SHIFT_1 (r7, r6, 16)
or r7, r7, r5
@@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_3
;; LOOP START
ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location

SHIFT_1 (r7, r6, 8)
or r7, r7, r5
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index 9356753c2ed8..c285a83cbf08 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK
bool "ARC HS Development Kit SOC"
depends on ISA_ARCV2
select ARC_HAS_ACCL_REGS
+ select ARC_IRQ_NO_AUTOSAVE
select CLK_HSDK
select RESET_HSDK
select MIGHT_HAVE_PCI
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e8cd55a5b04c..cd4c74daf71e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1444,6 +1444,7 @@ config NR_CPUS
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
+ select GENERIC_IRQ_MIGRATION
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
|
|
index f3ac7483afed..5d04dc68cf57 100644
|
|
--- a/arch/arm/boot/dts/armada-xp-db.dts
|
|
+++ b/arch/arm/boot/dts/armada-xp-db.dts
|
|
@@ -144,30 +144,32 @@
|
|
status = "okay";
|
|
};
|
|
|
|
- nand@d0000 {
|
|
+ nand-controller@d0000 {
|
|
status = "okay";
|
|
- label = "pxa3xx_nand-0";
|
|
- num-cs = <1>;
|
|
- marvell,nand-keep-config;
|
|
- nand-on-flash-bbt;
|
|
-
|
|
- partitions {
|
|
- compatible = "fixed-partitions";
|
|
- #address-cells = <1>;
|
|
- #size-cells = <1>;
|
|
-
|
|
- partition@0 {
|
|
- label = "U-Boot";
|
|
- reg = <0 0x800000>;
|
|
- };
|
|
- partition@800000 {
|
|
- label = "Linux";
|
|
- reg = <0x800000 0x800000>;
|
|
- };
|
|
- partition@1000000 {
|
|
- label = "Filesystem";
|
|
- reg = <0x1000000 0x3f000000>;
|
|
|
|
+ nand@0 {
|
|
+ reg = <0>;
|
|
+ label = "pxa3xx_nand-0";
|
|
+ nand-rb = <0>;
|
|
+ nand-on-flash-bbt;
|
|
+
|
|
+ partitions {
|
|
+ compatible = "fixed-partitions";
|
|
+ #address-cells = <1>;
|
|
+ #size-cells = <1>;
|
|
+
|
|
+ partition@0 {
|
|
+ label = "U-Boot";
|
|
+ reg = <0 0x800000>;
|
|
+ };
|
|
+ partition@800000 {
|
|
+ label = "Linux";
|
|
+ reg = <0x800000 0x800000>;
|
|
+ };
|
|
+ partition@1000000 {
|
|
+ label = "Filesystem";
|
|
+ reg = <0x1000000 0x3f000000>;
|
|
+ };
|
|
};
|
|
};
|
|
};
|
|
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
|
|
index 1139e9469a83..b4cca507cf13 100644
|
|
--- a/arch/arm/boot/dts/armada-xp-gp.dts
|
|
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
|
|
@@ -160,12 +160,15 @@
|
|
status = "okay";
|
|
};
|
|
|
|
- nand@d0000 {
|
|
+ nand-controller@d0000 {
|
|
status = "okay";
|
|
- label = "pxa3xx_nand-0";
|
|
- num-cs = <1>;
|
|
- marvell,nand-keep-config;
|
|
- nand-on-flash-bbt;
|
|
+
|
|
+ nand@0 {
|
|
+ reg = <0>;
|
|
+ label = "pxa3xx_nand-0";
|
|
+ nand-rb = <0>;
|
|
+ nand-on-flash-bbt;
|
|
+ };
|
|
};
|
|
};
|
|
|
|
diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
|
|
index bbbb38888bb8..87dcb502f72d 100644
|
|
--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
|
|
+++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
|
|
@@ -81,49 +81,52 @@
|
|
|
|
};
|
|
|
|
- nand@d0000 {
|
|
+ nand-controller@d0000 {
|
|
status = "okay";
|
|
- label = "pxa3xx_nand-0";
|
|
- num-cs = <1>;
|
|
- marvell,nand-keep-config;
|
|
- nand-on-flash-bbt;
|
|
-
|
|
- partitions {
|
|
- compatible = "fixed-partitions";
|
|
- #address-cells = <1>;
|
|
- #size-cells = <1>;
|
|
-
|
|
- partition@0 {
|
|
- label = "u-boot";
|
|
- reg = <0x00000000 0x000e0000>;
|
|
- read-only;
|
|
- };
|
|
-
|
|
- partition@e0000 {
|
|
- label = "u-boot-env";
|
|
- reg = <0x000e0000 0x00020000>;
|
|
- read-only;
|
|
- };
|
|
-
|
|
- partition@100000 {
|
|
- label = "u-boot-env2";
|
|
- reg = <0x00100000 0x00020000>;
|
|
- read-only;
|
|
- };
|
|
-
|
|
- partition@120000 {
|
|
- label = "zImage";
|
|
- reg = <0x00120000 0x00400000>;
|
|
- };
|
|
-
|
|
- partition@520000 {
|
|
- label = "initrd";
|
|
- reg = <0x00520000 0x00400000>;
|
|
- };
|
|
|
|
- partition@e00000 {
|
|
- label = "boot";
|
|
- reg = <0x00e00000 0x3f200000>;
|
|
+ nand@0 {
|
|
+ reg = <0>;
|
|
+ label = "pxa3xx_nand-0";
|
|
+ nand-rb = <0>;
|
|
+ nand-on-flash-bbt;
|
|
+
|
|
+ partitions {
|
|
+ compatible = "fixed-partitions";
|
|
+ #address-cells = <1>;
|
|
+ #size-cells = <1>;
|
|
+
|
|
+ partition@0 {
|
|
+ label = "u-boot";
|
|
+ reg = <0x00000000 0x000e0000>;
|
|
+ read-only;
|
|
+ };
|
|
+
|
|
+ partition@e0000 {
|
|
+ label = "u-boot-env";
|
|
+ reg = <0x000e0000 0x00020000>;
|
|
+ read-only;
|
|
+ };
|
|
+
|
|
+ partition@100000 {
|
|
+ label = "u-boot-env2";
|
|
+ reg = <0x00100000 0x00020000>;
|
|
+ read-only;
|
|
+ };
|
|
+
|
|
+ partition@120000 {
|
|
+ label = "zImage";
|
|
+ reg = <0x00120000 0x00400000>;
|
|
+ };
|
|
+
|
|
+ partition@520000 {
|
|
+ label = "initrd";
|
|
+ reg = <0x00520000 0x00400000>;
|
|
+ };
|
|
+
|
|
+ partition@e00000 {
|
|
+ label = "boot";
|
|
+ reg = <0x00e00000 0x3f200000>;
|
|
+ };
|
|
};
|
|
};
|
|
};
|
|
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
|
|
index 04758a2a87f0..67d77eee9433 100644
|
|
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
|
|
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
|
|
@@ -644,6 +644,17 @@
|
|
};
|
|
};
|
|
|
|
+/* Configure pwm clock source for timers 8 & 9 */
|
|
+&timer8 {
|
|
+ assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
|
|
+ assigned-clock-parents = <&sys_clkin_ck>;
|
|
+};
|
|
+
|
|
+&timer9 {
|
|
+ assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
|
|
+ assigned-clock-parents = <&sys_clkin_ck>;
|
|
+};
|
|
+
|
|
/*
|
|
* As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
|
|
* uart1 wakeirq.
|
|
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
|
|
index d5f11d6d987e..bc85b6a166c7 100644
|
|
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
|
|
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
|
|
@@ -13,10 +13,25 @@
|
|
stdout-path = "serial0:115200n8";
|
|
};
|
|
|
|
- memory@80000000 {
|
|
+ /*
|
|
+ * Note that recent version of the device tree compiler (starting with
|
|
+ * version 1.4.2) warn about this node containing a reg property, but
|
|
+ * missing a unit-address. However, the bootloader on these Chromebook
|
|
+ * devices relies on the full name of this node to be exactly /memory.
|
|
+ * Adding the unit-address causes the bootloader to create a /memory
|
|
+ * node and write the memory bank configuration to that node, which in
|
|
+ * turn leads the kernel to believe that the device has 2 GiB of
|
|
+ * memory instead of the amount detected by the bootloader.
|
|
+ *
|
|
+ * The name of this node is effectively ABI and must not be changed.
|
|
+ */
|
|
+ memory {
|
|
+ device_type = "memory";
|
|
reg = <0x0 0x80000000 0x0 0x80000000>;
|
|
};
|
|
|
|
+ /delete-node/ memory@80000000;
|
|
+
|
|
host1x@50000000 {
|
|
hdmi@54280000 {
|
|
status = "okay";
|
|
diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S
|
|
index ce45ba0c0687..16019b5961e7 100644
|
|
--- a/arch/arm/crypto/crct10dif-ce-core.S
|
|
+++ b/arch/arm/crypto/crct10dif-ce-core.S
|
|
@@ -124,10 +124,10 @@ ENTRY(crc_t10dif_pmull)
|
|
vext.8 q10, qzr, q0, #4
|
|
|
|
// receive the initial 64B data, xor the initial crc value
|
|
- vld1.64 {q0-q1}, [arg2, :128]!
|
|
- vld1.64 {q2-q3}, [arg2, :128]!
|
|
- vld1.64 {q4-q5}, [arg2, :128]!
|
|
- vld1.64 {q6-q7}, [arg2, :128]!
|
|
+ vld1.64 {q0-q1}, [arg2]!
|
|
+ vld1.64 {q2-q3}, [arg2]!
|
|
+ vld1.64 {q4-q5}, [arg2]!
|
|
+ vld1.64 {q6-q7}, [arg2]!
|
|
CPU_LE( vrev64.8 q0, q0 )
|
|
CPU_LE( vrev64.8 q1, q1 )
|
|
CPU_LE( vrev64.8 q2, q2 )
|
|
@@ -167,7 +167,7 @@ CPU_LE( vrev64.8 q7, q7 )
|
|
_fold_64_B_loop:
|
|
|
|
.macro fold64, reg1, reg2
|
|
- vld1.64 {q11-q12}, [arg2, :128]!
|
|
+ vld1.64 {q11-q12}, [arg2]!
|
|
|
|
vmull.p64 q8, \reg1\()h, d21
|
|
vmull.p64 \reg1, \reg1\()l, d20
|
|
@@ -238,7 +238,7 @@ _16B_reduction_loop:
|
|
vmull.p64 q7, d15, d21
|
|
veor.8 q7, q7, q8
|
|
|
|
- vld1.64 {q0}, [arg2, :128]!
|
|
+ vld1.64 {q0}, [arg2]!
|
|
CPU_LE( vrev64.8 q0, q0 )
|
|
vswp d0, d1
|
|
veor.8 q7, q7, q0
|
|
@@ -335,7 +335,7 @@ _less_than_128:
|
|
vmov.i8 q0, #0
|
|
vmov s3, arg1_low32 // get the initial crc value
|
|
|
|
- vld1.64 {q7}, [arg2, :128]!
|
|
+ vld1.64 {q7}, [arg2]!
|
|
CPU_LE( vrev64.8 q7, q7 )
|
|
vswp d14, d15
|
|
veor.8 q7, q7, q0
|
|
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
|
|
index d428355cf38d..14c19c70a841 100644
|
|
--- a/arch/arm/crypto/crct10dif-ce-glue.c
|
|
+++ b/arch/arm/crypto/crct10dif-ce-glue.c
|
|
@@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
|
|
unsigned int length)
|
|
{
|
|
u16 *crc = shash_desc_ctx(desc);
|
|
- unsigned int l;
|
|
|
|
- if (!may_use_simd()) {
|
|
- *crc = crc_t10dif_generic(*crc, data, length);
|
|
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
|
|
+ kernel_neon_begin();
|
|
+ *crc = crc_t10dif_pmull(*crc, data, length);
|
|
+ kernel_neon_end();
|
|
} else {
|
|
- if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
|
|
- l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
|
|
- ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
|
|
-
|
|
- *crc = crc_t10dif_generic(*crc, data, l);
|
|
-
|
|
- length -= l;
|
|
- data += l;
|
|
- }
|
|
- if (length > 0) {
|
|
- kernel_neon_begin();
|
|
- *crc = crc_t10dif_pmull(*crc, data, length);
|
|
- kernel_neon_end();
|
|
- }
|
|
+ *crc = crc_t10dif_generic(*crc, data, length);
|
|
}
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
|
|
index c883fcbe93b6..46d41140df27 100644
|
|
--- a/arch/arm/include/asm/irq.h
|
|
+++ b/arch/arm/include/asm/irq.h
|
|
@@ -25,7 +25,6 @@
|
|
#ifndef __ASSEMBLY__
|
|
struct irqaction;
|
|
struct pt_regs;
|
|
-extern void migrate_irqs(void);
|
|
|
|
extern void asm_do_IRQ(unsigned int, struct pt_regs *);
|
|
void handle_IRQ(unsigned int, struct pt_regs *);
|
|
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
|
|
index 3ad482d2f1eb..d0d0227fc70d 100644
|
|
--- a/arch/arm/include/asm/kvm_host.h
|
|
+++ b/arch/arm/include/asm/kvm_host.h
|
|
@@ -48,6 +48,7 @@
|
|
#define KVM_REQ_SLEEP \
|
|
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
|
|
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
|
|
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
|
|
|
|
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
|
|
|
|
@@ -147,6 +148,13 @@ struct kvm_cpu_context {
|
|
|
|
typedef struct kvm_cpu_context kvm_cpu_context_t;
|
|
|
|
+struct vcpu_reset_state {
|
|
+ unsigned long pc;
|
|
+ unsigned long r0;
|
|
+ bool be;
|
|
+ bool reset;
|
|
+};
|
|
+
|
|
struct kvm_vcpu_arch {
|
|
struct kvm_cpu_context ctxt;
|
|
|
|
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
|
|
/* Cache some mmu pages needed inside spinlock regions */
|
|
struct kvm_mmu_memory_cache mmu_page_cache;
|
|
|
|
+ struct vcpu_reset_state reset_state;
|
|
+
|
|
/* Detect first run of a vcpu */
|
|
bool has_run_once;
|
|
};
|
|
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
|
|
index 9908dacf9229..844861368cd5 100644
|
|
--- a/arch/arm/kernel/irq.c
|
|
+++ b/arch/arm/kernel/irq.c
|
|
@@ -31,7 +31,6 @@
|
|
#include <linux/smp.h>
|
|
#include <linux/init.h>
|
|
#include <linux/seq_file.h>
|
|
-#include <linux/ratelimit.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/list.h>
|
|
#include <linux/kallsyms.h>
|
|
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
|
|
return nr_irqs;
|
|
}
|
|
#endif
|
|
-
|
|
-#ifdef CONFIG_HOTPLUG_CPU
|
|
-static bool migrate_one_irq(struct irq_desc *desc)
|
|
-{
|
|
- struct irq_data *d = irq_desc_get_irq_data(desc);
|
|
- const struct cpumask *affinity = irq_data_get_affinity_mask(d);
|
|
- struct irq_chip *c;
|
|
- bool ret = false;
|
|
-
|
|
- /*
|
|
- * If this is a per-CPU interrupt, or the affinity does not
|
|
- * include this CPU, then we have nothing to do.
|
|
- */
|
|
- if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
|
|
- return false;
|
|
-
|
|
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
|
|
- affinity = cpu_online_mask;
|
|
- ret = true;
|
|
- }
|
|
-
|
|
- c = irq_data_get_irq_chip(d);
|
|
- if (!c->irq_set_affinity)
|
|
- pr_debug("IRQ%u: unable to set affinity\n", d->irq);
|
|
- else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
|
|
- cpumask_copy(irq_data_get_affinity_mask(d), affinity);
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-/*
|
|
- * The current CPU has been marked offline. Migrate IRQs off this CPU.
|
|
- * If the affinity settings do not allow other CPUs, force them onto any
|
|
- * available CPU.
|
|
- *
|
|
- * Note: we must iterate over all IRQs, whether they have an attached
|
|
- * action structure or not, as we need to get chained interrupts too.
|
|
- */
|
|
-void migrate_irqs(void)
|
|
-{
|
|
- unsigned int i;
|
|
- struct irq_desc *desc;
|
|
- unsigned long flags;
|
|
-
|
|
- local_irq_save(flags);
|
|
-
|
|
- for_each_irq_desc(i, desc) {
|
|
- bool affinity_broken;
|
|
-
|
|
- raw_spin_lock(&desc->lock);
|
|
- affinity_broken = migrate_one_irq(desc);
|
|
- raw_spin_unlock(&desc->lock);
|
|
-
|
|
- if (affinity_broken)
|
|
- pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
|
|
- i, smp_processor_id());
|
|
- }
|
|
-
|
|
- local_irq_restore(flags);
|
|
-}
|
|
-#endif /* CONFIG_HOTPLUG_CPU */
|
|
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
|
|
index 3bf82232b1be..1d6f5ea522f4 100644
|
|
--- a/arch/arm/kernel/smp.c
|
|
+++ b/arch/arm/kernel/smp.c
|
|
@@ -254,7 +254,7 @@ int __cpu_disable(void)
|
|
/*
|
|
* OK - migrate IRQs away from this CPU
|
|
*/
|
|
- migrate_irqs();
|
|
+ irq_migrate_all_off_this_cpu();
|
|
|
|
/*
|
|
* Flush user cache and TLB mappings, and then remove this CPU
|
|
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
|
|
index cb094e55dc5f..fd6cde23bb5d 100644
|
|
--- a/arch/arm/kvm/coproc.c
|
|
+++ b/arch/arm/kvm/coproc.c
|
|
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
|
|
reset_coproc_regs(vcpu, table, num);
|
|
|
|
for (num = 1; num < NR_CP15_REGS; num++)
|
|
- if (vcpu_cp15(vcpu, num) == 0x42424242)
|
|
- panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
|
|
+ WARN(vcpu_cp15(vcpu, num) == 0x42424242,
|
|
+ "Didn't reset vcpu_cp15(vcpu, %zi)", num);
|
|
}
|
|
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
|
|
index 5ed0c3ee33d6..e53327912adc 100644
|
|
--- a/arch/arm/kvm/reset.c
|
|
+++ b/arch/arm/kvm/reset.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <asm/cputype.h>
|
|
#include <asm/kvm_arm.h>
|
|
#include <asm/kvm_coproc.h>
|
|
+#include <asm/kvm_emulate.h>
|
|
|
|
#include <kvm/arm_arch_timer.h>
|
|
|
|
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
|
/* Reset CP15 registers */
|
|
kvm_reset_coprocs(vcpu);
|
|
|
|
+ /*
|
|
+ * Additional reset state handling that PSCI may have imposed on us.
|
|
+ * Must be done after all the sys_reg reset.
|
|
+ */
|
|
+ if (READ_ONCE(vcpu->arch.reset_state.reset)) {
|
|
+ unsigned long target_pc = vcpu->arch.reset_state.pc;
|
|
+
|
|
+ /* Gracefully handle Thumb2 entry point */
|
|
+ if (target_pc & 1) {
|
|
+ target_pc &= ~1UL;
|
|
+ vcpu_set_thumb(vcpu);
|
|
+ }
|
|
+
|
|
+ /* Propagate caller endianness */
|
|
+ if (vcpu->arch.reset_state.be)
|
|
+ kvm_vcpu_set_be(vcpu);
|
|
+
|
|
+ *vcpu_pc(vcpu) = target_pc;
|
|
+ vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
|
|
+
|
|
+ vcpu->arch.reset_state.reset = false;
|
|
+ }
|
|
+
|
|
/* Reset arch_timer context */
|
|
return kvm_timer_vcpu_reset(vcpu);
|
|
}
|
|
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
|
|
index a8b291f00109..dae514c8276a 100644
|
|
--- a/arch/arm/mach-omap2/cpuidle44xx.c
|
|
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
|
|
@@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
|
|
mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
|
|
(cx->mpu_logic_state == PWRDM_POWER_OFF);
|
|
|
|
+ /* Enter broadcast mode for periodic timers */
|
|
+ tick_broadcast_enable();
|
|
+
|
|
+ /* Enter broadcast mode for one-shot timers */
|
|
tick_broadcast_enter();
|
|
|
|
/*
|
|
@@ -218,15 +222,6 @@ fail:
|
|
return index;
|
|
}
|
|
|
|
-/*
|
|
- * For each cpu, setup the broadcast timer because local timers
|
|
- * stops for the states above C1.
|
|
- */
|
|
-static void omap_setup_broadcast_timer(void *arg)
|
|
-{
|
|
- tick_broadcast_enable();
|
|
-}
|
|
-
|
|
static struct cpuidle_driver omap4_idle_driver = {
|
|
.name = "omap4_idle",
|
|
.owner = THIS_MODULE,
|
|
@@ -319,8 +314,5 @@ int __init omap4_idle_init(void)
|
|
if (!cpu_clkdm[0] || !cpu_clkdm[1])
|
|
return -ENODEV;
|
|
|
|
- /* Configure the broadcast timer on each cpu */
|
|
- on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
|
|
-
|
|
return cpuidle_register(idle_driver, cpu_online_mask);
|
|
}
|
|
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
|
|
index 9500b6e27380..5d73f2c0b117 100644
|
|
--- a/arch/arm/mach-omap2/display.c
|
|
+++ b/arch/arm/mach-omap2/display.c
|
|
@@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
|
|
u32 enable_mask, enable_shift;
|
|
u32 pipd_mask, pipd_shift;
|
|
u32 reg;
|
|
+ int ret;
|
|
|
|
if (dsi_id == 0) {
|
|
enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
|
|
@@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
|
|
return -ENODEV;
|
|
}
|
|
|
|
- regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, ®);
|
|
+ ret = regmap_read(omap4_dsi_mux_syscon,
|
|
+ OMAP4_DSIPHY_SYSCON_OFFSET,
|
|
+ ®);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
reg &= ~enable_mask;
|
|
reg &= ~pipd_mask;
|
|
diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
|
|
index 058ce73137e8..5d819b6ea428 100644
|
|
--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
|
|
+++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
|
|
@@ -65,16 +65,16 @@ static int osiris_dvs_notify(struct notifier_block *nb,
|
|
|
|
switch (val) {
|
|
case CPUFREQ_PRECHANGE:
|
|
- if (old_dvs & !new_dvs ||
|
|
- cur_dvs & !new_dvs) {
|
|
+ if ((old_dvs && !new_dvs) ||
|
|
+ (cur_dvs && !new_dvs)) {
|
|
pr_debug("%s: exiting dvs\n", __func__);
|
|
cur_dvs = false;
|
|
gpio_set_value(OSIRIS_GPIO_DVS, 1);
|
|
}
|
|
break;
|
|
case CPUFREQ_POSTCHANGE:
|
|
- if (!old_dvs & new_dvs ||
|
|
- !cur_dvs & new_dvs) {
|
|
+ if ((!old_dvs && new_dvs) ||
|
|
+ (!cur_dvs && new_dvs)) {
|
|
pr_debug("entering dvs\n");
|
|
cur_dvs = true;
|
|
gpio_set_value(OSIRIS_GPIO_DVS, 0);
|
|
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
|
|
index 1cb9c0f9b5d6..8211cf45ece1 100644
|
|
--- a/arch/arm/mm/dma-mapping.c
|
|
+++ b/arch/arm/mm/dma-mapping.c
|
|
@@ -2400,4 +2400,6 @@ void arch_teardown_dma_ops(struct device *dev)
|
|
return;
|
|
|
|
arm_teardown_iommu_dma_ops(dev);
|
|
+ /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
|
|
+ set_dma_ops(dev, NULL);
|
|
}
|
|
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
|
|
index 1ee0dc0d9f10..d1cf404b8708 100644
|
|
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
|
|
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
|
|
@@ -22,7 +22,7 @@
|
|
backlight = <&backlight>;
|
|
power-supply = <&pp3300_disp>;
|
|
|
|
- ports {
|
|
+ port {
|
|
panel_in_edp: endpoint {
|
|
remote-endpoint = <&edp_out_panel>;
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
|
|
index 2cc7c47d6a85..65637a5a4b21 100644
|
|
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
|
|
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
|
|
@@ -43,7 +43,7 @@
|
|
backlight = <&backlight>;
|
|
power-supply = <&pp3300_disp>;
|
|
|
|
- ports {
|
|
+ port {
|
|
panel_in_edp: endpoint {
|
|
remote-endpoint = <&edp_out_panel>;
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
|
|
index fef2c0608999..b14d83919f14 100644
|
|
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
|
|
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
|
|
@@ -50,7 +50,7 @@
|
|
pinctrl-0 = <&lcd_panel_reset>;
|
|
power-supply = <&vcc3v3_s0>;
|
|
|
|
- ports {
|
|
+ port {
|
|
panel_in_edp: endpoint {
|
|
remote-endpoint = <&edp_out_panel>;
|
|
};
|
|
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
|
|
index e3a375c4cb83..1b151442dac1 100644
|
|
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
|
|
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
|
|
@@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
|
|
beq 10f
|
|
ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
|
|
b 7b
|
|
-8: mov w7, w8
|
|
+8: cbz w8, 91f
|
|
+ mov w7, w8
|
|
add w8, w8, #16
|
|
9: ext v1.16b, v1.16b, v1.16b, #1
|
|
adds w7, w7, #1
|
|
bne 9b
|
|
- eor v0.16b, v0.16b, v1.16b
|
|
+91: eor v0.16b, v0.16b, v1.16b
|
|
st1 {v0.16b}, [x0]
|
|
10: str w8, [x3]
|
|
ret
|
|
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
|
|
index 68b11aa690e4..986191e8c058 100644
|
|
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
|
|
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
|
|
@@ -125,7 +125,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
|
|
abytes -= added;
|
|
}
|
|
|
|
- while (abytes > AES_BLOCK_SIZE) {
|
|
+ while (abytes >= AES_BLOCK_SIZE) {
|
|
__aes_arm64_encrypt(key->key_enc, mac, mac,
|
|
num_rounds(key));
|
|
crypto_xor(mac, in, AES_BLOCK_SIZE);
|
|
@@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
|
|
num_rounds(key));
|
|
crypto_xor(mac, in, abytes);
|
|
*macp = abytes;
|
|
- } else {
|
|
- *macp = 0;
|
|
}
|
|
}
|
|
}
|
|
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
|
|
index e613a87f8b53..8432c8d0dea6 100644
|
|
--- a/arch/arm64/crypto/aes-neonbs-core.S
|
|
+++ b/arch/arm64/crypto/aes-neonbs-core.S
|
|
@@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 )
|
|
|
|
8: next_ctr v0
|
|
st1 {v0.16b}, [x24]
|
|
- cbz x23, 0f
|
|
+ cbz x23, .Lctr_done
|
|
|
|
cond_yield_neon 98b
|
|
b 99b
|
|
|
|
-0: frame_pop
|
|
+.Lctr_done:
|
|
+ frame_pop
|
|
ret
|
|
|
|
/*
|
|
* If we are handling the tail of the input (x6 != NULL), return the
|
|
* final keystream block back to the caller.
|
|
*/
|
|
+0: cbz x25, 8b
|
|
+ st1 {v0.16b}, [x25]
|
|
+ b 8b
|
|
1: cbz x25, 8b
|
|
st1 {v1.16b}, [x25]
|
|
b 8b
|
|
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
|
|
index 96f0cae4a022..617bcfc1b080 100644
|
|
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
|
|
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
|
|
@@ -36,26 +36,13 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
|
|
unsigned int length)
|
|
{
|
|
u16 *crc = shash_desc_ctx(desc);
|
|
- unsigned int l;
|
|
|
|
- if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
|
|
- l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
|
|
- ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
|
|
-
|
|
- *crc = crc_t10dif_generic(*crc, data, l);
|
|
-
|
|
- length -= l;
|
|
- data += l;
|
|
- }
|
|
-
|
|
- if (length > 0) {
|
|
- if (may_use_simd()) {
|
|
- kernel_neon_begin();
|
|
- *crc = crc_t10dif_pmull(*crc, data, length);
|
|
- kernel_neon_end();
|
|
- } else {
|
|
- *crc = crc_t10dif_generic(*crc, data, length);
|
|
- }
|
|
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
|
|
+ kernel_neon_begin();
|
|
+ *crc = crc_t10dif_pmull(*crc, data, length);
|
|
+ kernel_neon_end();
|
|
+ } else {
|
|
+ *crc = crc_t10dif_generic(*crc, data, length);
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
|
|
index 1473fc2f7ab7..89691c86640a 100644
|
|
--- a/arch/arm64/include/asm/hardirq.h
|
|
+++ b/arch/arm64/include/asm/hardirq.h
|
|
@@ -17,8 +17,12 @@
|
|
#define __ASM_HARDIRQ_H
|
|
|
|
#include <linux/cache.h>
|
|
+#include <linux/percpu.h>
|
|
#include <linux/threads.h>
|
|
+#include <asm/barrier.h>
|
|
#include <asm/irq.h>
|
|
+#include <asm/kvm_arm.h>
|
|
+#include <asm/sysreg.h>
|
|
|
|
#define NR_IPI 7
|
|
|
|
@@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
|
|
|
|
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
|
|
|
|
+struct nmi_ctx {
|
|
+ u64 hcr;
|
|
+};
|
|
+
|
|
+DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
|
|
+
|
|
+#define arch_nmi_enter() \
|
|
+ do { \
|
|
+ if (is_kernel_in_hyp_mode()) { \
|
|
+ struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
|
|
+ nmi_ctx->hcr = read_sysreg(hcr_el2); \
|
|
+ if (!(nmi_ctx->hcr & HCR_TGE)) { \
|
|
+ write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \
|
|
+ isb(); \
|
|
+ } \
|
|
+ } \
|
|
+ } while (0)
|
|
+
|
|
+#define arch_nmi_exit() \
|
|
+ do { \
|
|
+ if (is_kernel_in_hyp_mode()) { \
|
|
+ struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
|
|
+ if (!(nmi_ctx->hcr & HCR_TGE)) \
|
|
+ write_sysreg(nmi_ctx->hcr, hcr_el2); \
|
|
+ } \
|
|
+ } while (0)
|
|
+
|
|
static inline void ack_bad_irq(unsigned int irq)
|
|
{
|
|
extern unsigned long irq_err_count;
|
|
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
|
|
index 3d6d7336f871..6abe4002945f 100644
|
|
--- a/arch/arm64/include/asm/kvm_host.h
|
|
+++ b/arch/arm64/include/asm/kvm_host.h
|
|
@@ -48,6 +48,7 @@
|
|
#define KVM_REQ_SLEEP \
|
|
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
|
|
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
|
|
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
|
|
|
|
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
|
|
|
|
@@ -206,6 +207,13 @@ struct kvm_cpu_context {
|
|
|
|
typedef struct kvm_cpu_context kvm_cpu_context_t;
|
|
|
|
+struct vcpu_reset_state {
|
|
+ unsigned long pc;
|
|
+ unsigned long r0;
|
|
+ bool be;
|
|
+ bool reset;
|
|
+};
|
|
+
|
|
struct kvm_vcpu_arch {
|
|
struct kvm_cpu_context ctxt;
|
|
|
|
@@ -295,6 +303,9 @@ struct kvm_vcpu_arch {
|
|
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
|
|
u64 vsesr_el2;
|
|
|
|
+ /* Additional reset state */
|
|
+ struct vcpu_reset_state reset_state;
|
|
+
|
|
/* True when deferrable sysregs are loaded on the physical CPU,
|
|
* see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
|
|
bool sysregs_loaded_on_cpu;
|
|
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
|
|
index 651a06b1980f..77ca59598c8b 100644
|
|
--- a/arch/arm64/kernel/head.S
|
|
+++ b/arch/arm64/kernel/head.S
|
|
@@ -522,8 +522,7 @@ set_hcr:
|
|
/* GICv3 system register access */
|
|
mrs x0, id_aa64pfr0_el1
|
|
ubfx x0, x0, #24, #4
|
|
- cmp x0, #1
|
|
- b.ne 3f
|
|
+ cbz x0, 3f
|
|
|
|
mrs_s x0, SYS_ICC_SRE_EL2
|
|
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
|
|
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
|
|
index 780a12f59a8f..92fa81798fb9 100644
|
|
--- a/arch/arm64/kernel/irq.c
|
|
+++ b/arch/arm64/kernel/irq.c
|
|
@@ -33,6 +33,9 @@
|
|
|
|
unsigned long irq_err_count;
|
|
|
|
+/* Only access this in an NMI enter/exit */
|
|
+DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
|
|
+
|
|
DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
|
|
|
|
int arch_show_interrupts(struct seq_file *p, int prec)
|
|
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
|
|
index a20de58061a8..35f184a8fd85 100644
|
|
--- a/arch/arm64/kernel/kgdb.c
|
|
+++ b/arch/arm64/kernel/kgdb.c
|
|
@@ -244,27 +244,33 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
|
|
|
|
static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
|
|
{
|
|
+ if (user_mode(regs))
|
|
+ return DBG_HOOK_ERROR;
|
|
+
|
|
kgdb_handle_exception(1, SIGTRAP, 0, regs);
|
|
- return 0;
|
|
+ return DBG_HOOK_HANDLED;
|
|
}
|
|
NOKPROBE_SYMBOL(kgdb_brk_fn)
|
|
|
|
static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
|
|
{
|
|
+ if (user_mode(regs))
|
|
+ return DBG_HOOK_ERROR;
|
|
+
|
|
compiled_break = 1;
|
|
kgdb_handle_exception(1, SIGTRAP, 0, regs);
|
|
|
|
- return 0;
|
|
+ return DBG_HOOK_HANDLED;
|
|
}
|
|
NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
|
|
|
|
static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
|
|
{
|
|
- if (!kgdb_single_step)
|
|
+ if (user_mode(regs) || !kgdb_single_step)
|
|
return DBG_HOOK_ERROR;
|
|
|
|
kgdb_handle_exception(1, SIGTRAP, 0, regs);
|
|
- return 0;
|
|
+ return DBG_HOOK_HANDLED;
|
|
}
|
|
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
|
|
|
|
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
|
|
index 30bb13797034..2d63df112b89 100644
|
|
--- a/arch/arm64/kernel/probes/kprobes.c
|
|
+++ b/arch/arm64/kernel/probes/kprobes.c
|
|
@@ -450,6 +450,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
|
|
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
|
int retval;
|
|
|
|
+ if (user_mode(regs))
|
|
+ return DBG_HOOK_ERROR;
|
|
+
|
|
/* return error if this is not our step */
|
|
retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
|
|
|
|
@@ -466,6 +469,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
|
|
int __kprobes
|
|
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
|
|
{
|
|
+ if (user_mode(regs))
|
|
+ return DBG_HOOK_ERROR;
|
|
+
|
|
kprobe_handler(regs);
|
|
return DBG_HOOK_HANDLED;
|
|
}
|
|
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
|
|
index a1c32c1f2267..6290a4e81d57 100644
|
|
--- a/arch/arm64/kvm/hyp/switch.c
|
|
+++ b/arch/arm64/kvm/hyp/switch.c
|
|
@@ -23,6 +23,7 @@
|
|
#include <kvm/arm_psci.h>
|
|
|
|
#include <asm/cpufeature.h>
|
|
+#include <asm/kprobes.h>
|
|
#include <asm/kvm_asm.h>
|
|
#include <asm/kvm_emulate.h>
|
|
#include <asm/kvm_host.h>
|
|
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
|
|
|
|
write_sysreg(kvm_get_hyp_vector(), vbar_el1);
|
|
}
|
|
+NOKPROBE_SYMBOL(activate_traps_vhe);
|
|
|
|
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
|
|
{
|
|
@@ -146,6 +148,7 @@ static void deactivate_traps_vhe(void)
|
|
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
|
|
write_sysreg(vectors, vbar_el1);
|
|
}
|
|
+NOKPROBE_SYMBOL(deactivate_traps_vhe);
|
|
|
|
static void __hyp_text __deactivate_traps_nvhe(void)
|
|
{
|
|
@@ -529,6 +532,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
|
|
|
|
return exit_code;
|
|
}
|
|
+NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
|
|
|
|
/* Switch to the guest for legacy non-VHE systems */
|
|
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
|
|
@@ -636,6 +640,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
|
|
read_sysreg_el2(esr), read_sysreg_el2(far),
|
|
read_sysreg(hpfar_el2), par, vcpu);
|
|
}
|
|
+NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
|
|
|
|
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
|
|
{
|
|
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
|
|
index 9ce223944983..963d669ae3a2 100644
|
|
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
|
|
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
|
|
@@ -18,6 +18,7 @@
|
|
#include <linux/compiler.h>
|
|
#include <linux/kvm_host.h>
|
|
|
|
+#include <asm/kprobes.h>
|
|
#include <asm/kvm_asm.h>
|
|
#include <asm/kvm_emulate.h>
|
|
#include <asm/kvm_hyp.h>
|
|
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
|
|
{
|
|
__sysreg_save_common_state(ctxt);
|
|
}
|
|
+NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
|
|
|
|
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
|
|
{
|
|
__sysreg_save_common_state(ctxt);
|
|
__sysreg_save_el2_return_state(ctxt);
|
|
}
|
|
+NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
|
|
|
|
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
|
|
{
|
|
@@ -171,12 +174,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
|
|
{
|
|
__sysreg_restore_common_state(ctxt);
|
|
}
|
|
+NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
|
|
|
|
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
|
|
{
|
|
__sysreg_restore_common_state(ctxt);
|
|
__sysreg_restore_el2_return_state(ctxt);
|
|
}
|
|
+NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
|
|
|
|
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
|
|
{
|
|
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
|
|
index e37c78bbe1ca..18b9a522a2b3 100644
|
|
--- a/arch/arm64/kvm/reset.c
|
|
+++ b/arch/arm64/kvm/reset.c
|
|
@@ -31,6 +31,7 @@
|
|
#include <asm/kvm_arm.h>
|
|
#include <asm/kvm_asm.h>
|
|
#include <asm/kvm_coproc.h>
|
|
+#include <asm/kvm_emulate.h>
|
|
#include <asm/kvm_mmu.h>
|
|
|
|
/*
|
|
@@ -99,16 +100,33 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
|
|
* This function finds the right table above and sets the registers on
|
|
* the virtual CPU struct to their architecturally defined reset
|
|
* values.
|
|
+ *
|
|
+ * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
|
|
+ * ioctl or as part of handling a request issued by another VCPU in the PSCI
|
|
+ * handling code. In the first case, the VCPU will not be loaded, and in the
|
|
+ * second case the VCPU will be loaded. Because this function operates purely
|
|
+ * on the memory-backed valus of system registers, we want to do a full put if
|
|
+ * we were loaded (handling a request) and load the values back at the end of
|
|
+ * the function. Otherwise we leave the state alone. In both cases, we
|
|
+ * disable preemption around the vcpu reset as we would otherwise race with
|
|
+ * preempt notifiers which also call put/load.
|
|
*/
|
|
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
|
{
|
|
const struct kvm_regs *cpu_reset;
|
|
+ int ret = -EINVAL;
|
|
+ bool loaded;
|
|
+
|
|
+ preempt_disable();
|
|
+ loaded = (vcpu->cpu != -1);
|
|
+ if (loaded)
|
|
+ kvm_arch_vcpu_put(vcpu);
|
|
|
|
switch (vcpu->arch.target) {
|
|
default:
|
|
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
|
|
if (!cpu_has_32bit_el1())
|
|
- return -EINVAL;
|
|
+ goto out;
|
|
cpu_reset = &default_regs_reset32;
|
|
} else {
|
|
cpu_reset = &default_regs_reset;
|
|
@@ -123,6 +141,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
|
/* Reset system registers */
|
|
kvm_reset_sys_regs(vcpu);
|
|
|
|
+ /*
|
|
+ * Additional reset state handling that PSCI may have imposed on us.
|
|
+ * Must be done after all the sys_reg reset.
|
|
+ */
|
|
+ if (vcpu->arch.reset_state.reset) {
|
|
+ unsigned long target_pc = vcpu->arch.reset_state.pc;
|
|
+
|
|
+ /* Gracefully handle Thumb2 entry point */
|
|
+ if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
|
|
+ target_pc &= ~1UL;
|
|
+ vcpu_set_thumb(vcpu);
|
|
+ }
|
|
+
|
|
+ /* Propagate caller endianness */
|
|
+ if (vcpu->arch.reset_state.be)
|
|
+ kvm_vcpu_set_be(vcpu);
|
|
+
|
|
+ *vcpu_pc(vcpu) = target_pc;
|
|
+ vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
|
|
+
|
|
+ vcpu->arch.reset_state.reset = false;
|
|
+ }
|
|
+
|
|
/* Reset PMU */
|
|
kvm_pmu_vcpu_reset(vcpu);
|
|
|
|
@@ -131,5 +172,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
|
vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
|
|
|
|
/* Reset timer */
|
|
- return kvm_timer_vcpu_reset(vcpu);
|
|
+ ret = kvm_timer_vcpu_reset(vcpu);
|
|
+out:
|
|
+ if (loaded)
|
|
+ kvm_arch_vcpu_load(vcpu, smp_processor_id());
|
|
+ preempt_enable();
|
|
+ return ret;
|
|
}
|
|
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
|
|
index 22fbbdbece3c..d112af75680b 100644
|
|
--- a/arch/arm64/kvm/sys_regs.c
|
|
+++ b/arch/arm64/kvm/sys_regs.c
|
|
@@ -1456,7 +1456,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
|
|
|
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
|
|
{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
|
|
- { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
|
|
+ { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
|
|
};
|
|
|
|
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
|
|
@@ -2586,7 +2586,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
|
|
table = get_target_table(vcpu->arch.target, true, &num);
|
|
reset_sys_reg_descs(vcpu, table, num);
|
|
|
|
- for (num = 1; num < NR_SYS_REGS; num++)
|
|
- if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
|
|
- panic("Didn't reset __vcpu_sys_reg(%zi)", num);
|
|
+ for (num = 1; num < NR_SYS_REGS; num++) {
|
|
+ if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
|
|
+ "Didn't reset __vcpu_sys_reg(%zi)\n", num))
|
|
+ break;
|
|
+ }
|
|
}
|
|
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
|
|
index 997c9f20ea0f..4474e51ee53e 100644
|
|
--- a/arch/m68k/Makefile
|
|
+++ b/arch/m68k/Makefile
|
|
@@ -58,7 +58,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
|
|
cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
|
|
|
|
KBUILD_AFLAGS += $(cpuflags-y)
|
|
-KBUILD_CFLAGS += $(cpuflags-y) -pipe
|
|
+KBUILD_CFLAGS += $(cpuflags-y)
|
|
+
|
|
+KBUILD_CFLAGS += -pipe -ffreestanding
|
|
+
|
|
ifdef CONFIG_MMU
|
|
# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
|
|
KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
|
|
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
|
|
index 2c1c53d12179..f567ace7a9e9 100644
|
|
--- a/arch/mips/include/asm/kvm_host.h
|
|
+++ b/arch/mips/include/asm/kvm_host.h
|
|
@@ -1131,7 +1131,7 @@ static inline void kvm_arch_hardware_unsetup(void) {}
|
|
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
|
|
static inline void kvm_arch_free_memslot(struct kvm *kvm,
|
|
struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
|
|
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
|
|
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
|
|
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
|
|
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
|
|
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
|
|
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
|
|
index 50888388a359..02544939ef0b 100644
|
|
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
|
|
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
|
|
@@ -35,6 +35,14 @@ static inline int hstate_get_psize(struct hstate *hstate)
|
|
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
|
|
static inline bool gigantic_page_supported(void)
|
|
{
|
|
+ /*
|
|
+ * We used gigantic page reservation with hypervisor assist in some case.
|
|
+ * We cannot use runtime allocation of gigantic pages in those platforms
|
|
+ * This is hash translation mode LPARs.
|
|
+ */
|
|
+ if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
|
|
+ return false;
|
|
+
|
|
return true;
|
|
}
|
|
#endif
|
|
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
|
|
index 906bcbdfd2a1..bccc5051249e 100644
|
|
--- a/arch/powerpc/include/asm/kvm_host.h
|
|
+++ b/arch/powerpc/include/asm/kvm_host.h
|
|
@@ -822,7 +822,7 @@ struct kvm_vcpu_arch {
|
|
static inline void kvm_arch_hardware_disable(void) {}
|
|
static inline void kvm_arch_hardware_unsetup(void) {}
|
|
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
|
|
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
|
|
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
|
|
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
|
|
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
|
|
static inline void kvm_arch_exit(void) {}
|
|
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
|
|
index 2f3ff7a27881..d85fcfea32ca 100644
|
|
--- a/arch/powerpc/include/asm/powernv.h
|
|
+++ b/arch/powerpc/include/asm/powernv.h
|
|
@@ -23,6 +23,8 @@ extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
|
|
unsigned long *flags, unsigned long *status,
|
|
int count);
|
|
|
|
+void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
|
|
+
|
|
void pnv_tm_init(void);
|
|
#else
|
|
static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
|
|
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
|
|
index e58c3f467db5..26b3f853cbf6 100644
|
|
--- a/arch/powerpc/kernel/entry_32.S
|
|
+++ b/arch/powerpc/kernel/entry_32.S
|
|
@@ -745,6 +745,9 @@ fast_exception_return:
|
|
mtcr r10
|
|
lwz r10,_LINK(r11)
|
|
mtlr r10
|
|
+ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
|
|
+ li r10, 0
|
|
+ stw r10, 8(r11)
|
|
REST_GPR(10, r11)
|
|
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
|
|
mtspr SPRN_NRI, r0
|
|
@@ -982,6 +985,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
|
|
mtcrf 0xFF,r10
|
|
mtlr r11
|
|
|
|
+ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
|
|
+ li r10, 0
|
|
+ stw r10, 8(r1)
|
|
/*
|
|
* Once we put values in SRR0 and SRR1, we are in a state
|
|
* where exceptions are not recoverable, since taking an
|
|
@@ -1021,6 +1027,9 @@ exc_exit_restart_end:
|
|
mtlr r11
|
|
lwz r10,_CCR(r1)
|
|
mtcrf 0xff,r10
|
|
+ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
|
|
+ li r10, 0
|
|
+ stw r10, 8(r1)
|
|
REST_2GPRS(9, r1)
|
|
.globl exc_exit_restart
|
|
exc_exit_restart:
|
|
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
|
|
index bb6ac471a784..d29f2dca725b 100644
|
|
--- a/arch/powerpc/kernel/process.c
|
|
+++ b/arch/powerpc/kernel/process.c
|
|
@@ -180,7 +180,7 @@ static void __giveup_fpu(struct task_struct *tsk)
|
|
|
|
save_fpu(tsk);
|
|
msr = tsk->thread.regs->msr;
|
|
- msr &= ~MSR_FP;
|
|
+ msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
|
|
#ifdef CONFIG_VSX
|
|
if (cpu_has_feature(CPU_FTR_VSX))
|
|
msr &= ~MSR_VSX;
|
|
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
|
|
index 9667666eb18e..e08b32ccf1d9 100644
|
|
--- a/arch/powerpc/kernel/ptrace.c
|
|
+++ b/arch/powerpc/kernel/ptrace.c
|
|
@@ -561,6 +561,7 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
|
|
/*
|
|
* Copy out only the low-order word of vrsave.
|
|
*/
|
|
+ int start, end;
|
|
union {
|
|
elf_vrreg_t reg;
|
|
u32 word;
|
|
@@ -569,8 +570,10 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
|
|
|
|
vrsave.word = target->thread.vrsave;
|
|
|
|
+ start = 33 * sizeof(vector128);
|
|
+ end = start + sizeof(vrsave);
|
|
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
|
|
- 33 * sizeof(vector128), -1);
|
|
+ start, end);
|
|
}
|
|
|
|
return ret;
|
|
@@ -608,6 +611,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
|
|
/*
|
|
* We use only the first word of vrsave.
|
|
*/
|
|
+ int start, end;
|
|
union {
|
|
elf_vrreg_t reg;
|
|
u32 word;
|
|
@@ -616,8 +620,10 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
|
|
|
|
vrsave.word = target->thread.vrsave;
|
|
|
|
+ start = 33 * sizeof(vector128);
|
|
+ end = start + sizeof(vrsave);
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
|
|
- 33 * sizeof(vector128), -1);
|
|
+ start, end);
|
|
if (!ret)
|
|
target->thread.vrsave = vrsave.word;
|
|
}
|
|
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
|
|
index 8689a02b7df8..02fe6d020174 100644
|
|
--- a/arch/powerpc/kernel/traps.c
|
|
+++ b/arch/powerpc/kernel/traps.c
|
|
@@ -767,15 +767,15 @@ void machine_check_exception(struct pt_regs *regs)
|
|
if (check_io_access(regs))
|
|
goto bail;
|
|
|
|
- /* Must die if the interrupt is not recoverable */
|
|
- if (!(regs->msr & MSR_RI))
|
|
- nmi_panic(regs, "Unrecoverable Machine check");
|
|
-
|
|
if (!nested)
|
|
nmi_exit();
|
|
|
|
die("Machine check", regs, SIGBUS);
|
|
|
|
+ /* Must die if the interrupt is not recoverable */
|
|
+ if (!(regs->msr & MSR_RI))
|
|
+ nmi_panic(regs, "Unrecoverable Machine check");
|
|
+
|
|
return;
|
|
|
|
bail:
|
|
@@ -1545,8 +1545,8 @@ bail:
|
|
|
|
void StackOverflow(struct pt_regs *regs)
|
|
{
|
|
- printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
|
|
- current, regs->gpr[1]);
|
|
+ pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
|
|
+ current->comm, task_pid_nr(current), regs->gpr[1]);
|
|
debugger(regs);
|
|
show_regs(regs);
|
|
panic("kernel stack overflow");
|
|
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index 3d1ecd211776..8137f77abad5 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -26,13 +26,13 @@
 #define SS_MSR 0x74
 #define SS_SDR1 0x78
 #define SS_LR 0x7c
-#define SS_SPRG 0x80 /* 4 SPRGs */
-#define SS_DBAT 0x90 /* 8 DBATs */
-#define SS_IBAT 0xd0 /* 8 IBATs */
-#define SS_TB 0x110
-#define SS_CR 0x118
-#define SS_GPREG 0x11c /* r12-r31 */
-#define STATE_SAVE_SIZE 0x16c
+#define SS_SPRG 0x80 /* 8 SPRGs */
+#define SS_DBAT 0xa0 /* 8 DBATs */
+#define SS_IBAT 0xe0 /* 8 IBATs */
+#define SS_TB 0x120
+#define SS_CR 0x128
+#define SS_GPREG 0x12c /* r12-r31 */
+#define STATE_SAVE_SIZE 0x17c

 .section .data
 .align 5
@@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
 stw r7, SS_SPRG+12(r3)
 stw r8, SS_SDR1(r3)

+ mfspr r4, SPRN_SPRG4
+ mfspr r5, SPRN_SPRG5
+ mfspr r6, SPRN_SPRG6
+ mfspr r7, SPRN_SPRG7
+
+ stw r4, SS_SPRG+16(r3)
+ stw r5, SS_SPRG+20(r3)
+ stw r6, SS_SPRG+24(r3)
+ stw r7, SS_SPRG+28(r3)
+
 mfspr r4, SPRN_DBAT0U
 mfspr r5, SPRN_DBAT0L
 mfspr r6, SPRN_DBAT1U
@@ -493,6 +503,16 @@ mpc83xx_deep_resume:
 mtspr SPRN_IBAT7U, r6
 mtspr SPRN_IBAT7L, r7

+ lwz r4, SS_SPRG+16(r3)
+ lwz r5, SS_SPRG+20(r3)
+ lwz r6, SS_SPRG+24(r3)
+ lwz r7, SS_SPRG+28(r3)
+
+ mtspr SPRN_SPRG4, r4
+ mtspr SPRN_SPRG5, r5
+ mtspr SPRN_SPRG6, r6
+ mtspr SPRN_SPRG7, r7
+
 lwz r4, SS_SPRG+0(r3)
 lwz r5, SS_SPRG+4(r3)
 lwz r6, SS_SPRG+8(r3)
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 403523c061ba..343bffd20fca 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -83,6 +83,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
 /* MEM2 64MB@0x10000000 */
 delta = wii_hole_start + wii_hole_size;
 size = top - delta;
+
+ if (__map_without_bats)
+ return delta;
+
 for (bl = 128<<10; bl < max_size; bl <<= 1) {
 if (bl * 2 > size)
 break;
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 35f699ebb662..e52f9b06dd9c 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -458,7 +458,8 @@ EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

 #ifdef CONFIG_HOTPLUG_CPU
-static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
+
+void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
 {
 u64 pir = get_hard_smp_processor_id(cpu);

@@ -481,20 +482,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
 {
 unsigned long srr1;
 u32 idle_states = pnv_get_supported_cpuidle_states();
- u64 lpcr_val;
-
- /*
- * We don't want to take decrementer interrupts while we are
- * offline, so clear LPCR:PECE1. We keep PECE2 (and
- * LPCR_PECE_HVEE on P9) enabled as to let IPIs in.
- *
- * If the CPU gets woken up by a special wakeup, ensure that
- * the SLW engine sets LPCR with decrementer bit cleared, else
- * the CPU will come back to the kernel due to a spurious
- * wakeup.
- */
- lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
- pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

 __ppc64_runlatch_off();

@@ -526,16 +513,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu)

 __ppc64_runlatch_on();

- /*
- * Re-enable decrementer interrupts in LPCR.
- *
- * Further, we want stop states to be woken up by decrementer
- * for non-hotplug cases. So program the LPCR via stop api as
- * well.
- */
- lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
- pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
-
 return srr1;
 }
 #endif
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index acd3206dfae3..06628c71cef6 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -98,7 +98,7 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
 }

 static struct bin_attribute opal_msglog_attr = {
- .attr = {.name = "msglog", .mode = 0444},
+ .attr = {.name = "msglog", .mode = 0400},
 .read = opal_msglog_read
 };

diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 0d354e19ef92..db09c7022635 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -39,6 +39,7 @@
 #include <asm/cpuidle.h>
 #include <asm/kexec.h>
 #include <asm/reg.h>
+#include <asm/powernv.h>

 #include "powernv.h"

@@ -153,6 +154,7 @@ static void pnv_smp_cpu_kill_self(void)
 {
 unsigned int cpu;
 unsigned long srr1, wmask;
+ u64 lpcr_val;

 /* Standard hot unplug procedure */
 /*
@@ -174,6 +176,19 @@ static void pnv_smp_cpu_kill_self(void)
 if (cpu_has_feature(CPU_FTR_ARCH_207S))
 wmask = SRR1_WAKEMASK_P8;

+ /*
+ * We don't want to take decrementer interrupts while we are
+ * offline, so clear LPCR:PECE1. We keep PECE2 (and
+ * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
+ *
+ * If the CPU gets woken up by a special wakeup, ensure that
+ * the SLW engine sets LPCR with decrementer bit cleared, else
+ * the CPU will come back to the kernel due to a spurious
+ * wakeup.
+ */
+ lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
+ pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
+
 while (!generic_check_cpu_restart(cpu)) {
 /*
 * Clear IPI flag, since we don't handle IPIs while
@@ -246,6 +261,16 @@ static void pnv_smp_cpu_kill_self(void)

 }

+ /*
+ * Re-enable decrementer interrupts in LPCR.
+ *
+ * Further, we want stop states to be woken up by decrementer
+ * for non-hotplug cases. So program the LPCR via stop api as
+ * well.
+ */
+ lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
+ pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
+
 DBG("CPU%d coming online...\n", cpu);
 }

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 29c940bf8506..dad110e9f41b 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -865,7 +865,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 struct kvm_memory_slot *slot) {}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a0097f8bada8..5f85e0dfa66d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -303,7 +303,7 @@ early_param("vmalloc", parse_vmalloc);

 void *restart_stack __section(.data);

-static void __init setup_lowcore(void)
+static void __init setup_lowcore_dat_off(void)
 {
 struct lowcore *lc;

@@ -314,19 +314,16 @@ static void __init setup_lowcore(void)
 lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
 lc->restart_psw.mask = PSW_KERNEL_BITS;
 lc->restart_psw.addr = (unsigned long) restart_int_handler;
- lc->external_new_psw.mask = PSW_KERNEL_BITS |
- PSW_MASK_DAT | PSW_MASK_MCHECK;
+ lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 lc->external_new_psw.addr = (unsigned long) ext_int_handler;
 lc->svc_new_psw.mask = PSW_KERNEL_BITS |
- PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 lc->svc_new_psw.addr = (unsigned long) system_call;
- lc->program_new_psw.mask = PSW_KERNEL_BITS |
- PSW_MASK_DAT | PSW_MASK_MCHECK;
+ lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
 lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
 lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
- lc->io_new_psw.mask = PSW_KERNEL_BITS |
- PSW_MASK_DAT | PSW_MASK_MCHECK;
+ lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 lc->io_new_psw.addr = (unsigned long) io_int_handler;
 lc->clock_comparator = clock_comparator_max;
 lc->kernel_stack = ((unsigned long) &init_thread_union)
@@ -388,6 +385,16 @@ static void __init setup_lowcore(void)
 lowcore_ptr[0] = lc;
 }

+static void __init setup_lowcore_dat_on(void)
+{
+ __ctl_clear_bit(0, 28);
+ S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
+ S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
+ S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
+ S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
+ __ctl_set_bit(0, 28);
+}
+
 static struct resource code_resource = {
 .name = "Kernel code",
 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
@@ -946,7 +953,7 @@ void __init setup_arch(char **cmdline_p)
 #endif

 setup_resources();
- setup_lowcore();
+ setup_lowcore_dat_off();
 smp_fill_possible_mask();
 cpu_detect_mhz_feature();
 cpu_init();
@@ -959,6 +966,12 @@ void __init setup_arch(char **cmdline_p)
 */
 paging_init();

+ /*
+ * After paging_init created the kernel page table, the new PSWs
+ * in lowcore can now run with DAT enabled.
+ */
+ setup_lowcore_dat_on();
+
 /* Setup default console */
 conmode_default();
 set_preferred_console();
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
|
|
index 2a356b948720..3ea71b871813 100644
|
|
--- a/arch/x86/crypto/aegis128-aesni-glue.c
|
|
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
|
|
@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
|
|
}
|
|
|
|
static void crypto_aegis128_aesni_process_crypt(
|
|
- struct aegis_state *state, struct aead_request *req,
|
|
+ struct aegis_state *state, struct skcipher_walk *walk,
|
|
const struct aegis_crypt_ops *ops)
|
|
{
|
|
- struct skcipher_walk walk;
|
|
- u8 *src, *dst;
|
|
- unsigned int chunksize, base;
|
|
-
|
|
- ops->skcipher_walk_init(&walk, req, false);
|
|
-
|
|
- while (walk.nbytes) {
|
|
- src = walk.src.virt.addr;
|
|
- dst = walk.dst.virt.addr;
|
|
- chunksize = walk.nbytes;
|
|
-
|
|
- ops->crypt_blocks(state, chunksize, src, dst);
|
|
-
|
|
- base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
|
|
- src += base;
|
|
- dst += base;
|
|
- chunksize &= AEGIS128_BLOCK_SIZE - 1;
|
|
-
|
|
- if (chunksize > 0)
|
|
- ops->crypt_tail(state, chunksize, src, dst);
|
|
+ while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
|
|
+ ops->crypt_blocks(state,
|
|
+ round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
|
|
+ walk->src.virt.addr, walk->dst.virt.addr);
|
|
+ skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
|
|
+ }
|
|
|
|
- skcipher_walk_done(&walk, 0);
|
|
+ if (walk->nbytes) {
|
|
+ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
|
|
+ walk->dst.virt.addr);
|
|
+ skcipher_walk_done(walk, 0);
|
|
}
|
|
}
|
|
|
|
@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
|
|
{
|
|
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
|
struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
|
|
+ struct skcipher_walk walk;
|
|
struct aegis_state state;
|
|
|
|
+ ops->skcipher_walk_init(&walk, req, true);
|
|
+
|
|
kernel_fpu_begin();
|
|
|
|
crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
|
|
crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
|
|
- crypto_aegis128_aesni_process_crypt(&state, req, ops);
|
|
+ crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
|
|
crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
|
|
|
|
kernel_fpu_end();
|
|
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
|
|
index dbe8bb980da1..1b1b39c66c5e 100644
|
|
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
|
|
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
|
|
@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
|
|
}
|
|
|
|
static void crypto_aegis128l_aesni_process_crypt(
|
|
- struct aegis_state *state, struct aead_request *req,
|
|
+ struct aegis_state *state, struct skcipher_walk *walk,
|
|
const struct aegis_crypt_ops *ops)
|
|
{
|
|
- struct skcipher_walk walk;
|
|
- u8 *src, *dst;
|
|
- unsigned int chunksize, base;
|
|
-
|
|
- ops->skcipher_walk_init(&walk, req, false);
|
|
-
|
|
- while (walk.nbytes) {
|
|
- src = walk.src.virt.addr;
|
|
- dst = walk.dst.virt.addr;
|
|
- chunksize = walk.nbytes;
|
|
-
|
|
- ops->crypt_blocks(state, chunksize, src, dst);
|
|
-
|
|
- base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
|
|
- src += base;
|
|
- dst += base;
|
|
- chunksize &= AEGIS128L_BLOCK_SIZE - 1;
|
|
-
|
|
- if (chunksize > 0)
|
|
- ops->crypt_tail(state, chunksize, src, dst);
|
|
+ while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
|
|
+ ops->crypt_blocks(state, round_down(walk->nbytes,
|
|
+ AEGIS128L_BLOCK_SIZE),
|
|
+ walk->src.virt.addr, walk->dst.virt.addr);
|
|
+ skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
|
|
+ }
|
|
|
|
- skcipher_walk_done(&walk, 0);
|
|
+ if (walk->nbytes) {
|
|
+ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
|
|
+ walk->dst.virt.addr);
|
|
+ skcipher_walk_done(walk, 0);
|
|
}
|
|
}
|
|
|
|
@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
|
|
{
|
|
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
|
struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
|
|
+ struct skcipher_walk walk;
|
|
struct aegis_state state;
|
|
|
|
+ ops->skcipher_walk_init(&walk, req, true);
|
|
+
|
|
kernel_fpu_begin();
|
|
|
|
crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
|
|
crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
|
|
- crypto_aegis128l_aesni_process_crypt(&state, req, ops);
|
|
+ crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
|
|
crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
|
|
|
|
kernel_fpu_end();
|
|
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
|
|
index 8bebda2de92f..6227ca3220a0 100644
|
|
--- a/arch/x86/crypto/aegis256-aesni-glue.c
|
|
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
|
|
@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
|
|
}
|
|
|
|
static void crypto_aegis256_aesni_process_crypt(
|
|
- struct aegis_state *state, struct aead_request *req,
|
|
+ struct aegis_state *state, struct skcipher_walk *walk,
|
|
const struct aegis_crypt_ops *ops)
|
|
{
|
|
- struct skcipher_walk walk;
|
|
- u8 *src, *dst;
|
|
- unsigned int chunksize, base;
|
|
-
|
|
- ops->skcipher_walk_init(&walk, req, false);
|
|
-
|
|
- while (walk.nbytes) {
|
|
- src = walk.src.virt.addr;
|
|
- dst = walk.dst.virt.addr;
|
|
- chunksize = walk.nbytes;
|
|
-
|
|
- ops->crypt_blocks(state, chunksize, src, dst);
|
|
-
|
|
- base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
|
|
- src += base;
|
|
- dst += base;
|
|
- chunksize &= AEGIS256_BLOCK_SIZE - 1;
|
|
-
|
|
- if (chunksize > 0)
|
|
- ops->crypt_tail(state, chunksize, src, dst);
|
|
+ while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
|
|
+ ops->crypt_blocks(state,
|
|
+ round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
|
|
+ walk->src.virt.addr, walk->dst.virt.addr);
|
|
+ skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
|
|
+ }
|
|
|
|
- skcipher_walk_done(&walk, 0);
|
|
+ if (walk->nbytes) {
|
|
+ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
|
|
+ walk->dst.virt.addr);
|
|
+ skcipher_walk_done(walk, 0);
|
|
}
|
|
}
|
|
|
|
@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
|
|
{
|
|
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
|
struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
|
|
+ struct skcipher_walk walk;
|
|
struct aegis_state state;
|
|
|
|
+ ops->skcipher_walk_init(&walk, req, true);
|
|
+
|
|
kernel_fpu_begin();
|
|
|
|
crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
|
|
crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
|
|
- crypto_aegis256_aesni_process_crypt(&state, req, ops);
|
|
+ crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
|
|
crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
|
|
|
|
kernel_fpu_end();
|
|
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
|
|
index e4b78f962874..917f25e4d0a8 100644
|
|
--- a/arch/x86/crypto/aesni-intel_glue.c
|
|
+++ b/arch/x86/crypto/aesni-intel_glue.c
|
|
@@ -830,11 +830,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
|
|
scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
|
|
}
|
|
|
|
- src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
|
|
- scatterwalk_start(&src_sg_walk, src_sg);
|
|
- if (req->src != req->dst) {
|
|
- dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
|
|
- scatterwalk_start(&dst_sg_walk, dst_sg);
|
|
+ if (left) {
|
|
+ src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
|
|
+ scatterwalk_start(&src_sg_walk, src_sg);
|
|
+ if (req->src != req->dst) {
|
|
+ dst_sg = scatterwalk_ffwd(dst_start, req->dst,
|
|
+ req->assoclen);
|
|
+ scatterwalk_start(&dst_sg_walk, dst_sg);
|
|
+ }
|
|
}
|
|
|
|
kernel_fpu_begin();
|
|
diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
|
|
index 0dccdda1eb3a..7e600f8bcdad 100644
|
|
--- a/arch/x86/crypto/morus1280_glue.c
|
|
+++ b/arch/x86/crypto/morus1280_glue.c
|
|
@@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad(
|
|
|
|
static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
|
|
struct morus1280_ops ops,
|
|
- struct aead_request *req)
|
|
+ struct skcipher_walk *walk)
|
|
{
|
|
- struct skcipher_walk walk;
|
|
- u8 *cursor_src, *cursor_dst;
|
|
- unsigned int chunksize, base;
|
|
-
|
|
- ops.skcipher_walk_init(&walk, req, false);
|
|
-
|
|
- while (walk.nbytes) {
|
|
- cursor_src = walk.src.virt.addr;
|
|
- cursor_dst = walk.dst.virt.addr;
|
|
- chunksize = walk.nbytes;
|
|
-
|
|
- ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
|
|
-
|
|
- base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
|
|
- cursor_src += base;
|
|
- cursor_dst += base;
|
|
- chunksize &= MORUS1280_BLOCK_SIZE - 1;
|
|
-
|
|
- if (chunksize > 0)
|
|
- ops.crypt_tail(state, cursor_src, cursor_dst,
|
|
- chunksize);
|
|
+ while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
|
|
+ ops.crypt_blocks(state, walk->src.virt.addr,
|
|
+ walk->dst.virt.addr,
|
|
+ round_down(walk->nbytes,
|
|
+ MORUS1280_BLOCK_SIZE));
|
|
+ skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
|
|
+ }
|
|
|
|
- skcipher_walk_done(&walk, 0);
|
|
+ if (walk->nbytes) {
|
|
+ ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
|
|
+ walk->nbytes);
|
|
+ skcipher_walk_done(walk, 0);
|
|
}
|
|
}
|
|
|
|
@@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req,
|
|
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
|
struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
|
|
struct morus1280_state state;
|
|
+ struct skcipher_walk walk;
|
|
+
|
|
+ ops.skcipher_walk_init(&walk, req, true);
|
|
|
|
kernel_fpu_begin();
|
|
|
|
ctx->ops->init(&state, &ctx->key, req->iv);
|
|
crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
|
|
- crypto_morus1280_glue_process_crypt(&state, ops, req);
|
|
+ crypto_morus1280_glue_process_crypt(&state, ops, &walk);
|
|
ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
|
|
|
|
kernel_fpu_end();
|
|
diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
|
|
index 7b58fe4d9bd1..cb3a81732016 100644
|
|
--- a/arch/x86/crypto/morus640_glue.c
|
|
+++ b/arch/x86/crypto/morus640_glue.c
|
|
@@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad(
|
|
|
|
static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
|
|
struct morus640_ops ops,
|
|
- struct aead_request *req)
|
|
+ struct skcipher_walk *walk)
|
|
{
|
|
- struct skcipher_walk walk;
|
|
- u8 *cursor_src, *cursor_dst;
|
|
- unsigned int chunksize, base;
|
|
-
|
|
- ops.skcipher_walk_init(&walk, req, false);
|
|
-
|
|
- while (walk.nbytes) {
|
|
- cursor_src = walk.src.virt.addr;
|
|
- cursor_dst = walk.dst.virt.addr;
|
|
- chunksize = walk.nbytes;
|
|
-
|
|
- ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
|
|
-
|
|
- base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
|
|
- cursor_src += base;
|
|
- cursor_dst += base;
|
|
- chunksize &= MORUS640_BLOCK_SIZE - 1;
|
|
-
|
|
- if (chunksize > 0)
|
|
- ops.crypt_tail(state, cursor_src, cursor_dst,
|
|
- chunksize);
|
|
+ while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
|
|
+ ops.crypt_blocks(state, walk->src.virt.addr,
|
|
+ walk->dst.virt.addr,
|
|
+ round_down(walk->nbytes, MORUS640_BLOCK_SIZE));
|
|
+ skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
|
|
+ }
|
|
|
|
- skcipher_walk_done(&walk, 0);
|
|
+ if (walk->nbytes) {
|
|
+ ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
|
|
+ walk->nbytes);
|
|
+ skcipher_walk_done(walk, 0);
|
|
}
|
|
}
|
|
|
|
@@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req,
|
|
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
|
struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
|
|
struct morus640_state state;
|
|
+ struct skcipher_walk walk;
|
|
+
|
|
+ ops.skcipher_walk_init(&walk, req, true);
|
|
|
|
kernel_fpu_begin();
|
|
|
|
ctx->ops->init(&state, &ctx->key, req->iv);
|
|
crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
|
|
- crypto_morus640_glue_process_crypt(&state, ops, req);
|
|
+ crypto_morus640_glue_process_crypt(&state, ops, &walk);
|
|
ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
|
|
|
|
kernel_fpu_end();
|
|
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
|
|
index 27a461414b30..2690135bf83f 100644
|
|
--- a/arch/x86/events/intel/uncore.c
|
|
+++ b/arch/x86/events/intel/uncore.c
|
|
@@ -740,6 +740,7 @@ static int uncore_pmu_event_init(struct perf_event *event)
|
|
/* fixed counters have event field hardcoded to zero */
|
|
hwc->config = 0ULL;
|
|
} else if (is_freerunning_event(event)) {
|
|
+ hwc->config = event->attr.config;
|
|
if (!check_valid_freerunning_event(box, event))
|
|
return -EINVAL;
|
|
event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
|
|
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
|
|
index e17ab885b1e9..cc6dd4f78158 100644
|
|
--- a/arch/x86/events/intel/uncore.h
|
|
+++ b/arch/x86/events/intel/uncore.h
|
|
@@ -285,8 +285,8 @@ static inline
|
|
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
|
|
struct perf_event *event)
|
|
{
|
|
- unsigned int type = uncore_freerunning_type(event->attr.config);
|
|
- unsigned int idx = uncore_freerunning_idx(event->attr.config);
|
|
+ unsigned int type = uncore_freerunning_type(event->hw.config);
|
|
+ unsigned int idx = uncore_freerunning_idx(event->hw.config);
|
|
struct intel_uncore_pmu *pmu = box->pmu;
|
|
|
|
return pmu->type->freerunning[type].counter_base +
|
|
@@ -360,7 +360,7 @@ static inline
|
|
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
|
|
struct perf_event *event)
|
|
{
|
|
- unsigned int type = uncore_freerunning_type(event->attr.config);
|
|
+ unsigned int type = uncore_freerunning_type(event->hw.config);
|
|
|
|
return box->pmu->type->freerunning[type].bits;
|
|
}
|
|
@@ -368,7 +368,7 @@ unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
|
|
static inline int uncore_num_freerunning(struct intel_uncore_box *box,
|
|
struct perf_event *event)
|
|
{
|
|
- unsigned int type = uncore_freerunning_type(event->attr.config);
|
|
+ unsigned int type = uncore_freerunning_type(event->hw.config);
|
|
|
|
return box->pmu->type->freerunning[type].num_counters;
|
|
}
|
|
@@ -382,8 +382,8 @@ static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
|
|
static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
|
|
struct perf_event *event)
|
|
{
|
|
- unsigned int type = uncore_freerunning_type(event->attr.config);
|
|
- unsigned int idx = uncore_freerunning_idx(event->attr.config);
|
|
+ unsigned int type = uncore_freerunning_type(event->hw.config);
|
|
+ unsigned int idx = uncore_freerunning_idx(event->hw.config);
|
|
|
|
return (type < uncore_num_freerunning_types(box, event)) &&
|
|
(idx < uncore_num_freerunning(box, event));
|
|
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
|
|
index bfa25814fe5f..2d328386f83a 100644
|
|
--- a/arch/x86/events/intel/uncore_snb.c
|
|
+++ b/arch/x86/events/intel/uncore_snb.c
|
|
@@ -444,9 +444,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
|
|
|
|
/* must be done before validate_group */
|
|
event->hw.event_base = base;
|
|
- event->hw.config = cfg;
|
|
event->hw.idx = idx;
|
|
|
|
+ /* Convert to standard encoding format for freerunning counters */
|
|
+ event->hw.config = ((cfg - 1) << 8) | 0x10ff;
|
|
+
|
|
/* no group validation needed, we have free running counters */
|
|
|
|
return 0;
|
|
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
|
|
index 7ed08a7c3398..0ad25cc895ae 100644
|
|
--- a/arch/x86/include/asm/intel-family.h
|
|
+++ b/arch/x86/include/asm/intel-family.h
|
|
@@ -55,6 +55,8 @@
|
|
|
|
#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66
|
|
|
|
+#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
|
|
+
|
|
/* "Small Core" Processors (Atom) */
|
|
|
|
#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
|
|
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
|
|
index 728dc661ebb6..46f0b621bd37 100644
|
|
--- a/arch/x86/include/asm/kvm_host.h
|
|
+++ b/arch/x86/include/asm/kvm_host.h
|
|
@@ -1194,7 +1194,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
|
|
struct kvm_memory_slot *slot,
|
|
gfn_t gfn_offset, unsigned long mask);
|
|
void kvm_mmu_zap_all(struct kvm *kvm);
|
|
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
|
|
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
|
|
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
|
|
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
|
|
|
|
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
|
|
index 6adf6e6c2933..544bd41a514c 100644
|
|
--- a/arch/x86/kernel/kprobes/opt.c
|
|
+++ b/arch/x86/kernel/kprobes/opt.c
|
|
@@ -141,6 +141,11 @@ asm (
|
|
|
|
void optprobe_template_func(void);
|
|
STACK_FRAME_NON_STANDARD(optprobe_template_func);
|
|
+NOKPROBE_SYMBOL(optprobe_template_func);
|
|
+NOKPROBE_SYMBOL(optprobe_template_entry);
|
|
+NOKPROBE_SYMBOL(optprobe_template_val);
|
|
+NOKPROBE_SYMBOL(optprobe_template_call);
|
|
+NOKPROBE_SYMBOL(optprobe_template_end);
|
|
|
|
#define TMPL_MOVE_IDX \
|
|
((long)optprobe_template_val - (long)optprobe_template_entry)
|
|
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
|
|
index 1b82bc7c3cca..779ed52047d1 100644
|
|
--- a/arch/x86/kvm/mmu.c
|
|
+++ b/arch/x86/kvm/mmu.c
|
|
@@ -5774,13 +5774,30 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
|
|
return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
|
|
}
|
|
|
|
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
|
|
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
|
|
{
|
|
+ gen &= MMIO_GEN_MASK;
|
|
+
|
|
+ /*
|
|
+ * Shift to eliminate the "update in-progress" flag, which isn't
|
|
+ * included in the spte's generation number.
|
|
+ */
|
|
+ gen >>= 1;
|
|
+
|
|
+ /*
|
|
+ * Generation numbers are incremented in multiples of the number of
|
|
+ * address spaces in order to provide unique generations across all
|
|
+ * address spaces. Strip what is effectively the address space
|
|
+ * modifier prior to checking for a wrap of the MMIO generation so
|
|
+ * that a wrap in any address space is detected.
|
|
+ */
|
|
+ gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
|
|
+
|
|
/*
|
|
- * The very rare case: if the generation-number is round,
|
|
+ * The very rare case: if the MMIO generation number has wrapped,
|
|
* zap all shadow pages.
|
|
*/
|
|
- if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
|
|
+ if (unlikely(gen == 0)) {
|
|
kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
|
|
kvm_mmu_invalidate_zap_all_pages(kvm);
|
|
}
|
|
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
|
|
index f6da5c37d2e8..4029d3783e18 100644
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -8184,25 +8184,50 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
|
|
/* Addr = segment_base + offset */
|
|
/* offset = base + [index * scale] + displacement */
|
|
off = exit_qualification; /* holds the displacement */
|
|
+ if (addr_size == 1)
|
|
+ off = (gva_t)sign_extend64(off, 31);
|
|
+ else if (addr_size == 0)
|
|
+ off = (gva_t)sign_extend64(off, 15);
|
|
if (base_is_valid)
|
|
off += kvm_register_read(vcpu, base_reg);
|
|
if (index_is_valid)
|
|
off += kvm_register_read(vcpu, index_reg)<<scaling;
|
|
vmx_get_segment(vcpu, &s, seg_reg);
|
|
- *ret = s.base + off;
|
|
|
|
+ /*
|
|
+ * The effective address, i.e. @off, of a memory operand is truncated
|
|
+ * based on the address size of the instruction. Note that this is
|
|
+ * the *effective address*, i.e. the address prior to accounting for
|
|
+ * the segment's base.
|
|
+ */
|
|
if (addr_size == 1) /* 32 bit */
|
|
- *ret &= 0xffffffff;
|
|
+ off &= 0xffffffff;
|
|
+ else if (addr_size == 0) /* 16 bit */
|
|
+ off &= 0xffff;
|
|
|
|
/* Checks for #GP/#SS exceptions. */
|
|
exn = false;
|
|
if (is_long_mode(vcpu)) {
|
|
+ /*
|
|
+ * The virtual/linear address is never truncated in 64-bit
|
|
+ * mode, e.g. a 32-bit address size can yield a 64-bit virtual
|
|
+ * address when using FS/GS with a non-zero base.
|
|
+ */
|
|
+ *ret = s.base + off;
|
|
+
|
|
/* Long mode: #GP(0)/#SS(0) if the memory address is in a
|
|
* non-canonical form. This is the only check on the memory
|
|
* destination for long mode!
|
|
*/
|
|
exn = is_noncanonical_address(*ret, vcpu);
|
|
} else if (is_protmode(vcpu)) {
|
|
+ /*
|
|
+ * When not in long mode, the virtual/linear address is
|
|
+ * unconditionally truncated to 32 bits regardless of the
|
|
+ * address size.
|
|
+ */
|
|
+ *ret = (s.base + off) & 0xffffffff;
|
|
+
|
|
/* Protected mode: apply checks for segment validity in the
|
|
* following order:
|
|
* - segment type check (#GP(0) may be thrown)
|
|
@@ -8226,10 +8251,16 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
|
|
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
|
|
*/
|
|
exn = (s.unusable != 0);
|
|
- /* Protected mode: #GP(0)/#SS(0) if the memory
|
|
- * operand is outside the segment limit.
|
|
+
|
|
+ /*
|
|
+ * Protected mode: #GP(0)/#SS(0) if the memory operand is
|
|
+ * outside the segment limit. All CPUs that support VMX ignore
|
|
+ * limit checks for flat segments, i.e. segments with base==0,
|
|
+ * limit==0xffffffff and of type expand-up data or code.
|
|
*/
|
|
- exn = exn || (off + sizeof(u64) > s.limit);
|
|
+ if (!(s.base == 0 && s.limit == 0xffffffff &&
|
|
+ ((s.type & 8) || !(s.type & 4))))
|
|
+ exn = exn || (off + sizeof(u64) > s.limit);
|
|
}
|
|
if (exn) {
|
|
kvm_queue_exception_e(vcpu,
|
|
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
|
|
index 3a7cf7c6b28a..6181ec19bed2 100644
|
|
--- a/arch/x86/kvm/x86.c
|
|
+++ b/arch/x86/kvm/x86.c
|
|
@@ -9108,13 +9108,13 @@ out_free:
|
|
return -ENOMEM;
|
|
}
|
|
|
|
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
|
|
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
|
|
{
|
|
/*
|
|
* memslots->generation has been incremented.
|
|
* mmio generation may have reached its maximum value.
|
|
*/
|
|
- kvm_mmu_invalidate_mmio_sptes(kvm, slots);
|
|
+ kvm_mmu_invalidate_mmio_sptes(kvm, gen);
|
|
}
|
|
|
|
int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
|
|
index 67b9568613f3..1826ed9dd1c8 100644
|
|
--- a/arch/x86/kvm/x86.h
|
|
+++ b/arch/x86/kvm/x86.h
|
|
@@ -181,6 +181,11 @@ static inline bool emul_is_noncanonical_address(u64 la,
|
|
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
|
|
gva_t gva, gfn_t gfn, unsigned access)
|
|
{
|
|
+ u64 gen = kvm_memslots(vcpu->kvm)->generation;
|
|
+
|
|
+ if (unlikely(gen & 1))
|
|
+ return;
|
|
+
|
|
/*
|
|
* If this is a shadow nested page table, the "GVA" is
|
|
* actually a nGPA.
|
|
@@ -188,7 +193,7 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
|
|
vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
|
|
vcpu->arch.access = access;
|
|
vcpu->arch.mmio_gfn = gfn;
|
|
- vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
|
|
+ vcpu->arch.mmio_gen = gen;
|
|
}
|
|
|
|
static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
|
|
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
|
|
index c8f011e07a15..73aa0b89a74a 100644
|
|
--- a/arch/x86/xen/mmu_pv.c
|
|
+++ b/arch/x86/xen/mmu_pv.c
|
|
@@ -2106,10 +2106,10 @@ void __init xen_relocate_p2m(void)
|
|
pt = early_memremap(pt_phys, PAGE_SIZE);
|
|
clear_page(pt);
|
|
for (idx_pte = 0;
|
|
- idx_pte < min(n_pte, PTRS_PER_PTE);
|
|
- idx_pte++) {
|
|
- set_pte(pt + idx_pte,
|
|
- pfn_pte(p2m_pfn, PAGE_KERNEL));
|
|
+ idx_pte < min(n_pte, PTRS_PER_PTE);
|
|
+ idx_pte++) {
|
|
+ pt[idx_pte] = pfn_pte(p2m_pfn,
|
|
+ PAGE_KERNEL);
|
|
p2m_pfn++;
|
|
}
|
|
n_pte -= PTRS_PER_PTE;
|
|
@@ -2117,8 +2117,7 @@ void __init xen_relocate_p2m(void)
|
|
make_lowmem_page_readonly(__va(pt_phys));
|
|
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
|
|
PFN_DOWN(pt_phys));
|
|
- set_pmd(pmd + idx_pt,
|
|
- __pmd(_PAGE_TABLE | pt_phys));
|
|
+ pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
|
|
pt_phys += PAGE_SIZE;
|
|
}
|
|
n_pt -= PTRS_PER_PMD;
|
|
@@ -2126,7 +2125,7 @@ void __init xen_relocate_p2m(void)
|
|
make_lowmem_page_readonly(__va(pmd_phys));
|
|
pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
|
|
PFN_DOWN(pmd_phys));
|
|
- set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
|
|
+ pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
|
|
pmd_phys += PAGE_SIZE;
|
|
}
|
|
n_pmd -= PTRS_PER_PUD;
|
|
diff --git a/block/blk-mq.c b/block/blk-mq.c
|
|
index 23a53b67cf0d..7d53f2314d7c 100644
|
|
--- a/block/blk-mq.c
|
|
+++ b/block/blk-mq.c
|
|
@@ -701,12 +701,20 @@ static void blk_mq_requeue_work(struct work_struct *work)
|
|
spin_unlock_irq(&q->requeue_lock);
|
|
|
|
list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
|
|
- if (!(rq->rq_flags & RQF_SOFTBARRIER))
|
|
+ if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
|
|
continue;
|
|
|
|
rq->rq_flags &= ~RQF_SOFTBARRIER;
|
|
list_del_init(&rq->queuelist);
|
|
- blk_mq_sched_insert_request(rq, true, false, false);
|
|
+ /*
|
|
+ * If RQF_DONTPREP, rq has contained some driver specific
|
|
+ * data, so insert it to hctx dispatch list to avoid any
|
|
+ * merge.
|
|
+ */
|
|
+ if (rq->rq_flags & RQF_DONTPREP)
|
|
+ blk_mq_request_bypass_insert(rq, false);
|
|
+ else
|
|
+ blk_mq_sched_insert_request(rq, true, false, false);
|
|
}
|
|
|
|
while (!list_empty(&rq_list)) {
|
|
diff --git a/crypto/aead.c b/crypto/aead.c
|
|
index 60b3bbe973e7..9688ada13981 100644
|
|
--- a/crypto/aead.c
|
|
+++ b/crypto/aead.c
|
|
@@ -61,8 +61,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
|
|
else
|
|
err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
|
|
|
|
- if (err)
|
|
+ if (unlikely(err)) {
|
|
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
|
return err;
|
|
+ }
|
|
|
|
crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
|
return 0;
|
|
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
|
|
index c22f4414856d..789716f92e4c 100644
|
|
--- a/crypto/aegis128.c
|
|
+++ b/crypto/aegis128.c
|
|
@@ -290,19 +290,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
|
|
const struct aegis128_ops *ops)
|
|
{
|
|
struct skcipher_walk walk;
|
|
- u8 *src, *dst;
|
|
- unsigned int chunksize;
|
|
|
|
ops->skcipher_walk_init(&walk, req, false);
|
|
|
|
while (walk.nbytes) {
|
|
- src = walk.src.virt.addr;
|
|
- dst = walk.dst.virt.addr;
|
|
- chunksize = walk.nbytes;
|
|
+ unsigned int nbytes = walk.nbytes;
|
|
|
|
- ops->crypt_chunk(state, dst, src, chunksize);
|
|
+ if (nbytes < walk.total)
|
|
+ nbytes = round_down(nbytes, walk.stride);
|
|
|
|
- skcipher_walk_done(&walk, 0);
|
|
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ nbytes);
|
|
+
|
|
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
|
}
|
|
}
|
|
|
|
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
|
|
index b6fb21ebdc3e..73811448cb6b 100644
|
|
--- a/crypto/aegis128l.c
|
|
+++ b/crypto/aegis128l.c
|
|
@@ -353,19 +353,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
|
|
const struct aegis128l_ops *ops)
|
|
{
|
|
struct skcipher_walk walk;
|
|
- u8 *src, *dst;
|
|
- unsigned int chunksize;
|
|
|
|
ops->skcipher_walk_init(&walk, req, false);
|
|
|
|
while (walk.nbytes) {
|
|
- src = walk.src.virt.addr;
|
|
- dst = walk.dst.virt.addr;
|
|
- chunksize = walk.nbytes;
|
|
+ unsigned int nbytes = walk.nbytes;
|
|
|
|
- ops->crypt_chunk(state, dst, src, chunksize);
|
|
+ if (nbytes < walk.total)
|
|
+ nbytes = round_down(nbytes, walk.stride);
|
|
|
|
- skcipher_walk_done(&walk, 0);
|
|
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ nbytes);
|
|
+
|
|
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
|
}
|
|
}
|
|
|
|
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
|
|
index 11f0f8ec9c7c..8a71e9c06193 100644
|
|
--- a/crypto/aegis256.c
|
|
+++ b/crypto/aegis256.c
|
|
@@ -303,19 +303,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
|
|
const struct aegis256_ops *ops)
|
|
{
|
|
struct skcipher_walk walk;
|
|
- u8 *src, *dst;
|
|
- unsigned int chunksize;
|
|
|
|
ops->skcipher_walk_init(&walk, req, false);
|
|
|
|
while (walk.nbytes) {
|
|
- src = walk.src.virt.addr;
|
|
- dst = walk.dst.virt.addr;
|
|
- chunksize = walk.nbytes;
|
|
+ unsigned int nbytes = walk.nbytes;
|
|
|
|
- ops->crypt_chunk(state, dst, src, chunksize);
|
|
+ if (nbytes < walk.total)
|
|
+ nbytes = round_down(nbytes, walk.stride);
|
|
|
|
- skcipher_walk_done(&walk, 0);
|
|
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ nbytes);
|
|
+
|
|
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
|
}
|
|
}
|
|
|
|
diff --git a/crypto/ahash.c b/crypto/ahash.c
|
|
index a64c143165b1..158e716f21a1 100644
|
|
--- a/crypto/ahash.c
|
|
+++ b/crypto/ahash.c
|
|
@@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
|
|
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
|
|
{
|
|
unsigned int alignmask = walk->alignmask;
|
|
- unsigned int nbytes = walk->entrylen;
|
|
|
|
walk->data -= walk->offset;
|
|
|
|
- if (nbytes && walk->offset & alignmask && !err) {
|
|
- walk->offset = ALIGN(walk->offset, alignmask + 1);
|
|
- nbytes = min(nbytes,
|
|
- ((unsigned int)(PAGE_SIZE)) - walk->offset);
|
|
- walk->entrylen -= nbytes;
|
|
+ if (walk->entrylen && (walk->offset & alignmask) && !err) {
|
|
+ unsigned int nbytes;
|
|
|
|
+ walk->offset = ALIGN(walk->offset, alignmask + 1);
|
|
+ nbytes = min(walk->entrylen,
|
|
+ (unsigned int)(PAGE_SIZE - walk->offset));
|
|
if (nbytes) {
|
|
+ walk->entrylen -= nbytes;
|
|
walk->data += walk->offset;
|
|
return nbytes;
|
|
}
|
|
@@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
|
|
if (err)
|
|
return err;
|
|
|
|
- if (nbytes) {
|
|
+ if (walk->entrylen) {
|
|
walk->offset = 0;
|
|
walk->pg++;
|
|
return hash_walk_next(walk);
|
|
@@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
|
|
return ret;
|
|
}
|
|
|
|
+static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
|
|
+ unsigned int keylen)
|
|
+{
|
|
+ return -ENOSYS;
|
|
+}
|
|
+
|
|
+static void ahash_set_needkey(struct crypto_ahash *tfm)
|
|
+{
|
|
+ const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
|
|
+
|
|
+ if (tfm->setkey != ahash_nosetkey &&
|
|
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
|
|
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
|
+}
|
|
+
|
|
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
|
|
unsigned int keylen)
|
|
{
|
|
@@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
|
|
else
|
|
err = tfm->setkey(tfm, key, keylen);
|
|
|
|
- if (err)
|
|
+ if (unlikely(err)) {
|
|
+ ahash_set_needkey(tfm);
|
|
return err;
|
|
+ }
|
|
|
|
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
|
|
|
|
-static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
|
|
- unsigned int keylen)
|
|
-{
|
|
- return -ENOSYS;
|
|
-}
|
|
-
|
|
static inline unsigned int ahash_align_buffer_size(unsigned len,
|
|
unsigned long mask)
|
|
{
|
|
@@ -467,8 +478,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
|
|
|
|
if (alg->setkey) {
|
|
hash->setkey = alg->setkey;
|
|
- if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
|
|
- crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
|
|
+ ahash_set_needkey(hash);
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/crypto/cfb.c b/crypto/cfb.c
|
|
index e81e45673498..4abfe32ff845 100644
|
|
--- a/crypto/cfb.c
|
|
+++ b/crypto/cfb.c
|
|
@@ -77,12 +77,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
|
|
do {
|
|
crypto_cfb_encrypt_one(tfm, iv, dst);
|
|
crypto_xor(dst, src, bsize);
|
|
- memcpy(iv, dst, bsize);
|
|
+ iv = dst;
|
|
|
|
src += bsize;
|
|
dst += bsize;
|
|
} while ((nbytes -= bsize) >= bsize);
|
|
|
|
+ memcpy(walk->iv, iv, bsize);
|
|
+
|
|
return nbytes;
|
|
}
|
|
|
|
@@ -162,7 +164,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
|
|
const unsigned int bsize = crypto_cfb_bsize(tfm);
|
|
unsigned int nbytes = walk->nbytes;
|
|
u8 *src = walk->src.virt.addr;
|
|
- u8 *iv = walk->iv;
|
|
+ u8 * const iv = walk->iv;
|
|
u8 tmp[MAX_CIPHER_BLOCKSIZE];
|
|
|
|
do {
|
|
@@ -172,8 +174,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
|
|
src += bsize;
|
|
} while ((nbytes -= bsize) >= bsize);
|
|
|
|
- memcpy(walk->iv, iv, bsize);
|
|
-
|
|
return nbytes;
|
|
}
|
|
|
|
@@ -298,6 +298,12 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
|
|
inst->alg.base.cra_blocksize = 1;
|
|
inst->alg.base.cra_alignmask = alg->cra_alignmask;
|
|
|
|
+ /*
|
|
+ * To simplify the implementation, configure the skcipher walk to only
|
|
+ * give a partial block at the very end, never earlier.
|
|
+ */
|
|
+ inst->alg.chunksize = alg->cra_blocksize;
|
|
+
|
|
inst->alg.ivsize = alg->cra_blocksize;
|
|
inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
|
|
inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
|
|
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
|
|
index 3889c188f266..b83576b4eb55 100644
|
|
--- a/crypto/morus1280.c
|
|
+++ b/crypto/morus1280.c
|
|
@@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
|
|
const struct morus1280_ops *ops)
|
|
{
|
|
struct skcipher_walk walk;
|
|
- u8 *dst;
|
|
- const u8 *src;
|
|
|
|
ops->skcipher_walk_init(&walk, req, false);
|
|
|
|
while (walk.nbytes) {
|
|
- src = walk.src.virt.addr;
|
|
- dst = walk.dst.virt.addr;
|
|
+ unsigned int nbytes = walk.nbytes;
|
|
|
|
- ops->crypt_chunk(state, dst, src, walk.nbytes);
|
|
+ if (nbytes < walk.total)
|
|
+ nbytes = round_down(nbytes, walk.stride);
|
|
|
|
- skcipher_walk_done(&walk, 0);
|
|
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ nbytes);
|
|
+
|
|
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
|
}
|
|
}
|
|
|
|
diff --git a/crypto/morus640.c b/crypto/morus640.c
|
|
index da06ec2f6a80..b6a477444f6d 100644
|
|
--- a/crypto/morus640.c
|
|
+++ b/crypto/morus640.c
|
|
@@ -365,18 +365,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
|
|
const struct morus640_ops *ops)
|
|
{
|
|
struct skcipher_walk walk;
|
|
- u8 *dst;
|
|
- const u8 *src;
|
|
|
|
ops->skcipher_walk_init(&walk, req, false);
|
|
|
|
while (walk.nbytes) {
|
|
- src = walk.src.virt.addr;
|
|
- dst = walk.dst.virt.addr;
|
|
+ unsigned int nbytes = walk.nbytes;
|
|
|
|
- ops->crypt_chunk(state, dst, src, walk.nbytes);
|
|
+ if (nbytes < walk.total)
|
|
+ nbytes = round_down(nbytes, walk.stride);
|
|
|
|
- skcipher_walk_done(&walk, 0);
|
|
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ nbytes);
|
|
+
|
|
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
|
}
|
|
}
|
|
|
|
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
|
|
index 8aa10144407c..1b182dfedc94 100644
|
|
--- a/crypto/pcbc.c
|
|
+++ b/crypto/pcbc.c
|
|
@@ -51,7 +51,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
|
|
unsigned int nbytes = walk->nbytes;
|
|
u8 *src = walk->src.virt.addr;
|
|
u8 *dst = walk->dst.virt.addr;
|
|
- u8 *iv = walk->iv;
|
|
+ u8 * const iv = walk->iv;
|
|
|
|
do {
|
|
crypto_xor(iv, src, bsize);
|
|
@@ -72,7 +72,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
|
|
int bsize = crypto_cipher_blocksize(tfm);
|
|
unsigned int nbytes = walk->nbytes;
|
|
u8 *src = walk->src.virt.addr;
|
|
- u8 *iv = walk->iv;
|
|
+ u8 * const iv = walk->iv;
|
|
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
|
|
|
|
do {
|
|
@@ -84,8 +84,6 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
|
|
src += bsize;
|
|
} while ((nbytes -= bsize) >= bsize);
|
|
|
|
- memcpy(walk->iv, iv, bsize);
|
|
-
|
|
return nbytes;
|
|
}
|
|
|
|
@@ -121,7 +119,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
|
|
unsigned int nbytes = walk->nbytes;
|
|
u8 *src = walk->src.virt.addr;
|
|
u8 *dst = walk->dst.virt.addr;
|
|
- u8 *iv = walk->iv;
|
|
+ u8 * const iv = walk->iv;
|
|
|
|
do {
|
|
crypto_cipher_decrypt_one(tfm, dst, src);
|
|
@@ -132,8 +130,6 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
|
|
dst += bsize;
|
|
} while ((nbytes -= bsize) >= bsize);
|
|
|
|
- memcpy(walk->iv, iv, bsize);
|
|
-
|
|
return nbytes;
|
|
}
|
|
|
|
@@ -144,7 +140,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
|
|
int bsize = crypto_cipher_blocksize(tfm);
|
|
unsigned int nbytes = walk->nbytes;
|
|
u8 *src = walk->src.virt.addr;
|
|
- u8 *iv = walk->iv;
|
|
+ u8 * const iv = walk->iv;
|
|
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
|
|
|
|
do {
|
|
@@ -156,8 +152,6 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
|
|
src += bsize;
|
|
} while ((nbytes -= bsize) >= bsize);
|
|
|
|
- memcpy(walk->iv, iv, bsize);
|
|
-
|
|
return nbytes;
|
|
}
|
|
|
|
diff --git a/crypto/shash.c b/crypto/shash.c
|
|
index 5d732c6bb4b2..a04145e5306a 100644
|
|
--- a/crypto/shash.c
|
|
+++ b/crypto/shash.c
|
|
@@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
|
|
return err;
|
|
}
|
|
|
|
+static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
|
|
+{
|
|
+ if (crypto_shash_alg_has_setkey(alg) &&
|
|
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
|
|
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
|
+}
|
|
+
|
|
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
|
|
unsigned int keylen)
|
|
{
|
|
@@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
|
|
else
|
|
err = shash->setkey(tfm, key, keylen);
|
|
|
|
- if (err)
|
|
+ if (unlikely(err)) {
|
|
+ shash_set_needkey(tfm, shash);
|
|
return err;
|
|
+ }
|
|
|
|
crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
|
return 0;
|
|
@@ -368,7 +377,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
|
|
crt->final = shash_async_final;
|
|
crt->finup = shash_async_finup;
|
|
crt->digest = shash_async_digest;
|
|
- crt->setkey = shash_async_setkey;
|
|
+ if (crypto_shash_alg_has_setkey(alg))
|
|
+ crt->setkey = shash_async_setkey;
|
|
|
|
crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
|
|
CRYPTO_TFM_NEED_KEY);
|
|
@@ -390,9 +400,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
|
|
|
|
hash->descsize = alg->descsize;
|
|
|
|
- if (crypto_shash_alg_has_setkey(alg) &&
|
|
- !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
|
|
- crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
|
|
+ shash_set_needkey(hash, alg);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
|
|
index 0bd8c6caa498..46bb300d418f 100644
|
|
--- a/crypto/skcipher.c
|
|
+++ b/crypto/skcipher.c
|
|
@@ -584,6 +584,12 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
|
|
return crypto_alg_extsize(alg);
|
|
}
|
|
|
|
+static void skcipher_set_needkey(struct crypto_skcipher *tfm)
|
|
+{
|
|
+ if (tfm->keysize)
|
|
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
|
+}
|
|
+
|
|
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
|
|
const u8 *key, unsigned int keylen)
|
|
{
|
|
@@ -597,8 +603,10 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
|
|
err = crypto_blkcipher_setkey(blkcipher, key, keylen);
|
|
crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
|
|
CRYPTO_TFM_RES_MASK);
|
|
- if (err)
|
|
+ if (unlikely(err)) {
|
|
+ skcipher_set_needkey(tfm);
|
|
return err;
|
|
+ }
|
|
|
|
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
|
return 0;
|
|
@@ -676,8 +684,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
|
|
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
|
|
skcipher->keysize = calg->cra_blkcipher.max_keysize;
|
|
|
|
- if (skcipher->keysize)
|
|
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
|
|
+ skcipher_set_needkey(skcipher);
|
|
|
|
return 0;
|
|
}
|
|
@@ -697,8 +704,10 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
|
|
crypto_skcipher_set_flags(tfm,
|
|
crypto_ablkcipher_get_flags(ablkcipher) &
|
|
CRYPTO_TFM_RES_MASK);
|
|
- if (err)
|
|
+ if (unlikely(err)) {
|
|
+ skcipher_set_needkey(tfm);
|
|
return err;
|
|
+ }
|
|
|
|
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
|
return 0;
|
|
@@ -775,8 +784,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
|
|
sizeof(struct ablkcipher_request);
|
|
skcipher->keysize = calg->cra_ablkcipher.max_keysize;
|
|
|
|
- if (skcipher->keysize)
|
|
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
|
|
+ skcipher_set_needkey(skcipher);
|
|
|
|
return 0;
|
|
}
|
|
@@ -819,8 +827,10 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
|
else
|
|
err = cipher->setkey(tfm, key, keylen);
|
|
|
|
- if (err)
|
|
+ if (unlikely(err)) {
|
|
+ skcipher_set_needkey(tfm);
|
|
return err;
|
|
+ }
|
|
|
|
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
|
return 0;
|
|
@@ -852,8 +862,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
|
|
skcipher->ivsize = alg->ivsize;
|
|
skcipher->keysize = alg->max_keysize;
|
|
|
|
- if (skcipher->keysize)
|
|
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
|
|
+ skcipher_set_needkey(skcipher);
|
|
|
|
if (alg->exit)
|
|
skcipher->base.exit = crypto_skcipher_exit_tfm;
|
|
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
|
|
index 54d882ffe438..3664c26f4838 100644
|
|
--- a/crypto/testmgr.c
|
|
+++ b/crypto/testmgr.c
|
|
@@ -1894,14 +1894,21 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
|
|
|
|
err = alg_test_hash(desc, driver, type, mask);
|
|
if (err)
|
|
- goto out;
|
|
+ return err;
|
|
|
|
tfm = crypto_alloc_shash(driver, type, mask);
|
|
if (IS_ERR(tfm)) {
|
|
+ if (PTR_ERR(tfm) == -ENOENT) {
|
|
+ /*
|
|
+ * This crc32c implementation is only available through
|
|
+ * ahash API, not the shash API, so the remaining part
|
|
+ * of the test is not applicable to it.
|
|
+ */
|
|
+ return 0;
|
|
+ }
|
|
printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
|
|
"%ld\n", driver, PTR_ERR(tfm));
|
|
- err = PTR_ERR(tfm);
|
|
- goto out;
|
|
+ return PTR_ERR(tfm);
|
|
}
|
|
|
|
do {
|
|
@@ -1928,7 +1935,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
|
|
|
|
crypto_free_shash(tfm);
|
|
|
|
-out:
|
|
return err;
|
|
}
|
|
|
|
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
|
|
index 11e6f17fe724..862ee1d04263 100644
|
|
--- a/crypto/testmgr.h
|
|
+++ b/crypto/testmgr.h
|
|
@@ -11416,6 +11416,31 @@ static const struct cipher_testvec aes_cfb_tv_template[] = {
|
|
"\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
|
|
"\x20\x31\x62\x3d\x55\xb1\xe4\x71",
|
|
.len = 64,
|
|
+ .also_non_np = 1,
|
|
+ .np = 2,
|
|
+ .tap = { 31, 33 },
|
|
+ }, { /* > 16 bytes, not a multiple of 16 bytes */
|
|
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
|
|
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
|
|
+ .klen = 16,
|
|
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
|
|
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
|
|
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
|
|
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
|
|
+ "\xae",
|
|
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
|
|
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
|
|
+ "\xc8",
|
|
+ .len = 17,
|
|
+ }, { /* < 16 bytes */
|
|
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
|
|
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
|
|
+ .klen = 16,
|
|
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
|
|
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
|
|
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
|
|
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
|
|
+ .len = 7,
|
|
},
|
|
};
|
|
|
|
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
|
|
index 545e91420cde..8940054d6250 100644
|
|
--- a/drivers/acpi/device_sysfs.c
|
|
+++ b/drivers/acpi/device_sysfs.c
|
|
@@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
|
|
{
|
|
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
|
|
const union acpi_object *of_compatible, *obj;
|
|
+ acpi_status status;
|
|
int len, count;
|
|
int i, nval;
|
|
char *c;
|
|
|
|
- acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
|
|
+ status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
|
|
+ if (ACPI_FAILURE(status))
|
|
+ return -ENODEV;
|
|
+
|
|
/* DT strings are all in lower case */
|
|
for (c = buf.pointer; *c != '\0'; c++)
|
|
*c = tolower(*c);
|
|
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
|
|
index f530d3541242..df2175b1169a 100644
|
|
--- a/drivers/acpi/nfit/core.c
|
|
+++ b/drivers/acpi/nfit/core.c
|
|
@@ -397,7 +397,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
|
|
if (call_pkg) {
|
|
int i;
|
|
|
|
- if (nfit_mem->family != call_pkg->nd_family)
|
|
+ if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
|
|
return -ENOTTY;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
|
|
@@ -406,6 +406,10 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
|
|
return call_pkg->nd_command;
|
|
}
|
|
|
|
+ /* In the !call_pkg case, bus commands == bus functions */
|
|
+ if (!nfit_mem)
|
|
+ return cmd;
|
|
+
|
|
/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
|
|
if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
|
|
return cmd;
|
|
@@ -436,17 +440,18 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
|
if (cmd_rc)
|
|
*cmd_rc = -EINVAL;
|
|
|
|
+ if (cmd == ND_CMD_CALL)
|
|
+ call_pkg = buf;
|
|
+ func = cmd_to_func(nfit_mem, cmd, call_pkg);
|
|
+ if (func < 0)
|
|
+ return func;
|
|
+
|
|
if (nvdimm) {
|
|
struct acpi_device *adev = nfit_mem->adev;
|
|
|
|
if (!adev)
|
|
return -ENOTTY;
|
|
|
|
- if (cmd == ND_CMD_CALL)
|
|
- call_pkg = buf;
|
|
- func = cmd_to_func(nfit_mem, cmd, call_pkg);
|
|
- if (func < 0)
|
|
- return func;
|
|
dimm_name = nvdimm_name(nvdimm);
|
|
cmd_name = nvdimm_cmd_name(cmd);
|
|
cmd_mask = nvdimm_cmd_mask(nvdimm);
|
|
@@ -457,12 +462,9 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
|
} else {
|
|
struct acpi_device *adev = to_acpi_dev(acpi_desc);
|
|
|
|
- func = cmd;
|
|
cmd_name = nvdimm_bus_cmd_name(cmd);
|
|
cmd_mask = nd_desc->cmd_mask;
|
|
- dsm_mask = cmd_mask;
|
|
- if (cmd == ND_CMD_CALL)
|
|
- dsm_mask = nd_desc->bus_dsm_mask;
|
|
+ dsm_mask = nd_desc->bus_dsm_mask;
|
|
desc = nd_cmd_bus_desc(cmd);
|
|
guid = to_nfit_uuid(NFIT_DEV_BUS);
|
|
handle = adev->handle;
|
|
@@ -533,6 +535,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
|
|
+ dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
|
|
+ dimm_name, cmd_name, out_obj->type);
|
|
+ rc = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
if (call_pkg) {
|
|
call_pkg->nd_fw_size = out_obj->buffer.length;
|
|
memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
|
|
@@ -551,13 +560,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
|
return 0;
|
|
}
|
|
|
|
- if (out_obj->package.type != ACPI_TYPE_BUFFER) {
|
|
- dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
|
|
- dimm_name, cmd_name, out_obj->type);
|
|
- rc = -EINVAL;
|
|
- goto out;
|
|
- }
|
|
-
|
|
dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
|
|
cmd_name, out_obj->buffer.length);
|
|
print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
|
|
@@ -2890,14 +2892,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
|
|
{
|
|
int rc;
|
|
|
|
- if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
|
|
+ if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
|
|
return acpi_nfit_register_region(acpi_desc, nfit_spa);
|
|
|
|
set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
|
|
- set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
|
|
+ if (!no_init_ars)
|
|
+ set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
|
|
|
|
switch (acpi_nfit_query_poison(acpi_desc)) {
|
|
case 0:
|
|
+ case -ENOSPC:
|
|
case -EAGAIN:
|
|
rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
|
|
/* shouldn't happen, try again later */
|
|
@@ -2922,7 +2926,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
|
|
break;
|
|
case -EBUSY:
|
|
case -ENOMEM:
|
|
- case -ENOSPC:
|
|
/*
|
|
* BIOS was using ARS, wait for it to complete (or
|
|
* resources to become available) and then perform our
|
|
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
|
|
index a43276c76fc6..21393ec3b9a4 100644
|
|
--- a/drivers/auxdisplay/ht16k33.c
|
|
+++ b/drivers/auxdisplay/ht16k33.c
|
|
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client)
|
|
struct ht16k33_priv *priv = i2c_get_clientdata(client);
|
|
struct ht16k33_fbdev *fbdev = &priv->fbdev;
|
|
|
|
- cancel_delayed_work(&fbdev->work);
|
|
+ cancel_delayed_work_sync(&fbdev->work);
|
|
unregister_framebuffer(fbdev->info);
|
|
framebuffer_release(fbdev->info);
|
|
free_page((unsigned long) fbdev->buffer);
|
|
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
|
|
index 5fa1898755a3..7c84f64c74f7 100644
|
|
--- a/drivers/base/power/wakeup.c
|
|
+++ b/drivers/base/power/wakeup.c
|
|
@@ -118,7 +118,6 @@ void wakeup_source_drop(struct wakeup_source *ws)
|
|
if (!ws)
|
|
return;
|
|
|
|
- del_timer_sync(&ws->timer);
|
|
__pm_relax(ws);
|
|
}
|
|
EXPORT_SYMBOL_GPL(wakeup_source_drop);
|
|
@@ -205,6 +204,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
|
|
list_del_rcu(&ws->entry);
|
|
raw_spin_unlock_irqrestore(&events_lock, flags);
|
|
synchronize_srcu(&wakeup_srcu);
|
|
+
|
|
+ del_timer_sync(&ws->timer);
|
|
+ /*
|
|
+ * Clear timer.function to make wakeup_source_not_registered() treat
|
|
+ * this wakeup source as not registered.
|
|
+ */
|
|
+ ws->timer.function = NULL;
|
|
}
|
|
EXPORT_SYMBOL_GPL(wakeup_source_remove);
|
|
|
|
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
|
|
index fdabd0b74492..a8de56f1936d 100644
|
|
--- a/drivers/block/floppy.c
|
|
+++ b/drivers/block/floppy.c
|
|
@@ -4084,7 +4084,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
|
|
|
|
if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
|
|
if (lock_fdc(drive))
|
|
- return -EINTR;
|
|
+ return 0;
|
|
poll_drive(false, 0);
|
|
process_fd_request();
|
|
}
|
|
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
|
|
index 5faa917df1b6..82d831b103f9 100644
|
|
--- a/drivers/char/ipmi/ipmi_si_intf.c
|
|
+++ b/drivers/char/ipmi/ipmi_si_intf.c
|
|
@@ -2085,6 +2085,11 @@ static int try_smi_init(struct smi_info *new_smi)
|
|
WARN_ON(new_smi->io.dev->init_name != NULL);
|
|
|
|
out_err:
|
|
+ if (rv && new_smi->io.io_cleanup) {
|
|
+ new_smi->io.io_cleanup(&new_smi->io);
|
|
+ new_smi->io.io_cleanup = NULL;
|
|
+ }
|
|
+
|
|
kfree(init_name);
|
|
return rv;
|
|
}
|
|
diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
|
|
index 1b869d530884..638f4ab88f44 100644
|
|
--- a/drivers/char/ipmi/ipmi_si_mem_io.c
|
|
+++ b/drivers/char/ipmi/ipmi_si_mem_io.c
|
|
@@ -81,8 +81,6 @@ int ipmi_si_mem_setup(struct si_sm_io *io)
|
|
if (!addr)
|
|
return -ENODEV;
|
|
|
|
- io->io_cleanup = mem_cleanup;
|
|
-
|
|
/*
|
|
* Figure out the actual readb/readw/readl/etc routine to use based
|
|
* upon the register size.
|
|
@@ -141,5 +139,8 @@ int ipmi_si_mem_setup(struct si_sm_io *io)
|
|
mem_region_cleanup(io, io->io_size);
|
|
return -EIO;
|
|
}
|
|
+
|
|
+ io->io_cleanup = mem_cleanup;
|
|
+
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c
|
|
index ef6dffcea9fa..03924c32b6e9 100644
|
|
--- a/drivers/char/ipmi/ipmi_si_port_io.c
|
|
+++ b/drivers/char/ipmi/ipmi_si_port_io.c
|
|
@@ -68,8 +68,6 @@ int ipmi_si_port_setup(struct si_sm_io *io)
|
|
if (!addr)
|
|
return -ENODEV;
|
|
|
|
- io->io_cleanup = port_cleanup;
|
|
-
|
|
/*
|
|
* Figure out the actual inb/inw/inl/etc routine to use based
|
|
* upon the register size.
|
|
@@ -109,5 +107,8 @@ int ipmi_si_port_setup(struct si_sm_io *io)
|
|
return -EIO;
|
|
}
|
|
}
|
|
+
|
|
+ io->io_cleanup = port_cleanup;
|
|
+
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
|
|
index abd675bec88c..694fc58888c1 100644
|
|
--- a/drivers/char/tpm/st33zp24/st33zp24.c
|
|
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
|
|
@@ -436,7 +436,7 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
|
|
goto out_err;
|
|
}
|
|
|
|
- return len;
|
|
+ return 0;
|
|
out_err:
|
|
st33zp24_cancel(chip);
|
|
release_locality(chip);
|
|
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
|
|
index 1010cb79dcc6..43c3f9b87614 100644
|
|
--- a/drivers/char/tpm/tpm-interface.c
|
|
+++ b/drivers/char/tpm/tpm-interface.c
|
|
@@ -495,10 +495,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
|
|
if (rc < 0) {
|
|
if (rc != -EPIPE)
|
|
dev_err(&chip->dev,
|
|
- "%s: tpm_send: error %d\n", __func__, rc);
|
|
+ "%s: send(): error %d\n", __func__, rc);
|
|
goto out;
|
|
}
|
|
|
|
+ /* A sanity check. send() should just return zero on success e.g.
|
|
+ * not the command length.
|
|
+ */
|
|
+ if (rc > 0) {
|
|
+ dev_warn(&chip->dev,
|
|
+ "%s: send(): invalid value %d\n", __func__, rc);
|
|
+ rc = 0;
|
|
+ }
|
|
+
|
|
if (chip->flags & TPM_CHIP_FLAG_IRQ)
|
|
goto out_recv;
|
|
|
|
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
|
|
index 66a14526aaf4..a290b30a0c35 100644
|
|
--- a/drivers/char/tpm/tpm_atmel.c
|
|
+++ b/drivers/char/tpm/tpm_atmel.c
|
|
@@ -105,7 +105,7 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
iowrite8(buf[i], priv->iobase);
|
|
}
|
|
|
|
- return count;
|
|
+ return 0;
|
|
}
|
|
|
|
static void tpm_atml_cancel(struct tpm_chip *chip)
|
|
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
|
|
index 36952ef98f90..763fc7e6c005 100644
|
|
--- a/drivers/char/tpm/tpm_crb.c
|
|
+++ b/drivers/char/tpm/tpm_crb.c
|
|
@@ -287,19 +287,29 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
|
|
unsigned int expected;
|
|
|
|
- /* sanity check */
|
|
- if (count < 6)
|
|
+ /* A sanity check that the upper layer wants to get at least the header
|
|
+ * as that is the minimum size for any TPM response.
|
|
+ */
|
|
+ if (count < TPM_HEADER_SIZE)
|
|
return -EIO;
|
|
|
|
+ /* If this bit is set, according to the spec, the TPM is in
|
|
+ * unrecoverable condition.
|
|
+ */
|
|
if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
|
|
return -EIO;
|
|
|
|
- memcpy_fromio(buf, priv->rsp, 6);
|
|
- expected = be32_to_cpup((__be32 *) &buf[2]);
|
|
- if (expected > count || expected < 6)
|
|
+ /* Read the first 8 bytes in order to get the length of the response.
|
|
+ * We read exactly a quad word in order to make sure that the remaining
|
|
+ * reads will be aligned.
|
|
+ */
|
|
+ memcpy_fromio(buf, priv->rsp, 8);
|
|
+
|
|
+ expected = be32_to_cpup((__be32 *)&buf[2]);
|
|
+ if (expected > count || expected < TPM_HEADER_SIZE)
|
|
return -EIO;
|
|
|
|
- memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
|
|
+ memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
|
|
|
|
return expected;
|
|
}
|
|
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
|
|
index 95ce2e9ccdc6..32a8e27c5382 100644
|
|
--- a/drivers/char/tpm/tpm_i2c_atmel.c
|
|
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
|
|
@@ -65,7 +65,11 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
|
|
dev_dbg(&chip->dev,
|
|
"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
|
|
(int)min_t(size_t, 64, len), buf, len, status);
|
|
- return status;
|
|
+
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
|
|
index 9086edc9066b..977fd42daa1b 100644
|
|
--- a/drivers/char/tpm/tpm_i2c_infineon.c
|
|
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
|
|
@@ -587,7 +587,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
|
|
/* go and do it */
|
|
iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1);
|
|
|
|
- return len;
|
|
+ return 0;
|
|
out_err:
|
|
tpm_tis_i2c_ready(chip);
|
|
/* The TPM needs some time to clean up here,
|
|
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
index f74f451baf6a..b8defdfdf2dc 100644
|
|
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
@@ -469,7 +469,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
|
|
}
|
|
|
|
dev_dbg(dev, "%s() -> %zd\n", __func__, len);
|
|
- return len;
|
|
+ return 0;
|
|
}
|
|
|
|
static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
|
|
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
|
|
index 25f6e2665385..77e47dc5aacc 100644
|
|
--- a/drivers/char/tpm/tpm_ibmvtpm.c
|
|
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
|
|
@@ -141,14 +141,14 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
}
|
|
|
|
/**
|
|
- * tpm_ibmvtpm_send - Send tpm request
|
|
- *
|
|
+ * tpm_ibmvtpm_send() - Send a TPM command
|
|
* @chip: tpm chip struct
|
|
* @buf: buffer contains data to send
|
|
* @count: size of buffer
|
|
*
|
|
* Return:
|
|
- * Number of bytes sent or < 0 on error.
|
|
+ * 0 on success,
|
|
+ * -errno on error
|
|
*/
|
|
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
{
|
|
@@ -194,7 +194,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
rc = 0;
|
|
ibmvtpm->tpm_processing_cmd = false;
|
|
} else
|
|
- rc = count;
|
|
+ rc = 0;
|
|
|
|
spin_unlock(&ibmvtpm->rtce_lock);
|
|
return rc;
|
|
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
|
|
index d8f10047fbba..97f6d4fe0aee 100644
|
|
--- a/drivers/char/tpm/tpm_infineon.c
|
|
+++ b/drivers/char/tpm/tpm_infineon.c
|
|
@@ -354,7 +354,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
|
|
for (i = 0; i < count; i++) {
|
|
wait_and_send(chip, buf[i]);
|
|
}
|
|
- return count;
|
|
+ return 0;
|
|
}
|
|
|
|
static void tpm_inf_cancel(struct tpm_chip *chip)
|
|
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
|
|
index 5d6cce74cd3f..9bee3c5eb4bf 100644
|
|
--- a/drivers/char/tpm/tpm_nsc.c
|
|
+++ b/drivers/char/tpm/tpm_nsc.c
|
|
@@ -226,7 +226,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
|
|
}
|
|
outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND);
|
|
|
|
- return count;
|
|
+ return 0;
|
|
}
|
|
|
|
static void tpm_nsc_cancel(struct tpm_chip *chip)
|
|
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
|
|
index d2345d9fd7b5..0eaea3a7b8f4 100644
|
|
--- a/drivers/char/tpm/tpm_tis_core.c
|
|
+++ b/drivers/char/tpm/tpm_tis_core.c
|
|
@@ -485,7 +485,7 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
|
|
goto out_err;
|
|
}
|
|
}
|
|
- return len;
|
|
+ return 0;
|
|
out_err:
|
|
tpm_tis_ready(chip);
|
|
return rc;
|
|
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
|
|
index 87a0ce47f201..ecbb63f8d231 100644
|
|
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
|
|
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
|
|
@@ -335,7 +335,6 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
|
|
static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
{
|
|
struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
|
|
- int rc = 0;
|
|
|
|
if (count > sizeof(proxy_dev->buffer)) {
|
|
dev_err(&chip->dev,
|
|
@@ -366,7 +365,7 @@ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
|
|
wake_up_interruptible(&proxy_dev->wq);
|
|
|
|
- return rc;
|
|
+ return 0;
|
|
}
|
|
|
|
static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
|
|
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
|
|
index b150f87f38f5..5a327eb7f63a 100644
|
|
--- a/drivers/char/tpm/xen-tpmfront.c
|
|
+++ b/drivers/char/tpm/xen-tpmfront.c
|
|
@@ -173,7 +173,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
return -ETIME;
|
|
}
|
|
|
|
- return count;
|
|
+ return 0;
|
|
}
|
|
|
|
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
|
|
index 25dfe050ae9f..4bd1b32a4f93 100644
|
|
--- a/drivers/clk/clk-twl6040.c
|
|
+++ b/drivers/clk/clk-twl6040.c
|
|
@@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
|
|
return pdmclk->enabled;
|
|
}
|
|
|
|
+static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
|
|
+ unsigned int reg)
|
|
+{
|
|
+ const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */
|
|
+ int ret;
|
|
+
|
|
+ ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
|
|
+ * Cold Temperature". This affects cold boot and deeper idle states it
|
|
+ * seems. The workaround consists of resetting HPPLL and LPPLL.
|
|
+ */
|
|
+static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int twl6040_pdmclk_prepare(struct clk_hw *hw)
|
|
{
|
|
struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
|
|
@@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw)
|
|
int ret;
|
|
|
|
ret = twl6040_power(pdmclk->twl6040, 1);
|
|
- if (!ret)
|
|
- pdmclk->enabled = 1;
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
|
|
+ if (ret)
|
|
+ goto out_err;
|
|
+
|
|
+ pdmclk->enabled = 1;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+out_err:
|
|
+ dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
|
|
+ twl6040_power(pdmclk->twl6040, 0);
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
|
|
index 5ef7d9ba2195..b40160eb3372 100644
|
|
--- a/drivers/clk/ingenic/cgu.c
|
|
+++ b/drivers/clk/ingenic/cgu.c
|
|
@@ -426,16 +426,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
|
|
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
|
|
struct ingenic_cgu *cgu = ingenic_clk->cgu;
|
|
const struct ingenic_cgu_clk_info *clk_info;
|
|
- long rate = *parent_rate;
|
|
+ unsigned int div = 1;
|
|
|
|
clk_info = &cgu->clock_info[ingenic_clk->idx];
|
|
|
|
if (clk_info->type & CGU_CLK_DIV)
|
|
- rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
|
|
+ div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
|
|
else if (clk_info->type & CGU_CLK_FIXDIV)
|
|
- rate /= clk_info->fixdiv.div;
|
|
+ div = clk_info->fixdiv.div;
|
|
|
|
- return rate;
|
|
+ return DIV_ROUND_UP(*parent_rate, div);
|
|
}
|
|
|
|
static int
|
|
@@ -455,7 +455,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
|
|
|
|
if (clk_info->type & CGU_CLK_DIV) {
|
|
div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
|
|
- rate = parent_rate / div;
|
|
+ rate = DIV_ROUND_UP(parent_rate, div);
|
|
|
|
if (rate != req_rate)
|
|
return -EINVAL;
|
|
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
|
|
index 502bcbb61b04..e12716d8ce3c 100644
|
|
--- a/drivers/clk/ingenic/cgu.h
|
|
+++ b/drivers/clk/ingenic/cgu.h
|
|
@@ -80,7 +80,7 @@ struct ingenic_cgu_mux_info {
|
|
* @reg: offset of the divider control register within the CGU
|
|
* @shift: number of bits to left shift the divide value by (ie. the index of
|
|
* the lowest bit of the divide value within its control register)
|
|
- * @div: number of bits to divide the divider value by (i.e. if the
|
|
+ * @div: number to divide the divider value by (i.e. if the
|
|
* effective divider value is the value written to the register
|
|
* multiplied by some constant)
|
|
* @bits: the size of the divide value in bits
|
|
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
|
|
index 93306283d764..8ae44b5db4c2 100644
|
|
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
|
|
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
|
|
@@ -136,15 +136,20 @@ static int __init exynos5_clk_register_subcmu(struct device *parent,
|
|
{
|
|
struct of_phandle_args genpdspec = { .np = pd_node };
|
|
struct platform_device *pdev;
|
|
+ int ret;
|
|
+
|
|
+ pdev = platform_device_alloc("exynos5-subcmu", PLATFORM_DEVID_AUTO);
|
|
+ if (!pdev)
|
|
+ return -ENOMEM;
|
|
|
|
- pdev = platform_device_alloc(info->pd_name, -1);
|
|
pdev->dev.parent = parent;
|
|
- pdev->driver_override = "exynos5-subcmu";
|
|
platform_set_drvdata(pdev, (void *)info);
|
|
of_genpd_add_device(&genpdspec, &pdev->dev);
|
|
- platform_device_add(pdev);
|
|
+ ret = platform_device_add(pdev);
|
|
+ if (ret)
|
|
+ platform_device_put(pdev);
|
|
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
static int __init exynos5_clk_probe(struct platform_device *pdev)
|
|
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
|
|
index 3b97f60540ad..609970c0b666 100644
|
|
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
|
|
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
|
|
@@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1",
|
|
static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1",
|
|
0x060, BIT(10), 0);
|
|
static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1",
|
|
- 0x060, BIT(12), 0);
|
|
+ 0x060, BIT(11), 0);
|
|
static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1",
|
|
- 0x060, BIT(13), 0);
|
|
+ 0x060, BIT(12), 0);
|
|
static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1",
|
|
0x060, BIT(13), 0);
|
|
static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1",
|
|
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
|
|
index 621b1cd996db..ac12f261f8ca 100644
|
|
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
|
|
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
|
|
@@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
|
|
[RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
|
|
|
|
[RST_BUS_VE] = { 0x2c4, BIT(0) },
|
|
- [RST_BUS_TCON0] = { 0x2c4, BIT(3) },
|
|
+ [RST_BUS_TCON0] = { 0x2c4, BIT(4) },
|
|
[RST_BUS_CSI] = { 0x2c4, BIT(8) },
|
|
[RST_BUS_DE] = { 0x2c4, BIT(12) },
|
|
[RST_BUS_DBG] = { 0x2c4, BIT(31) },
|
|
diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
|
|
index ec11f55594ad..5d2d42b7e182 100644
|
|
--- a/drivers/clk/uniphier/clk-uniphier-cpugear.c
|
|
+++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
|
|
@@ -47,7 +47,7 @@ static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index)
|
|
return ret;
|
|
|
|
ret = regmap_write_bits(gear->regmap,
|
|
- gear->regbase + UNIPHIER_CLK_CPUGEAR_SET,
|
|
+ gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
|
|
UNIPHIER_CLK_CPUGEAR_UPD_BIT,
|
|
UNIPHIER_CLK_CPUGEAR_UPD_BIT);
|
|
if (ret)
|
|
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
|
|
index 316d48d7be72..c1ddafa4c299 100644
|
|
--- a/drivers/clocksource/Kconfig
|
|
+++ b/drivers/clocksource/Kconfig
|
|
@@ -365,6 +365,16 @@ config ARM64_ERRATUM_858921
|
|
The workaround will be dynamically enabled when an affected
|
|
core is detected.
|
|
|
|
+config SUN50I_ERRATUM_UNKNOWN1
|
|
+ bool "Workaround for Allwinner A64 erratum UNKNOWN1"
|
|
+ default y
|
|
+ depends on ARM_ARCH_TIMER && ARM64 && ARCH_SUNXI
|
|
+ select ARM_ARCH_TIMER_OOL_WORKAROUND
|
|
+ help
|
|
+ This option enables a workaround for instability in the timer on
|
|
+ the Allwinner A64 SoC. The workaround will only be active if the
|
|
+ allwinner,erratum-unknown1 property is found in the timer node.
|
|
+
|
|
config ARM_GLOBAL_TIMER
|
|
bool "Support for the ARM global timer" if COMPILE_TEST
|
|
select TIMER_OF if OF
|
|
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
|
|
index d8c7f5750cdb..0445ad7e559e 100644
|
|
--- a/drivers/clocksource/arm_arch_timer.c
|
|
+++ b/drivers/clocksource/arm_arch_timer.c
|
|
@@ -319,6 +319,48 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
|
|
}
|
|
#endif
|
|
|
|
+#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
|
|
+/*
|
|
+ * The low bits of the counter registers are indeterminate while bit 10 or
|
|
+ * greater is rolling over. Since the counter value can jump both backward
|
|
+ * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
|
|
+ * with all ones or all zeros in the low bits. Bound the loop by the maximum
|
|
+ * number of CPU cycles in 3 consecutive 24 MHz counter periods.
|
|
+ */
|
|
+#define __sun50i_a64_read_reg(reg) ({ \
|
|
+ u64 _val; \
|
|
+ int _retries = 150; \
|
|
+ \
|
|
+ do { \
|
|
+ _val = read_sysreg(reg); \
|
|
+ _retries--; \
|
|
+ } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
|
|
+ \
|
|
+ WARN_ON_ONCE(!_retries); \
|
|
+ _val; \
|
|
+})
|
|
+
|
|
+static u64 notrace sun50i_a64_read_cntpct_el0(void)
|
|
+{
|
|
+ return __sun50i_a64_read_reg(cntpct_el0);
|
|
+}
|
|
+
|
|
+static u64 notrace sun50i_a64_read_cntvct_el0(void)
|
|
+{
|
|
+ return __sun50i_a64_read_reg(cntvct_el0);
|
|
+}
|
|
+
|
|
+static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
|
|
+{
|
|
+ return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
|
|
+}
|
|
+
|
|
+static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
|
|
+{
|
|
+ return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
|
|
+}
|
|
+#endif
|
|
+
|
|
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
|
|
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
|
|
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
|
|
@@ -408,6 +450,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
|
|
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
|
|
},
|
|
#endif
|
|
+#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
|
|
+ {
|
|
+ .match_type = ate_match_dt,
|
|
+ .id = "allwinner,erratum-unknown1",
|
|
+ .desc = "Allwinner erratum UNKNOWN1",
|
|
+ .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
|
|
+ .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
|
|
+ .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
|
|
+ .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
|
|
+ .set_next_event_phys = erratum_set_next_event_tval_phys,
|
|
+ .set_next_event_virt = erratum_set_next_event_tval_virt,
|
|
+ },
|
|
+#endif
|
|
};
|
|
|
|
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
|
|
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
|
|
index 7a244b681876..d55c30f6981d 100644
|
|
--- a/drivers/clocksource/exynos_mct.c
|
|
+++ b/drivers/clocksource/exynos_mct.c
|
|
@@ -388,6 +388,13 @@ static void exynos4_mct_tick_start(unsigned long cycles,
|
|
exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
|
|
}
|
|
|
|
+static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
|
|
+{
|
|
+ /* Clear the MCT tick interrupt */
|
|
+ if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
|
|
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
|
|
+}
|
|
+
|
|
static int exynos4_tick_set_next_event(unsigned long cycles,
|
|
struct clock_event_device *evt)
|
|
{
|
|
@@ -404,6 +411,7 @@ static int set_state_shutdown(struct clock_event_device *evt)
|
|
|
|
mevt = container_of(evt, struct mct_clock_event_device, evt);
|
|
exynos4_mct_tick_stop(mevt);
|
|
+ exynos4_mct_tick_clear(mevt);
|
|
return 0;
|
|
}
|
|
|
|
@@ -420,8 +428,11 @@ static int set_state_periodic(struct clock_event_device *evt)
|
|
return 0;
|
|
}
|
|
|
|
-static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
|
|
+static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
|
|
{
|
|
+ struct mct_clock_event_device *mevt = dev_id;
|
|
+ struct clock_event_device *evt = &mevt->evt;
|
|
+
|
|
/*
|
|
* This is for supporting oneshot mode.
|
|
* Mct would generate interrupt periodically
|
|
@@ -430,16 +441,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
|
|
if (!clockevent_state_periodic(&mevt->evt))
|
|
exynos4_mct_tick_stop(mevt);
|
|
|
|
- /* Clear the MCT tick interrupt */
|
|
- if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
|
|
- exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
|
|
-}
|
|
-
|
|
-static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
|
|
-{
|
|
- struct mct_clock_event_device *mevt = dev_id;
|
|
- struct clock_event_device *evt = &mevt->evt;
|
|
-
|
|
exynos4_mct_tick_clear(mevt);
|
|
|
|
evt->event_handler(evt);
|
|
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
|
|
index 4cce6b224b87..3ecf84706640 100644
|
|
--- a/drivers/clocksource/timer-ti-dm.c
|
|
+++ b/drivers/clocksource/timer-ti-dm.c
|
|
@@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
|
|
if (IS_ERR(parent))
|
|
return -ENODEV;
|
|
|
|
+ /* Bail out if both clocks point to fck */
|
|
+ if (clk_is_match(parent, timer->fclk))
|
|
+ return 0;
|
|
+
|
|
ret = clk_set_parent(timer->fclk, parent);
|
|
if (ret < 0)
|
|
pr_err("%s: failed to set parent\n", __func__);
|
|
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
|
|
index 46254e583982..74e0e0c20c46 100644
|
|
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
|
|
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
|
|
@@ -143,7 +143,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
|
|
return ret;
|
|
}
|
|
|
|
-static void __init pxa_cpufreq_init_voltages(void)
|
|
+static void pxa_cpufreq_init_voltages(void)
|
|
{
|
|
vcc_core = regulator_get(NULL, "vcc_core");
|
|
if (IS_ERR(vcc_core)) {
|
|
@@ -159,7 +159,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
|
|
return 0;
|
|
}
|
|
|
|
-static void __init pxa_cpufreq_init_voltages(void) { }
|
|
+static void pxa_cpufreq_init_voltages(void) { }
|
|
#endif
|
|
|
|
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
|
|
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
|
|
index 2a3675c24032..a472b814058f 100644
|
|
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
|
|
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
|
|
@@ -75,7 +75,7 @@ static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
|
|
|
|
static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
|
|
{
|
|
- struct opp_table *opp_tables[NR_CPUS] = {0};
|
|
+ struct opp_table **opp_tables;
|
|
enum _msm8996_version msm8996_version;
|
|
struct nvmem_cell *speedbin_nvmem;
|
|
struct device_node *np;
|
|
@@ -133,6 +133,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
|
|
}
|
|
kfree(speedbin);
|
|
|
|
+ opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL);
|
|
+ if (!opp_tables)
|
|
+ return -ENOMEM;
|
|
+
|
|
for_each_possible_cpu(cpu) {
|
|
cpu_dev = get_cpu_device(cpu);
|
|
if (NULL == cpu_dev) {
|
|
@@ -151,8 +155,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
|
|
|
|
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
|
|
NULL, 0);
|
|
- if (!IS_ERR(cpufreq_dt_pdev))
|
|
+ if (!IS_ERR(cpufreq_dt_pdev)) {
|
|
+ platform_set_drvdata(pdev, opp_tables);
|
|
return 0;
|
|
+ }
|
|
|
|
ret = PTR_ERR(cpufreq_dt_pdev);
|
|
dev_err(cpu_dev, "Failed to register platform device\n");
|
|
@@ -163,13 +169,23 @@ free_opp:
|
|
break;
|
|
dev_pm_opp_put_supported_hw(opp_tables[cpu]);
|
|
}
|
|
+ kfree(opp_tables);
|
|
|
|
return ret;
|
|
}
|
|
|
|
static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
|
|
{
|
|
+ struct opp_table **opp_tables = platform_get_drvdata(pdev);
|
|
+ unsigned int cpu;
|
|
+
|
|
platform_device_unregister(cpufreq_dt_pdev);
|
|
+
|
|
+ for_each_possible_cpu(cpu)
|
|
+ dev_pm_opp_put_supported_hw(opp_tables[cpu]);
|
|
+
|
|
+ kfree(opp_tables);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
|
|
index 43530254201a..4bb154f6c54c 100644
|
|
--- a/drivers/cpufreq/tegra124-cpufreq.c
|
|
+++ b/drivers/cpufreq/tegra124-cpufreq.c
|
|
@@ -134,6 +134,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
|
|
|
|
platform_set_drvdata(pdev, priv);
|
|
|
|
+ of_node_put(np);
|
|
+
|
|
return 0;
|
|
|
|
out_switch_to_pllx:
|
|
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
|
|
index ec40f991e6c6..9bc54c3c2cb9 100644
|
|
--- a/drivers/crypto/caam/caamalg.c
|
|
+++ b/drivers/crypto/caam/caamalg.c
|
|
@@ -1005,6 +1005,7 @@ static void init_aead_job(struct aead_request *req,
|
|
if (unlikely(req->src != req->dst)) {
|
|
if (edesc->dst_nents == 1) {
|
|
dst_dma = sg_dma_address(req->dst);
|
|
+ out_options = 0;
|
|
} else {
|
|
dst_dma = edesc->sec4_sg_dma +
|
|
sec4_sg_index *
|
|
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
|
|
index f84ca2ff61de..f5fd00065650 100644
|
|
--- a/drivers/crypto/caam/caamhash.c
|
|
+++ b/drivers/crypto/caam/caamhash.c
|
|
@@ -118,6 +118,7 @@ struct caam_hash_ctx {
|
|
struct caam_hash_state {
|
|
dma_addr_t buf_dma;
|
|
dma_addr_t ctx_dma;
|
|
+ int ctx_dma_len;
|
|
u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
|
|
int buflen_0;
|
|
u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
|
|
@@ -170,6 +171,7 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
|
|
struct caam_hash_state *state,
|
|
int ctx_len)
|
|
{
|
|
+ state->ctx_dma_len = ctx_len;
|
|
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
|
|
ctx_len, DMA_FROM_DEVICE);
|
|
if (dma_mapping_error(jrdev, state->ctx_dma)) {
|
|
@@ -183,18 +185,6 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
|
|
return 0;
|
|
}
|
|
|
|
-/* Map req->result, and append seq_out_ptr command that points to it */
|
|
-static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
|
|
- u8 *result, int digestsize)
|
|
-{
|
|
- dma_addr_t dst_dma;
|
|
-
|
|
- dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
|
|
- append_seq_out_ptr(desc, dst_dma, digestsize, 0);
|
|
-
|
|
- return dst_dma;
|
|
-}
|
|
-
|
|
/* Map current buffer in state (if length > 0) and put it in link table */
|
|
static inline int buf_map_to_sec4_sg(struct device *jrdev,
|
|
struct sec4_sg_entry *sec4_sg,
|
|
@@ -223,6 +213,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
|
|
struct caam_hash_state *state, int ctx_len,
|
|
struct sec4_sg_entry *sec4_sg, u32 flag)
|
|
{
|
|
+ state->ctx_dma_len = ctx_len;
|
|
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
|
|
if (dma_mapping_error(jrdev, state->ctx_dma)) {
|
|
dev_err(jrdev, "unable to map ctx\n");
|
|
@@ -485,7 +476,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
|
|
|
|
/*
|
|
* ahash_edesc - s/w-extended ahash descriptor
|
|
- * @dst_dma: physical mapped address of req->result
|
|
* @sec4_sg_dma: physical mapped address of h/w link table
|
|
* @src_nents: number of segments in input scatterlist
|
|
* @sec4_sg_bytes: length of dma mapped sec4_sg space
|
|
@@ -493,7 +483,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
|
|
* @sec4_sg: h/w link table
|
|
*/
|
|
struct ahash_edesc {
|
|
- dma_addr_t dst_dma;
|
|
dma_addr_t sec4_sg_dma;
|
|
int src_nents;
|
|
int sec4_sg_bytes;
|
|
@@ -509,8 +498,6 @@ static inline void ahash_unmap(struct device *dev,
|
|
|
|
if (edesc->src_nents)
|
|
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
|
|
- if (edesc->dst_dma)
|
|
- dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
|
|
|
|
if (edesc->sec4_sg_bytes)
|
|
dma_unmap_single(dev, edesc->sec4_sg_dma,
|
|
@@ -527,12 +514,10 @@ static inline void ahash_unmap_ctx(struct device *dev,
|
|
struct ahash_edesc *edesc,
|
|
struct ahash_request *req, int dst_len, u32 flag)
|
|
{
|
|
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
|
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
|
struct caam_hash_state *state = ahash_request_ctx(req);
|
|
|
|
if (state->ctx_dma) {
|
|
- dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
|
|
+ dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
|
|
state->ctx_dma = 0;
|
|
}
|
|
ahash_unmap(dev, edesc, req, dst_len);
|
|
@@ -545,9 +530,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
|
|
struct ahash_edesc *edesc;
|
|
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
|
int digestsize = crypto_ahash_digestsize(ahash);
|
|
+ struct caam_hash_state *state = ahash_request_ctx(req);
|
|
#ifdef DEBUG
|
|
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
|
- struct caam_hash_state *state = ahash_request_ctx(req);
|
|
|
|
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
|
#endif
|
|
@@ -556,17 +541,14 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
|
|
if (err)
|
|
caam_jr_strstatus(jrdev, err);
|
|
|
|
- ahash_unmap(jrdev, edesc, req, digestsize);
|
|
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
|
+ memcpy(req->result, state->caam_ctx, digestsize);
|
|
kfree(edesc);
|
|
|
|
#ifdef DEBUG
|
|
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
|
|
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
|
ctx->ctx_len, 1);
|
|
- if (req->result)
|
|
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
|
|
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
|
|
- digestsize, 1);
|
|
#endif
|
|
|
|
req->base.complete(&req->base, err);
|
|
@@ -614,9 +596,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
|
|
struct ahash_edesc *edesc;
|
|
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
|
int digestsize = crypto_ahash_digestsize(ahash);
|
|
+ struct caam_hash_state *state = ahash_request_ctx(req);
|
|
#ifdef DEBUG
|
|
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
|
- struct caam_hash_state *state = ahash_request_ctx(req);
|
|
|
|
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
|
#endif
|
|
@@ -625,17 +607,14 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
|
|
if (err)
|
|
caam_jr_strstatus(jrdev, err);
|
|
|
|
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
|
|
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
|
|
+ memcpy(req->result, state->caam_ctx, digestsize);
|
|
kfree(edesc);
|
|
|
|
#ifdef DEBUG
|
|
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
|
|
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
|
ctx->ctx_len, 1);
|
|
- if (req->result)
|
|
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
|
|
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
|
|
- digestsize, 1);
|
|
#endif
|
|
|
|
req->base.complete(&req->base, err);
|
|
@@ -896,7 +875,7 @@ static int ahash_final_ctx(struct ahash_request *req)
|
|
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
|
|
|
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
|
|
- edesc->sec4_sg, DMA_TO_DEVICE);
|
|
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
|
|
if (ret)
|
|
goto unmap_ctx;
|
|
|
|
@@ -916,14 +895,7 @@ static int ahash_final_ctx(struct ahash_request *req)
|
|
|
|
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
|
|
LDST_SGF);
|
|
-
|
|
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
|
- digestsize);
|
|
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
|
|
- dev_err(jrdev, "unable to map dst\n");
|
|
- ret = -ENOMEM;
|
|
- goto unmap_ctx;
|
|
- }
|
|
+ append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
|
|
|
|
#ifdef DEBUG
|
|
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
|
@@ -936,7 +908,7 @@ static int ahash_final_ctx(struct ahash_request *req)
|
|
|
|
return -EINPROGRESS;
|
|
unmap_ctx:
|
|
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
|
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
|
|
kfree(edesc);
|
|
return ret;
|
|
}
|
|
@@ -990,7 +962,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
|
edesc->src_nents = src_nents;
|
|
|
|
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
|
|
- edesc->sec4_sg, DMA_TO_DEVICE);
|
|
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
|
|
if (ret)
|
|
goto unmap_ctx;
|
|
|
|
@@ -1004,13 +976,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
|
if (ret)
|
|
goto unmap_ctx;
|
|
|
|
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
|
- digestsize);
|
|
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
|
|
- dev_err(jrdev, "unable to map dst\n");
|
|
- ret = -ENOMEM;
|
|
- goto unmap_ctx;
|
|
- }
|
|
+ append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
|
|
|
|
#ifdef DEBUG
|
|
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
|
@@ -1023,7 +989,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
|
|
|
return -EINPROGRESS;
|
|
unmap_ctx:
|
|
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
|
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
|
|
kfree(edesc);
|
|
return ret;
|
|
}
|
|
@@ -1082,10 +1048,8 @@ static int ahash_digest(struct ahash_request *req)
|
|
|
|
desc = edesc->hw_desc;
|
|
|
|
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
|
- digestsize);
|
|
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
|
|
- dev_err(jrdev, "unable to map dst\n");
|
|
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
|
|
+ if (ret) {
|
|
ahash_unmap(jrdev, edesc, req, digestsize);
|
|
kfree(edesc);
|
|
return -ENOMEM;
|
|
@@ -1100,7 +1064,7 @@ static int ahash_digest(struct ahash_request *req)
|
|
if (!ret) {
|
|
ret = -EINPROGRESS;
|
|
} else {
|
|
- ahash_unmap(jrdev, edesc, req, digestsize);
|
|
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
|
kfree(edesc);
|
|
}
|
|
|
|
@@ -1142,12 +1106,9 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
|
append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
|
|
}
|
|
|
|
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
|
- digestsize);
|
|
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
|
|
- dev_err(jrdev, "unable to map dst\n");
|
|
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
|
|
+ if (ret)
|
|
goto unmap;
|
|
- }
|
|
|
|
#ifdef DEBUG
|
|
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
|
@@ -1158,7 +1119,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
|
if (!ret) {
|
|
ret = -EINPROGRESS;
|
|
} else {
|
|
- ahash_unmap(jrdev, edesc, req, digestsize);
|
|
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
|
kfree(edesc);
|
|
}
|
|
|
|
@@ -1357,12 +1318,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
|
goto unmap;
|
|
}
|
|
|
|
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
|
- digestsize);
|
|
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
|
|
- dev_err(jrdev, "unable to map dst\n");
|
|
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
|
|
+ if (ret)
|
|
goto unmap;
|
|
- }
|
|
|
|
#ifdef DEBUG
|
|
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
|
@@ -1373,7 +1331,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
|
if (!ret) {
|
|
ret = -EINPROGRESS;
|
|
} else {
|
|
- ahash_unmap(jrdev, edesc, req, digestsize);
|
|
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
|
kfree(edesc);
|
|
}
|
|
|
|
@@ -1505,6 +1463,7 @@ static int ahash_init(struct ahash_request *req)
|
|
state->final = ahash_final_no_ctx;
|
|
|
|
state->ctx_dma = 0;
|
|
+ state->ctx_dma_len = 0;
|
|
state->current_buf = 0;
|
|
state->buf_dma = 0;
|
|
state->buflen_0 = 0;
|
|
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
|
|
index dd948e1df9e5..3bcb6bce666e 100644
|
|
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
|
|
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
|
|
@@ -614,10 +614,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
|
|
hw_iv_size, DMA_BIDIRECTIONAL);
|
|
}
|
|
|
|
- /*In case a pool was set, a table was
|
|
- *allocated and should be released
|
|
- */
|
|
- if (areq_ctx->mlli_params.curr_pool) {
|
|
+ /* Release pool */
|
|
+ if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
|
|
+ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
|
|
+ (areq_ctx->mlli_params.mlli_virt_addr)) {
|
|
dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
|
|
&areq_ctx->mlli_params.mlli_dma_addr,
|
|
areq_ctx->mlli_params.mlli_virt_addr);
|
|
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
|
|
index 7623b29911af..54a39164aab8 100644
|
|
--- a/drivers/crypto/ccree/cc_cipher.c
|
|
+++ b/drivers/crypto/ccree/cc_cipher.c
|
|
@@ -79,6 +79,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
|
|
default:
|
|
break;
|
|
}
|
|
+ break;
|
|
case S_DIN_to_DES:
|
|
if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
|
|
return 0;
|
|
@@ -634,6 +635,8 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
|
|
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
|
|
unsigned int len;
|
|
|
|
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
|
|
+
|
|
switch (ctx_p->cipher_mode) {
|
|
case DRV_CIPHER_CBC:
|
|
/*
|
|
@@ -663,7 +666,6 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
|
|
break;
|
|
}
|
|
|
|
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
|
|
kzfree(req_ctx->iv);
|
|
|
|
skcipher_request_complete(req, err);
|
|
@@ -781,7 +783,8 @@ static int cc_cipher_decrypt(struct skcipher_request *req)
|
|
|
|
memset(req_ctx, 0, sizeof(*req_ctx));
|
|
|
|
- if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
|
|
+ if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
|
|
+ (req->cryptlen >= ivsize)) {
|
|
|
|
/* Allocate and save the last IV sized bytes of the source,
|
|
* which will be lost in case of in-place decryption.
|
|
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
|
|
index c9d622abd90c..0ce4a65b95f5 100644
|
|
--- a/drivers/crypto/rockchip/rk3288_crypto.c
|
|
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
|
|
@@ -119,7 +119,7 @@ static int rk_load_data(struct rk_crypto_info *dev,
|
|
count = (dev->left_bytes > PAGE_SIZE) ?
|
|
PAGE_SIZE : dev->left_bytes;
|
|
|
|
- if (!sg_pcopy_to_buffer(dev->first, dev->nents,
|
|
+ if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
|
|
dev->addr_vir, count,
|
|
dev->total - dev->left_bytes)) {
|
|
dev_err(dev->dev, "[%s:%d] pcopy err\n",
|
|
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
|
|
index d5fb4013fb42..54ee5b3ed9db 100644
|
|
--- a/drivers/crypto/rockchip/rk3288_crypto.h
|
|
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
|
|
@@ -207,7 +207,8 @@ struct rk_crypto_info {
|
|
void *addr_vir;
|
|
int aligned;
|
|
int align_size;
|
|
- size_t nents;
|
|
+ size_t src_nents;
|
|
+ size_t dst_nents;
|
|
unsigned int total;
|
|
unsigned int count;
|
|
dma_addr_t addr_in;
|
|
@@ -244,6 +245,7 @@ struct rk_cipher_ctx {
|
|
struct rk_crypto_info *dev;
|
|
unsigned int keylen;
|
|
u32 mode;
|
|
+ u8 iv[AES_BLOCK_SIZE];
|
|
};
|
|
|
|
enum alg_type {
|
|
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
|
|
index 639c15c5364b..23305f22072f 100644
|
|
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
|
|
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
|
|
@@ -242,6 +242,17 @@ static void crypto_dma_start(struct rk_crypto_info *dev)
|
|
static int rk_set_data_start(struct rk_crypto_info *dev)
|
|
{
|
|
int err;
|
|
+ struct ablkcipher_request *req =
|
|
+ ablkcipher_request_cast(dev->async_req);
|
|
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
|
|
+ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
|
|
+ u32 ivsize = crypto_ablkcipher_ivsize(tfm);
|
|
+ u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
|
|
+ dev->sg_src->offset + dev->sg_src->length - ivsize;
|
|
+
|
|
+ /* store the iv that need to be updated in chain mode */
|
|
+ if (ctx->mode & RK_CRYPTO_DEC)
|
|
+ memcpy(ctx->iv, src_last_blk, ivsize);
|
|
|
|
err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
|
|
if (!err)
|
|
@@ -260,8 +271,9 @@ static int rk_ablk_start(struct rk_crypto_info *dev)
|
|
dev->total = req->nbytes;
|
|
dev->sg_src = req->src;
|
|
dev->first = req->src;
|
|
- dev->nents = sg_nents(req->src);
|
|
+ dev->src_nents = sg_nents(req->src);
|
|
dev->sg_dst = req->dst;
|
|
+ dev->dst_nents = sg_nents(req->dst);
|
|
dev->aligned = 1;
|
|
|
|
spin_lock_irqsave(&dev->lock, flags);
|
|
@@ -285,6 +297,28 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
|
|
memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
|
|
}
|
|
|
|
+static void rk_update_iv(struct rk_crypto_info *dev)
|
|
+{
|
|
+ struct ablkcipher_request *req =
|
|
+ ablkcipher_request_cast(dev->async_req);
|
|
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
|
|
+ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
|
|
+ u32 ivsize = crypto_ablkcipher_ivsize(tfm);
|
|
+ u8 *new_iv = NULL;
|
|
+
|
|
+ if (ctx->mode & RK_CRYPTO_DEC) {
|
|
+ new_iv = ctx->iv;
|
|
+ } else {
|
|
+ new_iv = page_address(sg_page(dev->sg_dst)) +
|
|
+ dev->sg_dst->offset + dev->sg_dst->length - ivsize;
|
|
+ }
|
|
+
|
|
+ if (ivsize == DES_BLOCK_SIZE)
|
|
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
|
|
+ else if (ivsize == AES_BLOCK_SIZE)
|
|
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
|
|
+}
|
|
+
|
|
/* return:
|
|
* true some err was occurred
|
|
* fault no err, continue
|
|
@@ -297,7 +331,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
|
|
|
|
dev->unload_data(dev);
|
|
if (!dev->aligned) {
|
|
- if (!sg_pcopy_from_buffer(req->dst, dev->nents,
|
|
+ if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
|
|
dev->addr_vir, dev->count,
|
|
dev->total - dev->left_bytes -
|
|
dev->count)) {
|
|
@@ -306,6 +340,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
|
|
}
|
|
}
|
|
if (dev->left_bytes) {
|
|
+ rk_update_iv(dev);
|
|
if (dev->aligned) {
|
|
if (sg_is_last(dev->sg_src)) {
|
|
dev_err(dev->dev, "[%s:%d] Lack of data\n",
|
|
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
|
|
index 821a506b9e17..c336ae75e361 100644
|
|
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
|
|
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
|
|
@@ -206,7 +206,7 @@ static int rk_ahash_start(struct rk_crypto_info *dev)
|
|
dev->sg_dst = NULL;
|
|
dev->sg_src = req->src;
|
|
dev->first = req->src;
|
|
- dev->nents = sg_nents(req->src);
|
|
+ dev->src_nents = sg_nents(req->src);
|
|
rctx = ahash_request_ctx(req);
|
|
rctx->mode = 0;
|
|
|
|
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
|
|
index 1bb1a8e09025..6c94ed750049 100644
|
|
--- a/drivers/dma/sh/usb-dmac.c
|
|
+++ b/drivers/dma/sh/usb-dmac.c
|
|
@@ -697,6 +697,8 @@ static int usb_dmac_runtime_resume(struct device *dev)
|
|
#endif /* CONFIG_PM */
|
|
|
|
static const struct dev_pm_ops usb_dmac_pm = {
|
|
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
|
|
+ pm_runtime_force_resume)
|
|
SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
|
|
NULL)
|
|
};
|
|
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
|
|
index 023a32cfac42..e0657fc72d31 100644
|
|
--- a/drivers/gpio/gpio-pca953x.c
|
|
+++ b/drivers/gpio/gpio-pca953x.c
|
|
@@ -543,7 +543,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
|
|
|
|
static void pca953x_irq_shutdown(struct irq_data *d)
|
|
{
|
|
- struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
|
|
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
|
|
+ struct pca953x_chip *chip = gpiochip_get_data(gc);
|
|
u8 mask = 1 << (d->hwirq % BANK_SZ);
|
|
|
|
chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
|
|
index bd039322f697..6342f6499351 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
|
|
@@ -1347,12 +1347,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)
|
|
struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
|
|
bool res;
|
|
|
|
- kernel_fpu_begin();
|
|
-
|
|
/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
|
|
res = dm_pp_get_clock_levels_by_type_with_voltage(
|
|
ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
|
|
|
|
+ kernel_fpu_begin();
|
|
+
|
|
if (res)
|
|
res = verify_clock_values(&fclks);
|
|
|
|
@@ -1371,9 +1371,13 @@ void dcn_bw_update_from_pplib(struct dc *dc)
|
|
} else
|
|
BREAK_TO_DEBUGGER();
|
|
|
|
+ kernel_fpu_end();
|
|
+
|
|
res = dm_pp_get_clock_levels_by_type_with_voltage(
|
|
ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
|
|
|
|
+ kernel_fpu_begin();
|
|
+
|
|
if (res)
|
|
res = verify_clock_values(&dcfclks);
|
|
|
|
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
|
|
index 052e60dfaf9f..b52ccab428a9 100644
|
|
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
|
|
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
|
|
@@ -3487,14 +3487,14 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
|
|
|
|
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
|
|
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
|
|
- ixSMU_PM_STATUS_94, 0);
|
|
+ ixSMU_PM_STATUS_95, 0);
|
|
|
|
for (i = 0; i < 10; i++) {
|
|
- mdelay(1);
|
|
+ mdelay(500);
|
|
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
|
|
tmp = cgs_read_ind_register(hwmgr->device,
|
|
CGS_IND_REG__SMC,
|
|
- ixSMU_PM_STATUS_94);
|
|
+ ixSMU_PM_STATUS_95);
|
|
if (tmp != 0)
|
|
break;
|
|
}
|
|
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
|
|
index 1bda809a7289..e65596617239 100644
|
|
--- a/drivers/gpu/drm/drm_fb_helper.c
|
|
+++ b/drivers/gpu/drm/drm_fb_helper.c
|
|
@@ -3156,9 +3156,7 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client)
|
|
|
|
static int drm_fbdev_client_restore(struct drm_client_dev *client)
|
|
{
|
|
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
|
|
-
|
|
- drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
|
|
+ drm_fb_helper_lastclose(client->dev);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
|
|
index 280c851714e6..03cda197fb6b 100644
|
|
--- a/drivers/gpu/drm/i915/i915_gem.c
|
|
+++ b/drivers/gpu/drm/i915/i915_gem.c
|
|
@@ -1828,7 +1828,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp,
|
|
if (vma->vm_file != filp)
|
|
return false;
|
|
|
|
- return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
|
|
+ return vma->vm_start == addr &&
|
|
+ (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
|
|
index 3bd0f8a18e74..42daa5c9ff8e 100644
|
|
--- a/drivers/gpu/drm/imx/imx-ldb.c
|
|
+++ b/drivers/gpu/drm/imx/imx-ldb.c
|
|
@@ -651,8 +651,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
|
|
int bus_format;
|
|
|
|
ret = of_property_read_u32(child, "reg", &i);
|
|
- if (ret || i < 0 || i > 1)
|
|
- return -EINVAL;
|
|
+ if (ret || i < 0 || i > 1) {
|
|
+ ret = -EINVAL;
|
|
+ goto free_child;
|
|
+ }
|
|
|
|
if (!of_device_is_available(child))
|
|
continue;
|
|
@@ -665,7 +667,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
|
|
channel = &imx_ldb->channel[i];
|
|
channel->ldb = imx_ldb;
|
|
channel->chno = i;
|
|
- channel->child = child;
|
|
|
|
/*
|
|
* The output port is port@4 with an external 4-port mux or
|
|
@@ -675,13 +676,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
|
|
imx_ldb->lvds_mux ? 4 : 2, 0,
|
|
&channel->panel, &channel->bridge);
|
|
if (ret && ret != -ENODEV)
|
|
- return ret;
|
|
+ goto free_child;
|
|
|
|
/* panel ddc only if there is no bridge */
|
|
if (!channel->bridge) {
|
|
ret = imx_ldb_panel_ddc(dev, channel, child);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto free_child;
|
|
}
|
|
|
|
bus_format = of_get_bus_format(dev, child);
|
|
@@ -697,18 +698,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
|
|
if (bus_format < 0) {
|
|
dev_err(dev, "could not determine data mapping: %d\n",
|
|
bus_format);
|
|
- return bus_format;
|
|
+ ret = bus_format;
|
|
+ goto free_child;
|
|
}
|
|
channel->bus_format = bus_format;
|
|
+ channel->child = child;
|
|
|
|
ret = imx_ldb_register(drm, channel);
|
|
- if (ret)
|
|
- return ret;
|
|
+ if (ret) {
|
|
+ channel->child = NULL;
|
|
+ goto free_child;
|
|
+ }
|
|
}
|
|
|
|
dev_set_drvdata(dev, imx_ldb);
|
|
|
|
return 0;
|
|
+
|
|
+free_child:
|
|
+ of_node_put(child);
|
|
+ return ret;
|
|
}
|
|
|
|
static void imx_ldb_unbind(struct device *dev, struct device *master,
|
|
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
|
|
index 203f247d4854..a323a0db2fc1 100644
|
|
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
|
|
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
|
|
@@ -375,9 +375,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
|
|
if (ret)
|
|
return ret;
|
|
|
|
- /* CRTC should be enabled */
|
|
+ /* nothing to check when disabling or disabled */
|
|
if (!crtc_state->enable)
|
|
- return -EINVAL;
|
|
+ return 0;
|
|
|
|
switch (plane->type) {
|
|
case DRM_PLANE_TYPE_PRIMARY:
|
|
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
|
|
index 54324330b91f..2f0a5bd50174 100644
|
|
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
|
|
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
|
|
@@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
|
|
return -EINVAL;
|
|
}
|
|
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
|
|
+ break;
|
|
case CB_TARGET_MASK:
|
|
track->cb_target_mask = radeon_get_ib_value(p, idx);
|
|
track->cb_dirty = true;
|
|
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
|
|
index 474b00e19697..0a7d4395d427 100644
|
|
--- a/drivers/gpu/ipu-v3/ipu-common.c
|
|
+++ b/drivers/gpu/ipu-v3/ipu-common.c
|
|
@@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = {
|
|
.cpmem_ofs = 0x1f000000,
|
|
.srm_ofs = 0x1f040000,
|
|
.tpm_ofs = 0x1f060000,
|
|
- .csi0_ofs = 0x1f030000,
|
|
- .csi1_ofs = 0x1f038000,
|
|
+ .csi0_ofs = 0x1e030000,
|
|
+ .csi1_ofs = 0x1e038000,
|
|
.ic_ofs = 0x1e020000,
|
|
.disp0_ofs = 0x1e040000,
|
|
.disp1_ofs = 0x1e048000,
|
|
@@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = {
|
|
.cpmem_ofs = 0x07000000,
|
|
.srm_ofs = 0x07040000,
|
|
.tpm_ofs = 0x07060000,
|
|
- .csi0_ofs = 0x07030000,
|
|
- .csi1_ofs = 0x07038000,
|
|
+ .csi0_ofs = 0x06030000,
|
|
+ .csi1_ofs = 0x06038000,
|
|
.ic_ofs = 0x06020000,
|
|
.disp0_ofs = 0x06040000,
|
|
.disp1_ofs = 0x06048000,
|
|
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
|
|
index 8426b7970c14..cc287cf6eb29 100644
|
|
--- a/drivers/hwtracing/intel_th/gth.c
|
|
+++ b/drivers/hwtracing/intel_th/gth.c
|
|
@@ -607,6 +607,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
|
|
{
|
|
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
|
|
int port = othdev->output.port;
|
|
+ int master;
|
|
|
|
if (thdev->host_mode)
|
|
return;
|
|
@@ -615,6 +616,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
|
|
othdev->output.port = -1;
|
|
othdev->output.active = false;
|
|
gth->output[port].output = NULL;
|
|
+ for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
|
|
+ if (gth->master[master] == port)
|
|
+ gth->master[master] = -1;
|
|
spin_unlock(>h->gth_lock);
|
|
}
|
|
|
|
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
|
|
index 10bcb5d73f90..9d55e104400c 100644
|
|
--- a/drivers/hwtracing/stm/core.c
|
|
+++ b/drivers/hwtracing/stm/core.c
|
|
@@ -244,6 +244,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
|
|
;
|
|
if (i == width)
|
|
return pos;
|
|
+
|
|
+ /* step over [pos..pos+i) to continue search */
|
|
+ pos += i;
|
|
}
|
|
|
|
return -1;
|
|
@@ -550,7 +553,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
|
|
{
|
|
struct stm_device *stm = stmf->stm;
|
|
struct stp_policy_id *id;
|
|
- int ret = -EINVAL;
|
|
+ int ret = -EINVAL, wlimit = 1;
|
|
u32 size;
|
|
|
|
if (stmf->output.nr_chans)
|
|
@@ -578,8 +581,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
|
|
if (id->__reserved_0 || id->__reserved_1)
|
|
goto err_free;
|
|
|
|
- if (id->width < 1 ||
|
|
- id->width > PAGE_SIZE / stm->data->sw_mmiosz)
|
|
+ if (stm->data->sw_mmiosz)
|
|
+ wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
|
|
+
|
|
+ if (id->width < 1 || id->width > wlimit)
|
|
goto err_free;
|
|
|
|
ret = stm_file_assign(stmf, id->id, id->width);
|
|
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
|
|
index 44deae78913e..4d19254f78c8 100644
|
|
--- a/drivers/i2c/busses/i2c-bcm2835.c
|
|
+++ b/drivers/i2c/busses/i2c-bcm2835.c
|
|
@@ -191,6 +191,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
|
|
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
|
|
}
|
|
|
|
+static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev)
|
|
+{
|
|
+ i2c_dev->curr_msg = NULL;
|
|
+ i2c_dev->num_msgs = 0;
|
|
+
|
|
+ i2c_dev->msg_buf = NULL;
|
|
+ i2c_dev->msg_buf_remaining = 0;
|
|
+}
|
|
+
|
|
/*
|
|
* Note about I2C_C_CLEAR on error:
|
|
* The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
|
|
@@ -291,6 +300,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
|
|
|
|
time_left = wait_for_completion_timeout(&i2c_dev->completion,
|
|
adap->timeout);
|
|
+
|
|
+ bcm2835_i2c_finish_transfer(i2c_dev);
|
|
+
|
|
if (!time_left) {
|
|
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
|
|
BCM2835_I2C_C_CLEAR);
|
|
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
|
|
index b13605718291..d917cefc5a19 100644
|
|
--- a/drivers/i2c/busses/i2c-cadence.c
|
|
+++ b/drivers/i2c/busses/i2c-cadence.c
|
|
@@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
|
|
* Check for the message size against FIFO depth and set the
|
|
* 'hold bus' bit if it is greater than FIFO depth.
|
|
*/
|
|
- if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
|
|
+ if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
|
|
ctrl_reg |= CDNS_I2C_CR_HOLD;
|
|
+ else
|
|
+ ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
|
|
|
|
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
|
|
|
|
@@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
|
|
* Check for the message size against FIFO depth and set the
|
|
* 'hold bus' bit if it is greater than FIFO depth.
|
|
*/
|
|
- if (id->send_count > CDNS_I2C_FIFO_DEPTH)
|
|
+ if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
|
|
ctrl_reg |= CDNS_I2C_CR_HOLD;
|
|
+ else
|
|
+ ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
|
|
+
|
|
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
|
|
|
|
/* Clear the interrupts in interrupt status register. */
|
|
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
|
|
index 60c8561fbe65..ef13b6ce9d8d 100644
|
|
--- a/drivers/i2c/busses/i2c-tegra.c
|
|
+++ b/drivers/i2c/busses/i2c-tegra.c
|
|
@@ -832,7 +832,7 @@ static const struct i2c_algorithm tegra_i2c_algo = {
|
|
/* payload size is only 12 bit */
|
|
static const struct i2c_adapter_quirks tegra_i2c_quirks = {
|
|
.max_read_len = 4096,
|
|
- .max_write_len = 4096,
|
|
+ .max_write_len = 4096 - 12,
|
|
};
|
|
|
|
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
|
|
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
|
|
index f10443f92e4c..4be29ed44755 100644
|
|
--- a/drivers/iio/adc/exynos_adc.c
|
|
+++ b/drivers/iio/adc/exynos_adc.c
|
|
@@ -915,7 +915,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
|
|
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
|
|
struct exynos_adc *info = iio_priv(indio_dev);
|
|
|
|
- if (IS_REACHABLE(CONFIG_INPUT)) {
|
|
+ if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
|
|
free_irq(info->tsirq, info);
|
|
input_unregister_device(info->input);
|
|
}
|
|
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
|
|
index cfd252386356..2ea42c04cfd2 100644
|
|
--- a/drivers/infiniband/hw/hfi1/hfi.h
|
|
+++ b/drivers/infiniband/hw/hfi1/hfi.h
|
|
@@ -1425,7 +1425,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
|
|
struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
|
|
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
|
|
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
|
|
-void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
|
|
+int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
|
|
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
|
|
u16 ctxt);
|
|
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
|
|
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
|
|
index 758d273c32cf..da786eb18558 100644
|
|
--- a/drivers/infiniband/hw/hfi1/init.c
|
|
+++ b/drivers/infiniband/hw/hfi1/init.c
|
|
@@ -213,12 +213,12 @@ static void hfi1_rcd_free(struct kref *kref)
|
|
struct hfi1_ctxtdata *rcd =
|
|
container_of(kref, struct hfi1_ctxtdata, kref);
|
|
|
|
- hfi1_free_ctxtdata(rcd->dd, rcd);
|
|
-
|
|
spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
|
|
rcd->dd->rcd[rcd->ctxt] = NULL;
|
|
spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
|
|
|
|
+ hfi1_free_ctxtdata(rcd->dd, rcd);
|
|
+
|
|
kfree(rcd);
|
|
}
|
|
|
|
@@ -241,10 +241,13 @@ int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
|
|
* @rcd: pointer to an initialized rcd data structure
|
|
*
|
|
* Use this to get a reference after the init.
|
|
+ *
|
|
+ * Return : reflect kref_get_unless_zero(), which returns non-zero on
|
|
+ * increment, otherwise 0.
|
|
*/
|
|
-void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
|
|
+int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
|
|
{
|
|
- kref_get(&rcd->kref);
|
|
+ return kref_get_unless_zero(&rcd->kref);
|
|
}
|
|
|
|
/**
|
|
@@ -324,7 +327,8 @@ struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
|
|
spin_lock_irqsave(&dd->uctxt_lock, flags);
|
|
if (dd->rcd[ctxt]) {
|
|
rcd = dd->rcd[ctxt];
|
|
- hfi1_rcd_get(rcd);
|
|
+ if (!hfi1_rcd_get(rcd))
|
|
+ rcd = NULL;
|
|
}
|
|
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
|
|
|
|
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
|
|
index 312916f99597..73686c2460ce 100644
|
|
--- a/drivers/input/keyboard/cap11xx.c
|
|
+++ b/drivers/input/keyboard/cap11xx.c
|
|
@@ -75,9 +75,7 @@
|
|
struct cap11xx_led {
|
|
struct cap11xx_priv *priv;
|
|
struct led_classdev cdev;
|
|
- struct work_struct work;
|
|
u32 reg;
|
|
- enum led_brightness new_brightness;
|
|
};
|
|
#endif
|
|
|
|
@@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev)
|
|
}
|
|
|
|
#ifdef CONFIG_LEDS_CLASS
|
|
-static void cap11xx_led_work(struct work_struct *work)
|
|
+static int cap11xx_led_set(struct led_classdev *cdev,
|
|
+ enum led_brightness value)
|
|
{
|
|
- struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
|
|
+ struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
|
|
struct cap11xx_priv *priv = led->priv;
|
|
- int value = led->new_brightness;
|
|
|
|
/*
|
|
- * All LEDs share the same duty cycle as this is a HW limitation.
|
|
- * Brightness levels per LED are either 0 (OFF) and 1 (ON).
|
|
+ * All LEDs share the same duty cycle as this is a HW
|
|
+ * limitation. Brightness levels per LED are either
|
|
+ * 0 (OFF) and 1 (ON).
|
|
*/
|
|
- regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
|
|
- BIT(led->reg), value ? BIT(led->reg) : 0);
|
|
-}
|
|
-
|
|
-static void cap11xx_led_set(struct led_classdev *cdev,
|
|
- enum led_brightness value)
|
|
-{
|
|
- struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
|
|
-
|
|
- if (led->new_brightness == value)
|
|
- return;
|
|
-
|
|
- led->new_brightness = value;
|
|
- schedule_work(&led->work);
|
|
+ return regmap_update_bits(priv->regmap,
|
|
+ CAP11XX_REG_LED_OUTPUT_CONTROL,
|
|
+ BIT(led->reg),
|
|
+ value ? BIT(led->reg) : 0);
|
|
}
|
|
|
|
static int cap11xx_init_leds(struct device *dev,
|
|
@@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev,
|
|
led->cdev.default_trigger =
|
|
of_get_property(child, "linux,default-trigger", NULL);
|
|
led->cdev.flags = 0;
|
|
- led->cdev.brightness_set = cap11xx_led_set;
|
|
+ led->cdev.brightness_set_blocking = cap11xx_led_set;
|
|
led->cdev.max_brightness = 1;
|
|
led->cdev.brightness = LED_OFF;
|
|
|
|
@@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev,
|
|
led->reg = reg;
|
|
led->priv = priv;
|
|
|
|
- INIT_WORK(&led->work, cap11xx_led_work);
|
|
-
|
|
error = devm_led_classdev_register(dev, &led->cdev);
|
|
if (error) {
|
|
of_node_put(child);
|
|
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
|
|
index 403452ef00e6..3d1cb7bf5e35 100644
|
|
--- a/drivers/input/keyboard/matrix_keypad.c
|
|
+++ b/drivers/input/keyboard/matrix_keypad.c
|
|
@@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
|
|
keypad->stopped = true;
|
|
spin_unlock_irq(&keypad->lock);
|
|
|
|
- flush_work(&keypad->work.work);
|
|
+ flush_delayed_work(&keypad->work);
|
|
/*
|
|
* matrix_keypad_scan() will leave IRQs enabled;
|
|
* we should disable them now.
|
|
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
|
|
index babcfb165e4f..3b85631fde91 100644
|
|
--- a/drivers/input/keyboard/st-keyscan.c
|
|
+++ b/drivers/input/keyboard/st-keyscan.c
|
|
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
|
|
|
|
input_dev->id.bustype = BUS_HOST;
|
|
|
|
+ keypad_data->input_dev = input_dev;
|
|
+
|
|
error = keypad_matrix_key_parse_dt(keypad_data);
|
|
if (error)
|
|
return error;
|
|
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
|
|
|
|
input_set_drvdata(input_dev, keypad_data);
|
|
|
|
- keypad_data->input_dev = input_dev;
|
|
-
|
|
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
|
|
if (IS_ERR(keypad_data->base))
|
|
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
|
|
index 55da191ae550..dbb6d9e1b947 100644
|
|
--- a/drivers/input/misc/pwm-vibra.c
|
|
+++ b/drivers/input/misc/pwm-vibra.c
|
|
@@ -34,6 +34,7 @@ struct pwm_vibrator {
|
|
struct work_struct play_work;
|
|
u16 level;
|
|
u32 direction_duty_cycle;
|
|
+ bool vcc_on;
|
|
};
|
|
|
|
static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
|
|
@@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
|
|
struct pwm_state state;
|
|
int err;
|
|
|
|
- err = regulator_enable(vibrator->vcc);
|
|
- if (err) {
|
|
- dev_err(pdev, "failed to enable regulator: %d", err);
|
|
- return err;
|
|
+ if (!vibrator->vcc_on) {
|
|
+ err = regulator_enable(vibrator->vcc);
|
|
+ if (err) {
|
|
+ dev_err(pdev, "failed to enable regulator: %d", err);
|
|
+ return err;
|
|
+ }
|
|
+ vibrator->vcc_on = true;
|
|
}
|
|
|
|
pwm_get_state(vibrator->pwm, &state);
|
|
@@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
|
|
|
|
static void pwm_vibrator_stop(struct pwm_vibrator *vibrator)
|
|
{
|
|
- regulator_disable(vibrator->vcc);
|
|
-
|
|
if (vibrator->pwm_dir)
|
|
pwm_disable(vibrator->pwm_dir);
|
|
pwm_disable(vibrator->pwm);
|
|
+
|
|
+ if (vibrator->vcc_on) {
|
|
+ regulator_disable(vibrator->vcc);
|
|
+ vibrator->vcc_on = false;
|
|
+ }
|
|
}
|
|
|
|
static void pwm_vibrator_play_work(struct work_struct *work)
|
|
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
|
|
index c62cceb97bb1..5e8d8384aa2a 100644
|
|
--- a/drivers/input/serio/ps2-gpio.c
|
|
+++ b/drivers/input/serio/ps2-gpio.c
|
|
@@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio)
|
|
{
|
|
struct ps2_gpio_data *drvdata = serio->port_data;
|
|
|
|
+ flush_delayed_work(&drvdata->tx_work);
|
|
disable_irq(drvdata->irq);
|
|
}
|
|
|
|
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
|
|
index 0e65f609352e..83364fedbf0a 100644
|
|
--- a/drivers/irqchip/irq-brcmstb-l2.c
|
|
+++ b/drivers/irqchip/irq-brcmstb-l2.c
|
|
@@ -129,8 +129,9 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
|
|
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
|
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
|
struct brcmstb_l2_intc_data *b = gc->private;
|
|
+ unsigned long flags;
|
|
|
|
- irq_gc_lock(gc);
|
|
+ irq_gc_lock_irqsave(gc, flags);
|
|
/* Save the current mask */
|
|
b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
|
|
|
|
@@ -139,7 +140,7 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
|
|
irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
|
|
irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
|
|
}
|
|
- irq_gc_unlock(gc);
|
|
+ irq_gc_unlock_irqrestore(gc, flags);
|
|
}
|
|
|
|
static void brcmstb_l2_intc_resume(struct irq_data *d)
|
|
@@ -147,8 +148,9 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
|
|
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
|
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
|
struct brcmstb_l2_intc_data *b = gc->private;
|
|
+ unsigned long flags;
|
|
|
|
- irq_gc_lock(gc);
|
|
+ irq_gc_lock_irqsave(gc, flags);
|
|
if (ct->chip.irq_ack) {
|
|
/* Clear unmasked non-wakeup interrupts */
|
|
irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
|
|
@@ -158,7 +160,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
|
|
/* Restore the saved mask */
|
|
irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
|
|
irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
|
|
- irq_gc_unlock(gc);
|
|
+ irq_gc_unlock_irqrestore(gc, flags);
|
|
}
|
|
|
|
static int __init brcmstb_l2_intc_of_init(struct device_node *np,
|
|
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
|
|
index 15579cba1a88..78970cdf2ef6 100644
|
|
--- a/drivers/irqchip/irq-gic-v3-its.c
|
|
+++ b/drivers/irqchip/irq-gic-v3-its.c
|
|
@@ -1893,6 +1893,8 @@ static int its_alloc_tables(struct its_node *its)
|
|
indirect = its_parse_indirect_baser(its, baser,
|
|
psz, &order,
|
|
its->device_ids);
|
|
+ break;
|
|
+
|
|
case GITS_BASER_TYPE_VCPU:
|
|
indirect = its_parse_indirect_baser(its, baser,
|
|
psz, &order,
|
|
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
|
|
index 8ab077ff58f4..96bcabfebc23 100644
|
|
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
|
|
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
|
|
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan)
|
|
|
|
/* Clear ring flush state */
|
|
timeout = 1000; /* timeout of 1s */
|
|
- writel_relaxed(0x0, ring + RING_CONTROL);
|
|
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
|
|
do {
|
|
- if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
|
|
+ if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
|
|
FLUSH_DONE_MASK))
|
|
break;
|
|
mdelay(1);
|
|
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
|
|
index 22944aa7d8e5..4ca3e3d3f9c7 100644
|
|
--- a/drivers/md/bcache/request.c
|
|
+++ b/drivers/md/bcache/request.c
|
|
@@ -392,10 +392,11 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
|
|
|
|
/*
|
|
* Flag for bypass if the IO is for read-ahead or background,
|
|
- * unless the read-ahead request is for metadata (eg, for gfs2).
|
|
+ * unless the read-ahead request is for metadata
|
|
+ * (eg, for gfs2 or xfs).
|
|
*/
|
|
if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
|
|
- !(bio->bi_opf & REQ_META))
|
|
+ !(bio->bi_opf & (REQ_META|REQ_PRIO)))
|
|
goto skip;
|
|
|
|
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
|
|
@@ -877,7 +878,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
|
|
}
|
|
|
|
if (!(bio->bi_opf & REQ_RAHEAD) &&
|
|
- !(bio->bi_opf & REQ_META) &&
|
|
+ !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
|
|
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
|
|
reada = min_t(sector_t, dc->readahead >> 9,
|
|
get_capacity(bio->bi_disk) - bio_end_sector(bio));
|
|
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
|
|
index d2b9fdbc8994..e75dc33339f6 100644
|
|
--- a/drivers/md/bcache/writeback.h
|
|
+++ b/drivers/md/bcache/writeback.h
|
|
@@ -63,6 +63,9 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
|
|
in_use > CUTOFF_WRITEBACK_SYNC)
|
|
return false;
|
|
|
|
+ if (bio_op(bio) == REQ_OP_DISCARD)
|
|
+ return false;
|
|
+
|
|
if (dc->partial_stripes_expensive &&
|
|
bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
|
|
bio_sectors(bio)))
|
|
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
|
|
index e1fa6baf4e8e..96d5fb3f6199 100644
|
|
--- a/drivers/md/dm-integrity.c
|
|
+++ b/drivers/md/dm-integrity.c
|
|
@@ -1357,8 +1357,8 @@ again:
|
|
checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
|
|
if (unlikely(r)) {
|
|
if (r > 0) {
|
|
- DMERR("Checksum failed at sector 0x%llx",
|
|
- (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
|
|
+ DMERR_LIMIT("Checksum failed at sector 0x%llx",
|
|
+ (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
|
|
r = -EILSEQ;
|
|
atomic64_inc(&ic->number_of_mismatches);
|
|
}
|
|
@@ -1550,8 +1550,8 @@ retry_kmap:
|
|
|
|
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
|
|
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
|
|
- DMERR("Checksum failed when reading from journal, at sector 0x%llx",
|
|
- (unsigned long long)logical_sector);
|
|
+ DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
|
|
+ (unsigned long long)logical_sector);
|
|
}
|
|
}
|
|
#endif
|
|
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
|
|
index 9df1334608b7..25e97de36717 100644
|
|
--- a/drivers/md/raid10.c
|
|
+++ b/drivers/md/raid10.c
|
|
@@ -3959,6 +3959,8 @@ static int raid10_run(struct mddev *mddev)
|
|
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
|
|
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
|
|
"reshape");
|
|
+ if (!mddev->sync_thread)
|
|
+ goto out_free_conf;
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
|
|
index 45a3551d3afd..ae38895c44b2 100644
|
|
--- a/drivers/md/raid5.c
|
|
+++ b/drivers/md/raid5.c
|
|
@@ -7390,6 +7390,8 @@ static int raid5_run(struct mddev *mddev)
|
|
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
|
|
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
|
|
"reshape");
|
|
+ if (!mddev->sync_thread)
|
|
+ goto abort;
|
|
}
|
|
|
|
/* Ok, everything is just fine now */
|
|
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
|
|
index 886a2d8d5c6c..9d4a81bb0e59 100644
|
|
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
|
|
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
|
|
@@ -145,7 +145,6 @@ static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
|
|
return;
|
|
|
|
check_once = true;
|
|
- WARN_ON(1);
|
|
|
|
pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
|
|
if (vb->vb2_queue->allow_zero_bytesused)
|
|
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
|
|
index 10d584ce538d..9ee1c1360ab8 100644
|
|
--- a/drivers/media/dvb-frontends/lgdt330x.c
|
|
+++ b/drivers/media/dvb-frontends/lgdt330x.c
|
|
@@ -783,7 +783,7 @@ static int lgdt3303_read_status(struct dvb_frontend *fe,
|
|
|
|
if ((buf[0] & 0x02) == 0x00)
|
|
*status |= FE_HAS_SYNC;
|
|
- if ((buf[0] & 0xfd) == 0x01)
|
|
+ if ((buf[0] & 0x01) == 0x01)
|
|
*status |= FE_HAS_VITERBI | FE_HAS_LOCK;
|
|
break;
|
|
default:
|
|
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
|
|
index 8e7a2a59cd32..d5c0ffc55d46 100644
|
|
--- a/drivers/media/i2c/ov5640.c
|
|
+++ b/drivers/media/i2c/ov5640.c
|
|
@@ -1759,7 +1759,7 @@ static void ov5640_reset(struct ov5640_dev *sensor)
|
|
usleep_range(1000, 2000);
|
|
|
|
gpiod_set_value_cansleep(sensor->reset_gpio, 0);
|
|
- usleep_range(5000, 10000);
|
|
+ usleep_range(20000, 25000);
|
|
}
|
|
|
|
static int ov5640_set_power_on(struct ov5640_dev *sensor)
|
|
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
|
|
index 4b2e3de7856e..c4fc8e7d365a 100644
|
|
--- a/drivers/media/platform/vimc/Makefile
|
|
+++ b/drivers/media/platform/vimc/Makefile
|
|
@@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o
|
|
vimc_debayer-objs := vimc-debayer.o
|
|
vimc_scaler-objs := vimc-scaler.o
|
|
vimc_sensor-objs := vimc-sensor.o
|
|
+vimc_streamer-objs := vimc-streamer.o
|
|
|
|
obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc-debayer.o \
|
|
- vimc_scaler.o vimc_sensor.o
|
|
+ vimc_scaler.o vimc_sensor.o vimc_streamer.o
|
|
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
|
|
index ec68feaac378..65d657daf66f 100644
|
|
--- a/drivers/media/platform/vimc/vimc-capture.c
|
|
+++ b/drivers/media/platform/vimc/vimc-capture.c
|
|
@@ -24,6 +24,7 @@
|
|
#include <media/videobuf2-vmalloc.h>
|
|
|
|
#include "vimc-common.h"
|
|
+#include "vimc-streamer.h"
|
|
|
|
#define VIMC_CAP_DRV_NAME "vimc-capture"
|
|
|
|
@@ -44,7 +45,7 @@ struct vimc_cap_device {
|
|
spinlock_t qlock;
|
|
struct mutex lock;
|
|
u32 sequence;
|
|
- struct media_pipeline pipe;
|
|
+ struct vimc_stream stream;
|
|
};
|
|
|
|
static const struct v4l2_pix_format fmt_default = {
|
|
@@ -248,14 +249,13 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
|
|
vcap->sequence = 0;
|
|
|
|
/* Start the media pipeline */
|
|
- ret = media_pipeline_start(entity, &vcap->pipe);
|
|
+ ret = media_pipeline_start(entity, &vcap->stream.pipe);
|
|
if (ret) {
|
|
vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
|
|
return ret;
|
|
}
|
|
|
|
- /* Enable streaming from the pipe */
|
|
- ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1);
|
|
+ ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
|
|
if (ret) {
|
|
media_pipeline_stop(entity);
|
|
vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
|
|
@@ -273,8 +273,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq)
|
|
{
|
|
struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
|
|
|
|
- /* Disable streaming from the pipe */
|
|
- vimc_pipeline_s_stream(&vcap->vdev.entity, 0);
|
|
+ vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
|
|
|
|
/* Stop the media pipeline */
|
|
media_pipeline_stop(&vcap->vdev.entity);
|
|
@@ -355,8 +354,8 @@ static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
|
|
kfree(vcap);
|
|
}
|
|
|
|
-static void vimc_cap_process_frame(struct vimc_ent_device *ved,
|
|
- struct media_pad *sink, const void *frame)
|
|
+static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
|
|
+ const void *frame)
|
|
{
|
|
struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
|
|
ved);
|
|
@@ -370,7 +369,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
|
|
typeof(*vimc_buf), list);
|
|
if (!vimc_buf) {
|
|
spin_unlock(&vcap->qlock);
|
|
- return;
|
|
+ return ERR_PTR(-EAGAIN);
|
|
}
|
|
|
|
/* Remove this entry from the list */
|
|
@@ -391,6 +390,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
|
|
vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
|
|
vcap->format.sizeimage);
|
|
vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
|
|
+ return NULL;
|
|
}
|
|
|
|
static int vimc_cap_comp_bind(struct device *comp, struct device *master,
|
|
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
|
|
index 617415c224fe..204aa6f554e4 100644
|
|
--- a/drivers/media/platform/vimc/vimc-common.c
|
|
+++ b/drivers/media/platform/vimc/vimc-common.c
|
|
@@ -207,41 +207,6 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
|
|
}
|
|
EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
|
|
|
|
-int vimc_propagate_frame(struct media_pad *src, const void *frame)
|
|
-{
|
|
- struct media_link *link;
|
|
-
|
|
- if (!(src->flags & MEDIA_PAD_FL_SOURCE))
|
|
- return -EINVAL;
|
|
-
|
|
- /* Send this frame to all sink pads that are direct linked */
|
|
- list_for_each_entry(link, &src->entity->links, list) {
|
|
- if (link->source == src &&
|
|
- (link->flags & MEDIA_LNK_FL_ENABLED)) {
|
|
- struct vimc_ent_device *ved = NULL;
|
|
- struct media_entity *entity = link->sink->entity;
|
|
-
|
|
- if (is_media_entity_v4l2_subdev(entity)) {
|
|
- struct v4l2_subdev *sd =
|
|
- container_of(entity, struct v4l2_subdev,
|
|
- entity);
|
|
- ved = v4l2_get_subdevdata(sd);
|
|
- } else if (is_media_entity_v4l2_video_device(entity)) {
|
|
- struct video_device *vdev =
|
|
- container_of(entity,
|
|
- struct video_device,
|
|
- entity);
|
|
- ved = video_get_drvdata(vdev);
|
|
- }
|
|
- if (ved && ved->process_frame)
|
|
- ved->process_frame(ved, link->sink, frame);
|
|
- }
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-EXPORT_SYMBOL_GPL(vimc_propagate_frame);
|
|
-
|
|
/* Helper function to allocate and initialize pads */
|
|
struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
|
|
{
|
|
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
|
|
index 2e9981b18166..6ed969d9efbb 100644
|
|
--- a/drivers/media/platform/vimc/vimc-common.h
|
|
+++ b/drivers/media/platform/vimc/vimc-common.h
|
|
@@ -113,23 +113,12 @@ struct vimc_pix_map {
|
|
struct vimc_ent_device {
|
|
struct media_entity *ent;
|
|
struct media_pad *pads;
|
|
- void (*process_frame)(struct vimc_ent_device *ved,
|
|
- struct media_pad *sink, const void *frame);
|
|
+ void * (*process_frame)(struct vimc_ent_device *ved,
|
|
+ const void *frame);
|
|
void (*vdev_get_format)(struct vimc_ent_device *ved,
|
|
struct v4l2_pix_format *fmt);
|
|
};
|
|
|
|
-/**
|
|
- * vimc_propagate_frame - propagate a frame through the topology
|
|
- *
|
|
- * @src: the source pad where the frame is being originated
|
|
- * @frame: the frame to be propagated
|
|
- *
|
|
- * This function will call the process_frame callback from the vimc_ent_device
|
|
- * struct of the nodes directly connected to the @src pad
|
|
- */
|
|
-int vimc_propagate_frame(struct media_pad *src, const void *frame);
|
|
-
|
|
/**
|
|
* vimc_pads_init - initialize pads
|
|
*
|
|
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
|
|
index 77887f66f323..7d77c63b99d2 100644
|
|
--- a/drivers/media/platform/vimc/vimc-debayer.c
|
|
+++ b/drivers/media/platform/vimc/vimc-debayer.c
|
|
@@ -321,7 +321,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
|
|
static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
|
|
{
|
|
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
|
|
- int ret;
|
|
|
|
if (enable) {
|
|
const struct vimc_pix_map *vpix;
|
|
@@ -351,22 +350,10 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
|
|
if (!vdeb->src_frame)
|
|
return -ENOMEM;
|
|
|
|
- /* Turn the stream on in the subdevices directly connected */
|
|
- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1);
|
|
- if (ret) {
|
|
- vfree(vdeb->src_frame);
|
|
- vdeb->src_frame = NULL;
|
|
- return ret;
|
|
- }
|
|
} else {
|
|
if (!vdeb->src_frame)
|
|
return 0;
|
|
|
|
- /* Disable streaming from the pipe */
|
|
- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
vfree(vdeb->src_frame);
|
|
vdeb->src_frame = NULL;
|
|
}
|
|
@@ -480,9 +467,8 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
|
|
}
|
|
}
|
|
|
|
-static void vimc_deb_process_frame(struct vimc_ent_device *ved,
|
|
- struct media_pad *sink,
|
|
- const void *sink_frame)
|
|
+static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
|
|
+ const void *sink_frame)
|
|
{
|
|
struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
|
|
ved);
|
|
@@ -491,7 +477,7 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
|
|
|
|
/* If the stream in this node is not active, just return */
|
|
if (!vdeb->src_frame)
|
|
- return;
|
|
+ return ERR_PTR(-EINVAL);
|
|
|
|
for (i = 0; i < vdeb->sink_fmt.height; i++)
|
|
for (j = 0; j < vdeb->sink_fmt.width; j++) {
|
|
@@ -499,12 +485,8 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
|
|
vdeb->set_rgb_src(vdeb, i, j, rgb);
|
|
}
|
|
|
|
- /* Propagate the frame through all source pads */
|
|
- for (i = 1; i < vdeb->sd.entity.num_pads; i++) {
|
|
- struct media_pad *pad = &vdeb->sd.entity.pads[i];
|
|
+ return vdeb->src_frame;
|
|
|
|
- vimc_propagate_frame(pad, vdeb->src_frame);
|
|
- }
|
|
}
|
|
|
|
static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
|
|
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
|
|
index b0952ee86296..39b2a73dfcc1 100644
|
|
--- a/drivers/media/platform/vimc/vimc-scaler.c
|
|
+++ b/drivers/media/platform/vimc/vimc-scaler.c
|
|
@@ -217,7 +217,6 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
|
|
static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
|
|
{
|
|
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
|
|
- int ret;
|
|
|
|
if (enable) {
|
|
const struct vimc_pix_map *vpix;
|
|
@@ -245,22 +244,10 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
|
|
if (!vsca->src_frame)
|
|
return -ENOMEM;
|
|
|
|
- /* Turn the stream on in the subdevices directly connected */
|
|
- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1);
|
|
- if (ret) {
|
|
- vfree(vsca->src_frame);
|
|
- vsca->src_frame = NULL;
|
|
- return ret;
|
|
- }
|
|
} else {
|
|
if (!vsca->src_frame)
|
|
return 0;
|
|
|
|
- /* Disable streaming from the pipe */
|
|
- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
vfree(vsca->src_frame);
|
|
vsca->src_frame = NULL;
|
|
}
|
|
@@ -346,26 +333,19 @@ static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
|
|
vimc_sca_scale_pix(vsca, i, j, sink_frame);
|
|
}
|
|
|
|
-static void vimc_sca_process_frame(struct vimc_ent_device *ved,
|
|
- struct media_pad *sink,
|
|
- const void *sink_frame)
|
|
+static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
|
|
+ const void *sink_frame)
|
|
{
|
|
struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
|
|
ved);
|
|
- unsigned int i;
|
|
|
|
/* If the stream in this node is not active, just return */
|
|
if (!vsca->src_frame)
|
|
- return;
|
|
+ return ERR_PTR(-EINVAL);
|
|
|
|
vimc_sca_fill_src_frame(vsca, sink_frame);
|
|
|
|
- /* Propagate the frame through all source pads */
|
|
- for (i = 1; i < vsca->sd.entity.num_pads; i++) {
|
|
- struct media_pad *pad = &vsca->sd.entity.pads[i];
|
|
-
|
|
- vimc_propagate_frame(pad, vsca->src_frame);
|
|
- }
|
|
+ return vsca->src_frame;
|
|
};
|
|
|
|
static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
|
|
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
|
|
index b2b89315e7ba..9e0d70e9f119 100644
|
|
--- a/drivers/media/platform/vimc/vimc-sensor.c
|
|
+++ b/drivers/media/platform/vimc/vimc-sensor.c
|
|
@@ -16,8 +16,6 @@
|
|
*/
|
|
|
|
#include <linux/component.h>
|
|
-#include <linux/freezer.h>
|
|
-#include <linux/kthread.h>
|
|
#include <linux/module.h>
|
|
#include <linux/mod_devicetable.h>
|
|
#include <linux/platform_device.h>
|
|
@@ -201,38 +199,27 @@ static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
|
|
.set_fmt = vimc_sen_set_fmt,
|
|
};
|
|
|
|
-static int vimc_sen_tpg_thread(void *data)
|
|
+static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
|
|
+ const void *sink_frame)
|
|
{
|
|
- struct vimc_sen_device *vsen = data;
|
|
- unsigned int i;
|
|
-
|
|
- set_freezable();
|
|
- set_current_state(TASK_UNINTERRUPTIBLE);
|
|
-
|
|
- for (;;) {
|
|
- try_to_freeze();
|
|
- if (kthread_should_stop())
|
|
- break;
|
|
-
|
|
- tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
|
|
+ struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
|
|
+ ved);
|
|
+ const struct vimc_pix_map *vpix;
|
|
+ unsigned int frame_size;
|
|
|
|
- /* Send the frame to all source pads */
|
|
- for (i = 0; i < vsen->sd.entity.num_pads; i++)
|
|
- vimc_propagate_frame(&vsen->sd.entity.pads[i],
|
|
- vsen->frame);
|
|
+ /* Calculate the frame size */
|
|
+ vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
|
|
+ frame_size = vsen->mbus_format.width * vpix->bpp *
|
|
+ vsen->mbus_format.height;
|
|
|
|
- /* 60 frames per second */
|
|
- schedule_timeout(HZ/60);
|
|
- }
|
|
-
|
|
- return 0;
|
|
+ tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
|
|
+ return vsen->frame;
|
|
}
|
|
|
|
static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
|
|
{
|
|
struct vimc_sen_device *vsen =
|
|
container_of(sd, struct vimc_sen_device, sd);
|
|
- int ret;
|
|
|
|
if (enable) {
|
|
const struct vimc_pix_map *vpix;
|
|
@@ -258,26 +245,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
|
|
/* configure the test pattern generator */
|
|
vimc_sen_tpg_s_format(vsen);
|
|
|
|
- /* Initialize the image generator thread */
|
|
- vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen,
|
|
- "%s-sen", vsen->sd.v4l2_dev->name);
|
|
- if (IS_ERR(vsen->kthread_sen)) {
|
|
- dev_err(vsen->dev, "%s: kernel_thread() failed\n",
|
|
- vsen->sd.name);
|
|
- vfree(vsen->frame);
|
|
- vsen->frame = NULL;
|
|
- return PTR_ERR(vsen->kthread_sen);
|
|
- }
|
|
} else {
|
|
- if (!vsen->kthread_sen)
|
|
- return 0;
|
|
-
|
|
- /* Stop image generator */
|
|
- ret = kthread_stop(vsen->kthread_sen);
|
|
- if (ret)
|
|
- return ret;
|
|
|
|
- vsen->kthread_sen = NULL;
|
|
vfree(vsen->frame);
|
|
vsen->frame = NULL;
|
|
return 0;
|
|
@@ -393,6 +362,7 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
|
|
if (ret)
|
|
goto err_free_hdl;
|
|
|
|
+ vsen->ved.process_frame = vimc_sen_process_frame;
|
|
dev_set_drvdata(comp, &vsen->ved);
|
|
vsen->dev = comp;
|
|
|
|
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
|
|
new file mode 100644
|
|
index 000000000000..fcc897fb247b
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/vimc/vimc-streamer.c
|
|
@@ -0,0 +1,188 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * vimc-streamer.c Virtual Media Controller Driver
|
|
+ *
|
|
+ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/freezer.h>
|
|
+#include <linux/kthread.h>
|
|
+
|
|
+#include "vimc-streamer.h"
|
|
+
|
|
+/**
|
|
+ * vimc_get_source_entity - get the entity connected with the first sink pad
|
|
+ *
|
|
+ * @ent: reference media_entity
|
|
+ *
|
|
+ * Helper function that returns the media entity containing the source pad
|
|
+ * linked with the first sink pad from the given media entity pad list.
|
|
+ */
|
|
+static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
|
|
+{
|
|
+ struct media_pad *pad;
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < ent->num_pads; i++) {
|
|
+ if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
|
|
+ continue;
|
|
+ pad = media_entity_remote_pad(&ent->pads[i]);
|
|
+ return pad ? pad->entity : NULL;
|
|
+ }
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * vimc_streamer_pipeline_terminate - Disable stream in all ved in stream
|
|
+ *
|
|
+ * @stream: the pointer to the stream structure with the pipeline to be
|
|
+ * disabled.
|
|
+ *
|
|
+ * Calls s_stream to disable the stream in each entity of the pipeline
|
|
+ *
|
|
+ */
|
|
+static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
|
|
+{
|
|
+ struct media_entity *entity;
|
|
+ struct v4l2_subdev *sd;
|
|
+
|
|
+ while (stream->pipe_size) {
|
|
+ stream->pipe_size--;
|
|
+ entity = stream->ved_pipeline[stream->pipe_size]->ent;
|
|
+ entity = vimc_get_source_entity(entity);
|
|
+ stream->ved_pipeline[stream->pipe_size] = NULL;
|
|
+
|
|
+ if (!is_media_entity_v4l2_subdev(entity))
|
|
+ continue;
|
|
+
|
|
+ sd = media_entity_to_v4l2_subdev(entity);
|
|
+ v4l2_subdev_call(sd, video, s_stream, 0);
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * vimc_streamer_pipeline_init - initializes the stream structure
|
|
+ *
|
|
+ * @stream: the pointer to the stream structure to be initialized
|
|
+ * @ved: the pointer to the vimc entity initializing the stream
|
|
+ *
|
|
+ * Initializes the stream structure. Walks through the entity graph to
|
|
+ * construct the pipeline used later on the streamer thread.
|
|
+ * Calls s_stream to enable stream in all entities of the pipeline.
|
|
+ */
|
|
+static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
|
|
+ struct vimc_ent_device *ved)
|
|
+{
|
|
+ struct media_entity *entity;
|
|
+ struct video_device *vdev;
|
|
+ struct v4l2_subdev *sd;
|
|
+ int ret = 0;
|
|
+
|
|
+ stream->pipe_size = 0;
|
|
+ while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) {
|
|
+ if (!ved) {
|
|
+ vimc_streamer_pipeline_terminate(stream);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ stream->ved_pipeline[stream->pipe_size++] = ved;
|
|
+
|
|
+ entity = vimc_get_source_entity(ved->ent);
|
|
+ /* Check if the end of the pipeline was reached*/
|
|
+ if (!entity)
|
|
+ return 0;
|
|
+
|
|
+ if (is_media_entity_v4l2_subdev(entity)) {
|
|
+ sd = media_entity_to_v4l2_subdev(entity);
|
|
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
|
|
+ if (ret && ret != -ENOIOCTLCMD) {
|
|
+ vimc_streamer_pipeline_terminate(stream);
|
|
+ return ret;
|
|
+ }
|
|
+ ved = v4l2_get_subdevdata(sd);
|
|
+ } else {
|
|
+ vdev = container_of(entity,
|
|
+ struct video_device,
|
|
+ entity);
|
|
+ ved = video_get_drvdata(vdev);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ vimc_streamer_pipeline_terminate(stream);
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+static int vimc_streamer_thread(void *data)
|
|
+{
|
|
+ struct vimc_stream *stream = data;
|
|
+ int i;
|
|
+
|
|
+ set_freezable();
|
|
+ set_current_state(TASK_UNINTERRUPTIBLE);
|
|
+
|
|
+ for (;;) {
|
|
+ try_to_freeze();
|
|
+ if (kthread_should_stop())
|
|
+ break;
|
|
+
|
|
+ for (i = stream->pipe_size - 1; i >= 0; i--) {
|
|
+ stream->frame = stream->ved_pipeline[i]->process_frame(
|
|
+ stream->ved_pipeline[i],
|
|
+ stream->frame);
|
|
+ if (!stream->frame)
|
|
+ break;
|
|
+ if (IS_ERR(stream->frame))
|
|
+ break;
|
|
+ }
|
|
+ //wait for 60hz
|
|
+ schedule_timeout(HZ / 60);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int vimc_streamer_s_stream(struct vimc_stream *stream,
|
|
+ struct vimc_ent_device *ved,
|
|
+ int enable)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (!stream || !ved)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (enable) {
|
|
+ if (stream->kthread)
|
|
+ return 0;
|
|
+
|
|
+ ret = vimc_streamer_pipeline_init(stream, ved);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ stream->kthread = kthread_run(vimc_streamer_thread, stream,
|
|
+ "vimc-streamer thread");
|
|
+
|
|
+ if (IS_ERR(stream->kthread))
|
|
+ return PTR_ERR(stream->kthread);
|
|
+
|
|
+ } else {
|
|
+ if (!stream->kthread)
|
|
+ return 0;
|
|
+
|
|
+ ret = kthread_stop(stream->kthread);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ stream->kthread = NULL;
|
|
+
|
|
+ vimc_streamer_pipeline_terminate(stream);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
|
|
+
|
|
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer");
|
|
+MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@gmail.com>");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h
|
|
new file mode 100644
|
|
index 000000000000..752af2e2d5a2
|
|
--- /dev/null
|
|
+++ b/drivers/media/platform/vimc/vimc-streamer.h
|
|
@@ -0,0 +1,38 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * vimc-streamer.h Virtual Media Controller Driver
|
|
+ *
|
|
+ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _VIMC_STREAMER_H_
|
|
+#define _VIMC_STREAMER_H_
|
|
+
|
|
+#include <media/media-device.h>
|
|
+
|
|
+#include "vimc-common.h"
|
|
+
|
|
+#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16
|
|
+
|
|
+struct vimc_stream {
|
|
+ struct media_pipeline pipe;
|
|
+ struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
|
|
+ unsigned int pipe_size;
|
|
+ u8 *frame;
|
|
+ struct task_struct *kthread;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * vimc_streamer_s_streamer - start/stop the stream
|
|
+ *
|
|
+ * @stream: the pointer to the stream to start or stop
|
|
+ * @ved: The last entity of the streamer pipeline
|
|
+ * @enable: any non-zero number start the stream, zero stop
|
|
+ *
|
|
+ */
|
|
+int vimc_streamer_s_stream(struct vimc_stream *stream,
|
|
+ struct vimc_ent_device *ved,
|
|
+ int enable);
|
|
+
|
|
+#endif //_VIMC_STREAMER_H_
|
|
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
|
|
index 86a99f461fd8..ffffb66d51a0 100644
|
|
--- a/drivers/media/usb/uvc/uvc_video.c
|
|
+++ b/drivers/media/usb/uvc/uvc_video.c
|
|
@@ -676,6 +676,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
|
|
if (!uvc_hw_timestamps_param)
|
|
return;
|
|
|
|
+ /*
|
|
+ * We will get called from __vb2_queue_cancel() if there are buffers
|
|
+ * done but not dequeued by the user, but the sample array has already
|
|
+ * been released at that time. Just bail out in that case.
|
|
+ */
|
|
+ if (!clock->samples)
|
|
+ return;
|
|
+
|
|
spin_lock_irqsave(&clock->lock, flags);
|
|
|
|
if (clock->count < clock->size)
|
|
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
|
|
index a530972c5a7e..e0173bf4b0dc 100644
|
|
--- a/drivers/mfd/sm501.c
|
|
+++ b/drivers/mfd/sm501.c
|
|
@@ -1145,6 +1145,9 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
|
|
lookup = devm_kzalloc(&pdev->dev,
|
|
sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
|
|
GFP_KERNEL);
|
|
+ if (!lookup)
|
|
+ return -ENOMEM;
|
|
+
|
|
lookup->dev_id = "i2c-gpio";
|
|
if (iic->pin_sda < 32)
|
|
lookup->table[0].chip_label = "SM501-LOW";
|
|
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
|
|
index 3bc0c15d4d85..b83a373e3a8d 100644
|
|
--- a/drivers/misc/cxl/guest.c
|
|
+++ b/drivers/misc/cxl/guest.c
|
|
@@ -267,6 +267,7 @@ static int guest_reset(struct cxl *adapter)
|
|
int i, rc;
|
|
|
|
pr_devel("Adapter reset request\n");
|
|
+ spin_lock(&adapter->afu_list_lock);
|
|
for (i = 0; i < adapter->slices; i++) {
|
|
if ((afu = adapter->afu[i])) {
|
|
pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
|
|
@@ -283,6 +284,7 @@ static int guest_reset(struct cxl *adapter)
|
|
pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
|
|
}
|
|
}
|
|
+ spin_unlock(&adapter->afu_list_lock);
|
|
return rc;
|
|
}
|
|
|
|
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
|
|
index b66d832d3233..787a69a2a726 100644
|
|
--- a/drivers/misc/cxl/pci.c
|
|
+++ b/drivers/misc/cxl/pci.c
|
|
@@ -1807,7 +1807,7 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
|
|
/* There should only be one entry, but go through the list
|
|
* anyway
|
|
*/
|
|
- if (afu->phb == NULL)
|
|
+ if (afu == NULL || afu->phb == NULL)
|
|
return result;
|
|
|
|
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
|
|
@@ -1834,7 +1834,8 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
|
|
{
|
|
struct cxl *adapter = pci_get_drvdata(pdev);
|
|
struct cxl_afu *afu;
|
|
- pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
|
|
+ pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
|
|
+ pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
|
|
int i;
|
|
|
|
/* At this point, we could still have an interrupt pending.
|
|
@@ -1845,6 +1846,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
|
|
|
|
/* If we're permanently dead, give up. */
|
|
if (state == pci_channel_io_perm_failure) {
|
|
+ spin_lock(&adapter->afu_list_lock);
|
|
for (i = 0; i < adapter->slices; i++) {
|
|
afu = adapter->afu[i];
|
|
/*
|
|
@@ -1853,6 +1855,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
|
|
*/
|
|
cxl_vphb_error_detected(afu, state);
|
|
}
|
|
+ spin_unlock(&adapter->afu_list_lock);
|
|
return PCI_ERS_RESULT_DISCONNECT;
|
|
}
|
|
|
|
@@ -1934,11 +1937,17 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
|
|
* * In slot_reset, free the old resources and allocate new ones.
|
|
* * In resume, clear the flag to allow things to start.
|
|
*/
|
|
+
|
|
+ /* Make sure no one else changes the afu list */
|
|
+ spin_lock(&adapter->afu_list_lock);
|
|
+
|
|
for (i = 0; i < adapter->slices; i++) {
|
|
afu = adapter->afu[i];
|
|
|
|
- afu_result = cxl_vphb_error_detected(afu, state);
|
|
+ if (afu == NULL)
|
|
+ continue;
|
|
|
|
+ afu_result = cxl_vphb_error_detected(afu, state);
|
|
cxl_context_detach_all(afu);
|
|
cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
|
|
pci_deconfigure_afu(afu);
|
|
@@ -1950,6 +1959,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
|
|
(result == PCI_ERS_RESULT_NEED_RESET))
|
|
result = PCI_ERS_RESULT_NONE;
|
|
}
|
|
+ spin_unlock(&adapter->afu_list_lock);
|
|
|
|
/* should take the context lock here */
|
|
if (cxl_adapter_context_lock(adapter) != 0)
|
|
@@ -1982,14 +1992,18 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
|
|
*/
|
|
cxl_adapter_context_unlock(adapter);
|
|
|
|
+ spin_lock(&adapter->afu_list_lock);
|
|
for (i = 0; i < adapter->slices; i++) {
|
|
afu = adapter->afu[i];
|
|
|
|
+ if (afu == NULL)
|
|
+ continue;
|
|
+
|
|
if (pci_configure_afu(afu, adapter, pdev))
|
|
- goto err;
|
|
+ goto err_unlock;
|
|
|
|
if (cxl_afu_select_best_mode(afu))
|
|
- goto err;
|
|
+ goto err_unlock;
|
|
|
|
if (afu->phb == NULL)
|
|
continue;
|
|
@@ -2001,16 +2015,16 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
|
|
ctx = cxl_get_context(afu_dev);
|
|
|
|
if (ctx && cxl_release_context(ctx))
|
|
- goto err;
|
|
+ goto err_unlock;
|
|
|
|
ctx = cxl_dev_context_init(afu_dev);
|
|
if (IS_ERR(ctx))
|
|
- goto err;
|
|
+ goto err_unlock;
|
|
|
|
afu_dev->dev.archdata.cxl_ctx = ctx;
|
|
|
|
if (cxl_ops->afu_check_and_enable(afu))
|
|
- goto err;
|
|
+ goto err_unlock;
|
|
|
|
afu_dev->error_state = pci_channel_io_normal;
|
|
|
|
@@ -2031,8 +2045,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
|
|
result = PCI_ERS_RESULT_DISCONNECT;
|
|
}
|
|
}
|
|
+
|
|
+ spin_unlock(&adapter->afu_list_lock);
|
|
return result;
|
|
|
|
+err_unlock:
|
|
+ spin_unlock(&adapter->afu_list_lock);
|
|
+
|
|
err:
|
|
/* All the bits that happen in both error_detected and cxl_remove
|
|
* should be idempotent, so we don't need to worry about leaving a mix
|
|
@@ -2053,10 +2072,11 @@ static void cxl_pci_resume(struct pci_dev *pdev)
|
|
* This is not the place to be checking if everything came back up
|
|
* properly, because there's no return value: do that in slot_reset.
|
|
*/
|
|
+ spin_lock(&adapter->afu_list_lock);
|
|
for (i = 0; i < adapter->slices; i++) {
|
|
afu = adapter->afu[i];
|
|
|
|
- if (afu->phb == NULL)
|
|
+ if (afu == NULL || afu->phb == NULL)
|
|
continue;
|
|
|
|
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
|
|
@@ -2065,6 +2085,7 @@ static void cxl_pci_resume(struct pci_dev *pdev)
|
|
afu_dev->driver->err_handler->resume(afu_dev);
|
|
}
|
|
}
|
|
+ spin_unlock(&adapter->afu_list_lock);
|
|
}
|
|
|
|
static const struct pci_error_handlers cxl_err_handler = {
|
|
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
|
|
index fc3872fe7b25..c383322ec2ba 100644
|
|
--- a/drivers/misc/mei/bus.c
|
|
+++ b/drivers/misc/mei/bus.c
|
|
@@ -541,17 +541,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
|
|
goto out;
|
|
}
|
|
|
|
- if (!mei_cl_bus_module_get(cldev)) {
|
|
- dev_err(&cldev->dev, "get hw module failed");
|
|
- ret = -ENODEV;
|
|
- goto out;
|
|
- }
|
|
-
|
|
ret = mei_cl_connect(cl, cldev->me_cl, NULL);
|
|
- if (ret < 0) {
|
|
+ if (ret < 0)
|
|
dev_err(&cldev->dev, "cannot connect\n");
|
|
- mei_cl_bus_module_put(cldev);
|
|
- }
|
|
|
|
out:
|
|
mutex_unlock(&bus->device_lock);
|
|
@@ -614,7 +606,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
|
|
if (err < 0)
|
|
dev_err(bus->dev, "Could not disconnect from the ME client\n");
|
|
|
|
- mei_cl_bus_module_put(cldev);
|
|
out:
|
|
/* Flush queues and remove any pending read */
|
|
mei_cl_flush_queues(cl, NULL);
|
|
@@ -725,9 +716,16 @@ static int mei_cl_device_probe(struct device *dev)
|
|
if (!id)
|
|
return -ENODEV;
|
|
|
|
+ if (!mei_cl_bus_module_get(cldev)) {
|
|
+ dev_err(&cldev->dev, "get hw module failed");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
ret = cldrv->probe(cldev, id);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ mei_cl_bus_module_put(cldev);
|
|
return ret;
|
|
+ }
|
|
|
|
__module_get(THIS_MODULE);
|
|
return 0;
|
|
@@ -755,6 +753,7 @@ static int mei_cl_device_remove(struct device *dev)
|
|
|
|
mei_cldev_unregister_callbacks(cldev);
|
|
|
|
+ mei_cl_bus_module_put(cldev);
|
|
module_put(THIS_MODULE);
|
|
dev->driver = NULL;
|
|
return ret;
|
|
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
|
|
index e56f3e72d57a..d39cc2909474 100644
|
|
--- a/drivers/misc/mei/hbm.c
|
|
+++ b/drivers/misc/mei/hbm.c
|
|
@@ -986,29 +986,36 @@ static void mei_hbm_config_features(struct mei_device *dev)
|
|
dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
|
|
dev->hbm_f_pg_supported = 1;
|
|
|
|
+ dev->hbm_f_dc_supported = 0;
|
|
if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
|
|
dev->hbm_f_dc_supported = 1;
|
|
|
|
+ dev->hbm_f_ie_supported = 0;
|
|
if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
|
|
dev->hbm_f_ie_supported = 1;
|
|
|
|
/* disconnect on connect timeout instead of link reset */
|
|
+ dev->hbm_f_dot_supported = 0;
|
|
if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
|
|
dev->hbm_f_dot_supported = 1;
|
|
|
|
/* Notification Event Support */
|
|
+ dev->hbm_f_ev_supported = 0;
|
|
if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
|
|
dev->hbm_f_ev_supported = 1;
|
|
|
|
/* Fixed Address Client Support */
|
|
+ dev->hbm_f_fa_supported = 0;
|
|
if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
|
|
dev->hbm_f_fa_supported = 1;
|
|
|
|
/* OS ver message Support */
|
|
+ dev->hbm_f_os_supported = 0;
|
|
if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
|
|
dev->hbm_f_os_supported = 1;
|
|
|
|
/* DMA Ring Support */
|
|
+ dev->hbm_f_dr_supported = 0;
|
|
if (dev->version.major_version > HBM_MAJOR_VERSION_DR ||
|
|
(dev->version.major_version == HBM_MAJOR_VERSION_DR &&
|
|
dev->version.minor_version >= HBM_MINOR_VERSION_DR))
|
|
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
|
|
index d4f9bfbaf023..6600b3466dfb 100644
|
|
--- a/drivers/mmc/core/core.c
|
|
+++ b/drivers/mmc/core/core.c
|
|
@@ -2378,9 +2378,9 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
|
|
return card->pref_erase;
|
|
|
|
max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
|
|
- if (max_discard && mmc_can_trim(card)) {
|
|
+ if (mmc_can_trim(card)) {
|
|
max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
|
|
- if (max_trim < max_discard)
|
|
+ if (max_trim < max_discard || max_discard == 0)
|
|
max_discard = max_trim;
|
|
} else if (max_discard < card->erase_size) {
|
|
max_discard = 0;
|
|
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
|
|
index 753973dc1655..8dae12b841b3 100644
|
|
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
|
|
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
|
|
@@ -981,6 +981,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
|
|
case MMC_TIMING_UHS_SDR25:
|
|
case MMC_TIMING_UHS_SDR50:
|
|
case MMC_TIMING_UHS_SDR104:
|
|
+ case MMC_TIMING_MMC_HS:
|
|
case MMC_TIMING_MMC_HS200:
|
|
writel(m, host->ioaddr + ESDHC_MIX_CTRL);
|
|
break;
|
|
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
|
|
index ae219b8a7754..2646faffd36e 100644
|
|
--- a/drivers/net/can/flexcan.c
|
|
+++ b/drivers/net/can/flexcan.c
|
|
@@ -140,7 +140,7 @@
|
|
#define FLEXCAN_TX_MB 63
|
|
#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
|
|
#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST (FLEXCAN_TX_MB - 1)
|
|
-#define FLEXCAN_IFLAG_MB(x) BIT(x & 0x1f)
|
|
+#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f)
|
|
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
|
|
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
|
|
#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
|
|
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
|
|
index fc8b48adf38b..2fa2caf7a746 100644
|
|
--- a/drivers/net/dsa/bcm_sf2.c
|
|
+++ b/drivers/net/dsa/bcm_sf2.c
|
|
@@ -692,7 +692,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
|
|
* port, the other ones have already been disabled during
|
|
* bcm_sf2_sw_setup
|
|
*/
|
|
- for (port = 0; port < DSA_MAX_PORTS; port++) {
|
|
+ for (port = 0; port < ds->num_ports; port++) {
|
|
if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
|
|
bcm_sf2_port_disable(ds, port, NULL);
|
|
}
|
|
@@ -724,10 +724,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
|
|
{
|
|
struct net_device *p = ds->ports[port].cpu_dp->master;
|
|
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
|
|
- struct ethtool_wolinfo pwol;
|
|
+ struct ethtool_wolinfo pwol = { };
|
|
|
|
/* Get the parent device WoL settings */
|
|
- p->ethtool_ops->get_wol(p, &pwol);
|
|
+ if (p->ethtool_ops->get_wol)
|
|
+ p->ethtool_ops->get_wol(p, &pwol);
|
|
|
|
/* Advertise the parent device supported settings */
|
|
wol->supported = pwol.supported;
|
|
@@ -748,9 +749,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
|
|
struct net_device *p = ds->ports[port].cpu_dp->master;
|
|
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
|
|
s8 cpu_port = ds->ports[port].cpu_dp->index;
|
|
- struct ethtool_wolinfo pwol;
|
|
+ struct ethtool_wolinfo pwol = { };
|
|
|
|
- p->ethtool_ops->get_wol(p, &pwol);
|
|
+ if (p->ethtool_ops->get_wol)
|
|
+ p->ethtool_ops->get_wol(p, &pwol);
|
|
if (wol->wolopts & ~pwol.supported)
|
|
return -EINVAL;
|
|
|
|
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
|
|
index bb41becb6609..31ff1e0d1baa 100644
|
|
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
|
|
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
|
|
@@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
{
|
|
struct net_device *netdev;
|
|
struct atl2_adapter *adapter;
|
|
- static int cards_found;
|
|
+ static int cards_found = 0;
|
|
unsigned long mmio_start;
|
|
int mmio_len;
|
|
int err;
|
|
|
|
- cards_found = 0;
|
|
-
|
|
err = pci_enable_device(pdev);
|
|
if (err)
|
|
return err;
|
|
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
|
|
index fc16b2b0d0e9..0bdbc72605e1 100644
|
|
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
|
|
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
|
|
@@ -134,6 +134,10 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
|
|
|
|
priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
|
|
reg = rxchk_readl(priv, RXCHK_CONTROL);
|
|
+ /* Clear L2 header checks, which would prevent BPDUs
|
|
+ * from being received.
|
|
+ */
|
|
+ reg &= ~RXCHK_L2_HDR_DIS;
|
|
if (priv->rx_chk_en)
|
|
reg |= RXCHK_EN;
|
|
else
|
|
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
|
|
index 1fdaf86bbe8f..0bd93bb7d1a2 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
|
|
@@ -3542,7 +3542,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
|
|
if (len)
|
|
break;
|
|
/* on first few passes, just barely sleep */
|
|
- if (i < DFLT_HWRM_CMD_TIMEOUT)
|
|
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER)
|
|
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
else
@@ -3565,7 +3565,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
dma_rmb();
if (*valid)
break;
- udelay(1);
+ usleep_range(1, 5);
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bde384630a75..cf2d4a6583d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -548,7 +548,7 @@ struct rx_tpa_end_cmp_ext {
(HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
-#define HWRM_VALID_BIT_DELAY_USEC 20
+#define HWRM_VALID_BIT_DELAY_USEC 150
#define BNXT_RX_EVENT 1
#define BNXT_AGG_EVENT 2
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6c8dcb65ff03..90497a27df18 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1039,7 +1039,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_CFG_DONE:
/* Last message of VF config msg sequence */
nic_enable_vf(nic, vf, true);
- goto unlock;
+ break;
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
if (vf >= nic->num_vf_en)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..9800738448ec 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -172,6 +172,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
return 1;
}
+static void nicvf_send_cfg_done(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to CFG DONE msg\n");
+ }
+}
+
static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
if (bgx->rx)
@@ -1416,7 +1427,6 @@ int nicvf_open(struct net_device *netdev)
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct nicvf_cq_poll *cq_poll = NULL;
- union nic_mbx mbx = {};
netif_carrier_off(netdev);
@@ -1512,8 +1522,7 @@ int nicvf_open(struct net_device *netdev)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
/* Send VF config done msg to PF */
- mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
- nicvf_write_to_mbx(nic, &mbx);
+ nicvf_send_cfg_done(nic);
return 0;
cleanup:
@@ -1941,7 +1950,8 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
/* flush DMAC filters and reset RX mode */
mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
- nicvf_send_msg_to_pf(nic, &mbx);
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
if (mode & BGX_XCAST_MCAST_FILTER) {
/* once enabling filtering, we need to signal to PF to add
@@ -1949,7 +1959,8 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
*/
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
mbx.xcast.data.mac = 0;
- nicvf_send_msg_to_pf(nic, &mbx);
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
}
/* check if we have any specific MACs to be added to PF DMAC filter */
@@ -1958,9 +1969,9 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
for (idx = 0; idx < mc_addrs->count; idx++) {
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
mbx.xcast.data.mac = mc_addrs->mc[idx];
- nicvf_send_msg_to_pf(nic, &mbx);
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
}
- kfree(mc_addrs);
}
/* and finally set rx mode for PF accordingly */
@@ -1968,6 +1979,8 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
mbx.xcast.data.mode = mode;
nicvf_send_msg_to_pf(nic, &mbx);
+free_mc:
+ kfree(mc_addrs);
}
static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 3b9e74be5fbd..b8155f5e71b4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
dsaf_dev = dev_get_drvdata(&pdev->dev);
if (!dsaf_dev) {
dev_err(&pdev->dev, "dsaf_dev is NULL\n");
+ put_device(&pdev->dev);
return -ENODEV;
}
@@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
dsaf_dev->ae_dev.name);
+ put_device(&pdev->dev);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6cdd58d9d461..410d5d3aa393 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3924,8 +3924,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
- /* Enable L3/L4 for Tx Switched packets */
- mrqc |= IXGBE_MRQC_L3L4TXSWEN;
+ /* Enable L3/L4 for Tx Switched packets only for X550,
+ * older devices do not support this feature
+ */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ mrqc |= IXGBE_MRQC_L3L4TXSWEN;
} else {
if (tcs > 4)
mrqc = IXGBE_MRQC_RTRSS8TCEN;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 62f204f32316..59007d6cd36d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2886,7 +2886,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
- return ret;
+ goto err_put_clk;
pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2894,6 +2894,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
infer_hw_params(msp);
return 0;
+
+err_put_clk:
+ if (!IS_ERR(msp->clk))
+ clk_disable_unprepare(msp->clk);
+ return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a78a39244b79..2ba0d89aaf3c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2147,7 +2147,7 @@ err_drop_frame:
if (unlikely(!skb))
goto err_drop_frame_ret_pool;
- dma_sync_single_range_for_cpu(dev->dev.parent,
+ dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
rx_desc->buf_phys_addr,
MVNETA_MH_SIZE + NET_SKB_PAD,
rx_bytes,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index eff57f7d056a..4e18d95e548f 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1288,15 +1288,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- enum alu_op alu_op, bool skip)
+ enum alu_op alu_op)
{
const struct bpf_insn *insn = &meta->insn;
- if (skip) {
- meta->skip = true;
- return 0;
- }
-
wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
@@ -2306,7 +2301,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
}
static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2316,7 +2311,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
}
static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2326,7 +2321,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
}
static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2336,7 +2331,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
}
static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2346,7 +2341,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
}
static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
|
|
index e860bdf0f752..b7471e48db7b 100644
|
|
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
|
|
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
|
|
@@ -1689,6 +1689,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
|
|
|
|
eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
|
|
|
|
+ if (!ether_addr_equal(ethh->h_dest,
|
|
+ p_hwfn->p_rdma_info->iwarp.mac_addr)) {
|
|
+ DP_VERBOSE(p_hwfn,
|
|
+ QED_MSG_RDMA,
|
|
+ "Got unexpected mac %pM instead of %pM\n",
|
|
+ ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
ether_addr_copy(remote_mac_addr, ethh->h_source);
|
|
ether_addr_copy(local_mac_addr, ethh->h_dest);
|
|
|
|
@@ -2606,7 +2615,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
|
|
struct qed_iwarp_info *iwarp_info;
|
|
struct qed_ll2_acquire_data data;
|
|
struct qed_ll2_cbs cbs;
|
|
- u32 mpa_buff_size;
|
|
+ u32 buff_size;
|
|
u16 n_ooo_bufs;
|
|
int rc = 0;
|
|
int i;
|
|
@@ -2633,7 +2642,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
|
|
|
|
memset(&data, 0, sizeof(data));
|
|
data.input.conn_type = QED_LL2_TYPE_IWARP;
|
|
- data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
|
|
+ data.input.mtu = params->max_mtu;
|
|
data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
|
|
data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
|
|
data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
|
|
@@ -2655,9 +2664,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
|
|
goto err;
|
|
}
|
|
|
|
+ buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
|
|
rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
|
|
QED_IWARP_LL2_SYN_RX_SIZE,
|
|
- QED_IWARP_MAX_SYN_PKT_SIZE,
|
|
+ buff_size,
|
|
iwarp_info->ll2_syn_handle);
|
|
if (rc)
|
|
goto err;
|
|
@@ -2711,10 +2721,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
|
|
if (rc)
|
|
goto err;
|
|
|
|
- mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
|
|
rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
|
|
data.input.rx_num_desc,
|
|
- mpa_buff_size,
|
|
+ buff_size,
|
|
iwarp_info->ll2_mpa_handle);
|
|
if (rc)
|
|
goto err;
|
|
@@ -2727,7 +2736,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
|
|
|
|
iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
|
|
|
|
- iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
|
|
+ iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
|
|
if (!iwarp_info->mpa_intermediate_buf)
|
|
goto err;
|
|
|
|
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
|
|
index b8f612d00241..7ac959038324 100644
|
|
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
|
|
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
|
|
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
|
|
|
|
#define QED_IWARP_LL2_SYN_TX_SIZE (128)
|
|
#define QED_IWARP_LL2_SYN_RX_SIZE (256)
|
|
-#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
|
|
|
|
#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256)
|
|
#define QED_IWARP_MAX_OOO (16)
|
|
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
|
|
index 6e381354f658..74bebbdb4b15 100644
|
|
--- a/drivers/net/usb/qmi_wwan.c
|
|
+++ b/drivers/net/usb/qmi_wwan.c
|
|
@@ -1208,8 +1208,8 @@ static const struct usb_device_id products[] = {
|
|
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
|
|
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
|
|
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
|
|
- {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */
|
|
- {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */
|
|
+ {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */
|
|
+ {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
|
|
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
|
|
{QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
|
|
{QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
|
|
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
|
|
index 4ca6592f5b3a..7cd428c0af43 100644
|
|
--- a/drivers/net/wireless/mac80211_hwsim.c
|
|
+++ b/drivers/net/wireless/mac80211_hwsim.c
|
|
@@ -3454,7 +3454,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
|
|
goto out_err;
|
|
}
|
|
|
|
- genlmsg_reply(skb, info);
|
|
+ res = genlmsg_reply(skb, info);
|
|
break;
|
|
}
|
|
|
|
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
|
|
index 789337ea676a..6ede6168bd85 100644
|
|
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
|
|
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
|
|
@@ -433,8 +433,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
|
|
skb_tail_pointer(skb),
|
|
MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
|
|
|
|
- cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
|
|
-
|
|
lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
|
|
cardp->rx_urb);
|
|
ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
|
|
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
|
|
index 1d28cd656536..1eeb7be6aa34 100644
|
|
--- a/drivers/nvdimm/label.c
|
|
+++ b/drivers/nvdimm/label.c
|
|
@@ -625,7 +625,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
|
|
|
|
static int __pmem_label_update(struct nd_region *nd_region,
|
|
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
|
|
- int pos)
|
|
+ int pos, unsigned long flags)
|
|
{
|
|
struct nd_namespace_common *ndns = &nspm->nsio.common;
|
|
struct nd_interleave_set *nd_set = nd_region->nd_set;
|
|
@@ -666,7 +666,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
|
|
memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
|
|
if (nspm->alt_name)
|
|
memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
|
|
- nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
|
|
+ nd_label->flags = __cpu_to_le32(flags);
|
|
nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
|
|
nd_label->position = __cpu_to_le16(pos);
|
|
nd_label->isetcookie = __cpu_to_le64(cookie);
|
|
@@ -1120,13 +1120,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
|
|
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
|
|
struct nd_namespace_pmem *nspm, resource_size_t size)
|
|
{
|
|
- int i;
|
|
+ int i, rc;
|
|
|
|
for (i = 0; i < nd_region->ndr_mappings; i++) {
|
|
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
|
|
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
|
|
struct resource *res;
|
|
- int rc, count = 0;
|
|
+ int count = 0;
|
|
|
|
if (size == 0) {
|
|
rc = del_labels(nd_mapping, nspm->uuid);
|
|
@@ -1144,7 +1144,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
|
|
if (rc < 0)
|
|
return rc;
|
|
|
|
- rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
|
|
+ rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
|
|
+ NSLABEL_FLAG_UPDATING);
|
|
+ if (rc)
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ if (size == 0)
|
|
+ return 0;
|
|
+
|
|
+ /* Clear the UPDATING flag per UEFI 2.7 expectations */
|
|
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
|
|
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
|
|
+
|
|
+ rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
|
|
if (rc)
|
|
return rc;
|
|
}
|
|
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
|
|
index 4a4266250c28..54d79837f7c6 100644
|
|
--- a/drivers/nvdimm/namespace_devs.c
|
|
+++ b/drivers/nvdimm/namespace_devs.c
|
|
@@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
|
|
bool pmem_should_map_pages(struct device *dev)
|
|
{
|
|
struct nd_region *nd_region = to_nd_region(dev->parent);
|
|
+ struct nd_namespace_common *ndns = to_ndns(dev);
|
|
struct nd_namespace_io *nsio;
|
|
|
|
if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
|
|
@@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
|
|
if (is_nd_pfn(dev) || is_nd_btt(dev))
|
|
return false;
|
|
|
|
+ if (ndns->force_raw)
|
|
+ return false;
|
|
+
|
|
nsio = to_nd_namespace_io(dev);
|
|
if (region_intersects(nsio->res.start, resource_size(&nsio->res),
|
|
IORESOURCE_SYSTEM_RAM,
|
|
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
|
|
index 7fe84bfe0878..3ee995a3bfc9 100644
|
|
--- a/drivers/nvdimm/pfn_devs.c
|
|
+++ b/drivers/nvdimm/pfn_devs.c
|
|
@@ -534,7 +534,7 @@ static unsigned long init_altmap_base(resource_size_t base)
|
|
|
|
static unsigned long init_altmap_reserve(resource_size_t base)
|
|
{
|
|
- unsigned long reserve = PHYS_PFN(SZ_8K);
|
|
+ unsigned long reserve = PFN_UP(SZ_8K);
|
|
unsigned long base_pfn = PHYS_PFN(base);
|
|
|
|
reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
|
|
@@ -619,7 +619,7 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trun
|
|
if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
|
|
IORES_DESC_NONE) == REGION_MIXED
|
|
|| !IS_ALIGNED(end, nd_pfn->align)
|
|
- || nd_region_conflict(nd_region, start, size + adjust))
|
|
+ || nd_region_conflict(nd_region, start, size))
|
|
*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
|
|
}
|
|
|
|
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
|
|
index 380916bff9e0..dee5b9e35ffd 100644
|
|
--- a/drivers/parport/parport_pc.c
|
|
+++ b/drivers/parport/parport_pc.c
|
|
@@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p)
|
|
{
|
|
int i;
|
|
for (i = 0; i < NR_SUPERIOS; i++)
|
|
- if (superios[i].io != p->base)
|
|
+ if (superios[i].io == p->base)
|
|
return &superios[i];
|
|
return NULL;
|
|
}
|
|
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
|
|
index 0fa9e8fdce66..b56e22262a77 100644
|
|
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
|
|
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
|
|
@@ -439,7 +439,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
|
|
if (ret)
|
|
pci->num_viewport = 2;
|
|
|
|
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
|
|
+ if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
|
|
/*
|
|
* If a specific SoC driver needs to change the
|
|
* default number of vectors, it needs to implement
|
|
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
|
|
index f03279fc87cd..1908dd2978d3 100644
|
|
--- a/drivers/pci/pcie/dpc.c
|
|
+++ b/drivers/pci/pcie/dpc.c
|
|
@@ -153,6 +153,28 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
|
|
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
|
|
}
|
|
|
|
+static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
|
|
+ struct aer_err_info *info)
|
|
+{
|
|
+ int pos = dev->aer_cap;
|
|
+ u32 status, mask, sev;
|
|
+
|
|
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
|
|
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
|
|
+ status &= ~mask;
|
|
+ if (!status)
|
|
+ return 0;
|
|
+
|
|
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
|
|
+ status &= sev;
|
|
+ if (status)
|
|
+ info->severity = AER_FATAL;
|
|
+ else
|
|
+ info->severity = AER_NONFATAL;
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
static irqreturn_t dpc_handler(int irq, void *context)
|
|
{
|
|
struct aer_err_info info;
|
|
@@ -180,9 +202,12 @@ static irqreturn_t dpc_handler(int irq, void *context)
|
|
/* show RP PIO error detail information */
|
|
if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
|
|
dpc_process_rp_pio_error(dpc);
|
|
- else if (reason == 0 && aer_get_device_error_info(pdev, &info)) {
|
|
+ else if (reason == 0 &&
|
|
+ dpc_get_aer_uncorrect_severity(pdev, &info) &&
|
|
+ aer_get_device_error_info(pdev, &info)) {
|
|
aer_print_error(pdev, &info);
|
|
pci_cleanup_aer_uncorrect_error_status(pdev);
|
|
+ pci_aer_clear_fatal_status(pdev);
|
|
}
|
|
|
|
/* We configure DPC so it only triggers on ERR_FATAL */
|
|
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
|
|
index 201f9e5ff55c..4a4c16bfc0d3 100644
|
|
--- a/drivers/pci/probe.c
|
|
+++ b/drivers/pci/probe.c
|
|
@@ -2038,11 +2038,8 @@ static void pci_configure_ltr(struct pci_dev *dev)
|
|
{
|
|
#ifdef CONFIG_PCIEASPM
|
|
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
|
|
- u32 cap;
|
|
struct pci_dev *bridge;
|
|
-
|
|
- if (!host->native_ltr)
|
|
- return;
|
|
+ u32 cap, ctl;
|
|
|
|
if (!pci_is_pcie(dev))
|
|
return;
|
|
@@ -2051,22 +2048,35 @@ static void pci_configure_ltr(struct pci_dev *dev)
|
|
if (!(cap & PCI_EXP_DEVCAP2_LTR))
|
|
return;
|
|
|
|
- /*
|
|
- * Software must not enable LTR in an Endpoint unless the Root
|
|
- * Complex and all intermediate Switches indicate support for LTR.
|
|
- * PCIe r3.1, sec 6.18.
|
|
- */
|
|
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
|
|
- dev->ltr_path = 1;
|
|
- else {
|
|
+ pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl);
|
|
+ if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
|
|
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
|
|
+ dev->ltr_path = 1;
|
|
+ return;
|
|
+ }
|
|
+
|
|
bridge = pci_upstream_bridge(dev);
|
|
if (bridge && bridge->ltr_path)
|
|
dev->ltr_path = 1;
|
|
+
|
|
+ return;
|
|
}
|
|
|
|
- if (dev->ltr_path)
|
|
+ if (!host->native_ltr)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * Software must not enable LTR in an Endpoint unless the Root
|
|
+ * Complex and all intermediate Switches indicate support for LTR.
|
|
+ * PCIe r4.0, sec 6.18.
|
|
+ */
|
|
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
|
|
+ ((bridge = pci_upstream_bridge(dev)) &&
|
|
+ bridge->ltr_path)) {
|
|
pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
|
|
PCI_EXP_DEVCTL2_LTR_EN);
|
|
+ dev->ltr_path = 1;
|
|
+ }
|
|
#endif
|
|
}
|
|
|
|
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
|
|
index 91cffc051055..ead4beb5f55f 100644
|
|
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
|
|
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
|
|
@@ -665,7 +665,7 @@ static const char * const sd_a_groups[] = {
|
|
|
|
static const char * const sdxc_a_groups[] = {
|
|
"sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
|
|
- "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
|
|
+ "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
|
|
};
|
|
|
|
static const char * const pcm_a_groups[] = {
|
|
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
|
|
index e4905bef2663..37be541e057d 100644
|
|
--- a/drivers/power/supply/cpcap-charger.c
|
|
+++ b/drivers/power/supply/cpcap-charger.c
|
|
@@ -458,6 +458,7 @@ static void cpcap_usb_detect(struct work_struct *work)
|
|
goto out_err;
|
|
}
|
|
|
|
+ power_supply_changed(ddata->usb);
|
|
return;
|
|
|
|
out_err:
|
|
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
|
|
index b94e3a721721..cd93cf53e23c 100644
|
|
--- a/drivers/regulator/max77620-regulator.c
|
|
+++ b/drivers/regulator/max77620-regulator.c
|
|
@@ -1,7 +1,7 @@
|
|
/*
|
|
* Maxim MAX77620 Regulator driver
|
|
*
|
|
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
|
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
|
|
*
|
|
* Author: Mallikarjun Kasoju <mkasoju@nvidia.com>
|
|
* Laxman Dewangan <ldewangan@nvidia.com>
|
|
@@ -803,6 +803,14 @@ static int max77620_regulator_probe(struct platform_device *pdev)
|
|
rdesc = &rinfo[id].desc;
|
|
pmic->rinfo[id] = &max77620_regs_info[id];
|
|
pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
|
|
+ pmic->reg_pdata[id].active_fps_src = -1;
|
|
+ pmic->reg_pdata[id].active_fps_pd_slot = -1;
|
|
+ pmic->reg_pdata[id].active_fps_pu_slot = -1;
|
|
+ pmic->reg_pdata[id].suspend_fps_src = -1;
|
|
+ pmic->reg_pdata[id].suspend_fps_pd_slot = -1;
|
|
+ pmic->reg_pdata[id].suspend_fps_pu_slot = -1;
|
|
+ pmic->reg_pdata[id].power_ok = -1;
|
|
+ pmic->reg_pdata[id].ramp_rate_setting = -1;
|
|
|
|
ret = max77620_read_slew_rate(pmic, id);
|
|
if (ret < 0)
|
|
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
|
|
index 095d25f3d2ea..58a1fe583a6c 100644
|
|
--- a/drivers/regulator/s2mpa01.c
|
|
+++ b/drivers/regulator/s2mpa01.c
|
|
@@ -298,13 +298,13 @@ static const struct regulator_desc regulators[] = {
|
|
regulator_desc_ldo(2, STEP_50_MV),
|
|
regulator_desc_ldo(3, STEP_50_MV),
|
|
regulator_desc_ldo(4, STEP_50_MV),
|
|
- regulator_desc_ldo(5, STEP_50_MV),
|
|
+ regulator_desc_ldo(5, STEP_25_MV),
|
|
regulator_desc_ldo(6, STEP_25_MV),
|
|
regulator_desc_ldo(7, STEP_50_MV),
|
|
regulator_desc_ldo(8, STEP_50_MV),
|
|
regulator_desc_ldo(9, STEP_50_MV),
|
|
regulator_desc_ldo(10, STEP_50_MV),
|
|
- regulator_desc_ldo(11, STEP_25_MV),
|
|
+ regulator_desc_ldo(11, STEP_50_MV),
|
|
regulator_desc_ldo(12, STEP_50_MV),
|
|
regulator_desc_ldo(13, STEP_50_MV),
|
|
regulator_desc_ldo(14, STEP_50_MV),
|
|
@@ -315,11 +315,11 @@ static const struct regulator_desc regulators[] = {
|
|
regulator_desc_ldo(19, STEP_50_MV),
|
|
regulator_desc_ldo(20, STEP_50_MV),
|
|
regulator_desc_ldo(21, STEP_50_MV),
|
|
- regulator_desc_ldo(22, STEP_25_MV),
|
|
- regulator_desc_ldo(23, STEP_25_MV),
|
|
+ regulator_desc_ldo(22, STEP_50_MV),
|
|
+ regulator_desc_ldo(23, STEP_50_MV),
|
|
regulator_desc_ldo(24, STEP_50_MV),
|
|
regulator_desc_ldo(25, STEP_50_MV),
|
|
- regulator_desc_ldo(26, STEP_50_MV),
|
|
+ regulator_desc_ldo(26, STEP_25_MV),
|
|
regulator_desc_buck1_4(1),
|
|
regulator_desc_buck1_4(2),
|
|
regulator_desc_buck1_4(3),
|
|
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
|
|
index 5bb6f4ca48db..c584bd1ffa9c 100644
|
|
--- a/drivers/regulator/s2mps11.c
|
|
+++ b/drivers/regulator/s2mps11.c
|
|
@@ -363,7 +363,7 @@ static const struct regulator_desc s2mps11_regulators[] = {
|
|
regulator_desc_s2mps11_ldo(32, STEP_50_MV),
|
|
regulator_desc_s2mps11_ldo(33, STEP_50_MV),
|
|
regulator_desc_s2mps11_ldo(34, STEP_50_MV),
|
|
- regulator_desc_s2mps11_ldo(35, STEP_50_MV),
|
|
+ regulator_desc_s2mps11_ldo(35, STEP_25_MV),
|
|
regulator_desc_s2mps11_ldo(36, STEP_50_MV),
|
|
regulator_desc_s2mps11_ldo(37, STEP_50_MV),
|
|
regulator_desc_s2mps11_ldo(38, STEP_50_MV),
|
|
@@ -373,8 +373,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
|
|
regulator_desc_s2mps11_buck1_4(4),
|
|
regulator_desc_s2mps11_buck5,
|
|
regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
|
|
- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
|
|
- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
|
|
+ regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
|
|
+ regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
|
|
regulator_desc_s2mps11_buck9,
|
|
regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
|
|
};
|
|
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
|
|
index 4e7b55a14b1a..6e294b4d3635 100644
|
|
--- a/drivers/s390/block/dasd_eckd.c
|
|
+++ b/drivers/s390/block/dasd_eckd.c
|
|
@@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
|
|
usrparm.psf_data &= 0x7fffffffULL;
|
|
usrparm.rssd_result &= 0x7fffffffULL;
|
|
}
|
|
+ /* at least 2 bytes are accessed and should be allocated */
|
|
+ if (usrparm.psf_data_len < 2) {
|
|
+ DBF_DEV_EVENT(DBF_WARNING, device,
|
|
+ "Symmetrix ioctl invalid data length %d",
|
|
+ usrparm.psf_data_len);
|
|
+ rc = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
/* alloc I/O data area */
|
|
psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
|
|
rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
|
|
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
|
|
index b67dc4974f23..ec54538f7ae1 100644
|
|
--- a/drivers/s390/virtio/virtio_ccw.c
|
|
+++ b/drivers/s390/virtio/virtio_ccw.c
|
|
@@ -272,6 +272,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
|
|
{
|
|
struct virtio_ccw_vq_info *info;
|
|
|
|
+ if (!vcdev->airq_info)
|
|
+ return;
|
|
list_for_each_entry(info, &vcdev->virtqueues, node)
|
|
drop_airq_indicator(info->vq, vcdev->airq_info);
|
|
}
|
|
@@ -413,7 +415,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
|
|
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
|
|
if (ret)
|
|
return ret;
|
|
- return vcdev->config_block->num;
|
|
+ return vcdev->config_block->num ?: -ENOENT;
|
|
}
|
|
|
|
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
|
|
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
|
|
index 04443577d48b..1046947064a0 100644
|
|
--- a/drivers/scsi/aacraid/linit.c
|
|
+++ b/drivers/scsi/aacraid/linit.c
|
|
@@ -413,13 +413,16 @@ static int aac_slave_configure(struct scsi_device *sdev)
|
|
if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
|
|
devtype = aac->hba_map[chn][tid].devtype;
|
|
|
|
- if (devtype == AAC_DEVTYPE_NATIVE_RAW)
|
|
+ if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
|
|
depth = aac->hba_map[chn][tid].qd_limit;
|
|
- else if (devtype == AAC_DEVTYPE_ARC_RAW)
|
|
+ set_timeout = 1;
|
|
+ goto common_config;
|
|
+ }
|
|
+ if (devtype == AAC_DEVTYPE_ARC_RAW) {
|
|
set_qd_dev_type = true;
|
|
-
|
|
- set_timeout = 1;
|
|
- goto common_config;
|
|
+ set_timeout = 1;
|
|
+ goto common_config;
|
|
+ }
|
|
}
|
|
|
|
if (aac->jbod && (sdev->type == TYPE_DISK))
|
|
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
|
|
index f78d2e5c1471..4ad61cfa69c0 100644
|
|
--- a/drivers/scsi/libiscsi.c
|
|
+++ b/drivers/scsi/libiscsi.c
|
|
@@ -1449,7 +1449,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
|
|
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
|
|
return -ENODATA;
|
|
|
|
+ spin_lock_bh(&conn->session->back_lock);
|
|
+ if (conn->task == NULL) {
|
|
+ spin_unlock_bh(&conn->session->back_lock);
|
|
+ return -ENODATA;
|
|
+ }
|
|
__iscsi_get_task(task);
|
|
+ spin_unlock_bh(&conn->session->back_lock);
|
|
spin_unlock_bh(&conn->session->frwd_lock);
|
|
rc = conn->session->tt->xmit_task(task);
|
|
spin_lock_bh(&conn->session->frwd_lock);
|
|
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
|
|
index 5352c9bbcaf7..f84f9bf15027 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_init.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_init.c
|
|
@@ -643,11 +643,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
|
|
break;
|
|
case DSC_LS_PORT_UNAVAIL:
|
|
default:
|
|
- if (fcport->loop_id != FC_NO_LOOP_ID)
|
|
- qla2x00_clear_loop_id(fcport);
|
|
-
|
|
- fcport->loop_id = loop_id;
|
|
- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
|
|
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
|
|
+ qla2x00_find_new_loop_id(vha, fcport);
|
|
+ fcport->fw_login_state =
|
|
+ DSC_LS_PORT_UNAVAIL;
|
|
+ }
|
|
+ ql_dbg(ql_dbg_disc, vha, 0x20e5,
|
|
+ "%s %d %8phC\n", __func__, __LINE__,
|
|
+ fcport->port_name);
|
|
qla24xx_fcport_handle_login(vha, fcport);
|
|
break;
|
|
}
|
|
@@ -1719,13 +1722,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
|
|
|
|
/* Issue Marker IOCB */
|
|
qla2x00_marker(vha, vha->hw->req_q_map[0],
|
|
- vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
|
|
+ vha->hw->rsp_q_map[0], fcport->loop_id, lun,
|
|
flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
|
|
}
|
|
|
|
done_free_sp:
|
|
sp->free(sp);
|
|
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
|
|
+ fcport->flags &= ~FCF_ASYNC_SENT;
|
|
done:
|
|
return rval;
|
|
}
|
|
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
|
|
index 58b78702c6c9..a3a5162fa60e 100644
|
|
--- a/drivers/scsi/sd.c
|
|
+++ b/drivers/scsi/sd.c
|
|
@@ -3066,6 +3066,55 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
|
|
sdkp->security = 1;
|
|
}
|
|
|
|
+/*
|
|
+ * Determine the device's preferred I/O size for reads and writes
|
|
+ * unless the reported value is unreasonably small, large, not a
|
|
+ * multiple of the physical block size, or simply garbage.
|
|
+ */
|
|
+static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
|
|
+ unsigned int dev_max)
|
|
+{
|
|
+ struct scsi_device *sdp = sdkp->device;
|
|
+ unsigned int opt_xfer_bytes =
|
|
+ logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
|
|
+
|
|
+ if (sdkp->opt_xfer_blocks > dev_max) {
|
|
+ sd_first_printk(KERN_WARNING, sdkp,
|
|
+ "Optimal transfer size %u logical blocks " \
|
|
+ "> dev_max (%u logical blocks)\n",
|
|
+ sdkp->opt_xfer_blocks, dev_max);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
|
|
+ sd_first_printk(KERN_WARNING, sdkp,
|
|
+ "Optimal transfer size %u logical blocks " \
|
|
+ "> sd driver limit (%u logical blocks)\n",
|
|
+ sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (opt_xfer_bytes < PAGE_SIZE) {
|
|
+ sd_first_printk(KERN_WARNING, sdkp,
|
|
+ "Optimal transfer size %u bytes < " \
|
|
+ "PAGE_SIZE (%u bytes)\n",
|
|
+ opt_xfer_bytes, (unsigned int)PAGE_SIZE);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
|
|
+ sd_first_printk(KERN_WARNING, sdkp,
|
|
+ "Optimal transfer size %u bytes not a " \
|
|
+ "multiple of physical block size (%u bytes)\n",
|
|
+ opt_xfer_bytes, sdkp->physical_block_size);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
|
|
+ opt_xfer_bytes);
|
|
+ return true;
|
|
+}
|
|
+
|
|
/**
|
|
* sd_revalidate_disk - called the first time a new disk is seen,
|
|
* performs disk spin up, read_capacity, etc.
|
|
@@ -3144,15 +3193,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
|
|
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
|
|
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
|
|
|
|
- /*
|
|
- * Determine the device's preferred I/O size for reads and writes
|
|
- * unless the reported value is unreasonably small, large, or
|
|
- * garbage.
|
|
- */
|
|
- if (sdkp->opt_xfer_blocks &&
|
|
- sdkp->opt_xfer_blocks <= dev_max &&
|
|
- sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
|
|
- logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
|
|
+ if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
|
|
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
|
|
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
|
|
} else
|
|
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
|
|
index 1c72db94270e..3d331a864b2f 100644
|
|
--- a/drivers/scsi/virtio_scsi.c
|
|
+++ b/drivers/scsi/virtio_scsi.c
|
|
@@ -621,7 +621,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
|
|
return FAILED;
|
|
|
|
memset(cmd, 0, sizeof(*cmd));
|
|
- cmd->sc = sc;
|
|
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
|
|
.type = VIRTIO_SCSI_T_TMF,
|
|
.subtype = cpu_to_virtio32(vscsi->vdev,
|
|
@@ -680,7 +679,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
|
|
return FAILED;
|
|
|
|
memset(cmd, 0, sizeof(*cmd));
|
|
- cmd->sc = sc;
|
|
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
|
|
.type = VIRTIO_SCSI_T_TMF,
|
|
.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
|
|
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
|
|
index c7beb6841289..ab8f731a3426 100644
|
|
--- a/drivers/soc/qcom/rpmh.c
|
|
+++ b/drivers/soc/qcom/rpmh.c
|
|
@@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
|
|
struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
|
|
msg);
|
|
struct completion *compl = rpm_msg->completion;
|
|
+ bool free = rpm_msg->needs_free;
|
|
|
|
rpm_msg->err = r;
|
|
|
|
@@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
|
|
complete(compl);
|
|
|
|
exit:
|
|
- if (rpm_msg->needs_free)
|
|
+ if (free)
|
|
kfree(rpm_msg);
|
|
}
|
|
|
|
@@ -348,11 +349,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
|
|
{
|
|
struct batch_cache_req *req;
|
|
struct rpmh_request *rpm_msgs;
|
|
- DECLARE_COMPLETION_ONSTACK(compl);
|
|
+ struct completion *compls;
|
|
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
|
|
unsigned long time_left;
|
|
int count = 0;
|
|
- int ret, i, j;
|
|
+ int ret, i;
|
|
+ void *ptr;
|
|
|
|
if (!cmd || !n)
|
|
return -EINVAL;
|
|
@@ -362,10 +364,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
|
|
if (!count)
|
|
return -EINVAL;
|
|
|
|
- req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
|
|
+ ptr = kzalloc(sizeof(*req) +
|
|
+ count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
|
|
GFP_ATOMIC);
|
|
- if (!req)
|
|
+ if (!ptr)
|
|
return -ENOMEM;
|
|
+
|
|
+ req = ptr;
|
|
+ compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
|
|
+
|
|
req->count = count;
|
|
rpm_msgs = req->rpm_msgs;
|
|
|
|
@@ -380,25 +387,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
|
|
}
|
|
|
|
for (i = 0; i < count; i++) {
|
|
- rpm_msgs[i].completion = &compl;
|
|
+ struct completion *compl = &compls[i];
|
|
+
|
|
+ init_completion(compl);
|
|
+ rpm_msgs[i].completion = compl;
|
|
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
|
|
if (ret) {
|
|
pr_err("Error(%d) sending RPMH message addr=%#x\n",
|
|
ret, rpm_msgs[i].msg.cmds[0].addr);
|
|
- for (j = i; j < count; j++)
|
|
- rpmh_tx_done(&rpm_msgs[j].msg, ret);
|
|
break;
|
|
}
|
|
}
|
|
|
|
time_left = RPMH_TIMEOUT_MS;
|
|
- for (i = 0; i < count; i++) {
|
|
- time_left = wait_for_completion_timeout(&compl, time_left);
|
|
+ while (i--) {
|
|
+ time_left = wait_for_completion_timeout(&compls[i], time_left);
|
|
if (!time_left) {
|
|
/*
|
|
* Better hope they never finish because they'll signal
|
|
- * the completion on our stack and that's bad once
|
|
- * we've returned from the function.
|
|
+ * the completion that we're going to free once
|
|
+ * we've returned from this function.
|
|
*/
|
|
WARN_ON(1);
|
|
ret = -ETIMEDOUT;
|
|
@@ -407,7 +415,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
|
|
}
|
|
|
|
exit:
|
|
- kfree(req);
|
|
+ kfree(ptr);
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
|
|
index 14f4ea59caff..b624f6fb04ce 100644
|
|
--- a/drivers/spi/spi-pxa2xx.c
|
|
+++ b/drivers/spi/spi-pxa2xx.c
|
|
@@ -1612,6 +1612,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
|
|
platform_info->enable_dma = false;
|
|
} else {
|
|
master->can_dma = pxa2xx_spi_can_dma;
|
|
+ master->max_dma_len = MAX_DMA_LEN;
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
|
|
index 5f19016bbf10..b9fb6493cd6b 100644
|
|
--- a/drivers/spi/spi-ti-qspi.c
|
|
+++ b/drivers/spi/spi-ti-qspi.c
|
|
@@ -490,8 +490,8 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
|
|
ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
|
|
if (qspi->ctrl_base) {
|
|
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
|
|
- MEM_CS_EN(spi->chip_select),
|
|
- MEM_CS_MASK);
|
|
+ MEM_CS_MASK,
|
|
+ MEM_CS_EN(spi->chip_select));
|
|
}
|
|
qspi->mmap_enabled = true;
|
|
}
|
|
@@ -503,7 +503,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
|
|
ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
|
|
if (qspi->ctrl_base)
|
|
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
|
|
- 0, MEM_CS_MASK);
|
|
+ MEM_CS_MASK, 0);
|
|
qspi->mmap_enabled = false;
|
|
}
|
|
|
|
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
|
|
index 28f41caba05d..fb442499f806 100644
|
|
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
|
|
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
|
|
@@ -680,12 +680,23 @@ static int prp_start(struct prp_priv *priv)
|
|
goto out_free_nfb4eof_irq;
|
|
}
|
|
|
|
+ /* start upstream */
|
|
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
|
|
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
|
|
+ if (ret) {
|
|
+ v4l2_err(&ic_priv->sd,
|
|
+ "upstream stream on failed: %d\n", ret);
|
|
+ goto out_free_eof_irq;
|
|
+ }
|
|
+
|
|
/* start the EOF timeout timer */
|
|
mod_timer(&priv->eof_timeout_timer,
|
|
jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
|
|
|
|
return 0;
|
|
|
|
+out_free_eof_irq:
|
|
+ devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
|
|
out_free_nfb4eof_irq:
|
|
devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
|
|
out_unsetup:
|
|
@@ -717,6 +728,12 @@ static void prp_stop(struct prp_priv *priv)
|
|
if (ret == 0)
|
|
v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
|
|
|
|
+ /* stop upstream */
|
|
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
|
|
+ if (ret && ret != -ENOIOCTLCMD)
|
|
+ v4l2_warn(&ic_priv->sd,
|
|
+ "upstream stream off failed: %d\n", ret);
|
|
+
|
|
devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
|
|
devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
|
|
|
|
@@ -1148,15 +1165,6 @@ static int prp_s_stream(struct v4l2_subdev *sd, int enable)
|
|
if (ret)
|
|
goto out;
|
|
|
|
- /* start/stop upstream */
|
|
- ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
|
|
- ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
|
|
- if (ret) {
|
|
- if (enable)
|
|
- prp_stop(priv);
|
|
- goto out;
|
|
- }
|
|
-
|
|
update_count:
|
|
priv->stream_count += enable ? 1 : -1;
|
|
if (priv->stream_count < 0)
|
|
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
|
|
index cd2c291e1e94..e22f1239a318 100644
|
|
--- a/drivers/staging/media/imx/imx-media-csi.c
|
|
+++ b/drivers/staging/media/imx/imx-media-csi.c
|
|
@@ -626,7 +626,7 @@ out_put_ipu:
|
|
return ret;
|
|
}
|
|
|
|
-static void csi_idmac_stop(struct csi_priv *priv)
|
|
+static void csi_idmac_wait_last_eof(struct csi_priv *priv)
|
|
{
|
|
unsigned long flags;
|
|
int ret;
|
|
@@ -643,7 +643,10 @@ static void csi_idmac_stop(struct csi_priv *priv)
|
|
&priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
|
|
if (ret == 0)
|
|
v4l2_warn(&priv->sd, "wait last EOF timeout\n");
|
|
+}
|
|
|
|
+static void csi_idmac_stop(struct csi_priv *priv)
|
|
+{
|
|
devm_free_irq(priv->dev, priv->eof_irq, priv);
|
|
devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
|
|
|
|
@@ -719,10 +722,16 @@ static int csi_start(struct csi_priv *priv)
|
|
|
|
output_fi = &priv->frame_interval[priv->active_output_pad];
|
|
|
|
+ /* start upstream */
|
|
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
|
|
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
if (priv->dest == IPU_CSI_DEST_IDMAC) {
|
|
ret = csi_idmac_start(priv);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto stop_upstream;
|
|
}
|
|
|
|
ret = csi_setup(priv);
|
|
@@ -750,11 +759,26 @@ fim_off:
|
|
idmac_stop:
|
|
if (priv->dest == IPU_CSI_DEST_IDMAC)
|
|
csi_idmac_stop(priv);
|
|
+stop_upstream:
|
|
+ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
|
|
return ret;
|
|
}
|
|
|
|
static void csi_stop(struct csi_priv *priv)
|
|
{
|
|
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
|
|
+ csi_idmac_wait_last_eof(priv);
|
|
+
|
|
+ /*
|
|
+ * Disable the CSI asap, after syncing with the last EOF.
|
|
+ * Doing so after the IDMA channel is disabled has shown to
|
|
+ * create hard system-wide hangs.
|
|
+ */
|
|
+ ipu_csi_disable(priv->csi);
|
|
+
|
|
+ /* stop upstream */
|
|
+ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
|
|
+
|
|
if (priv->dest == IPU_CSI_DEST_IDMAC) {
|
|
csi_idmac_stop(priv);
|
|
|
|
@@ -762,8 +786,6 @@ static void csi_stop(struct csi_priv *priv)
|
|
if (priv->fim)
|
|
imx_media_fim_set_stream(priv->fim, NULL, false);
|
|
}
|
|
-
|
|
- ipu_csi_disable(priv->csi);
|
|
}
|
|
|
|
static const struct csi_skip_desc csi_skip[12] = {
|
|
@@ -924,23 +946,13 @@ static int csi_s_stream(struct v4l2_subdev *sd, int enable)
|
|
goto update_count;
|
|
|
|
if (enable) {
|
|
- /* upstream must be started first, before starting CSI */
|
|
- ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
|
|
- ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
|
|
- if (ret)
|
|
- goto out;
|
|
-
|
|
dev_dbg(priv->dev, "stream ON\n");
|
|
ret = csi_start(priv);
|
|
- if (ret) {
|
|
- v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
|
|
+ if (ret)
|
|
goto out;
|
|
- }
|
|
} else {
|
|
dev_dbg(priv->dev, "stream OFF\n");
|
|
- /* CSI must be stopped first, then stop upstream */
|
|
csi_stop(priv);
|
|
- v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
|
|
}
|
|
|
|
update_count:
|
|
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
|
|
index cc756a123fd8..03e9cb156df9 100644
|
|
--- a/drivers/target/iscsi/iscsi_target.c
|
|
+++ b/drivers/target/iscsi/iscsi_target.c
|
|
@@ -4045,9 +4045,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
|
|
struct se_cmd *se_cmd = &cmd->se_cmd;
|
|
|
|
if (se_cmd->se_tfo != NULL) {
|
|
- spin_lock(&se_cmd->t_state_lock);
|
|
+ spin_lock_irq(&se_cmd->t_state_lock);
|
|
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
|
|
- spin_unlock(&se_cmd->t_state_lock);
|
|
+ spin_unlock_irq(&se_cmd->t_state_lock);
|
|
}
|
|
}
|
|
spin_unlock_bh(&conn->cmd_lock);
|
|
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
|
|
index 877fd7f8a8ed..98125de2f0a6 100644
|
|
--- a/drivers/tty/serial/8250/8250_of.c
|
|
+++ b/drivers/tty/serial/8250/8250_of.c
|
|
@@ -130,6 +130,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
|
|
port->flags |= UPF_IOREMAP;
|
|
}
|
|
|
|
+ /* Compatibility with the deprecated pxa driver and 8250_pxa drivers. */
|
|
+ if (of_device_is_compatible(np, "mrvl,mmp-uart"))
|
|
+ port->regshift = 2;
|
|
+
|
|
/* Check for registers offset within the devices address range */
|
|
if (of_property_read_u32(np, "reg-shift", &prop) == 0)
|
|
port->regshift = prop;
|
|
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
|
|
index 48bd694a5fa1..bbe5cba21522 100644
|
|
--- a/drivers/tty/serial/8250/8250_pci.c
|
|
+++ b/drivers/tty/serial/8250/8250_pci.c
|
|
@@ -2027,6 +2027,111 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
|
|
.setup = pci_default_setup,
|
|
.exit = pci_plx9050_exit,
|
|
},
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
+ {
|
|
+ .vendor = PCI_VENDOR_ID_ACCESIO,
|
|
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
|
|
+ .subvendor = PCI_ANY_ID,
|
|
+ .subdevice = PCI_ANY_ID,
|
|
+ .setup = pci_pericom_setup,
|
|
+ },
|
|
/*
|
|
* SBS Technologies, Inc., PMC-OCTALPRO 232
|
|
*/
|
|
@@ -4575,10 +4680,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
|
|
*/
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
pbn_pericom_PI7C9X7954 },
|
|
@@ -4587,10 +4692,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
|
|
pbn_pericom_PI7C9X7954 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
pbn_pericom_PI7C9X7954 },
|
|
@@ -4599,10 +4704,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
|
|
pbn_pericom_PI7C9X7954 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
pbn_pericom_PI7C9X7954 },
|
|
@@ -4611,13 +4716,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
|
|
pbn_pericom_PI7C9X7954 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7951 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
pbn_pericom_PI7C9X7954 },
|
|
@@ -4626,16 +4731,16 @@ static const struct pci_device_id serial_pci_tbl[] = {
|
|
pbn_pericom_PI7C9X7954 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
pbn_pericom_PI7C9X7954 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
pbn_pericom_PI7C9X7954 },
|
|
@@ -4644,13 +4749,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
|
|
pbn_pericom_PI7C9X7954 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7954 },
|
|
+ pbn_pericom_PI7C9X7952 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7958 },
|
|
+ pbn_pericom_PI7C9X7954 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7958 },
|
|
+ pbn_pericom_PI7C9X7954 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
pbn_pericom_PI7C9X7958 },
|
|
@@ -4659,19 +4764,19 @@ static const struct pci_device_id serial_pci_tbl[] = {
|
|
pbn_pericom_PI7C9X7958 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7958 },
|
|
+ pbn_pericom_PI7C9X7954 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
pbn_pericom_PI7C9X7958 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7958 },
|
|
+ pbn_pericom_PI7C9X7954 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
pbn_pericom_PI7C9X7958 },
|
|
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
|
|
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
|
- pbn_pericom_PI7C9X7958 },
|
|
+ pbn_pericom_PI7C9X7954 },
|
|
/*
|
|
* Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
|
|
*/
|
|
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 87d8dd90d605..0e3627289047 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -362,7 +362,13 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
		cdns_uart_handle_tx(dev_id);
		isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
	}
-	if (isrstatus & CDNS_UART_IXR_RXMASK)
+
+	/*
+	 * Skip RX processing if RX is disabled as RXEMPTY will never be set
+	 * as read bytes will not be removed from the FIFO.
+	 */
+	if (isrstatus & CDNS_UART_IXR_RXMASK &&
+	    !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
		cdns_uart_handle_rx(dev_id, isrstatus);

	spin_unlock(&port->lock);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index da335899527b..b9a9a07f1ee9 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -935,8 +935,11 @@ static void flush_scrollback(struct vc_data *vc)
 {
	WARN_CONSOLE_UNLOCKED();

+	set_origin(vc);
	if (vc->vc_sw->con_flush_scrollback)
		vc->vc_sw->con_flush_scrollback(vc);
+	else
+		vc->vc_sw->con_switch(vc);
 }

 /*
@@ -1506,8 +1509,10 @@ static void csi_J(struct vc_data *vc, int vpar)
			count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
			start = (unsigned short *)vc->vc_origin;
			break;
+		case 3: /* include scrollback */
+			flush_scrollback(vc);
+			/* fallthrough */
		case 2: /* erase whole display */
-		case 3: /* (and scrollback buffer later) */
			vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
			count = vc->vc_cols * vc->vc_rows;
			start = (unsigned short *)vc->vc_origin;
@@ -1516,13 +1521,7 @@ static void csi_J(struct vc_data *vc, int vpar)
			return;
	}
	scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
-	if (vpar == 3) {
-		set_origin(vc);
-		flush_scrollback(vc);
-		if (con_is_visible(vc))
-			update_screen(vc);
-	} else if (con_should_update(vc))
-		do_update_region(vc, (unsigned long) start, count);
+	update_region(vc, (unsigned long) start, count);
	vc->vc_need_wrap = 0;
 }

diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 772851bee99b..12025358bb3c 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -130,6 +130,7 @@ static int tegra_udc_remove(struct platform_device *pdev)
 {
	struct tegra_udc *udc = platform_get_drvdata(pdev);

+	ci_hdrc_remove_device(udc->dev);
	usb_phy_set_suspend(udc->phy, 1);
	clk_disable_unprepare(udc->clk);

diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index c84c8c189e90..eb8046f87a54 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -110,6 +110,20 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
	return 0;
 }

+static int tps6598x_block_write(struct tps6598x *tps, u8 reg,
+				void *val, size_t len)
+{
+	u8 data[TPS_MAX_LEN + 1];
+
+	if (!tps->i2c_protocol)
+		return regmap_raw_write(tps->regmap, reg, val, len);
+
+	data[0] = len;
+	memcpy(&data[1], val, len);
+
+	return regmap_raw_write(tps->regmap, reg, data, sizeof(data));
+}
+
 static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
 {
	return tps6598x_block_read(tps, reg, val, sizeof(u16));
@@ -127,23 +141,23 @@ static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)

 static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val)
 {
-	return regmap_raw_write(tps->regmap, reg, &val, sizeof(u16));
+	return tps6598x_block_write(tps, reg, &val, sizeof(u16));
 }

 static inline int tps6598x_write32(struct tps6598x *tps, u8 reg, u32 val)
 {
-	return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
+	return tps6598x_block_write(tps, reg, &val, sizeof(u32));
 }

 static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val)
 {
-	return regmap_raw_write(tps->regmap, reg, &val, sizeof(u64));
+	return tps6598x_block_write(tps, reg, &val, sizeof(u64));
 }

 static inline int
 tps6598x_write_4cc(struct tps6598x *tps, u8 reg, const char *val)
 {
-	return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
+	return tps6598x_block_write(tps, reg, &val, sizeof(u32));
 }

 static int tps6598x_read_partner_identity(struct tps6598x *tps)
@@ -229,8 +243,8 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
		return -EBUSY;

	if (in_len) {
-		ret = regmap_raw_write(tps->regmap, TPS_REG_DATA1,
-				       in_data, in_len);
+		ret = tps6598x_block_write(tps, TPS_REG_DATA1,
+					   in_data, in_len);
		if (ret)
			return ret;
	}
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 5a0db6dec8d1..aaee1e6584e6 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -40,6 +40,9 @@
 */
 #define P9_LOCK_TIMEOUT (30*HZ)

+/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */
+#define V9FS_STAT2INODE_KEEP_ISIZE 1
+
 extern struct file_system_type v9fs_fs_type;
 extern const struct address_space_operations v9fs_addr_operations;
 extern const struct file_operations v9fs_file_operations;
@@ -61,8 +64,10 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
		     struct inode *inode, umode_t mode, dev_t);
 void v9fs_evict_inode(struct inode *inode);
 ino_t v9fs_qid2ino(struct p9_qid *qid);
-void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
-void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *);
+void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
+		      struct super_block *sb, unsigned int flags);
+void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
+			   unsigned int flags);
 int v9fs_dir_release(struct inode *inode, struct file *filp);
 int v9fs_file_open(struct inode *inode, struct file *file);
 void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
@@ -83,4 +88,18 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
 }

 int v9fs_open_to_dotl_flags(int flags);
+
+static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size)
+{
+	/*
+	 * 32-bit need the lock, concurrent updates could break the
+	 * sequences and make i_size_read() loop forever.
+	 * 64-bit updates are atomic and can skip the locking.
+	 */
+	if (sizeof(i_size) > sizeof(long))
+		spin_lock(&inode->i_lock);
+	i_size_write(inode, i_size);
+	if (sizeof(i_size) > sizeof(long))
+		spin_unlock(&inode->i_lock);
+}
 #endif
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index ab3d5f5dbb00..c87e6d6ec069 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -442,7 +442,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
		i_size = i_size_read(inode);
		if (iocb->ki_pos > i_size) {
			inode_add_bytes(inode, iocb->ki_pos - i_size);
-			i_size_write(inode, iocb->ki_pos);
+			/*
+			 * Need to serialize against i_size_write() in
+			 * v9fs_stat2inode()
+			 */
+			v9fs_i_size_write(inode, iocb->ki_pos);
		}
		return retval;
	}
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 85ff859d3af5..72b779bc0942 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -538,7 +538,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
	if (retval)
		goto error;

-	v9fs_stat2inode(st, inode, sb);
+	v9fs_stat2inode(st, inode, sb, 0);
	v9fs_cache_inode_get_cookie(inode);
	unlock_new_inode(inode);
	return inode;
@@ -1092,7 +1092,7 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
	if (IS_ERR(st))
		return PTR_ERR(st);

-	v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb);
+	v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
	generic_fillattr(d_inode(dentry), stat);

	p9stat_free(st);
@@ -1170,12 +1170,13 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
 * @stat: Plan 9 metadata (mistat) structure
 * @inode: inode to populate
 * @sb: superblock of filesystem
+ * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
 *
 */

 void
 v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
-		 struct super_block *sb)
+		 struct super_block *sb, unsigned int flags)
 {
	umode_t mode;
	char ext[32];
@@ -1216,10 +1217,11 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
	mode = p9mode2perm(v9ses, stat);
	mode |= inode->i_mode & ~S_IALLUGO;
	inode->i_mode = mode;
-	i_size_write(inode, stat->length);

+	if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
+		v9fs_i_size_write(inode, stat->length);
	/* not real number of blocks, but 512 byte ones ... */
-	inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
+	inode->i_blocks = (stat->length + 512 - 1) >> 9;
	v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
 }

@@ -1416,9 +1418,9 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
 {
	int umode;
	dev_t rdev;
-	loff_t i_size;
	struct p9_wstat *st;
	struct v9fs_session_info *v9ses;
+	unsigned int flags;

	v9ses = v9fs_inode2v9ses(inode);
	st = p9_client_stat(fid);
@@ -1431,16 +1433,13 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
	if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
		goto out;

-	spin_lock(&inode->i_lock);
	/*
	 * We don't want to refresh inode->i_size,
	 * because we may have cached data
	 */
-	i_size = inode->i_size;
-	v9fs_stat2inode(st, inode, inode->i_sb);
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
-		inode->i_size = i_size;
-	spin_unlock(&inode->i_lock);
+	flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+		V9FS_STAT2INODE_KEEP_ISIZE : 0;
+	v9fs_stat2inode(st, inode, inode->i_sb, flags);
 out:
	p9stat_free(st);
	kfree(st);
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 4823e1c46999..a950a927a626 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -143,7 +143,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
	if (retval)
		goto error;

-	v9fs_stat2inode_dotl(st, inode);
+	v9fs_stat2inode_dotl(st, inode, 0);
	v9fs_cache_inode_get_cookie(inode);
	retval = v9fs_get_acl(inode, fid);
	if (retval)
@@ -496,7 +496,7 @@ v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
	if (IS_ERR(st))
		return PTR_ERR(st);

-	v9fs_stat2inode_dotl(st, d_inode(dentry));
+	v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
	generic_fillattr(d_inode(dentry), stat);
	/* Change block size to what the server returned */
	stat->blksize = st->st_blksize;
@@ -607,11 +607,13 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
 * v9fs_stat2inode_dotl - populate an inode structure with stat info
 * @stat: stat structure
 * @inode: inode to populate
+ * @flags: ctrl flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
 *
 */

 void
-v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
+v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
+		      unsigned int flags)
 {
	umode_t mode;
	struct v9fs_inode *v9inode = V9FS_I(inode);
@@ -631,7 +633,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
		mode |= inode->i_mode & ~S_IALLUGO;
		inode->i_mode = mode;

-		i_size_write(inode, stat->st_size);
+		if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
+			v9fs_i_size_write(inode, stat->st_size);
		inode->i_blocks = stat->st_blocks;
	} else {
		if (stat->st_result_mask & P9_STATS_ATIME) {
@@ -661,8 +664,9 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
		}
		if (stat->st_result_mask & P9_STATS_RDEV)
			inode->i_rdev = new_decode_dev(stat->st_rdev);
-		if (stat->st_result_mask & P9_STATS_SIZE)
-			i_size_write(inode, stat->st_size);
+		if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
+		    stat->st_result_mask & P9_STATS_SIZE)
+			v9fs_i_size_write(inode, stat->st_size);
		if (stat->st_result_mask & P9_STATS_BLOCKS)
			inode->i_blocks = stat->st_blocks;
	}
@@ -928,9 +932,9 @@ v9fs_vfs_get_link_dotl(struct dentry *dentry,

 int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
 {
-	loff_t i_size;
	struct p9_stat_dotl *st;
	struct v9fs_session_info *v9ses;
+	unsigned int flags;

	v9ses = v9fs_inode2v9ses(inode);
	st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
@@ -942,16 +946,13 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
	if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
		goto out;

-	spin_lock(&inode->i_lock);
	/*
	 * We don't want to refresh inode->i_size,
	 * because we may have cached data
	 */
-	i_size = inode->i_size;
-	v9fs_stat2inode_dotl(st, inode);
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
-		inode->i_size = i_size;
-	spin_unlock(&inode->i_lock);
+	flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+		V9FS_STAT2INODE_KEEP_ISIZE : 0;
+	v9fs_stat2inode_dotl(st, inode, flags);
 out:
	kfree(st);
	return 0;
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 48ce50484e80..eeab9953af89 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -172,7 +172,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
			goto release_sb;
		}
		d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
-		v9fs_stat2inode_dotl(st, d_inode(root));
+		v9fs_stat2inode_dotl(st, d_inode(root), 0);
		kfree(st);
	} else {
		struct p9_wstat *st = NULL;
@@ -183,7 +183,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
		}

		d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
-		v9fs_stat2inode(st, d_inode(root), sb);
+		v9fs_stat2inode(st, d_inode(root), sb, 0);

		p9stat_free(st);
		kfree(st);
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 3b66c957ea6f..5810463dc6d2 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -9,6 +9,7 @@
 #include <linux/posix_acl_xattr.h>
 #include <linux/posix_acl.h>
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/slab.h>

 #include "ctree.h"
@@ -72,8 +73,16 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
	}

	if (acl) {
+		unsigned int nofs_flag;
+
		size = posix_acl_xattr_size(acl->a_count);
+		/*
+		 * We're holding a transaction handle, so use a NOFS memory
+		 * allocation context to avoid deadlock if reclaim happens.
+		 */
+		nofs_flag = memalloc_nofs_save();
		value = kmalloc(size, GFP_KERNEL);
+		memalloc_nofs_restore(nofs_flag);
		if (!value) {
			ret = -ENOMEM;
			goto out;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d96d1390068a..b4f61a3d560a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -17,6 +17,7 @@
 #include <linux/semaphore.h>
 #include <linux/error-injection.h>
 #include <linux/crc32c.h>
+#include <linux/sched/mm.h>
 #include <asm/unaligned.h>
 #include "ctree.h"
 #include "disk-io.h"
@@ -1236,10 +1237,17 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
+	unsigned int nofs_flag;
	int ret = 0;
	uuid_le uuid = NULL_UUID_LE;

+	/*
+	 * We're holding a transaction handle, so use a NOFS memory allocation
+	 * context to avoid deadlock if reclaim happens.
+	 */
+	nofs_flag = memalloc_nofs_save();
	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
+	memalloc_nofs_restore(nofs_flag);
	if (!root)
		return ERR_PTR(-ENOMEM);

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 79f82f2ec4d5..90b0a6eff535 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3002,11 +3002,11 @@ static int __do_readpage(struct extent_io_tree *tree,
		 */
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
		    prev_em_start && *prev_em_start != (u64)-1 &&
-		    *prev_em_start != em->orig_start)
+		    *prev_em_start != em->start)
			force_bio_submit = true;

		if (prev_em_start)
-			*prev_em_start = em->orig_start;
+			*prev_em_start = em->start;

		free_extent_map(em);
		em = NULL;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 285f64f2de5f..c13f62182513 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6425,10 +6425,10 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
	}

	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
-	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
+	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
-	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
+	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		btrfs_err(fs_info,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 9dcaed031843..80f33582059e 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -235,6 +235,8 @@ struct smb_version_operations {
	int * (*get_credits_field)(struct TCP_Server_Info *, const int);
	unsigned int (*get_credits)(struct mid_q_entry *);
	__u64 (*get_next_mid)(struct TCP_Server_Info *);
+	void (*revert_current_mid)(struct TCP_Server_Info *server,
+				   const unsigned int val);
	/* data offset from read response message */
	unsigned int (*read_data_offset)(char *);
	/*
@@ -756,6 +758,22 @@ get_next_mid(struct TCP_Server_Info *server)
	return cpu_to_le16(mid);
 }

+static inline void
+revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
+{
+	if (server->ops->revert_current_mid)
+		server->ops->revert_current_mid(server, val);
+}
+
+static inline void
+revert_current_mid_from_hdr(struct TCP_Server_Info *server,
+			    const struct smb2_sync_hdr *shdr)
+{
+	unsigned int num = le16_to_cpu(shdr->CreditCharge);
+
+	return revert_current_mid(server, num > 0 ? num : 1);
+}
+
 static inline __u16
 get_mid(const struct smb_hdr *smb)
 {
@@ -1391,6 +1409,7 @@ struct mid_q_entry {
	struct kref refcount;
	struct TCP_Server_Info *server;	/* server corresponding to this mid */
	__u64 mid;		/* multiplex id */
+	__u16 credits;		/* number of credits consumed by this mid */
	__u32 pid;		/* process id */
	__u32 sequence_number;  /* for CIFS signing */
	unsigned long when_alloc;  /* when mid was created */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 23db881daab5..08761a6a039d 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2871,14 +2871,16 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
-	if (written > 0 && CIFS_CACHE_READ(cinode)) {
+	if (CIFS_CACHE_READ(cinode)) {
		/*
-		 * Windows 7 server can delay breaking level2 oplock if a write
-		 * request comes - break it on the client to prevent reading
-		 * an old data.
+		 * We have read level caching and we have just sent a write
+		 * request to the server thus making data in the cache stale.
+		 * Zap the cache and set oplock/lease level to NONE to avoid
+		 * reading stale data from the cache. All subsequent read
+		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
-		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
+		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 7b8b58fb4d3f..58700d2ba8cd 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -517,7 +517,6 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
	__u8 lease_state;
	struct list_head *tmp;
	struct cifsFileInfo *cfile;
-	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_pending_open *open;
	struct cifsInodeInfo *cinode;
	int ack_req = le32_to_cpu(rsp->Flags &
@@ -537,13 +536,25 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
		cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
			 le32_to_cpu(rsp->NewLeaseState));

-		server->ops->set_oplock_level(cinode, lease_state, 0, NULL);
-
		if (ack_req)
			cfile->oplock_break_cancelled = false;
		else
			cfile->oplock_break_cancelled = true;

+		set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
+
+		/*
+		 * Set or clear flags depending on the lease state being READ.
+		 * HANDLE caching flag should be added when the client starts
+		 * to defer closing remote file handles with HANDLE leases.
+		 */
+		if (lease_state & SMB2_LEASE_READ_CACHING_HE)
+			set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+				&cinode->flags);
+		else
+			clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+				  &cinode->flags);
+
		queue_work(cifsoplockd_wq, &cfile->oplock_break);
		kfree(lw);
		return true;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 237d7281ada3..d4d7d61a6fe2 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -204,6 +204,15 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
	return mid;
 }

+static void
+smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
+{
+	spin_lock(&GlobalMid_Lock);
+	if (server->CurrentMid >= val)
+		server->CurrentMid -= val;
+	spin_unlock(&GlobalMid_Lock);
+}
+
 static struct mid_q_entry *
 smb2_find_mid(struct TCP_Server_Info *server, char *buf)
 {
@@ -2300,6 +2309,15 @@ smb2_downgrade_oplock(struct TCP_Server_Info *server,
		server->ops->set_oplock_level(cinode, 0, 0, NULL);
 }

+static void
+smb21_downgrade_oplock(struct TCP_Server_Info *server,
+		       struct cifsInodeInfo *cinode, bool set_level2)
+{
+	server->ops->set_oplock_level(cinode,
+				      set_level2 ? SMB2_LEASE_READ_CACHING_HE :
+				      0, 0, NULL);
+}
+
 static void
 smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
@@ -3247,6 +3265,7 @@ struct smb_version_operations smb20_operations = {
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = cifs_wait_mtu_credits,
	.get_next_mid = smb2_get_next_mid,
+	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
@@ -3341,6 +3360,7 @@ struct smb_version_operations smb21_operations = {
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.get_next_mid = smb2_get_next_mid,
+	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
@@ -3351,7 +3371,7 @@ struct smb_version_operations smb21_operations = {
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
-	.downgrade_oplock = smb2_downgrade_oplock,
+	.downgrade_oplock = smb21_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
@@ -3436,6 +3456,7 @@ struct smb_version_operations smb30_operations = {
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.get_next_mid = smb2_get_next_mid,
+	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
@@ -3447,7 +3468,7 @@ struct smb_version_operations smb30_operations = {
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
-	.downgrade_oplock = smb2_downgrade_oplock,
+	.downgrade_oplock = smb21_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
@@ -3540,6 +3561,7 @@ struct smb_version_operations smb311_operations = {
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.get_next_mid = smb2_get_next_mid,
+	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
@@ -3551,7 +3573,7 @@ struct smb_version_operations smb311_operations = {
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
-	.downgrade_oplock = smb2_downgrade_oplock,
+	.downgrade_oplock = smb21_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 7b351c65ee46..63264db78b89 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -576,6 +576,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
		     struct TCP_Server_Info *server)
 {
	struct mid_q_entry *temp;
+	unsigned int credits = le16_to_cpu(shdr->CreditCharge);

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in smb2_mid_entry_alloc\n");
@@ -586,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = le64_to_cpu(shdr->MessageId);
+	temp->credits = credits > 0 ? credits : 1;
	temp->pid = current->pid;
	temp->command = shdr->Command;	/* Always LE */
	temp->when_alloc = jiffies;
@@ -674,13 +676,18 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
	smb2_seq_num_into_buf(ses->server, shdr);

	rc = smb2_get_mid_entry(ses, shdr, &mid);
-	if (rc)
+	if (rc) {
+		revert_current_mid_from_hdr(ses->server, shdr);
		return ERR_PTR(rc);
+	}
+
	rc = smb2_sign_rqst(rqst, ses->server);
	if (rc) {
+		revert_current_mid_from_hdr(ses->server, shdr);
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
+
	return mid;
 }

@@ -695,11 +702,14 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
	smb2_seq_num_into_buf(server, shdr);

	mid = smb2_mid_entry_alloc(shdr, server);
-	if (mid == NULL)
+	if (mid == NULL) {
+		revert_current_mid_from_hdr(server, shdr);
		return ERR_PTR(-ENOMEM);
+	}

	rc = smb2_sign_rqst(rqst, server);
	if (rc) {
+		revert_current_mid_from_hdr(server, shdr);
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 66348b3d28e6..f2938bd95c40 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -638,6 +638,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	cifs_in_send_dec(server);

	if (rc < 0) {
+		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}
@@ -842,6 +843,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
+			revert_current_mid(ses->server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);
@@ -867,8 +869,10 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

-	if (rc < 0)
+	if (rc < 0) {
+		revert_current_mid(ses->server, num_rqst);
		ses->server->sequence_number -= 2;
+	}

	mutex_unlock(&ses->server->srv_mutex);

diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index c53814539070..553a3f3300ae 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -455,6 +455,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
	s->s_blocksize_bits = 10;
	s->s_magic = DEVPTS_SUPER_MAGIC;
	s->s_op = &devpts_sops;
+	s->s_d_op = &simple_dentry_operations;
	s->s_time_gran = 1;

	error = -ENOMEM;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
|
|
index 0c38e31ec938..364e647d87c0 100644
|
|
--- a/fs/ext2/super.c
|
|
+++ b/fs/ext2/super.c
|
|
@@ -761,7 +761,8 @@ static loff_t ext2_max_size(int bits)
|
|
{
|
|
loff_t res = EXT2_NDIR_BLOCKS;
|
|
int meta_blocks;
|
|
- loff_t upper_limit;
|
|
+ unsigned int upper_limit;
|
|
+ unsigned int ppb = 1 << (bits-2);
|
|
|
|
/* This is calculated to be the largest file size for a
|
|
* dense, file such that the total number of
|
|
@@ -775,24 +776,34 @@ static loff_t ext2_max_size(int bits)
|
|
/* total blocks in file system block size */
|
|
upper_limit >>= (bits - 9);
|
|
|
|
+ /* Compute how many blocks we can address by block tree */
|
|
+ res += 1LL << (bits-2);
|
|
+ res += 1LL << (2*(bits-2));
|
|
+ res += 1LL << (3*(bits-2));
|
|
+ /* Does block tree limit file size? */
|
|
+ if (res < upper_limit)
|
|
+ goto check_lfs;
|
|
|
|
+ res = upper_limit;
|
|
+ /* How many metadata blocks are needed for addressing upper_limit? */
|
|
+ upper_limit -= EXT2_NDIR_BLOCKS;
|
|
/* indirect blocks */
|
|
meta_blocks = 1;
|
|
+ upper_limit -= ppb;
|
|
/* double indirect blocks */
|
|
- meta_blocks += 1 + (1LL << (bits-2));
|
|
- /* tripple indirect blocks */
|
|
- meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
|
|
-
|
|
- upper_limit -= meta_blocks;
|
|
- upper_limit <<= bits;
|
|
-
|
|
- res += 1LL << (bits-2);
|
|
- res += 1LL << (2*(bits-2));
|
|
- res += 1LL << (3*(bits-2));
|
|
+ if (upper_limit < ppb * ppb) {
|
|
+ meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb);
|
|
+ res -= meta_blocks;
|
|
+ goto check_lfs;
|
|
+ }
|
|
+ meta_blocks += 1 + ppb;
|
|
+ upper_limit -= ppb * ppb;
|
|
+ /* tripple indirect blocks for the rest */
|
|
+ meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb) +
|
|
+ DIV_ROUND_UP(upper_limit, ppb*ppb);
|
|
+ res -= meta_blocks;
|
|
+check_lfs:
|
|
res <<= bits;
|
|
- if (res > upper_limit)
|
|
- res = upper_limit;
|
|
-
|
|
if (res > MAX_LFS_FILESIZE)
|
|
res = MAX_LFS_FILESIZE;
|
|
|
|
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 032cf9b92665..2ddf7833350d 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -435,6 +435,9 @@ struct flex_groups {
 /* Flags that are appropriate for non-directories/regular files. */
 #define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)

+/* The only flags that should be swapped */
+#define EXT4_FL_SHOULD_SWAP (EXT4_HUGE_FILE_FL | EXT4_EXTENTS_FL)
+
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
 {
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
|
|
index d37dafa1d133..2e76fb55d94a 100644
|
|
--- a/fs/ext4/ioctl.c
|
|
+++ b/fs/ext4/ioctl.c
|
|
@@ -63,18 +63,20 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
|
|
loff_t isize;
|
|
struct ext4_inode_info *ei1;
|
|
struct ext4_inode_info *ei2;
|
|
+ unsigned long tmp;
|
|
|
|
ei1 = EXT4_I(inode1);
|
|
ei2 = EXT4_I(inode2);
|
|
|
|
swap(inode1->i_version, inode2->i_version);
|
|
- swap(inode1->i_blocks, inode2->i_blocks);
|
|
- swap(inode1->i_bytes, inode2->i_bytes);
|
|
swap(inode1->i_atime, inode2->i_atime);
|
|
swap(inode1->i_mtime, inode2->i_mtime);
|
|
|
|
memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
|
|
- swap(ei1->i_flags, ei2->i_flags);
|
|
+ tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
|
|
+ ei1->i_flags = (ei2->i_flags & EXT4_FL_SHOULD_SWAP) |
|
|
+ (ei1->i_flags & ~EXT4_FL_SHOULD_SWAP);
|
|
+ ei2->i_flags = tmp | (ei2->i_flags & ~EXT4_FL_SHOULD_SWAP);
|
|
swap(ei1->i_disksize, ei2->i_disksize);
|
|
ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
|
|
ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
|
|
@@ -115,28 +117,41 @@ static long swap_inode_boot_loader(struct super_block *sb,
|
|
int err;
|
|
struct inode *inode_bl;
|
|
struct ext4_inode_info *ei_bl;
|
|
-
|
|
- if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
|
|
- IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
|
|
- ext4_has_inline_data(inode))
|
|
- return -EINVAL;
|
|
-
|
|
- if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
|
|
- !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
|
|
- return -EPERM;
|
|
+ qsize_t size, size_bl, diff;
|
|
+ blkcnt_t blocks;
|
|
+ unsigned short bytes;
|
|
|
|
inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
|
|
if (IS_ERR(inode_bl))
|
|
return PTR_ERR(inode_bl);
|
|
ei_bl = EXT4_I(inode_bl);
|
|
|
|
- filemap_flush(inode->i_mapping);
|
|
- filemap_flush(inode_bl->i_mapping);
|
|
-
|
|
/* Protect orig inodes against a truncate and make sure,
|
|
* that only 1 swap_inode_boot_loader is running. */
|
|
lock_two_nondirectories(inode, inode_bl);
|
|
|
|
+ if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
|
|
+ IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
|
|
+ ext4_has_inline_data(inode)) {
|
|
+ err = -EINVAL;
|
|
+ goto journal_err_out;
|
|
+ }
|
|
+
|
|
+ if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
|
|
+ !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN)) {
|
|
+ err = -EPERM;
|
|
+ goto journal_err_out;
|
|
+ }
|
|
+
|
|
+ down_write(&EXT4_I(inode)->i_mmap_sem);
|
|
+ err = filemap_write_and_wait(inode->i_mapping);
|
|
+ if (err)
|
|
+ goto err_out;
|
|
+
|
|
+ err = filemap_write_and_wait(inode_bl->i_mapping);
|
|
+ if (err)
|
|
+ goto err_out;
|
|
+
|
|
/* Wait for all existing dio workers */
|
|
inode_dio_wait(inode);
|
|
inode_dio_wait(inode_bl);
|
|
@@ -147,7 +162,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
|
|
handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
|
|
if (IS_ERR(handle)) {
|
|
err = -EINVAL;
|
|
- goto journal_err_out;
|
|
+ goto err_out;
|
|
}
|
|
|
|
/* Protect extent tree against block allocations via delalloc */
|
|
@@ -170,6 +185,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
|
|
memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
|
|
}
|
|
|
|
+ err = dquot_initialize(inode);
|
|
+ if (err)
|
|
+ goto err_out1;
|
|
+
|
|
+ size = (qsize_t)(inode->i_blocks) * (1 << 9) + inode->i_bytes;
|
|
+ size_bl = (qsize_t)(inode_bl->i_blocks) * (1 << 9) + inode_bl->i_bytes;
|
|
+ diff = size - size_bl;
|
|
swap_inode_data(inode, inode_bl);
|
|
|
|
inode->i_ctime = inode_bl->i_ctime = current_time(inode);
|
|
@@ -183,27 +205,51 @@ static long swap_inode_boot_loader(struct super_block *sb,
|
|
|
|
err = ext4_mark_inode_dirty(handle, inode);
|
|
if (err < 0) {
|
|
+ /* No need to update quota information. */
|
|
ext4_warning(inode->i_sb,
|
|
"couldn't mark inode #%lu dirty (err %d)",
|
|
inode->i_ino, err);
|
|
/* Revert all changes: */
|
|
swap_inode_data(inode, inode_bl);
|
|
ext4_mark_inode_dirty(handle, inode);
|
|
- } else {
|
|
- err = ext4_mark_inode_dirty(handle, inode_bl);
|
|
- if (err < 0) {
|
|
- ext4_warning(inode_bl->i_sb,
|
|
- "couldn't mark inode #%lu dirty (err %d)",
|
|
- inode_bl->i_ino, err);
|
|
- /* Revert all changes: */
|
|
- swap_inode_data(inode, inode_bl);
|
|
- ext4_mark_inode_dirty(handle, inode);
|
|
- ext4_mark_inode_dirty(handle, inode_bl);
|
|
- }
|
|
+ goto err_out1;
|
|
+ }
|
|
+
|
|
+ blocks = inode_bl->i_blocks;
|
|
+ bytes = inode_bl->i_bytes;
|
|
+ inode_bl->i_blocks = inode->i_blocks;
|
|
+ inode_bl->i_bytes = inode->i_bytes;
|
|
+ err = ext4_mark_inode_dirty(handle, inode_bl);
|
|
+ if (err < 0) {
|
|
+ /* No need to update quota information. */
|
|
+ ext4_warning(inode_bl->i_sb,
|
|
+ "couldn't mark inode #%lu dirty (err %d)",
|
|
+ inode_bl->i_ino, err);
|
|
+ goto revert;
|
|
+ }
|
|
+
|
|
+ /* Bootloader inode should not be counted into quota information. */
|
|
+ if (diff > 0)
|
|
+ dquot_free_space(inode, diff);
|
|
+ else
|
|
+ err = dquot_alloc_space(inode, -1 * diff);
|
|
+
|
|
+ if (err < 0) {
|
|
+revert:
|
|
+ /* Revert all changes: */
|
|
+ inode_bl->i_blocks = blocks;
|
|
+ inode_bl->i_bytes = bytes;
|
|
+ swap_inode_data(inode, inode_bl);
|
|
+ ext4_mark_inode_dirty(handle, inode);
|
|
+ ext4_mark_inode_dirty(handle, inode_bl);
|
|
}
|
|
+
|
|
+err_out1:
|
|
ext4_journal_stop(handle);
|
|
ext4_double_up_write_data_sem(inode, inode_bl);
|
|
|
|
+err_out:
|
|
+ up_write(&EXT4_I(inode)->i_mmap_sem);
|
|
journal_err_out:
|
|
unlock_two_nondirectories(inode, inode_bl);
|
|
iput(inode_bl);
|
|
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 48421de803b7..3d9b18505c0c 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1960,7 +1960,8 @@ retry:
				le16_to_cpu(es->s_reserved_gdt_blocks);
		n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
		n_blocks_count = (ext4_fsblk_t)n_group *
-			EXT4_BLOCKS_PER_GROUP(sb);
+			EXT4_BLOCKS_PER_GROUP(sb) +
+			le32_to_cpu(es->s_first_data_block);
		n_group--; /* set to last group number */
	}

diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
|
|
index c0b66a7a795b..914e725c82c4 100644
|
|
--- a/fs/jbd2/transaction.c
|
|
+++ b/fs/jbd2/transaction.c
|
|
@@ -1219,11 +1219,12 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
|
|
struct journal_head *jh;
|
|
char *committed_data = NULL;
|
|
|
|
- JBUFFER_TRACE(jh, "entry");
|
|
if (jbd2_write_access_granted(handle, bh, true))
|
|
return 0;
|
|
|
|
jh = jbd2_journal_add_journal_head(bh);
|
|
+ JBUFFER_TRACE(jh, "entry");
|
|
+
|
|
/*
|
|
* Do this first --- it can drop the journal lock, so we want to
|
|
* make sure that obtaining the committed_data is done
|
|
@@ -1334,15 +1335,17 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
|
|
|
|
if (is_handle_aborted(handle))
|
|
return -EROFS;
|
|
- if (!buffer_jbd(bh)) {
|
|
- ret = -EUCLEAN;
|
|
- goto out;
|
|
- }
|
|
+ if (!buffer_jbd(bh))
|
|
+ return -EUCLEAN;
|
|
+
|
|
/*
|
|
* We don't grab jh reference here since the buffer must be part
|
|
* of the running transaction.
|
|
*/
|
|
jh = bh2jh(bh);
|
|
+ jbd_debug(5, "journal_head %p\n", jh);
|
|
+ JBUFFER_TRACE(jh, "entry");
|
|
+
|
|
/*
|
|
* This and the following assertions are unreliable since we may see jh
|
|
* in inconsistent state unless we grab bh_state lock. But this is
|
|
@@ -1376,9 +1379,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
|
|
}
|
|
|
|
journal = transaction->t_journal;
|
|
- jbd_debug(5, "journal_head %p\n", jh);
|
|
- JBUFFER_TRACE(jh, "entry");
|
|
-
|
|
jbd_lock_bh_state(bh);
|
|
|
|
if (jh->b_modified == 0) {
|
|
@@ -1576,14 +1576,21 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
|
|
/* However, if the buffer is still owned by a prior
|
|
* (committing) transaction, we can't drop it yet... */
|
|
JBUFFER_TRACE(jh, "belongs to older transaction");
|
|
- /* ... but we CAN drop it from the new transaction if we
|
|
- * have also modified it since the original commit. */
|
|
+ /* ... but we CAN drop it from the new transaction through
|
|
+ * marking the buffer as freed and set j_next_transaction to
|
|
+ * the new transaction, so that not only the commit code
|
|
+ * knows it should clear dirty bits when it is done with the
|
|
+ * buffer, but also the buffer can be checkpointed only
|
|
+ * after the new transaction commits. */
|
|
|
|
- if (jh->b_next_transaction) {
|
|
- J_ASSERT(jh->b_next_transaction == transaction);
|
|
+ set_buffer_freed(bh);
|
|
+
|
|
+ if (!jh->b_next_transaction) {
|
|
spin_lock(&journal->j_list_lock);
|
|
- jh->b_next_transaction = NULL;
|
|
+ jh->b_next_transaction = transaction;
|
|
spin_unlock(&journal->j_list_lock);
|
|
+ } else {
|
|
+ J_ASSERT(jh->b_next_transaction == transaction);
|
|
|
|
/*
|
|
* only drop a reference if this transaction modified
|
|
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
|
|
index ff2716f9322e..0b22c39dad47 100644
|
|
--- a/fs/kernfs/mount.c
|
|
+++ b/fs/kernfs/mount.c
|
|
@@ -196,8 +196,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
|
|
return dentry;
|
|
|
|
knparent = find_next_ancestor(kn, NULL);
|
|
- if (WARN_ON(!knparent))
|
|
+ if (WARN_ON(!knparent)) {
|
|
+ dput(dentry);
|
|
return ERR_PTR(-EINVAL);
|
|
+ }
|
|
|
|
do {
|
|
struct dentry *dtmp;
|
|
@@ -206,8 +208,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
|
|
if (kn == knparent)
|
|
return dentry;
|
|
kntmp = find_next_ancestor(kn, knparent);
|
|
- if (WARN_ON(!kntmp))
|
|
+ if (WARN_ON(!kntmp)) {
|
|
+ dput(dentry);
|
|
return ERR_PTR(-EINVAL);
|
|
+ }
|
|
dtmp = lookup_one_len_unlocked(kntmp->name, dentry,
|
|
strlen(kntmp->name));
|
|
dput(dentry);
|
|
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
|
|
index 3f23b6840547..bf34ddaa2ad7 100644
|
|
--- a/fs/nfs/nfs4idmap.c
|
|
+++ b/fs/nfs/nfs4idmap.c
|
|
@@ -44,6 +44,7 @@
|
|
#include <linux/keyctl.h>
|
|
#include <linux/key-type.h>
|
|
#include <keys/user-type.h>
|
|
+#include <keys/request_key_auth-type.h>
|
|
#include <linux/module.h>
|
|
|
|
#include "internal.h"
|
|
@@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy;
|
|
struct idmap_legacy_upcalldata {
|
|
struct rpc_pipe_msg pipe_msg;
|
|
struct idmap_msg idmap_msg;
|
|
- struct key_construction *key_cons;
|
|
+ struct key *authkey;
|
|
struct idmap *idmap;
|
|
};
|
|
|
|
@@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = {
|
|
{ Opt_find_err, NULL }
|
|
};
|
|
|
|
-static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
|
|
+static int nfs_idmap_legacy_upcall(struct key *, void *);
|
|
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
|
|
size_t);
|
|
static void idmap_release_pipe(struct inode *);
|
|
@@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
|
|
static void
|
|
nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
|
|
{
|
|
- struct key_construction *cons = idmap->idmap_upcall_data->key_cons;
|
|
+ struct key *authkey = idmap->idmap_upcall_data->authkey;
|
|
|
|
kfree(idmap->idmap_upcall_data);
|
|
idmap->idmap_upcall_data = NULL;
|
|
- complete_request_key(cons, ret);
|
|
+ complete_request_key(authkey, ret);
|
|
+ key_put(authkey);
|
|
}
|
|
|
|
static void
|
|
@@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
|
|
nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
|
|
}
|
|
|
|
-static int nfs_idmap_legacy_upcall(struct key_construction *cons,
|
|
- const char *op,
|
|
- void *aux)
|
|
+static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
|
|
{
|
|
struct idmap_legacy_upcalldata *data;
|
|
+ struct request_key_auth *rka = get_request_key_auth(authkey);
|
|
struct rpc_pipe_msg *msg;
|
|
struct idmap_msg *im;
|
|
struct idmap *idmap = (struct idmap *)aux;
|
|
- struct key *key = cons->key;
|
|
+ struct key *key = rka->target_key;
|
|
int ret = -ENOKEY;
|
|
|
|
if (!aux)
|
|
@@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
|
|
msg = &data->pipe_msg;
|
|
im = &data->idmap_msg;
|
|
data->idmap = idmap;
|
|
- data->key_cons = cons;
|
|
+ data->authkey = key_get(authkey);
|
|
|
|
ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
|
|
if (ret < 0)
|
|
@@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
|
|
out2:
|
|
kfree(data);
|
|
out1:
|
|
- complete_request_key(cons, ret);
|
|
+ complete_request_key(authkey, ret);
|
|
return ret;
|
|
}
|
|
|
|
@@ -651,9 +652,10 @@ out:
|
|
static ssize_t
|
|
idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
|
|
{
|
|
+ struct request_key_auth *rka;
|
|
struct rpc_inode *rpci = RPC_I(file_inode(filp));
|
|
struct idmap *idmap = (struct idmap *)rpci->private;
|
|
- struct key_construction *cons;
|
|
+ struct key *authkey;
|
|
struct idmap_msg im;
|
|
size_t namelen_in;
|
|
int ret = -ENOKEY;
|
|
@@ -665,7 +667,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
|
|
if (idmap->idmap_upcall_data == NULL)
|
|
goto out_noupcall;
|
|
|
|
- cons = idmap->idmap_upcall_data->key_cons;
|
|
+ authkey = idmap->idmap_upcall_data->authkey;
|
|
+ rka = get_request_key_auth(authkey);
|
|
|
|
if (mlen != sizeof(im)) {
|
|
ret = -ENOSPC;
|
|
@@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
|
|
|
|
ret = nfs_idmap_read_and_verify_message(&im,
|
|
&idmap->idmap_upcall_data->idmap_msg,
|
|
- cons->key, cons->authkey);
|
|
+ rka->target_key, authkey);
|
|
if (ret >= 0) {
|
|
- key_set_timeout(cons->key, nfs_idmap_cache_timeout);
|
|
+ key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
|
|
ret = mlen;
|
|
}
|
|
|
|
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
|
|
index 8220a168282e..e7abcf7629b3 100644
|
|
--- a/fs/nfs/nfs4proc.c
|
|
+++ b/fs/nfs/nfs4proc.c
|
|
@@ -947,6 +947,13 @@ nfs4_sequence_process_interrupted(struct nfs_client *client,
|
|
|
|
#endif /* !CONFIG_NFS_V4_1 */
|
|
|
|
+static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
|
|
+{
|
|
+ res->sr_timestamp = jiffies;
|
|
+ res->sr_status_flags = 0;
|
|
+ res->sr_status = 1;
|
|
+}
|
|
+
|
|
static
|
|
void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
|
|
struct nfs4_sequence_res *res,
|
|
@@ -958,10 +965,6 @@ void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
|
|
args->sa_slot = slot;
|
|
|
|
res->sr_slot = slot;
|
|
- res->sr_timestamp = jiffies;
|
|
- res->sr_status_flags = 0;
|
|
- res->sr_status = 1;
|
|
-
|
|
}
|
|
|
|
int nfs4_setup_sequence(struct nfs_client *client,
|
|
@@ -1007,6 +1010,7 @@ int nfs4_setup_sequence(struct nfs_client *client,
|
|
|
|
trace_nfs4_setup_sequence(session, args);
|
|
out_start:
|
|
+ nfs41_sequence_res_init(res);
|
|
rpc_call_start(task);
|
|
return 0;
|
|
|
|
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
|
|
index 3dbd15b47c27..0ec6bce3dd69 100644
|
|
--- a/fs/nfs/pagelist.c
|
|
+++ b/fs/nfs/pagelist.c
|
|
@@ -989,6 +989,17 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
|
|
}
|
|
}
|
|
|
|
+static void
|
|
+nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
|
|
+ struct nfs_page *req)
|
|
+{
|
|
+ LIST_HEAD(head);
|
|
+
|
|
+ nfs_list_remove_request(req);
|
|
+ nfs_list_add_request(req, &head);
|
|
+ desc->pg_completion_ops->error_cleanup(&head);
|
|
+}
|
|
+
|
|
/**
|
|
* nfs_pageio_add_request - Attempt to coalesce a request into a page list.
|
|
* @desc: destination io descriptor
|
|
@@ -1026,10 +1037,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
|
|
nfs_page_group_unlock(req);
|
|
desc->pg_moreio = 1;
|
|
nfs_pageio_doio(desc);
|
|
- if (desc->pg_error < 0)
|
|
- return 0;
|
|
- if (mirror->pg_recoalesce)
|
|
- return 0;
|
|
+ if (desc->pg_error < 0 || mirror->pg_recoalesce)
|
|
+ goto out_cleanup_subreq;
|
|
/* retry add_request for this subreq */
|
|
nfs_page_group_lock(req);
|
|
continue;
|
|
@@ -1062,6 +1071,10 @@ err_ptr:
|
|
desc->pg_error = PTR_ERR(subreq);
|
|
nfs_page_group_unlock(req);
|
|
return 0;
|
|
+out_cleanup_subreq:
|
|
+ if (req != subreq)
|
|
+ nfs_pageio_cleanup_request(desc, subreq);
|
|
+ return 0;
|
|
}
|
|
|
|
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
|
|
@@ -1080,7 +1093,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
|
|
struct nfs_page *req;
|
|
|
|
req = list_first_entry(&head, struct nfs_page, wb_list);
|
|
- nfs_list_remove_request(req);
|
|
if (__nfs_pageio_add_request(desc, req))
|
|
continue;
|
|
if (desc->pg_error < 0) {
|
|
@@ -1169,11 +1181,14 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
|
|
if (nfs_pgio_has_mirroring(desc))
|
|
desc->pg_mirror_idx = midx;
|
|
if (!nfs_pageio_add_request_mirror(desc, dupreq))
|
|
- goto out_failed;
|
|
+ goto out_cleanup_subreq;
|
|
}
|
|
|
|
return 1;
|
|
|
|
+out_cleanup_subreq:
|
|
+ if (req != dupreq)
|
|
+ nfs_pageio_cleanup_request(desc, dupreq);
|
|
out_failed:
|
|
/* remember fatal errors */
|
|
if (nfs_error_is_fatal(desc->pg_error))
|
|
@@ -1199,7 +1214,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
|
|
desc->pg_mirror_idx = mirror_idx;
|
|
for (;;) {
|
|
nfs_pageio_doio(desc);
|
|
- if (!mirror->pg_recoalesce)
|
|
+ if (desc->pg_error < 0 || !mirror->pg_recoalesce)
|
|
break;
|
|
if (!nfs_do_recoalesce(desc))
|
|
break;
|
|
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
|
|
index d790faff8e47..51d0b7913c04 100644
|
|
--- a/fs/nfs/write.c
|
|
+++ b/fs/nfs/write.c
|
|
@@ -238,9 +238,9 @@ out:
|
|
}
|
|
|
|
/* A writeback failed: mark the page as bad, and invalidate the page cache */
|
|
-static void nfs_set_pageerror(struct page *page)
|
|
+static void nfs_set_pageerror(struct address_space *mapping)
|
|
{
|
|
- nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
|
|
+ nfs_zap_mapping(mapping->host, mapping);
|
|
}
|
|
|
|
/*
|
|
@@ -994,7 +994,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
|
|
nfs_list_remove_request(req);
|
|
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
|
|
(hdr->good_bytes < bytes)) {
|
|
- nfs_set_pageerror(req->wb_page);
|
|
+ nfs_set_pageerror(page_file_mapping(req->wb_page));
|
|
nfs_context_set_write_error(req->wb_context, hdr->error);
|
|
goto remove_req;
|
|
}
|
|
@@ -1330,7 +1330,8 @@ int nfs_updatepage(struct file *file, struct page *page,
|
|
unsigned int offset, unsigned int count)
|
|
{
|
|
struct nfs_open_context *ctx = nfs_file_open_context(file);
|
|
- struct inode *inode = page_file_mapping(page)->host;
|
|
+ struct address_space *mapping = page_file_mapping(page);
|
|
+ struct inode *inode = mapping->host;
|
|
int status = 0;
|
|
|
|
nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
|
|
@@ -1348,7 +1349,7 @@ int nfs_updatepage(struct file *file, struct page *page,
|
|
|
|
status = nfs_writepage_setup(ctx, page, offset, count);
|
|
if (status < 0)
|
|
- nfs_set_pageerror(page);
|
|
+ nfs_set_pageerror(mapping);
|
|
else
|
|
__set_page_dirty_nobuffers(page);
|
|
out:
|
|
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
|
|
index 9eb8086ea841..c9cf46e0c040 100644
|
|
--- a/fs/nfsd/nfs3proc.c
|
|
+++ b/fs/nfsd/nfs3proc.c
|
|
@@ -463,8 +463,19 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
|
|
&resp->common, nfs3svc_encode_entry);
|
|
memcpy(resp->verf, argp->verf, 8);
|
|
resp->count = resp->buffer - argp->buffer;
|
|
- if (resp->offset)
|
|
- xdr_encode_hyper(resp->offset, argp->cookie);
|
|
+ if (resp->offset) {
|
|
+ loff_t offset = argp->cookie;
|
|
+
|
|
+ if (unlikely(resp->offset1)) {
|
|
+ /* we ended up with offset on a page boundary */
|
|
+ *resp->offset = htonl(offset >> 32);
|
|
+ *resp->offset1 = htonl(offset & 0xffffffff);
|
|
+ resp->offset1 = NULL;
|
|
+ } else {
|
|
+ xdr_encode_hyper(resp->offset, offset);
|
|
+ }
|
|
+ resp->offset = NULL;
|
|
+ }
|
|
|
|
RETURN_STATUS(nfserr);
|
|
}
|
|
@@ -533,6 +544,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
|
|
} else {
|
|
xdr_encode_hyper(resp->offset, offset);
|
|
}
|
|
+ resp->offset = NULL;
|
|
}
|
|
|
|
RETURN_STATUS(nfserr);
|
|
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 9b973f4f7d01..83919116d5cb 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -921,6 +921,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
		} else {
			xdr_encode_hyper(cd->offset, offset64);
		}
+		cd->offset = NULL;
	}

 /*
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
|
|
index 9c6d1d57b598..bec75600e692 100644
|
|
--- a/fs/nfsd/nfs4state.c
|
|
+++ b/fs/nfsd/nfs4state.c
|
|
@@ -1514,16 +1514,16 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
|
|
{
|
|
u32 slotsize = slot_bytes(ca);
|
|
u32 num = ca->maxreqs;
|
|
- int avail;
|
|
+ unsigned long avail, total_avail;
|
|
|
|
spin_lock(&nfsd_drc_lock);
|
|
- avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
|
|
- nfsd_drc_max_mem - nfsd_drc_mem_used);
|
|
+ total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
|
|
+ avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
|
|
/*
|
|
* Never use more than a third of the remaining memory,
|
|
* unless it's the only way to give this client a slot:
|
|
*/
|
|
- avail = clamp_t(int, avail, slotsize, avail/3);
|
|
+ avail = clamp_t(int, avail, slotsize, total_avail/3);
|
|
num = min_t(int, num, avail / slotsize);
|
|
nfsd_drc_mem_used += num * slotsize;
|
|
spin_unlock(&nfsd_drc_lock);
|
|
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 39b835d7c445..cb69660d0779 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1126,7 +1126,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
	case 'Y':
	case 'y':
	case '1':
-		if (nn->nfsd_serv)
+		if (!nn->nfsd_serv)
			return -EBUSY;
		nfsd4_end_grace(nn);
		break;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
|
|
index 1cc797a08a5b..75eeee08d848 100644
|
|
--- a/fs/overlayfs/copy_up.c
|
|
+++ b/fs/overlayfs/copy_up.c
|
|
@@ -501,6 +501,24 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
|
|
{
|
|
int err;
|
|
|
|
+ /*
|
|
+ * Copy up data first and then xattrs. Writing data after
|
|
+ * xattrs will remove security.capability xattr automatically.
|
|
+ */
|
|
+ if (S_ISREG(c->stat.mode) && !c->metacopy) {
|
|
+ struct path upperpath, datapath;
|
|
+
|
|
+ ovl_path_upper(c->dentry, &upperpath);
|
|
+ if (WARN_ON(upperpath.dentry != NULL))
|
|
+ return -EIO;
|
|
+ upperpath.dentry = temp;
|
|
+
|
|
+ ovl_path_lowerdata(c->dentry, &datapath);
|
|
+ err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
|
|
+ if (err)
|
|
+ return err;
|
|
+ }
|
|
+
|
|
err = ovl_copy_xattr(c->lowerpath.dentry, temp);
|
|
if (err)
|
|
return err;
|
|
@@ -518,19 +536,6 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
|
|
return err;
|
|
}
|
|
|
|
- if (S_ISREG(c->stat.mode) && !c->metacopy) {
|
|
- struct path upperpath, datapath;
|
|
-
|
|
- ovl_path_upper(c->dentry, &upperpath);
|
|
- BUG_ON(upperpath.dentry != NULL);
|
|
- upperpath.dentry = temp;
|
|
-
|
|
- ovl_path_lowerdata(c->dentry, &datapath);
|
|
- err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
|
|
- if (err)
|
|
- return err;
|
|
- }
|
|
-
|
|
if (c->metacopy) {
|
|
err = ovl_check_setxattr(c->dentry, temp, OVL_XATTR_METACOPY,
|
|
NULL, 0, -EOPNOTSUPP);
|
|
@@ -706,6 +711,8 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
|
|
{
|
|
struct path upperpath, datapath;
|
|
int err;
|
|
+ char *capability = NULL;
|
|
+ ssize_t uninitialized_var(cap_size);
|
|
|
|
ovl_path_upper(c->dentry, &upperpath);
|
|
if (WARN_ON(upperpath.dentry == NULL))
|
|
@@ -715,15 +722,37 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
|
|
if (WARN_ON(datapath.dentry == NULL))
|
|
return -EIO;
|
|
|
|
+ if (c->stat.size) {
|
|
+ err = cap_size = ovl_getxattr(upperpath.dentry, XATTR_NAME_CAPS,
|
|
+ &capability, 0);
|
|
+ if (err < 0 && err != -ENODATA)
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
|
|
if (err)
|
|
- return err;
|
|
+ goto out_free;
|
|
+
|
|
+ /*
|
|
+ * Writing to upper file will clear security.capability xattr. We
|
|
+ * don't want that to happen for normal copy-up operation.
|
|
+ */
|
|
+ if (capability) {
|
|
+ err = ovl_do_setxattr(upperpath.dentry, XATTR_NAME_CAPS,
|
|
+ capability, cap_size, 0);
|
|
+ if (err)
|
|
+ goto out_free;
|
|
+ }
|
|
+
|
|
|
|
err = vfs_removexattr(upperpath.dentry, OVL_XATTR_METACOPY);
|
|
if (err)
|
|
- return err;
|
|
+ goto out_free;
|
|
|
|
ovl_set_upperdata(d_inode(c->dentry));
|
|
+out_free:
|
|
+ kfree(capability);
|
|
+out:
|
|
return err;
|
|
}
|
|
|
|
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
|
|
index a3c0d9584312..d9c16ceebfe7 100644
|
|
--- a/fs/overlayfs/overlayfs.h
|
|
+++ b/fs/overlayfs/overlayfs.h
|
|
@@ -277,6 +277,8 @@ int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
|
|
int ovl_check_metacopy_xattr(struct dentry *dentry);
bool ovl_is_metacopy_dentry(struct dentry *dentry);
char *ovl_get_redirect_xattr(struct dentry *dentry, int padding);
+ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
+ size_t padding);
static inline bool ovl_is_impuredir(struct dentry *dentry)
{
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index ace4fe4c39a9..c9a2e3c6d537 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -867,28 +867,49 @@ bool ovl_is_metacopy_dentry(struct dentry *dentry)
return (oe->numlower > 1);
}
-char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
+ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
+ size_t padding)
{
- int res;
- char *s, *next, *buf = NULL;
+ ssize_t res;
+ char *buf = NULL;
- res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, NULL, 0);
+ res = vfs_getxattr(dentry, name, NULL, 0);
if (res < 0) {
if (res == -ENODATA || res == -EOPNOTSUPP)
- return NULL;
+ return -ENODATA;
goto fail;
}
- buf = kzalloc(res + padding + 1, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
+ if (res != 0) {
+ buf = kzalloc(res + padding, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if (res == 0)
- goto invalid;
+ res = vfs_getxattr(dentry, name, buf, res);
+ if (res < 0)
+ goto fail;
+ }
+ *value = buf;
+
+ return res;
+
+fail:
+ pr_warn_ratelimited("overlayfs: failed to get xattr %s: err=%zi)\n",
+ name, res);
+ kfree(buf);
+ return res;
+}
- res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, buf, res);
+char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
+{
+ int res;
+ char *s, *next, *buf = NULL;
+
+ res = ovl_getxattr(dentry, OVL_XATTR_REDIRECT, &buf, padding + 1);
+ if (res == -ENODATA)
+ return NULL;
if (res < 0)
- goto fail;
+ return ERR_PTR(res);
if (res == 0)
goto invalid;
@@ -904,15 +925,9 @@ char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
}
return buf;
-
-err_free:
- kfree(buf);
- return ERR_PTR(res);
-fail:
- pr_warn_ratelimited("overlayfs: failed to get redirect (%i)\n", res);
- goto err_free;
invalid:
pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
res = -EINVAL;
- goto err_free;
+ kfree(buf);
+ return ERR_PTR(res);
}
diff --git a/fs/pipe.c b/fs/pipe.c
index bdc5d3c0977d..c51750ed4011 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -234,6 +234,14 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
.get = generic_pipe_buf_get,
};
+static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
+ .can_merge = 0,
+ .confirm = generic_pipe_buf_confirm,
+ .release = anon_pipe_buf_release,
+ .steal = anon_pipe_buf_steal,
+ .get = generic_pipe_buf_get,
+};
+
static const struct pipe_buf_operations packet_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
@@ -242,6 +250,12 @@ static const struct pipe_buf_operations packet_pipe_buf_ops = {
.get = generic_pipe_buf_get,
};
+void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
+{
+ if (buf->ops == &anon_pipe_buf_ops)
+ buf->ops = &anon_pipe_buf_nomerge_ops;
+}
+
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
diff --git a/fs/splice.c b/fs/splice.c
index b3daa971f597..29e92b506394 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1593,6 +1593,8 @@ retry:
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+ pipe_buf_mark_unmergeable(obuf);
+
obuf->len = len;
opipe->nrbufs++;
ibuf->offset += obuf->len;
@@ -1667,6 +1669,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+ pipe_buf_mark_unmergeable(obuf);
+
if (obuf->len > len)
obuf->len = len;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
|
|
index d7701d466b60..dd38c97933f1 100644
|
|
--- a/include/asm-generic/vmlinux.lds.h
|
|
+++ b/include/asm-generic/vmlinux.lds.h
|
|
@@ -727,7 +727,7 @@
|
|
KEEP(*(.orc_unwind_ip)) \
|
|
__stop_orc_unwind_ip = .; \
|
|
} \
|
|
- . = ALIGN(6); \
|
|
+ . = ALIGN(2); \
|
|
.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
|
|
__start_orc_unwind = .; \
|
|
KEEP(*(.orc_unwind)) \
|
|
diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h
|
|
new file mode 100644
|
|
index 000000000000..a726dd3f1dc6
|
|
--- /dev/null
|
|
+++ b/include/keys/request_key_auth-type.h
|
|
@@ -0,0 +1,36 @@
|
|
+/* request_key authorisation token key type
|
|
+ *
|
|
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
|
|
+ * Written by David Howells (dhowells@redhat.com)
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU General Public Licence
|
|
+ * as published by the Free Software Foundation; either version
|
|
+ * 2 of the Licence, or (at your option) any later version.
|
|
+ */
|
|
+
|
|
+#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H
|
|
+#define _KEYS_REQUEST_KEY_AUTH_TYPE_H
|
|
+
|
|
+#include <linux/key.h>
|
|
+
|
|
+/*
|
|
+ * Authorisation record for request_key().
|
|
+ */
|
|
+struct request_key_auth {
|
|
+ struct key *target_key;
|
|
+ struct key *dest_keyring;
|
|
+ const struct cred *cred;
|
|
+ void *callout_info;
|
|
+ size_t callout_len;
|
|
+ pid_t pid;
|
|
+ char op[8];
|
|
+} __randomize_layout;
|
|
+
|
|
+static inline struct request_key_auth *get_request_key_auth(const struct key *key)
|
|
+{
|
|
+ return key->payload.data[0];
|
|
+}
|
|
+
|
|
+
|
|
+#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */
|
|
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
|
|
index 4f31f96bbfab..90ac450745f1 100644
|
|
--- a/include/kvm/arm_vgic.h
|
|
+++ b/include/kvm/arm_vgic.h
|
|
@@ -256,7 +256,7 @@ struct vgic_dist {
|
|
u64 propbaser;
|
|
|
|
/* Protects the lpi_list and the count value below. */
|
|
- spinlock_t lpi_list_lock;
|
|
+ raw_spinlock_t lpi_list_lock;
|
|
struct list_head lpi_list_head;
|
|
int lpi_list_count;
|
|
|
|
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
|
|
index 6fb0808e87c8..bef2e36c01b4 100644
|
|
--- a/include/linux/device-mapper.h
|
|
+++ b/include/linux/device-mapper.h
|
|
@@ -601,7 +601,7 @@ do { \
|
|
*/
|
|
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
|
|
|
|
-static inline sector_t to_sector(unsigned long n)
|
|
+static inline sector_t to_sector(unsigned long long n)
|
|
{
|
|
return (n >> SECTOR_SHIFT);
|
|
}
|
|
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
|
|
index 0fbbcdf0c178..da0af631ded5 100644
|
|
--- a/include/linux/hardirq.h
|
|
+++ b/include/linux/hardirq.h
|
|
@@ -60,8 +60,14 @@ extern void irq_enter(void);
|
|
*/
|
|
extern void irq_exit(void);
|
|
|
|
+#ifndef arch_nmi_enter
|
|
+#define arch_nmi_enter() do { } while (0)
|
|
+#define arch_nmi_exit() do { } while (0)
|
|
+#endif
|
|
+
|
|
#define nmi_enter() \
|
|
do { \
|
|
+ arch_nmi_enter(); \
|
|
printk_nmi_enter(); \
|
|
lockdep_off(); \
|
|
ftrace_nmi_enter(); \
|
|
@@ -80,6 +86,7 @@ extern void irq_exit(void);
|
|
ftrace_nmi_exit(); \
|
|
lockdep_on(); \
|
|
printk_nmi_exit(); \
|
|
+ arch_nmi_exit(); \
|
|
} while (0)
|
|
|
|
#endif /* LINUX_HARDIRQ_H */
|
|
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
|
|
index 05d8fb5a06c4..d3c5ae8ad498 100644
|
|
--- a/include/linux/key-type.h
|
|
+++ b/include/linux/key-type.h
|
|
@@ -17,15 +17,6 @@
|
|
|
|
#ifdef CONFIG_KEYS
|
|
|
|
-/*
|
|
- * key under-construction record
|
|
- * - passed to the request_key actor if supplied
|
|
- */
|
|
-struct key_construction {
|
|
- struct key *key; /* key being constructed */
|
|
- struct key *authkey;/* authorisation for key being constructed */
|
|
-};
|
|
-
|
|
/*
|
|
* Pre-parsed payload, used by key add, update and instantiate.
|
|
*
|
|
@@ -47,8 +38,7 @@ struct key_preparsed_payload {
|
|
time64_t expiry; /* Expiry time of key */
|
|
} __randomize_layout;
|
|
|
|
-typedef int (*request_key_actor_t)(struct key_construction *key,
|
|
- const char *op, void *aux);
|
|
+typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);
|
|
|
|
/*
|
|
* Preparsed matching criterion.
|
|
@@ -170,20 +160,20 @@ extern int key_instantiate_and_link(struct key *key,
|
|
const void *data,
|
|
size_t datalen,
|
|
struct key *keyring,
|
|
- struct key *instkey);
|
|
+ struct key *authkey);
|
|
extern int key_reject_and_link(struct key *key,
|
|
unsigned timeout,
|
|
unsigned error,
|
|
struct key *keyring,
|
|
- struct key *instkey);
|
|
-extern void complete_request_key(struct key_construction *cons, int error);
|
|
+ struct key *authkey);
|
|
+extern void complete_request_key(struct key *authkey, int error);
|
|
|
|
static inline int key_negate_and_link(struct key *key,
|
|
unsigned timeout,
|
|
struct key *keyring,
|
|
- struct key *instkey)
|
|
+ struct key *authkey)
|
|
{
|
|
- return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey);
|
|
+ return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey);
|
|
}
|
|
|
|
extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
|
|
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
|
|
index a03d5e264e5e..23c242a7ac52 100644
|
|
--- a/include/linux/kvm_host.h
|
|
+++ b/include/linux/kvm_host.h
|
|
@@ -633,7 +633,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
|
|
struct kvm_memory_slot *dont);
|
|
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
|
|
unsigned long npages);
|
|
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
|
|
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
|
|
int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|
struct kvm_memory_slot *memslot,
|
|
const struct kvm_userspace_memory_region *mem,
|
|
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
|
|
index 5a3bb3b7c9ad..3ecd7ea212ae 100644
|
|
--- a/include/linux/pipe_fs_i.h
|
|
+++ b/include/linux/pipe_fs_i.h
|
|
@@ -182,6 +182,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
|
|
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
|
|
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
|
|
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
|
|
+void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
|
|
|
|
extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
|
|
|
|
diff --git a/include/linux/property.h b/include/linux/property.h
|
|
index ac8a1ebc4c1b..1a12364050d8 100644
|
|
--- a/include/linux/property.h
|
|
+++ b/include/linux/property.h
|
|
@@ -258,7 +258,7 @@ struct property_entry {
|
|
#define PROPERTY_ENTRY_STRING(_name_, _val_) \
|
|
(struct property_entry) { \
|
|
.name = _name_, \
|
|
- .length = sizeof(_val_), \
|
|
+ .length = sizeof(const char *), \
|
|
.type = DEV_PROP_STRING, \
|
|
{ .value = { .str = _val_ } }, \
|
|
}
|
|
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
|
|
index a404d475acee..820903ceac4f 100644
|
|
--- a/include/linux/skbuff.h
|
|
+++ b/include/linux/skbuff.h
|
|
@@ -4086,6 +4086,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
|
|
return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
|
|
}
|
|
|
|
+/* Note: Should be called only if skb_is_gso(skb) is true */
|
|
+static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
|
|
+{
|
|
+ return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
|
|
+}
|
|
+
|
|
static inline void skb_gso_reset(struct sk_buff *skb)
|
|
{
|
|
skb_shinfo(skb)->gso_size = 0;
|
|
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
|
|
index b669fe6dbc3b..98f31c7ea23d 100644
|
|
--- a/include/net/phonet/pep.h
|
|
+++ b/include/net/phonet/pep.h
|
|
@@ -63,10 +63,11 @@ struct pnpipehdr {
|
|
u8 state_after_reset; /* reset request */
|
|
u8 error_code; /* any response */
|
|
u8 pep_type; /* status indication */
|
|
- u8 data[1];
|
|
+ u8 data0; /* anything else */
|
|
};
|
|
+ u8 data[];
|
|
};
|
|
-#define other_pep_type data[1]
|
|
+#define other_pep_type data[0]
|
|
|
|
static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
|
|
{
|
|
diff --git a/init/main.c b/init/main.c
|
|
index 18f8f0140fa0..e083fac08aed 100644
|
|
--- a/init/main.c
|
|
+++ b/init/main.c
|
|
@@ -689,7 +689,6 @@ asmlinkage __visible void __init start_kernel(void)
|
|
initrd_start = 0;
|
|
}
|
|
#endif
|
|
- page_ext_init();
|
|
kmemleak_init();
|
|
debug_objects_mem_init();
|
|
setup_per_cpu_pageset();
|
|
@@ -1140,6 +1139,8 @@ static noinline void __init kernel_init_freeable(void)
|
|
sched_init_smp();
|
|
|
|
page_alloc_init_late();
|
|
+ /* Initialize page ext after all struct pages are initialized. */
|
|
+ page_ext_init();
|
|
|
|
do_basic_setup();
|
|
|
|
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
|
|
index 9058317ba9de..4f3138e6ecb2 100644
|
|
--- a/kernel/bpf/lpm_trie.c
|
|
+++ b/kernel/bpf/lpm_trie.c
|
|
@@ -432,6 +432,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
|
|
}
|
|
|
|
if (!node || node->prefixlen != key->prefixlen ||
|
|
+ node->prefixlen != matchlen ||
|
|
(node->flags & LPM_TREE_NODE_FLAG_IM)) {
|
|
ret = -ENOENT;
|
|
goto out;
|
|
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
|
|
index 6a32933cae4f..7cb7a7f98a37 100644
|
|
--- a/kernel/bpf/stackmap.c
|
|
+++ b/kernel/bpf/stackmap.c
|
|
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
|
|
struct stack_map_irq_work *work;
|
|
|
|
work = container_of(entry, struct stack_map_irq_work, irq_work);
|
|
- up_read(work->sem);
|
|
+ up_read_non_owner(work->sem);
|
|
work->sem = NULL;
|
|
}
|
|
|
|
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
|
|
} else {
|
|
work->sem = ¤t->mm->mmap_sem;
|
|
irq_work_queue(&work->irq_work);
|
|
+ /*
|
|
+ * The irq_work will release the mmap_sem with
|
|
+ * up_read_non_owner(). The rwsem_release() is called
|
|
+ * here to release the lock from lockdep's perspective.
|
|
+ */
|
|
+ rwsem_release(¤t->mm->mmap_sem.dep_map, 1, _RET_IP_);
|
|
}
|
|
}
|
|
|
|
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
|
|
index e578c3999970..e710ac7fbbbf 100644
|
|
--- a/kernel/cgroup/cgroup.c
|
|
+++ b/kernel/cgroup/cgroup.c
|
|
@@ -1998,7 +1998,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
|
|
struct cgroup_namespace *ns)
|
|
{
|
|
struct dentry *dentry;
|
|
- bool new_sb;
|
|
+ bool new_sb = false;
|
|
|
|
dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb);
|
|
|
|
@@ -2008,6 +2008,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
|
|
*/
|
|
if (!IS_ERR(dentry) && ns != &init_cgroup_ns) {
|
|
struct dentry *nsdentry;
|
|
+ struct super_block *sb = dentry->d_sb;
|
|
struct cgroup *cgrp;
|
|
|
|
mutex_lock(&cgroup_mutex);
|
|
@@ -2018,12 +2019,14 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
|
|
spin_unlock_irq(&css_set_lock);
|
|
mutex_unlock(&cgroup_mutex);
|
|
|
|
- nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
|
|
+ nsdentry = kernfs_node_dentry(cgrp->kn, sb);
|
|
dput(dentry);
|
|
+ if (IS_ERR(nsdentry))
|
|
+ deactivate_locked_super(sb);
|
|
dentry = nsdentry;
|
|
}
|
|
|
|
- if (IS_ERR(dentry) || !new_sb)
|
|
+ if (!new_sb)
|
|
cgroup_put(&root->cgrp);
|
|
|
|
return dentry;
|
|
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
|
|
index 15301ed19da6..f7e89c989df7 100644
|
|
--- a/kernel/rcu/tree.c
|
|
+++ b/kernel/rcu/tree.c
|
|
@@ -1689,15 +1689,23 @@ static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
|
|
}
|
|
|
|
/*
|
|
- * Awaken the grace-period kthread for the specified flavor of RCU.
|
|
- * Don't do a self-awaken, and don't bother awakening when there is
|
|
- * nothing for the grace-period kthread to do (as in several CPUs
|
|
- * raced to awaken, and we lost), and finally don't try to awaken
|
|
- * a kthread that has not yet been created.
|
|
+ * Awaken the grace-period kthread. Don't do a self-awaken (unless in
|
|
+ * an interrupt or softirq handler), and don't bother awakening when there
|
|
+ * is nothing for the grace-period kthread to do (as in several CPUs raced
|
|
+ * to awaken, and we lost), and finally don't try to awaken a kthread that
|
|
+ * has not yet been created. If all those checks are passed, track some
|
|
+ * debug information and awaken.
|
|
+ *
|
|
+ * So why do the self-wakeup when in an interrupt or softirq handler
|
|
+ * in the grace-period kthread's context? Because the kthread might have
|
|
+ * been interrupted just as it was going to sleep, and just after the final
|
|
+ * pre-sleep check of the awaken condition. In this case, a wakeup really
|
|
+ * is required, and is therefore supplied.
|
|
*/
|
|
static void rcu_gp_kthread_wake(struct rcu_state *rsp)
|
|
{
|
|
- if (current == rsp->gp_kthread ||
|
|
+ if ((current == rsp->gp_kthread &&
|
|
+ !in_interrupt() && !in_serving_softirq()) ||
|
|
!READ_ONCE(rsp->gp_flags) ||
|
|
!rsp->gp_kthread)
|
|
return;
|
|
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
|
|
index 32dea29d05a0..3b86acd5de4e 100644
|
|
--- a/kernel/sysctl.c
|
|
+++ b/kernel/sysctl.c
|
|
@@ -2552,7 +2552,16 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
|
|
{
|
|
struct do_proc_dointvec_minmax_conv_param *param = data;
|
|
if (write) {
|
|
- int val = *negp ? -*lvalp : *lvalp;
|
|
+ int val;
|
|
+ if (*negp) {
|
|
+ if (*lvalp > (unsigned long) INT_MAX + 1)
|
|
+ return -EINVAL;
|
|
+ val = -*lvalp;
|
|
+ } else {
|
|
+ if (*lvalp > (unsigned long) INT_MAX)
|
|
+ return -EINVAL;
|
|
+ val = *lvalp;
|
|
+ }
|
|
if ((param->min && *param->min > val) ||
|
|
(param->max && *param->max < val))
|
|
return -EINVAL;
|
|
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
|
|
index 17bd0c0dfa98..1f96b292df31 100644
|
|
--- a/kernel/trace/trace.c
|
|
+++ b/kernel/trace/trace.c
|
|
@@ -5606,7 +5606,6 @@ out:
|
|
return ret;
|
|
|
|
fail:
|
|
- kfree(iter->trace);
|
|
kfree(iter);
|
|
__trace_array_put(tr);
|
|
mutex_unlock(&trace_types_lock);
|
|
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
|
|
index 69a3fe926e8c..e6945b55c688 100644
|
|
--- a/kernel/trace/trace_event_perf.c
|
|
+++ b/kernel/trace/trace_event_perf.c
|
|
@@ -298,15 +298,13 @@ int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
|
|
|
|
if (!p_event->attr.uprobe_path)
|
|
return -EINVAL;
|
|
- path = kzalloc(PATH_MAX, GFP_KERNEL);
|
|
- if (!path)
|
|
- return -ENOMEM;
|
|
- ret = strncpy_from_user(
|
|
- path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
|
|
- if (ret == PATH_MAX)
|
|
- return -E2BIG;
|
|
- if (ret < 0)
|
|
- goto out;
|
|
+
|
|
+ path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
|
|
+ PATH_MAX);
|
|
+ if (IS_ERR(path)) {
|
|
+ ret = PTR_ERR(path);
|
|
+ return (ret == -EINVAL) ? -E2BIG : ret;
|
|
+ }
|
|
if (path[0] == '\0') {
|
|
ret = -EINVAL;
|
|
goto out;
|
|
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
|
|
index eb908ef2ecec..11853e90b649 100644
|
|
--- a/kernel/trace/trace_events_hist.c
|
|
+++ b/kernel/trace/trace_events_hist.c
|
|
@@ -4621,9 +4621,10 @@ static inline void add_to_key(char *compound_key, void *key,
|
|
/* ensure NULL-termination */
|
|
if (size > key_field->size - 1)
|
|
size = key_field->size - 1;
|
|
- }
|
|
|
|
- memcpy(compound_key + key_field->offset, key, size);
|
|
+ strncpy(compound_key + key_field->offset, (char *)key, size);
|
|
+ } else
|
|
+ memcpy(compound_key + key_field->offset, key, size);
|
|
}
|
|
|
|
static void
|
|
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
|
|
index c6659cb37033..59875eb278ea 100644
|
|
--- a/lib/assoc_array.c
|
|
+++ b/lib/assoc_array.c
|
|
@@ -768,9 +768,11 @@ all_leaves_cluster_together:
|
|
new_s0->index_key[i] =
|
|
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
|
|
|
|
- blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
|
|
- pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
|
|
- new_s0->index_key[keylen - 1] &= ~blank;
|
|
+ if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
|
|
+ blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
|
|
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
|
|
+ new_s0->index_key[keylen - 1] &= ~blank;
|
|
+ }
|
|
|
|
/* This now reduces to a node splitting exercise for which we'll need
|
|
* to regenerate the disparity table.
|
|
diff --git a/mm/gup.c b/mm/gup.c
|
|
index 1abc8b4afff6..0a5374e6e82d 100644
|
|
--- a/mm/gup.c
|
|
+++ b/mm/gup.c
|
|
@@ -1649,7 +1649,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
|
|
if (!pmd_present(pmd))
|
|
return 0;
|
|
|
|
- if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
|
|
+ if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
|
|
+ pmd_devmap(pmd))) {
|
|
/*
|
|
* NUMA hinting faults need to be handled in the GUP
|
|
* slowpath for accounting purposes and so that they
|
|
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
|
|
index d9b8a2490633..6edc6db5ec1b 100644
|
|
--- a/mm/memory-failure.c
|
|
+++ b/mm/memory-failure.c
|
|
@@ -1823,19 +1823,17 @@ static int soft_offline_in_use_page(struct page *page, int flags)
|
|
struct page *hpage = compound_head(page);
|
|
|
|
if (!PageHuge(page) && PageTransHuge(hpage)) {
|
|
- lock_page(hpage);
|
|
- if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
|
|
- unlock_page(hpage);
|
|
- if (!PageAnon(hpage))
|
|
+ lock_page(page);
|
|
+ if (!PageAnon(page) || unlikely(split_huge_page(page))) {
|
|
+ unlock_page(page);
|
|
+ if (!PageAnon(page))
|
|
pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
|
|
else
|
|
pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
|
|
- put_hwpoison_page(hpage);
|
|
+ put_hwpoison_page(page);
|
|
return -EBUSY;
|
|
}
|
|
- unlock_page(hpage);
|
|
- get_hwpoison_page(page);
|
|
- put_hwpoison_page(hpage);
|
|
+ unlock_page(page);
|
|
}
|
|
|
|
/*
|
|
diff --git a/mm/memory.c b/mm/memory.c
|
|
index 281172540a9c..5b3f71bcd1ae 100644
|
|
--- a/mm/memory.c
|
|
+++ b/mm/memory.c
|
|
@@ -3762,10 +3762,13 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
|
|
* but allow concurrent faults).
|
|
* The mmap_sem may have been released depending on flags and our
|
|
* return value. See filemap_fault() and __lock_page_or_retry().
|
|
+ * If mmap_sem is released, vma may become invalid (for example
|
|
+ * by other thread calling munmap()).
|
|
*/
|
|
static vm_fault_t do_fault(struct vm_fault *vmf)
|
|
{
|
|
struct vm_area_struct *vma = vmf->vma;
|
|
+ struct mm_struct *vm_mm = vma->vm_mm;
|
|
vm_fault_t ret;
|
|
|
|
/*
|
|
@@ -3806,7 +3809,7 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
|
|
|
|
/* preallocated pagetable is unused: free it */
|
|
if (vmf->prealloc_pte) {
|
|
- pte_free(vma->vm_mm, vmf->prealloc_pte);
|
|
+ pte_free(vm_mm, vmf->prealloc_pte);
|
|
vmf->prealloc_pte = NULL;
|
|
}
|
|
return ret;
|
|
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
|
|
index a9de1dbb9a6c..ef99971c13dd 100644
|
|
--- a/mm/page_alloc.c
|
|
+++ b/mm/page_alloc.c
|
|
@@ -4532,11 +4532,11 @@ refill:
|
|
/* Even if we own the page, we do not use atomic_set().
|
|
* This would break get_page_unless_zero() users.
|
|
*/
|
|
- page_ref_add(page, size - 1);
|
|
+ page_ref_add(page, size);
|
|
|
|
/* reset page count bias and offset to start of new frag */
|
|
nc->pfmemalloc = page_is_pfmemalloc(page);
|
|
- nc->pagecnt_bias = size;
|
|
+ nc->pagecnt_bias = size + 1;
|
|
nc->offset = size;
|
|
}
|
|
|
|
@@ -4552,10 +4552,10 @@ refill:
|
|
size = nc->size;
|
|
#endif
|
|
/* OK, page count is 0, we can safely set it */
|
|
- set_page_count(page, size);
|
|
+ set_page_count(page, size + 1);
|
|
|
|
/* reset page count bias and offset to start of new frag */
|
|
- nc->pagecnt_bias = size;
|
|
+ nc->pagecnt_bias = size + 1;
|
|
offset = size - fragsz;
|
|
}
|
|
|
|
diff --git a/mm/page_ext.c b/mm/page_ext.c
|
|
index a9826da84ccb..4961f13b6ec1 100644
|
|
--- a/mm/page_ext.c
|
|
+++ b/mm/page_ext.c
|
|
@@ -398,10 +398,8 @@ void __init page_ext_init(void)
|
|
* We know some arch can have a nodes layout such as
|
|
* -------------pfn-------------->
|
|
* N0 | N1 | N2 | N0 | N1 | N2|....
|
|
- *
|
|
- * Take into account DEFERRED_STRUCT_PAGE_INIT.
|
|
*/
|
|
- if (early_pfn_to_nid(pfn) != nid)
|
|
+ if (pfn_to_nid(pfn) != nid)
|
|
continue;
|
|
if (init_section_page_ext(pfn, nid))
|
|
goto oom;
|
|
diff --git a/mm/shmem.c b/mm/shmem.c
|
|
index b6cf0e8e685b..3c8742655756 100644
|
|
--- a/mm/shmem.c
|
|
+++ b/mm/shmem.c
|
|
@@ -2895,16 +2895,20 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
|
|
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
|
|
{
|
|
struct inode *inode = d_inode(old_dentry);
|
|
- int ret;
|
|
+ int ret = 0;
|
|
|
|
/*
|
|
* No ordinary (disk based) filesystem counts links as inodes;
|
|
* but each new link needs a new dentry, pinning lowmem, and
|
|
* tmpfs dentries cannot be pruned until they are unlinked.
|
|
+ * But if an O_TMPFILE file is linked into the tmpfs, the
|
|
+ * first link must skip that, to get the accounting right.
|
|
*/
|
|
- ret = shmem_reserve_inode(inode->i_sb);
|
|
- if (ret)
|
|
- goto out;
|
|
+ if (inode->i_nlink) {
|
|
+ ret = shmem_reserve_inode(inode->i_sb);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+ }
|
|
|
|
dir->i_size += BOGO_DIRENT_SIZE;
|
|
inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
|
|
diff --git a/mm/swap.c b/mm/swap.c
|
|
index 26fc9b5f1b6c..a3fc028e338e 100644
|
|
--- a/mm/swap.c
|
|
+++ b/mm/swap.c
|
|
@@ -321,11 +321,6 @@ static inline void activate_page_drain(int cpu)
|
|
{
|
|
}
|
|
|
|
-static bool need_activate_page_drain(int cpu)
|
|
-{
|
|
- return false;
|
|
-}
|
|
-
|
|
void activate_page(struct page *page)
|
|
{
|
|
struct zone *zone = page_zone(page);
|
|
@@ -654,13 +649,15 @@ void lru_add_drain(void)
|
|
put_cpu();
|
|
}
|
|
|
|
+#ifdef CONFIG_SMP
|
|
+
|
|
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
|
|
+
|
|
static void lru_add_drain_per_cpu(struct work_struct *dummy)
|
|
{
|
|
lru_add_drain();
|
|
}
|
|
|
|
-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
|
|
-
|
|
/*
|
|
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
|
|
* kworkers being shut down before our page_alloc_cpu_dead callback is
|
|
@@ -703,6 +700,12 @@ void lru_add_drain_all(void)
|
|
|
|
mutex_unlock(&lock);
|
|
}
|
|
+#else
|
|
+void lru_add_drain_all(void)
|
|
+{
|
|
+ lru_add_drain();
|
|
+}
|
|
+#endif
|
|
|
|
/**
|
|
* release_pages - batched put_page()
|
|
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
|
|
index a728fc492557..91a789a46b12 100644
|
|
--- a/mm/vmalloc.c
|
|
+++ b/mm/vmalloc.c
|
|
@@ -2244,7 +2244,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
|
|
if (!(area->flags & VM_USERMAP))
|
|
return -EINVAL;
|
|
|
|
- if (kaddr + size > area->addr + area->size)
|
|
+ if (kaddr + size > area->addr + get_vm_area_size(area))
|
|
return -EINVAL;
|
|
|
|
do {
|
|
diff --git a/net/9p/client.c b/net/9p/client.c
|
|
index 75b7bf7c7f07..23ec6187dc07 100644
|
|
--- a/net/9p/client.c
|
|
+++ b/net/9p/client.c
|
|
@@ -1073,7 +1073,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
|
|
p9_debug(P9_DEBUG_ERROR,
|
|
"Please specify a msize of at least 4k\n");
|
|
err = -EINVAL;
|
|
- goto free_client;
|
|
+ goto close_trans;
|
|
}
|
|
|
|
err = p9_client_version(clnt);
|
|
diff --git a/net/core/filter.c b/net/core/filter.c
|
|
index bed9061102f4..eb81e9db4093 100644
|
|
--- a/net/core/filter.c
|
|
+++ b/net/core/filter.c
|
|
@@ -2614,8 +2614,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
|
|
u32 off = skb_mac_header_len(skb);
|
|
int ret;
|
|
|
|
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
|
|
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
|
|
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
|
|
return -ENOTSUPP;
|
|
|
|
ret = skb_cow(skb, len_diff);
|
|
@@ -2656,8 +2655,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
|
|
u32 off = skb_mac_header_len(skb);
|
|
int ret;
|
|
|
|
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
|
|
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
|
|
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
|
|
return -ENOTSUPP;
|
|
|
|
ret = skb_unclone(skb, GFP_ATOMIC);
|
|
@@ -2782,8 +2780,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
|
|
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
|
|
int ret;
|
|
|
|
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
|
|
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
|
|
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
|
|
return -ENOTSUPP;
|
|
|
|
ret = skb_cow(skb, len_diff);
|
|
@@ -2812,8 +2809,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
|
|
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
|
|
int ret;
|
|
|
|
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
|
|
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
|
|
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
|
|
return -ENOTSUPP;
|
|
|
|
ret = skb_unclone(skb, GFP_ATOMIC);
|
|
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
|
|
index 97689012b357..12a43a5369a5 100644
|
|
--- a/net/ipv4/esp4.c
|
|
+++ b/net/ipv4/esp4.c
|
|
@@ -325,7 +325,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
|
|
skb->len += tailen;
|
|
skb->data_len += tailen;
|
|
skb->truesize += tailen;
|
|
- if (sk)
|
|
+ if (sk && sk_fullsock(sk))
|
|
refcount_add(tailen, &sk->sk_wmem_alloc);
|
|
|
|
goto out;
|
|
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
|
|
index 3cd237b42f44..2fa196325988 100644
|
|
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
|
|
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
|
|
@@ -846,9 +846,9 @@ static int clusterip_net_init(struct net *net)
|
|
|
|
static void clusterip_net_exit(struct net *net)
|
|
{
|
|
+#ifdef CONFIG_PROC_FS
|
|
struct clusterip_net *cn = clusterip_pernet(net);
|
|
|
|
-#ifdef CONFIG_PROC_FS
|
|
mutex_lock(&cn->mutex);
|
|
proc_remove(cn->procdir);
|
|
cn->procdir = NULL;
|
|
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
|
|
index 88a7579c23bd..a7d996148eed 100644
|
|
--- a/net/ipv6/esp6.c
|
|
+++ b/net/ipv6/esp6.c
|
|
@@ -293,7 +293,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
|
|
skb->len += tailen;
|
|
skb->data_len += tailen;
|
|
skb->truesize += tailen;
|
|
- if (sk)
|
|
+ if (sk && sk_fullsock(sk))
|
|
refcount_add(tailen, &sk->sk_wmem_alloc);
|
|
|
|
goto out;
|
|
diff --git a/net/key/af_key.c b/net/key/af_key.c
|
|
index 9d61266526e7..7da629d59717 100644
|
|
--- a/net/key/af_key.c
|
|
+++ b/net/key/af_key.c
|
|
@@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock)
|
|
return 0;
|
|
}
|
|
|
|
-static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
|
|
- gfp_t allocation, struct sock *sk)
|
|
+static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
|
|
+ struct sock *sk)
|
|
{
|
|
int err = -ENOBUFS;
|
|
|
|
- sock_hold(sk);
|
|
- if (*skb2 == NULL) {
|
|
- if (refcount_read(&skb->users) != 1) {
|
|
- *skb2 = skb_clone(skb, allocation);
|
|
- } else {
|
|
- *skb2 = skb;
|
|
- refcount_inc(&skb->users);
|
|
- }
|
|
- }
|
|
- if (*skb2 != NULL) {
|
|
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
|
|
- skb_set_owner_r(*skb2, sk);
|
|
- skb_queue_tail(&sk->sk_receive_queue, *skb2);
|
|
- sk->sk_data_ready(sk);
|
|
- *skb2 = NULL;
|
|
- err = 0;
|
|
- }
|
|
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
|
|
+ return err;
|
|
+
|
|
+ skb = skb_clone(skb, allocation);
|
|
+
|
|
+ if (skb) {
|
|
+ skb_set_owner_r(skb, sk);
|
|
+ skb_queue_tail(&sk->sk_receive_queue, skb);
|
|
+ sk->sk_data_ready(sk);
|
|
+ err = 0;
|
|
}
|
|
- sock_put(sk);
|
|
return err;
|
|
}
|
|
|
|
@@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
|
|
{
|
|
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
|
|
struct sock *sk;
|
|
- struct sk_buff *skb2 = NULL;
|
|
int err = -ESRCH;
|
|
|
|
/* XXX Do we need something like netlink_overrun? I think
|
|
@@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
|
|
* socket.
|
|
*/
|
|
if (pfk->promisc)
|
|
- pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
|
|
+ pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
|
|
|
|
/* the exact target will be processed later */
|
|
if (sk == one_sk)
|
|
@@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
|
|
continue;
|
|
}
|
|
|
|
- err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
|
|
+ err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
|
|
|
|
/* Error is cleared after successful sending to at least one
|
|
* registered KM */
|
|
@@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
|
|
rcu_read_unlock();
|
|
|
|
if (one_sk != NULL)
|
|
- err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
|
|
+ err = pfkey_broadcast_one(skb, allocation, one_sk);
|
|
|
|
- kfree_skb(skb2);
|
|
kfree_skb(skb);
|
|
return err;
|
|
}
|
|
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
|
|
index 69e831bc317b..54821fb1a960 100644
|
|
--- a/net/mac80211/agg-tx.c
|
|
+++ b/net/mac80211/agg-tx.c
|
|
@@ -8,7 +8,7 @@
|
|
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
|
|
* Copyright 2007-2010, Intel Corporation
|
|
* Copyright(c) 2015-2017 Intel Deutschland GmbH
|
|
- * Copyright (C) 2018 Intel Corporation
|
|
+ * Copyright (C) 2018 - 2019 Intel Corporation
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
@@ -366,6 +366,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
|
|
|
|
set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
|
|
|
|
+ ieee80211_agg_stop_txq(sta, tid);
|
|
+
|
|
spin_unlock_bh(&sta->lock);
|
|
|
|
ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
|
|
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
|
|
index 716cd6442d86..3deaa01ebee4 100644
|
|
--- a/net/mac80211/util.c
|
|
+++ b/net/mac80211/util.c
|
|
@@ -5,7 +5,7 @@
|
|
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
|
|
* Copyright 2013-2014 Intel Mobile Communications GmbH
|
|
* Copyright (C) 2015-2017 Intel Deutschland GmbH
|
|
- * Copyright (C) 2018 Intel Corporation
|
|
+ * Copyright (C) 2018-2019 Intel Corporation
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
@@ -2020,6 +2020,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
|
|
case NL80211_IFTYPE_AP_VLAN:
|
|
case NL80211_IFTYPE_MONITOR:
|
|
break;
|
|
+ case NL80211_IFTYPE_ADHOC:
|
|
+ if (sdata->vif.bss_conf.ibss_joined)
|
|
+ WARN_ON(drv_join_ibss(local, sdata));
|
|
+ /* fall through */
|
|
default:
|
|
ieee80211_reconfig_stations(sdata);
|
|
/* fall through */
|
|
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
|
|
index cad48d07c818..8401cefd9f65 100644
|
|
--- a/net/netfilter/ipvs/Kconfig
|
|
+++ b/net/netfilter/ipvs/Kconfig
|
|
@@ -29,6 +29,7 @@ config IP_VS_IPV6
|
|
bool "IPv6 support for IPVS"
|
|
depends on IPV6 = y || IP_VS = IPV6
|
|
select IP6_NF_IPTABLES
|
|
+ select NF_DEFRAG_IPV6
|
|
---help---
|
|
Add IPv6 support to IPVS.
|
|
|
|
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
|
|
index 7ca926a03b81..3f963ea22277 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_core.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_core.c
|
|
@@ -1536,14 +1536,12 @@ ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
|
|
/* sorry, all this trouble for a no-hit :) */
|
|
IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
|
|
"ip_vs_in: packet continues traversal as normal");
|
|
- if (iph->fragoffs) {
|
|
- /* Fragment that couldn't be mapped to a conn entry
|
|
- * is missing module nf_defrag_ipv6
|
|
- */
|
|
- IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
|
|
+
|
|
+ /* Fragment couldn't be mapped to a conn entry */
|
|
+ if (iph->fragoffs)
|
|
IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
|
|
"unhandled fragment");
|
|
- }
|
|
+
|
|
*verdict = NF_ACCEPT;
|
|
return 0;
|
|
}
|
|
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
|
|
index 55a77314340a..8fd8d06454d6 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_ctl.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
|
|
@@ -43,6 +43,7 @@
|
|
#ifdef CONFIG_IP_VS_IPV6
|
|
#include <net/ipv6.h>
|
|
#include <net/ip6_route.h>
|
|
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
|
|
#endif
|
|
#include <net/route.h>
|
|
#include <net/sock.h>
|
|
@@ -895,6 +896,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
|
|
{
|
|
struct ip_vs_dest *dest;
|
|
unsigned int atype, i;
|
|
+ int ret = 0;
|
|
|
|
EnterFunction(2);
|
|
|
|
@@ -905,6 +907,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
|
|
atype & IPV6_ADDR_LINKLOCAL) &&
|
|
!__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6))
|
|
return -EINVAL;
|
|
+
|
|
+ ret = nf_defrag_ipv6_enable(svc->ipvs->net);
|
|
+ if (ret)
|
|
+ return ret;
|
|
} else
|
|
#endif
|
|
{
|
|
@@ -1228,6 +1234,10 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
|
|
ret = -EINVAL;
|
|
goto out_err;
|
|
}
|
|
+
|
|
+ ret = nf_defrag_ipv6_enable(ipvs->net);
|
|
+ if (ret)
|
|
+ goto out_err;
|
|
}
|
|
#endif
|
|
|
|
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
|
|
index aecadd471e1d..13e1ac333fa4 100644
|
|
--- a/net/netfilter/x_tables.c
|
|
+++ b/net/netfilter/x_tables.c
|
|
@@ -1899,7 +1899,7 @@ static int __init xt_init(void)
|
|
seqcount_init(&per_cpu(xt_recseq, i));
|
|
}
|
|
|
|
- xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
|
|
+ xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
|
|
if (!xt)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
|
|
index 9fc76b19cd3c..db3473540303 100644
|
|
--- a/net/phonet/pep.c
|
|
+++ b/net/phonet/pep.c
|
|
@@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code,
|
|
ph->utid = 0;
|
|
ph->message_id = id;
|
|
ph->pipe_handle = pn->pipe_handle;
|
|
- ph->data[0] = code;
|
|
+ ph->error_code = code;
|
|
return pn_skb_send(sk, skb, NULL);
|
|
}
|
|
|
|
@@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
|
|
ph->utid = id; /* whatever */
|
|
ph->message_id = id;
|
|
ph->pipe_handle = pn->pipe_handle;
|
|
- ph->data[0] = code;
|
|
+ ph->error_code = code;
|
|
return pn_skb_send(sk, skb, NULL);
|
|
}
|
|
|
|
@@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
|
|
struct pnpipehdr *ph;
|
|
struct sockaddr_pn dst;
|
|
u8 data[4] = {
|
|
- oph->data[0], /* PEP type */
|
|
+ oph->pep_type, /* PEP type */
|
|
code, /* error code, at an unusual offset */
|
|
PAD, PAD,
|
|
};
|
|
@@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
|
|
ph->utid = oph->utid;
|
|
ph->message_id = PNS_PEP_CTRL_RESP;
|
|
ph->pipe_handle = oph->pipe_handle;
|
|
- ph->data[0] = oph->data[1]; /* CTRL id */
|
|
+ ph->data0 = oph->data[0]; /* CTRL id */
|
|
|
|
pn_skb_get_src_sockaddr(oskb, &dst);
|
|
return pn_skb_send(sk, skb, &dst);
|
|
@@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
|
|
return -EINVAL;
|
|
|
|
hdr = pnp_hdr(skb);
|
|
- if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
|
|
+ if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
|
|
net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
|
|
- (unsigned int)hdr->data[0]);
|
|
+ (unsigned int)hdr->pep_type);
|
|
return -EOPNOTSUPP;
|
|
}
|
|
|
|
- switch (hdr->data[1]) {
|
|
+ switch (hdr->data[0]) {
|
|
case PN_PEP_IND_FLOW_CONTROL:
|
|
switch (pn->tx_fc) {
|
|
case PN_LEGACY_FLOW_CONTROL:
|
|
- switch (hdr->data[4]) {
|
|
+ switch (hdr->data[3]) {
|
|
case PEP_IND_BUSY:
|
|
atomic_set(&pn->tx_credits, 0);
|
|
break;
|
|
@@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
|
|
}
|
|
break;
|
|
case PN_ONE_CREDIT_FLOW_CONTROL:
|
|
- if (hdr->data[4] == PEP_IND_READY)
|
|
+ if (hdr->data[3] == PEP_IND_READY)
|
|
atomic_set(&pn->tx_credits, wake = 1);
|
|
break;
|
|
}
|
|
@@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
|
|
case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
|
|
if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
|
|
break;
|
|
- atomic_add(wake = hdr->data[4], &pn->tx_credits);
|
|
+ atomic_add(wake = hdr->data[3], &pn->tx_credits);
|
|
break;
|
|
|
|
default:
|
|
net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
|
|
- (unsigned int)hdr->data[1]);
|
|
+ (unsigned int)hdr->data[0]);
|
|
return -EOPNOTSUPP;
|
|
}
|
|
if (wake)
|
|
@@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
|
|
{
|
|
struct pep_sock *pn = pep_sk(sk);
|
|
struct pnpipehdr *hdr = pnp_hdr(skb);
|
|
- u8 n_sb = hdr->data[0];
|
|
+ u8 n_sb = hdr->data0;
|
|
|
|
pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
|
|
__skb_pull(skb, sizeof(*hdr));
|
|
@@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
|
|
return -ECONNREFUSED;
|
|
|
|
/* Parse sub-blocks */
|
|
- n_sb = hdr->data[4];
|
|
+ n_sb = hdr->data[3];
|
|
while (n_sb > 0) {
|
|
u8 type, buf[6], len = sizeof(buf);
|
|
const u8 *data = pep_get_sb(skb, &type, &len, buf);
|
|
@@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk)
|
|
ph->utid = 0;
|
|
ph->message_id = PNS_PIPE_REMOVE_REQ;
|
|
ph->pipe_handle = pn->pipe_handle;
|
|
- ph->data[0] = PAD;
|
|
+ ph->data0 = PAD;
|
|
return pn_skb_send(sk, skb, NULL);
|
|
}
|
|
|
|
@@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
|
|
peer_type = hdr->other_pep_type << 8;
|
|
|
|
/* Parse sub-blocks (options) */
|
|
- n_sb = hdr->data[4];
|
|
+ n_sb = hdr->data[3];
|
|
while (n_sb > 0) {
|
|
u8 type, buf[1], len = sizeof(buf);
|
|
const u8 *data = pep_get_sb(skb, &type, &len, buf);
|
|
@@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
|
|
ph->utid = 0;
|
|
if (pn->aligned) {
|
|
ph->message_id = PNS_PIPE_ALIGNED_DATA;
|
|
- ph->data[0] = 0; /* padding */
|
|
+ ph->data0 = 0; /* padding */
|
|
} else
|
|
ph->message_id = PNS_PIPE_DATA;
|
|
ph->pipe_handle = pn->pipe_handle;
|
|
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
|
|
index 97a8282955a8..8566531c2f10 100644
|
|
--- a/net/sunrpc/svcsock.c
|
|
+++ b/net/sunrpc/svcsock.c
|
|
@@ -381,12 +381,16 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
|
|
/*
|
|
* Set socket snd and rcv buffer lengths
|
|
*/
|
|
-static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
|
|
- unsigned int rcv)
|
|
+static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
|
|
{
|
|
+ unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg;
|
|
+ struct socket *sock = svsk->sk_sock;
|
|
+
|
|
+ nreqs = min(nreqs, INT_MAX / 2 / max_mesg);
|
|
+
|
|
lock_sock(sock->sk);
|
|
- sock->sk->sk_sndbuf = snd * 2;
|
|
- sock->sk->sk_rcvbuf = rcv * 2;
|
|
+ sock->sk->sk_sndbuf = nreqs * max_mesg * 2;
|
|
+ sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
|
|
sock->sk->sk_write_space(sock->sk);
|
|
release_sock(sock->sk);
|
|
}
|
|
@@ -548,9 +552,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
|
|
* provides an upper bound on the number of threads
|
|
* which will access the socket.
|
|
*/
|
|
- svc_sock_setbufsize(svsk->sk_sock,
|
|
- (serv->sv_nrthreads+3) * serv->sv_max_mesg,
|
|
- (serv->sv_nrthreads+3) * serv->sv_max_mesg);
|
|
+ svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
|
|
|
|
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
|
|
skb = NULL;
|
|
@@ -718,9 +720,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
|
|
* receive and respond to one request.
|
|
* svc_udp_recvfrom will re-adjust if necessary
|
|
*/
|
|
- svc_sock_setbufsize(svsk->sk_sock,
|
|
- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
|
|
- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
|
|
+ svc_sock_setbufsize(svsk, 3);
|
|
|
|
/* data might have come in before data_ready set up */
|
|
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
|
|
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
|
|
index 3d6bf790cf1f..5ddbf227e7c6 100644
|
|
--- a/net/sunrpc/xprtrdma/verbs.c
|
|
+++ b/net/sunrpc/xprtrdma/verbs.c
|
|
@@ -546,7 +546,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
|
|
|
|
sendcq = ib_alloc_cq(ia->ri_device, NULL,
|
|
ep->rep_attr.cap.max_send_wr + 1,
|
|
- 1, IB_POLL_WORKQUEUE);
|
|
+ ia->ri_device->num_comp_vectors > 1 ? 1 : 0,
|
|
+ IB_POLL_WORKQUEUE);
|
|
if (IS_ERR(sendcq)) {
|
|
rc = PTR_ERR(sendcq);
|
|
dprintk("RPC: %s: failed to create send CQ: %i\n",
|
|
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
|
|
index 6f05e831a73e..82723ef44db3 100644
|
|
--- a/net/xfrm/xfrm_interface.c
|
|
+++ b/net/xfrm/xfrm_interface.c
|
|
@@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
|
|
int ifindex;
|
|
struct xfrm_if *xi;
|
|
|
|
- if (!skb->dev)
|
|
+ if (!secpath_exists(skb) || !skb->dev)
|
|
return NULL;
|
|
|
|
- xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id);
|
|
+ xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
|
|
ifindex = skb->dev->ifindex;
|
|
|
|
for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
|
|
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
|
|
index 6ea8036fcdbe..bf5d59270f79 100644
|
|
--- a/net/xfrm/xfrm_policy.c
|
|
+++ b/net/xfrm/xfrm_policy.c
|
|
@@ -2340,8 +2340,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
|
|
|
|
if (ifcb) {
|
|
xi = ifcb->decode_session(skb);
|
|
- if (xi)
|
|
+ if (xi) {
|
|
if_id = xi->p.if_id;
|
|
+ net = xi->net;
|
|
+ }
|
|
}
|
|
rcu_read_unlock();
|
|
|
|
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
|
|
index a9186a98a37d..0c9c54b57515 100644
|
|
--- a/scripts/kallsyms.c
|
|
+++ b/scripts/kallsyms.c
|
|
@@ -120,8 +120,8 @@ static int read_symbol(FILE *in, struct sym_entry *s)
|
|
fprintf(stderr, "Read error or end of file.\n");
|
|
return -1;
|
|
}
|
|
- if (strlen(sym) > KSYM_NAME_LEN) {
|
|
- fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n"
|
|
+ if (strlen(sym) >= KSYM_NAME_LEN) {
|
|
+ fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n"
|
|
"Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n",
|
|
sym, strlen(sym), KSYM_NAME_LEN);
|
|
return -1;
|
|
diff --git a/security/keys/internal.h b/security/keys/internal.h
|
|
index 9f8208dc0e55..a02742621c8d 100644
|
|
--- a/security/keys/internal.h
|
|
+++ b/security/keys/internal.h
|
|
@@ -188,20 +188,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm)
|
|
return key_task_permission(key_ref, current_cred(), perm);
|
|
}
|
|
|
|
-/*
|
|
- * Authorisation record for request_key().
|
|
- */
|
|
-struct request_key_auth {
|
|
- struct key *target_key;
|
|
- struct key *dest_keyring;
|
|
- const struct cred *cred;
|
|
- void *callout_info;
|
|
- size_t callout_len;
|
|
- pid_t pid;
|
|
-} __randomize_layout;
|
|
-
|
|
extern struct key_type key_type_request_key_auth;
|
|
extern struct key *request_key_auth_new(struct key *target,
|
|
+ const char *op,
|
|
const void *callout_info,
|
|
size_t callout_len,
|
|
struct key *dest_keyring);
|
|
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
|
|
index 1ffe60bb2845..ca31af186abd 100644
|
|
--- a/security/keys/keyctl.c
|
|
+++ b/security/keys/keyctl.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <linux/security.h>
|
|
#include <linux/uio.h>
|
|
#include <linux/uaccess.h>
|
|
+#include <keys/request_key_auth-type.h>
|
|
#include "internal.h"
|
|
|
|
#define KEY_MAX_DESC_SIZE 4096
|
|
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
|
|
index d5b25e535d3a..d776d2114651 100644
|
|
--- a/security/keys/process_keys.c
|
|
+++ b/security/keys/process_keys.c
|
|
@@ -20,6 +20,7 @@
|
|
#include <linux/security.h>
|
|
#include <linux/user_namespace.h>
|
|
#include <linux/uaccess.h>
|
|
+#include <keys/request_key_auth-type.h>
|
|
#include "internal.h"
|
|
|
|
/* Session keyring create vs join semaphore */
|
|
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
|
|
index 738553698649..1a547cec8385 100644
|
|
--- a/security/keys/request_key.c
|
|
+++ b/security/keys/request_key.c
|
|
@@ -18,31 +18,30 @@
|
|
#include <linux/keyctl.h>
|
|
#include <linux/slab.h>
|
|
#include "internal.h"
|
|
+#include <keys/request_key_auth-type.h>
|
|
|
|
#define key_negative_timeout 60 /* default timeout on a negative key's existence */
|
|
|
|
/**
|
|
* complete_request_key - Complete the construction of a key.
|
|
- * @cons: The key construction record.
|
|
+ * @auth_key: The authorisation key.
|
|
* @error: The success or failute of the construction.
|
|
*
|
|
* Complete the attempt to construct a key. The key will be negated
|
|
* if an error is indicated. The authorisation key will be revoked
|
|
* unconditionally.
|
|
*/
|
|
-void complete_request_key(struct key_construction *cons, int error)
|
|
+void complete_request_key(struct key *authkey, int error)
|
|
{
|
|
- kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error);
|
|
+ struct request_key_auth *rka = get_request_key_auth(authkey);
|
|
+ struct key *key = rka->target_key;
|
|
+
|
|
+ kenter("%d{%d},%d", authkey->serial, key->serial, error);
|
|
|
|
if (error < 0)
|
|
- key_negate_and_link(cons->key, key_negative_timeout, NULL,
|
|
- cons->authkey);
|
|
+ key_negate_and_link(key, key_negative_timeout, NULL, authkey);
|
|
else
|
|
- key_revoke(cons->authkey);
|
|
-
|
|
- key_put(cons->key);
|
|
- key_put(cons->authkey);
|
|
- kfree(cons);
|
|
+ key_revoke(authkey);
|
|
}
|
|
EXPORT_SYMBOL(complete_request_key);
|
|
|
|
@@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp,
|
|
* Request userspace finish the construction of a key
|
|
* - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
|
|
*/
|
|
-static int call_sbin_request_key(struct key_construction *cons,
|
|
- const char *op,
|
|
- void *aux)
|
|
+static int call_sbin_request_key(struct key *authkey, void *aux)
|
|
{
|
|
static char const request_key[] = "/sbin/request-key";
|
|
+ struct request_key_auth *rka = get_request_key_auth(authkey);
|
|
const struct cred *cred = current_cred();
|
|
key_serial_t prkey, sskey;
|
|
- struct key *key = cons->key, *authkey = cons->authkey, *keyring,
|
|
- *session;
|
|
+ struct key *key = rka->target_key, *keyring, *session;
|
|
char *argv[9], *envp[3], uid_str[12], gid_str[12];
|
|
char key_str[12], keyring_str[3][12];
|
|
char desc[20];
|
|
int ret, i;
|
|
|
|
- kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
|
|
+ kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
|
|
|
|
ret = install_user_keyrings();
|
|
if (ret < 0)
|
|
@@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons,
|
|
/* set up the argument list */
|
|
i = 0;
|
|
argv[i++] = (char *)request_key;
|
|
- argv[i++] = (char *) op;
|
|
+ argv[i++] = (char *)rka->op;
|
|
argv[i++] = key_str;
|
|
argv[i++] = uid_str;
|
|
argv[i++] = gid_str;
|
|
@@ -191,7 +188,7 @@ error_link:
|
|
key_put(keyring);
|
|
|
|
error_alloc:
|
|
- complete_request_key(cons, ret);
|
|
+ complete_request_key(authkey, ret);
|
|
kleave(" = %d", ret);
|
|
return ret;
|
|
}
|
|
@@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info,
|
|
size_t callout_len, void *aux,
|
|
struct key *dest_keyring)
|
|
{
|
|
- struct key_construction *cons;
|
|
request_key_actor_t actor;
|
|
struct key *authkey;
|
|
int ret;
|
|
|
|
kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
|
|
|
|
- cons = kmalloc(sizeof(*cons), GFP_KERNEL);
|
|
- if (!cons)
|
|
- return -ENOMEM;
|
|
-
|
|
/* allocate an authorisation key */
|
|
- authkey = request_key_auth_new(key, callout_info, callout_len,
|
|
+ authkey = request_key_auth_new(key, "create", callout_info, callout_len,
|
|
dest_keyring);
|
|
- if (IS_ERR(authkey)) {
|
|
- kfree(cons);
|
|
- ret = PTR_ERR(authkey);
|
|
- authkey = NULL;
|
|
- } else {
|
|
- cons->authkey = key_get(authkey);
|
|
- cons->key = key_get(key);
|
|
+ if (IS_ERR(authkey))
|
|
+ return PTR_ERR(authkey);
|
|
|
|
- /* make the call */
|
|
- actor = call_sbin_request_key;
|
|
- if (key->type->request_key)
|
|
- actor = key->type->request_key;
|
|
+ /* Make the call */
|
|
+ actor = call_sbin_request_key;
|
|
+ if (key->type->request_key)
|
|
+ actor = key->type->request_key;
|
|
|
|
- ret = actor(cons, "create", aux);
|
|
+ ret = actor(authkey, aux);
|
|
|
|
- /* check that the actor called complete_request_key() prior to
|
|
- * returning an error */
|
|
- WARN_ON(ret < 0 &&
|
|
- !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
|
|
- key_put(authkey);
|
|
- }
|
|
+ /* check that the actor called complete_request_key() prior to
|
|
+ * returning an error */
|
|
+ WARN_ON(ret < 0 &&
|
|
+ !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
|
|
|
|
+ key_put(authkey);
|
|
kleave(" = %d", ret);
|
|
return ret;
|
|
}
|
|
@@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
|
|
if (cred->request_key_auth) {
|
|
authkey = cred->request_key_auth;
|
|
down_read(&authkey->sem);
|
|
- rka = authkey->payload.data[0];
|
|
+ rka = get_request_key_auth(authkey);
|
|
if (!test_bit(KEY_FLAG_REVOKED,
|
|
&authkey->flags))
|
|
dest_keyring =
|
|
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 6797843154f0..5e515791ccd1 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -18,7 +18,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "internal.h"
-#include <keys/user-type.h>
+#include <keys/request_key_auth-type.h>

static int request_key_auth_preparse(struct key_preparsed_payload *);
static void request_key_auth_free_preparse(struct key_preparsed_payload *);
@@ -69,7 +69,7 @@ static int request_key_auth_instantiate(struct key *key,
static void request_key_auth_describe(const struct key *key,
struct seq_file *m)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);

seq_puts(m, "key:");
seq_puts(m, key->description);
@@ -84,7 +84,7 @@ static void request_key_auth_describe(const struct key *key,
static long request_key_auth_read(const struct key *key,
char __user *buffer, size_t buflen)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);
size_t datalen;
long ret;

@@ -110,7 +110,7 @@ static long request_key_auth_read(const struct key *key,
*/
static void request_key_auth_revoke(struct key *key)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);

kenter("{%d}", key->serial);

@@ -137,7 +137,7 @@ static void free_request_key_auth(struct request_key_auth *rka)
*/
static void request_key_auth_destroy(struct key *key)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);

kenter("{%d}", key->serial);

@@ -148,8 +148,9 @@ static void request_key_auth_destroy(struct key *key)
* Create an authorisation token for /sbin/request-key or whoever to gain
* access to the caller's security data.
*/
-struct key *request_key_auth_new(struct key *target, const void *callout_info,
- size_t callout_len, struct key *dest_keyring)
+struct key *request_key_auth_new(struct key *target, const char *op,
+ const void *callout_info, size_t callout_len,
+ struct key *dest_keyring)
{
struct request_key_auth *rka, *irka;
const struct cred *cred = current->cred;
@@ -167,6 +168,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
if (!rka->callout_info)
goto error_free_rka;
rka->callout_len = callout_len;
+ strlcpy(rka->op, op, sizeof(rka->op));

/* see if the calling process is already servicing the key request of
* another process */
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 3c3878f0d2fa..6ea3d3aa1a1e 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1034,8 +1034,11 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));

/* if fs is reusing a sb, make sure that the contexts match */
- if (newsbsec->flags & SE_SBINITIALIZED)
+ if (newsbsec->flags & SE_SBINITIALIZED) {
+ if ((kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context)
+ *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
return selinux_cmp_sb_context(oldsb, newsb);
+ }

mutex_lock(&newsbsec->lock);

@@ -5334,6 +5337,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
return -EINVAL;
}

+ if (walk_size + len > addrlen)
+ return -EINVAL;
+
err = -EINVAL;
switch (optname) {
/* Bind checks */
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
index 690c26e7389e..3be0e1478d7d 100644
--- a/sound/soc/codecs/pcm186x.c
+++ b/sound/soc/codecs/pcm186x.c
@@ -42,7 +42,7 @@ struct pcm186x_priv {
bool is_master_mode;
};

-static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 4000, 50);
+static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0);

static const struct snd_kcontrol_new pcm1863_snd_controls[] = {
SOC_DOUBLE_R_S_TLV("ADC Capture Volume", PCM186X_PGA_VAL_CH1_L,
@@ -158,7 +158,7 @@ static const struct snd_soc_dapm_widget pcm1863_dapm_widgets[] = {
* Put the codec into SLEEP mode when not in use, allowing the
* Energysense mechanism to operate.
*/
- SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 0),
+ SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 1),
};

static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
@@ -184,8 +184,8 @@ static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
* Put the codec into SLEEP mode when not in use, allowing the
* Energysense mechanism to operate.
*/
- SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 0),
- SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 0),
+ SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 1),
+ SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 1),
};

static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = {
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index afe7d5b19313..79ebcc239786 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -1757,7 +1757,9 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
{"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc},
{"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
{"ADC STO1 ASRC", NULL, "AD ASRC"},
+ {"ADC STO1 ASRC", NULL, "DA ASRC"},
{"ADC STO1 ASRC", NULL, "CLKDET"},
+ {"DAC STO1 ASRC", NULL, "AD ASRC"},
{"DAC STO1 ASRC", NULL, "DA ASRC"},
{"DAC STO1 ASRC", NULL, "CLKDET"},

diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index c1d1d06783e5..4daefa5b150a 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -398,7 +398,8 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
break;
case SND_SOC_DAIFMT_RIGHT_J:
/* Data on rising edge of bclk, frame high, right aligned */
- xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCR_xWA;
+ xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP;
+ xcr |= ESAI_xCR_xWA;
break;
case SND_SOC_DAIFMT_DSP_A:
/* Data on rising edge of bclk, frame high, 1clk before data */
@@ -455,12 +456,12 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}

- mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR;
+ mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR | ESAI_xCR_xWA;
regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, mask, xcr);
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, mask, xcr);

mask = ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCCR_xFSP |
- ESAI_xCCR_xFSD | ESAI_xCCR_xCKD | ESAI_xCR_xWA;
+ ESAI_xCCR_xFSD | ESAI_xCCR_xCKD;
regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, mask, xccr);
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, mask, xccr);

diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index d6c62aa13041..ce00fe2f6aae 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -700,6 +700,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
{
struct i2s_dai *i2s = to_info(dai);
u32 mod, mask = 0, val = 0;
+ struct clk *rclksrc;
unsigned long flags;

WARN_ON(!pm_runtime_active(dai->dev));
@@ -782,6 +783,10 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,

i2s->frmclk = params_rate(params);

+ rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
+ if (rclksrc && !IS_ERR(rclksrc))
+ i2s->rclk_srcrate = clk_get_rate(rclksrc);
+
return 0;
}

@@ -886,11 +891,6 @@ static int config_setup(struct i2s_dai *i2s)
return 0;

if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
- struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
-
- if (rclksrc && !IS_ERR(rclksrc))
- i2s->rclk_srcrate = clk_get_rate(rclksrc);
-
psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
dev_dbg(&i2s->pdev->dev,
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index a566dae3ec8a..9410e0a9b14b 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
if (rsnd_ssi_is_multi_slave(mod, io))
return 0;

- if (ssi->usrcnt > 1) {
+ if (ssi->usrcnt > 0) {
if (ssi->rate != rate) {
dev_err(dev, "SSI parent/child should use same rate\n");
return -EINVAL;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 653706945222..9b78fb3daa7b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -71,12 +71,16 @@ static int dapm_up_seq[] = {
[snd_soc_dapm_clock_supply] = 1,
[snd_soc_dapm_supply] = 2,
[snd_soc_dapm_micbias] = 3,
+ [snd_soc_dapm_vmid] = 3,
[snd_soc_dapm_dai_link] = 2,
[snd_soc_dapm_dai_in] = 4,
[snd_soc_dapm_dai_out] = 4,
[snd_soc_dapm_aif_in] = 4,
[snd_soc_dapm_aif_out] = 4,
[snd_soc_dapm_mic] = 5,
+ [snd_soc_dapm_siggen] = 5,
+ [snd_soc_dapm_input] = 5,
+ [snd_soc_dapm_output] = 5,
[snd_soc_dapm_mux] = 6,
[snd_soc_dapm_demux] = 6,
[snd_soc_dapm_dac] = 7,
@@ -84,11 +88,19 @@ static int dapm_up_seq[] = {
[snd_soc_dapm_mixer] = 8,
[snd_soc_dapm_mixer_named_ctl] = 8,
[snd_soc_dapm_pga] = 9,
+ [snd_soc_dapm_buffer] = 9,
+ [snd_soc_dapm_scheduler] = 9,
+ [snd_soc_dapm_effect] = 9,
+ [snd_soc_dapm_src] = 9,
+ [snd_soc_dapm_asrc] = 9,
+ [snd_soc_dapm_encoder] = 9,
+ [snd_soc_dapm_decoder] = 9,
[snd_soc_dapm_adc] = 10,
[snd_soc_dapm_out_drv] = 11,
[snd_soc_dapm_hp] = 11,
[snd_soc_dapm_spk] = 11,
[snd_soc_dapm_line] = 11,
+ [snd_soc_dapm_sink] = 11,
[snd_soc_dapm_kcontrol] = 12,
[snd_soc_dapm_post] = 13,
};
@@ -101,13 +113,25 @@ static int dapm_down_seq[] = {
[snd_soc_dapm_spk] = 3,
[snd_soc_dapm_line] = 3,
[snd_soc_dapm_out_drv] = 3,
+ [snd_soc_dapm_sink] = 3,
[snd_soc_dapm_pga] = 4,
+ [snd_soc_dapm_buffer] = 4,
+ [snd_soc_dapm_scheduler] = 4,
+ [snd_soc_dapm_effect] = 4,
+ [snd_soc_dapm_src] = 4,
+ [snd_soc_dapm_asrc] = 4,
+ [snd_soc_dapm_encoder] = 4,
+ [snd_soc_dapm_decoder] = 4,
[snd_soc_dapm_switch] = 5,
[snd_soc_dapm_mixer_named_ctl] = 5,
[snd_soc_dapm_mixer] = 5,
[snd_soc_dapm_dac] = 6,
[snd_soc_dapm_mic] = 7,
+ [snd_soc_dapm_siggen] = 7,
+ [snd_soc_dapm_input] = 7,
+ [snd_soc_dapm_output] = 7,
[snd_soc_dapm_micbias] = 8,
+ [snd_soc_dapm_vmid] = 8,
[snd_soc_dapm_mux] = 9,
[snd_soc_dapm_demux] = 9,
[snd_soc_dapm_aif_in] = 10,
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 66e77e020745..88a7e860b175 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -2493,6 +2493,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id)
{
struct soc_tplg tplg;
+ int ret;

/* setup parsing context */
memset(&tplg, 0, sizeof(tplg));
@@ -2506,7 +2507,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
tplg.bytes_ext_ops = ops->bytes_ext_ops;
tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;

- return soc_tplg_load(&tplg);
+ ret = soc_tplg_load(&tplg);
+ /* free the created components if fail to load topology */
+ if (ret)
+ snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);

diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index db1511359c5e..40c93d8158b5 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1273,9 +1273,9 @@ static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
}

/* padding must be written by fn() e.g. record__process_auxtrace() */
- padding = size & 7;
+ padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
if (padding)
- padding = 8 - padding;
+ padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;

memset(&ev, 0, sizeof(ev));
ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 71fc3bd74299..136c5f95f9bf 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -38,6 +38,9 @@ struct record_opts;
struct auxtrace_info_event;
struct events_stats;

+/* Auxtrace records must have the same alignment as perf event records */
+#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
+
enum auxtrace_type {
PERF_AUXTRACE_UNKNOWN,
PERF_AUXTRACE_INTEL_PT,
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index d404bed7003a..f3db68abbd9a 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -26,6 +26,7 @@

#include "../cache.h"
#include "../util.h"
+#include "../auxtrace.h"

#include "intel-pt-insn-decoder.h"
#include "intel-pt-pkt-decoder.h"
@@ -1389,7 +1390,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
{
intel_pt_log("ERROR: Buffer overflow\n");
intel_pt_clear_tx_flags(decoder);
- decoder->cbr = 0;
decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
decoder->overflow = true;
@@ -2559,6 +2559,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
}
}

+#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
+
+/**
+ * adj_for_padding - adjust overlap to account for padding.
+ * @buf_b: second buffer
+ * @buf_a: first buffer
+ * @len_a: size of first buffer
+ *
+ * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
+ * accordingly.
+ *
+ * Return: A pointer into @buf_b from where non-overlapped data starts
+ */
+static unsigned char *adj_for_padding(unsigned char *buf_b,
+ unsigned char *buf_a, size_t len_a)
+{
+ unsigned char *p = buf_b - MAX_PADDING;
+ unsigned char *q = buf_a + len_a - MAX_PADDING;
+ int i;
+
+ for (i = MAX_PADDING; i; i--, p++, q++) {
+ if (*p != *q)
+ break;
+ }
+
+ return p;
+}
+
/**
* intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
* using TSC.
@@ -2609,8 +2637,11 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,

/* Same TSC, so buffers are consecutive */
if (!cmp && rem_b >= rem_a) {
+ unsigned char *start;
+
*consecutive = true;
- return buf_b + len_b - (rem_b - rem_a);
+ start = buf_b + len_b - (rem_b - rem_a);
+ return adj_for_padding(start, buf_a, len_a);
}
if (cmp < 0)
return buf_b; /* tsc_a < tsc_b => no overlap */
@@ -2673,7 +2704,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
found = memmem(buf_a, len_a, buf_b, len_a);
if (found) {
*consecutive = true;
- return buf_b + len_a;
+ return adj_for_padding(buf_b + len_a, buf_a, len_a);
}

/* Try again at next PSB in buffer 'a' */
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 4f48bc11a29c..ff2c41ea94c8 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -2507,6 +2507,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
}

pt->timeless_decoding = intel_pt_timeless_decoding(pt);
+ if (pt->timeless_decoding && !pt->tc.time_mult)
+ pt->tc.time_mult = 1;
pt->have_tsc = intel_pt_have_tsc(pt);
pt->sampling_mode = false;
pt->est_tsc = !pt->timeless_decoding;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index d188b7588152..0715f972a275 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -709,6 +709,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
}

pos->start -= curr_map->start - curr_map->pgoff;
+ if (pos->end > curr_map->end)
+ pos->end = curr_map->end;
if (pos->end)
pos->end -= curr_map->start - curr_map->pgoff;
symbols__insert(&curr_map->dso->symbols, pos);
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index 147e34cfceb7..02d7c871862a 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -474,6 +474,16 @@ static void test_lpm_delete(void)
assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
errno == ENOENT);

+ key->prefixlen = 30; // unused prefix so far
+ inet_pton(AF_INET, "192.255.0.0", key->data);
+ assert(bpf_map_delete_elem(map_fd, key) == -1 &&
+ errno == ENOENT);
+
+ key->prefixlen = 16; // same prefix as the root node
+ inet_pton(AF_INET, "192.255.0.0", key->data);
+ assert(bpf_map_delete_elem(map_fd, key) == -1 &&
+ errno == ENOENT);
+
/* assert initial lookup */
key->prefixlen = 32;
inet_pton(AF_INET, "192.168.0.1", key->data);
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 0f45633bd634..a4ccde0e473b 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -385,6 +385,7 @@ fib_carrier_unicast_test()

set -e
$IP link set dev dummy0 carrier off
+ sleep 1
set +e

echo " Carrier down"
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 91495045ad5a..1415e36fed3d 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -624,6 +624,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
/* Awaken to handle a signal, request we sleep again later. */
kvm_make_request(KVM_REQ_SLEEP, vcpu);
}
+
+ /*
+ * Make sure we will observe a potential reset request if we've
+ * observed a change to the power state. Pairs with the smp_wmb() in
+ * kvm_psci_vcpu_on().
+ */
+ smp_rmb();
}

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -637,6 +644,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
vcpu_req_sleep(vcpu);

+ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+ kvm_reset_vcpu(vcpu);
+
/*
* Clear IRQ_PENDING requests that were made to guarantee
* that a VCPU sees new virtual interrupts.
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 2f405b0be25c..1344557a7085 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -2154,7 +2154,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
return 0;
}

-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 9b73d3ad918a..34d08ee63747 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)

static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
+ struct vcpu_reset_state *reset_state;
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu = NULL;
- struct swait_queue_head *wq;
unsigned long cpu_id;
- unsigned long context_id;
- phys_addr_t target_pc;

cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
if (vcpu_mode_is_32bit(source_vcpu))
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
return PSCI_RET_INVALID_PARAMS;
}

- target_pc = smccc_get_arg2(source_vcpu);
- context_id = smccc_get_arg3(source_vcpu);
+ reset_state = &vcpu->arch.reset_state;

- kvm_reset_vcpu(vcpu);
-
- /* Gracefully handle Thumb2 entry point */
- if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
- target_pc &= ~((phys_addr_t) 1);
- vcpu_set_thumb(vcpu);
- }
+ reset_state->pc = smccc_get_arg2(source_vcpu);

/* Propagate caller endianness */
- if (kvm_vcpu_is_be(source_vcpu))
- kvm_vcpu_set_be(vcpu);
+ reset_state->be = kvm_vcpu_is_be(source_vcpu);

- *vcpu_pc(vcpu) = target_pc;
/*
* NOTE: We always update r0 (or x0) because for PSCI v0.1
* the general puspose registers are undefined upon CPU_ON.
*/
- smccc_set_retval(vcpu, context_id, 0, 0, 0);
- vcpu->arch.power_off = false;
- smp_mb(); /* Make sure the above is visible */
+ reset_state->r0 = smccc_get_arg3(source_vcpu);
+
+ WRITE_ONCE(reset_state->reset, true);
+ kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

- wq = kvm_arch_vcpu_wq(vcpu);
- swake_up_one(wq);
+ /*
+ * Make sure the reset request is observed if the change to
+ * power_state is observed.
+ */
+ smp_wmb();
+
+ vcpu->arch.power_off = false;
+ kvm_vcpu_wake_up(vcpu);

return PSCI_RET_SUCCESS;
}
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index c0c0b88af1d5..8196e4f8731f 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;

INIT_LIST_HEAD(&dist->lpi_list_head);
- spin_lock_init(&dist->lpi_list_lock);
+ raw_spin_lock_init(&dist->lpi_list_lock);
}

/* CREATION */
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
irq->config = VGIC_CONFIG_LEVEL;
}

- /*
- * GICv3 can only be created via the KVM_DEVICE_CREATE API and
- * so we always know the emulation type at this point as it's
- * either explicitly configured as GICv3, or explicitly
- * configured as GICv2, or not configured yet which also
- * implies GICv2.
- */
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
irq->group = 1;
else
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
- int ret = 0, i;
+ int ret = 0, i, idx;

if (vgic_initialized(kvm))
return 0;
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm)
if (ret)
goto out;

+ /* Initialize groups on CPUs created before the VGIC type was known */
+ kvm_for_each_vcpu(idx, vcpu, kvm) {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+ for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+ struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ irq->group = 1;
+ else
+ irq->group = 0;
+ }
+ }
+
if (vgic_has_its(kvm)) {
ret = vgic_v4_init(kvm);
if (ret)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 12502251727e..f376c82afb61 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
irq->target_vcpu = vcpu;
irq->group = 1;

- spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

/*
* There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
dist->lpi_list_count++;

out_unlock:
- spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

/*
* We "cache" the configuration table entries in our struct vgic_irq's.
@@ -339,7 +339,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
if (!intids)
return -ENOMEM;

- spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
if (i == irq_count)
break;
@@ -348,7 +348,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
continue;
intids[i++] = irq->intid;
}
- spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

*intid_ptr = intids;
return i;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index f884a54b2601..c5165e3b80cb 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
struct vgic_irq *irq = NULL;
unsigned long flags;

- spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
irq = NULL;

out_unlock:
- spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

return irq;
}
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
if (irq->intid < VGIC_MIN_LPI)
return;

- spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
if (!kref_put(&irq->refcount, vgic_irq_release)) {
- spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return;
};

list_del(&irq->lpi_list);
dist->lpi_list_count--;
- spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

kfree(irq);
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0ffb02ff5234..c436d95fd7aa 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -873,6 +873,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
int as_id, struct kvm_memslots *slots)
{
struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
+ u64 gen;

/*
* Set the low bit in the generation, which disables SPTE caching
@@ -895,9 +896,11 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
* space 0 will use generations 0, 4, 8, ... while * address space 1 will
* use generations 2, 6, 10, 14, ...
*/
- slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
+ gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;

- kvm_arch_memslots_updated(kvm, slots);
+ kvm_arch_memslots_updated(kvm, gen);
+
+ slots->generation = gen;

return old_memslots;
}
|