mirror of https://github.com/Fishwaldo/build.git (synced 2025-03-29 10:11:23 +00:00)
diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt
|
|
index 1c35e7b665e1..03ab8f5eab40 100644
|
|
--- a/Documentation/devicetree/bindings/usb/usb-device.txt
|
|
+++ b/Documentation/devicetree/bindings/usb/usb-device.txt
|
|
@@ -11,7 +11,7 @@ Required properties:
|
|
be used, but a device adhering to this binding may leave out all except
|
|
for usbVID,PID.
|
|
- reg: the port number which this device is connecting to, the range
|
|
- is 1-31.
|
|
+ is 1-255.
|
|
|
|
Example:
|
|
|
|
diff --git a/Makefile b/Makefile
|
|
index dfe17af517b2..8f2819bf8135 100644
|
|
--- a/Makefile
|
|
+++ b/Makefile
|
|
@@ -1,6 +1,6 @@
|
|
VERSION = 4
|
|
PATCHLEVEL = 9
|
|
-SUBLEVEL = 68
|
|
+SUBLEVEL = 69
|
|
EXTRAVERSION =
|
|
NAME = Roaring Lionus
|
|
|
|
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
|
|
index 68b06f9c65de..12f99fd2e3b2 100644
|
|
--- a/arch/arm/include/asm/assembler.h
|
|
+++ b/arch/arm/include/asm/assembler.h
|
|
@@ -516,4 +516,22 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
|
|
#endif
|
|
.endm
|
|
|
|
+ .macro bug, msg, line
|
|
+#ifdef CONFIG_THUMB2_KERNEL
|
|
+1: .inst 0xde02
|
|
+#else
|
|
+1: .inst 0xe7f001f2
|
|
+#endif
|
|
+#ifdef CONFIG_DEBUG_BUGVERBOSE
|
|
+ .pushsection .rodata.str, "aMS", %progbits, 1
|
|
+2: .asciz "\msg"
|
|
+ .popsection
|
|
+ .pushsection __bug_table, "aw"
|
|
+ .align 2
|
|
+ .word 1b, 2b
|
|
+ .hword \line
|
|
+ .popsection
|
|
+#endif
|
|
+ .endm
|
|
+
|
|
#endif /* __ASM_ASSEMBLER_H__ */
|
|
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
|
|
index e22089fb44dc..98d6de177b7a 100644
|
|
--- a/arch/arm/include/asm/kvm_arm.h
|
|
+++ b/arch/arm/include/asm/kvm_arm.h
|
|
@@ -161,8 +161,7 @@
|
|
#else
|
|
#define VTTBR_X (5 - KVM_T0SZ)
|
|
#endif
|
|
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
|
|
-#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
|
|
+#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
|
|
#define VTTBR_VMID_SHIFT _AC(48, ULL)
|
|
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
|
|
|
|
@@ -209,6 +208,7 @@
|
|
#define HSR_EC_IABT_HYP (0x21)
|
|
#define HSR_EC_DABT (0x24)
|
|
#define HSR_EC_DABT_HYP (0x25)
|
|
+#define HSR_EC_MAX (0x3f)
|
|
|
|
#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
|
|
|
|
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
|
|
index 1f59ea051bab..b7e0125c0bbf 100644
|
|
--- a/arch/arm/include/asm/uaccess.h
|
|
+++ b/arch/arm/include/asm/uaccess.h
|
|
@@ -478,11 +478,10 @@ extern unsigned long __must_check
|
|
arm_copy_from_user(void *to, const void __user *from, unsigned long n);
|
|
|
|
static inline unsigned long __must_check
|
|
-__copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
{
|
|
unsigned int __ua_flags;
|
|
|
|
- check_object_size(to, n, false);
|
|
__ua_flags = uaccess_save_and_enable();
|
|
n = arm_copy_from_user(to, from, n);
|
|
uaccess_restore(__ua_flags);
|
|
@@ -495,18 +494,15 @@ extern unsigned long __must_check
|
|
__copy_to_user_std(void __user *to, const void *from, unsigned long n);
|
|
|
|
static inline unsigned long __must_check
|
|
-__copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
#ifndef CONFIG_UACCESS_WITH_MEMCPY
|
|
unsigned int __ua_flags;
|
|
-
|
|
- check_object_size(from, n, true);
|
|
__ua_flags = uaccess_save_and_enable();
|
|
n = arm_copy_to_user(to, from, n);
|
|
uaccess_restore(__ua_flags);
|
|
return n;
|
|
#else
|
|
- check_object_size(from, n, true);
|
|
return arm_copy_to_user(to, from, n);
|
|
#endif
|
|
}
|
|
@@ -526,25 +522,49 @@ __clear_user(void __user *addr, unsigned long n)
|
|
}
|
|
|
|
#else
|
|
-#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
|
|
-#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
|
|
+#define __arch_copy_from_user(to, from, n) \
|
|
+ (memcpy(to, (void __force *)from, n), 0)
|
|
+#define __arch_copy_to_user(to, from, n) \
|
|
+ (memcpy((void __force *)to, from, n), 0)
|
|
#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
|
|
#endif
|
|
|
|
-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
+static inline unsigned long __must_check
|
|
+__copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
+{
|
|
+ check_object_size(to, n, false);
|
|
+ return __arch_copy_from_user(to, from, n);
|
|
+}
|
|
+
|
|
+static inline unsigned long __must_check
|
|
+copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
{
|
|
unsigned long res = n;
|
|
+
|
|
+ check_object_size(to, n, false);
|
|
+
|
|
if (likely(access_ok(VERIFY_READ, from, n)))
|
|
- res = __copy_from_user(to, from, n);
|
|
+ res = __arch_copy_from_user(to, from, n);
|
|
if (unlikely(res))
|
|
memset(to + (n - res), 0, res);
|
|
return res;
|
|
}
|
|
|
|
-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
+static inline unsigned long __must_check
|
|
+__copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
+ check_object_size(from, n, true);
|
|
+
|
|
+ return __arch_copy_to_user(to, from, n);
|
|
+}
|
|
+
|
|
+static inline unsigned long __must_check
|
|
+copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
+{
|
|
+ check_object_size(from, n, true);
|
|
+
|
|
if (access_ok(VERIFY_WRITE, to, n))
|
|
- n = __copy_to_user(to, from, n);
|
|
+ n = __arch_copy_to_user(to, from, n);
|
|
return n;
|
|
}
|
|
|
|
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
|
|
index 6391728c8f03..e056c9a9aa9d 100644
|
|
--- a/arch/arm/kernel/entry-header.S
|
|
+++ b/arch/arm/kernel/entry-header.S
|
|
@@ -299,6 +299,8 @@
|
|
mov r2, sp
|
|
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
|
|
ldr lr, [r2, #\offset + S_PC]! @ get pc
|
|
+ tst r1, #PSR_I_BIT | 0x0f
|
|
+ bne 1f
|
|
msr spsr_cxsf, r1 @ save in spsr_svc
|
|
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
|
|
@ We must avoid clrex due to Cortex-A15 erratum #830321
|
|
@@ -313,6 +315,7 @@
|
|
@ after ldm {}^
|
|
add sp, sp, #\offset + PT_REGS_SIZE
|
|
movs pc, lr @ return & move spsr_svc into cpsr
|
|
+1: bug "Returning to usermode but unexpected PSR bits set?", \@
|
|
#elif defined(CONFIG_CPU_V7M)
|
|
@ V7M restore.
|
|
@ Note that we don't need to do clrex here as clearing the local
|
|
@@ -328,6 +331,8 @@
|
|
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
|
|
ldr lr, [sp, #\offset + S_PC] @ get pc
|
|
add sp, sp, #\offset + S_SP
|
|
+ tst r1, #PSR_I_BIT | 0x0f
|
|
+ bne 1f
|
|
msr spsr_cxsf, r1 @ save in spsr_svc
|
|
|
|
@ We must avoid clrex due to Cortex-A15 erratum #830321
|
|
@@ -340,6 +345,7 @@
|
|
.endif
|
|
add sp, sp, #PT_REGS_SIZE - S_SP
|
|
movs pc, lr @ return & move spsr_svc into cpsr
|
|
+1: bug "Returning to usermode but unexpected PSR bits set?", \@
|
|
#endif /* !CONFIG_THUMB2_KERNEL */
|
|
.endm
|
|
|
|
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
|
|
index 066b6d4508ce..42f5daf715d0 100644
|
|
--- a/arch/arm/kvm/handle_exit.c
|
|
+++ b/arch/arm/kvm/handle_exit.c
|
|
@@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
return 1;
|
|
}
|
|
|
|
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
+{
|
|
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
|
|
+
|
|
+ kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
|
|
+ hsr);
|
|
+
|
|
+ kvm_inject_undefined(vcpu);
|
|
+ return 1;
|
|
+}
|
|
+
|
|
static exit_handle_fn arm_exit_handlers[] = {
|
|
+ [0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
|
|
[HSR_EC_WFI] = kvm_handle_wfx,
|
|
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
|
|
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
|
|
@@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
|
|
{
|
|
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
|
|
|
|
- if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
|
|
- !arm_exit_handlers[hsr_ec]) {
|
|
- kvm_err("Unknown exception class: hsr: %#08x\n",
|
|
- (unsigned int)kvm_vcpu_get_hsr(vcpu));
|
|
- BUG();
|
|
- }
|
|
-
|
|
return arm_exit_handlers[hsr_ec];
|
|
}
|
|
|
|
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
|
|
index 8633c703546a..2944af820558 100644
|
|
--- a/arch/arm/mach-omap2/gpmc-onenand.c
|
|
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
|
|
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
|
|
return ret;
|
|
}
|
|
|
|
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
|
|
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
|
|
{
|
|
int err;
|
|
struct device *dev = &gpmc_onenand_device.dev;
|
|
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
|
|
if (err < 0) {
|
|
dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
|
|
gpmc_onenand_data->cs, err);
|
|
- return;
|
|
+ return err;
|
|
}
|
|
|
|
gpmc_onenand_resource.end = gpmc_onenand_resource.start +
|
|
ONENAND_IO_SIZE - 1;
|
|
|
|
- if (platform_device_register(&gpmc_onenand_device) < 0) {
|
|
+ err = platform_device_register(&gpmc_onenand_device);
|
|
+ if (err) {
|
|
dev_err(dev, "Unable to register OneNAND device\n");
|
|
gpmc_cs_free(gpmc_onenand_data->cs);
|
|
- return;
|
|
}
|
|
+
|
|
+ return err;
|
|
}
|
|
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
|
|
index 1cc4a6f3954e..bca54154e14f 100644
|
|
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
|
|
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
|
|
@@ -3828,16 +3828,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
|
|
* Return: 0 if device named @dev_name is not likely to be accessible,
|
|
* or 1 if it is likely to be accessible.
|
|
*/
|
|
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
|
|
- const char *dev_name)
|
|
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
|
|
+ const char *dev_name)
|
|
{
|
|
+ struct device_node *node;
|
|
+ bool available;
|
|
+
|
|
if (!bus)
|
|
- return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
|
|
+ return omap_type() == OMAP2_DEVICE_TYPE_GP;
|
|
|
|
- if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
|
|
- return 1;
|
|
+ node = of_get_child_by_name(bus, dev_name);
|
|
+ available = of_device_is_available(node);
|
|
+ of_node_put(node);
|
|
|
|
- return 0;
|
|
+ return available;
|
|
}
|
|
|
|
int __init omap3xxx_hwmod_init(void)
|
|
@@ -3906,15 +3910,20 @@ int __init omap3xxx_hwmod_init(void)
|
|
|
|
if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
|
|
r = omap_hwmod_register_links(h_sham);
|
|
- if (r < 0)
|
|
+ if (r < 0) {
|
|
+ of_node_put(bus);
|
|
return r;
|
|
+ }
|
|
}
|
|
|
|
if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
|
|
r = omap_hwmod_register_links(h_aes);
|
|
- if (r < 0)
|
|
+ if (r < 0) {
|
|
+ of_node_put(bus);
|
|
return r;
|
|
+ }
|
|
}
|
|
+ of_node_put(bus);
|
|
|
|
/*
|
|
* Register hwmod links specific to certain ES levels of a
|
|
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
|
|
index 2a2752b5b6aa..0dbc1c6ab7dc 100644
|
|
--- a/arch/arm64/include/asm/kvm_arm.h
|
|
+++ b/arch/arm64/include/asm/kvm_arm.h
|
|
@@ -170,8 +170,7 @@
|
|
#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
|
|
#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
|
|
|
|
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
|
|
-#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
|
|
+#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
|
|
#define VTTBR_VMID_SHIFT (UL(48))
|
|
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
|
|
|
|
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
|
|
index 01753cd7d3f0..0e7394915c70 100644
|
|
--- a/arch/arm64/kernel/process.c
|
|
+++ b/arch/arm64/kernel/process.c
|
|
@@ -255,6 +255,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
|
|
|
|
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
|
|
|
|
+ /*
|
|
+ * In case p was allocated the same task_struct pointer as some
|
|
+ * other recently-exited task, make sure p is disassociated from
|
|
+ * any cpu that may have run that now-exited task recently.
|
|
+ * Otherwise we could erroneously skip reloading the FPSIMD
|
|
+ * registers for p.
|
|
+ */
|
|
+ fpsimd_flush_task_state(p);
|
|
+
|
|
if (likely(!(p->flags & PF_KTHREAD))) {
|
|
*childregs = *current_pt_regs();
|
|
childregs->regs[0] = 0;
|
|
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
|
|
index a204adf29f0a..85baadab02d3 100644
|
|
--- a/arch/arm64/kvm/handle_exit.c
|
|
+++ b/arch/arm64/kvm/handle_exit.c
|
|
@@ -125,7 +125,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
return ret;
|
|
}
|
|
|
|
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|
+{
|
|
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
|
|
+
|
|
+ kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
|
|
+ hsr, esr_get_class_string(hsr));
|
|
+
|
|
+ kvm_inject_undefined(vcpu);
|
|
+ return 1;
|
|
+}
|
|
+
|
|
static exit_handle_fn arm_exit_handlers[] = {
|
|
+ [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
|
|
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
|
|
[ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
|
|
[ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
|
|
@@ -151,13 +163,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
|
|
u32 hsr = kvm_vcpu_get_hsr(vcpu);
|
|
u8 hsr_ec = ESR_ELx_EC(hsr);
|
|
|
|
- if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
|
|
- !arm_exit_handlers[hsr_ec]) {
|
|
- kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
|
|
- hsr, esr_get_class_string(hsr));
|
|
- BUG();
|
|
- }
|
|
-
|
|
return arm_exit_handlers[hsr_ec];
|
|
}
|
|
|
|
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
|
|
index 617dece67924..a60c9c6e5cc1 100644
|
|
--- a/arch/powerpc/Makefile
|
|
+++ b/arch/powerpc/Makefile
|
|
@@ -72,8 +72,15 @@ GNUTARGET := powerpc
|
|
MULTIPLEWORD := -mmultiple
|
|
endif
|
|
|
|
-cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
|
|
+ifdef CONFIG_PPC64
|
|
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
|
|
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
|
|
+aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
|
|
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
|
|
+endif
|
|
+
|
|
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
|
|
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
|
|
ifneq ($(cc-name),clang)
|
|
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
|
|
endif
|
|
@@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
|
|
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
|
|
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
|
|
else
|
|
+CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
|
|
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
|
|
+AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
|
|
endif
|
|
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
|
|
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
|
|
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
|
|
index 1e8fceb308a5..a67bb09585f4 100644
|
|
--- a/arch/powerpc/include/asm/checksum.h
|
|
+++ b/arch/powerpc/include/asm/checksum.h
|
|
@@ -100,7 +100,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
|
|
|
|
#ifdef __powerpc64__
|
|
res += (__force u64)addend;
|
|
- return (__force __wsum)((u32)res + (res >> 32));
|
|
+ return (__force __wsum) from64to32(res);
|
|
#else
|
|
asm("addc %0,%0,%1;"
|
|
"addze %0,%0;"
|
|
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
|
|
index 7803756998e2..9e05c8828ee2 100644
|
|
--- a/arch/powerpc/kernel/cpu_setup_power.S
|
|
+++ b/arch/powerpc/kernel/cpu_setup_power.S
|
|
@@ -97,6 +97,7 @@ _GLOBAL(__setup_cpu_power9)
|
|
beqlr
|
|
li r0,0
|
|
mtspr SPRN_LPID,r0
|
|
+ mtspr SPRN_PID,r0
|
|
mfspr r3,SPRN_LPCR
|
|
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
|
|
or r3, r3, r4
|
|
@@ -119,6 +120,7 @@ _GLOBAL(__restore_cpu_power9)
|
|
beqlr
|
|
li r0,0
|
|
mtspr SPRN_LPID,r0
|
|
+ mtspr SPRN_PID,r0
|
|
mfspr r3,SPRN_LPCR
|
|
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
|
|
or r3, r3, r4
|
|
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
|
|
index 9a25dce87875..44c33ee397a0 100644
|
|
--- a/arch/powerpc/mm/pgtable-radix.c
|
|
+++ b/arch/powerpc/mm/pgtable-radix.c
|
|
@@ -173,6 +173,10 @@ static void __init radix_init_pgtable(void)
|
|
*/
|
|
register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
|
|
pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
|
|
+ asm volatile("ptesync" : : : "memory");
|
|
+ asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
|
|
+ "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
|
|
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
|
|
}
|
|
|
|
static void __init radix_init_partition_table(void)
|
|
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
index dcdfee0cd4f2..f602307a4386 100644
|
|
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
|
|
@@ -2623,6 +2623,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
|
|
level_shift = entries_shift + 3;
|
|
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
|
|
|
|
+ if ((level_shift - 3) * levels + page_shift >= 60)
|
|
+ return -EINVAL;
|
|
+
|
|
/* Allocate TCE table */
|
|
addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
|
|
levels, tce_table_size, &offset, &total_allocated);
|
|
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
|
|
index ada29eaed6e2..f523ac883150 100644
|
|
--- a/arch/powerpc/sysdev/axonram.c
|
|
+++ b/arch/powerpc/sysdev/axonram.c
|
|
@@ -274,7 +274,9 @@ static int axon_ram_probe(struct platform_device *device)
|
|
if (bank->disk->major > 0)
|
|
unregister_blkdev(bank->disk->major,
|
|
bank->disk->disk_name);
|
|
- del_gendisk(bank->disk);
|
|
+ if (bank->disk->flags & GENHD_FL_UP)
|
|
+ del_gendisk(bank->disk);
|
|
+ put_disk(bank->disk);
|
|
}
|
|
device->dev.platform_data = NULL;
|
|
if (bank->io_addr != 0)
|
|
@@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
|
|
device_remove_file(&device->dev, &dev_attr_ecc);
|
|
free_irq(bank->irq_id, device);
|
|
del_gendisk(bank->disk);
|
|
+ put_disk(bank->disk);
|
|
iounmap((void __iomem *) bank->io_addr);
|
|
kfree(bank);
|
|
|
|
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
|
|
index 9b59e6212d8f..709da452413d 100644
|
|
--- a/arch/s390/kernel/syscalls.S
|
|
+++ b/arch/s390/kernel/syscalls.S
|
|
@@ -369,10 +369,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
|
|
SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
|
|
SYSCALL(sys_socket,sys_socket)
|
|
SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */
|
|
-SYSCALL(sys_bind,sys_bind)
|
|
-SYSCALL(sys_connect,sys_connect)
|
|
+SYSCALL(sys_bind,compat_sys_bind)
|
|
+SYSCALL(sys_connect,compat_sys_connect)
|
|
SYSCALL(sys_listen,sys_listen)
|
|
-SYSCALL(sys_accept4,sys_accept4)
|
|
+SYSCALL(sys_accept4,compat_sys_accept4)
|
|
SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */
|
|
SYSCALL(sys_setsockopt,compat_sys_setsockopt)
|
|
SYSCALL(sys_getsockname,compat_sys_getsockname)
|
|
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
|
|
index e18435355c16..c2905a10cb37 100644
|
|
--- a/arch/s390/kvm/priv.c
|
|
+++ b/arch/s390/kvm/priv.c
|
|
@@ -197,8 +197,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
|
|
VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
|
|
return -EAGAIN;
|
|
}
|
|
- if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
- return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
return 0;
|
|
}
|
|
|
|
@@ -209,6 +207,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
|
|
int reg1, reg2;
|
|
int rc;
|
|
|
|
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
+
|
|
rc = try_handle_skey(vcpu);
|
|
if (rc)
|
|
return rc != -EAGAIN ? rc : 0;
|
|
@@ -238,6 +239,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
|
|
int reg1, reg2;
|
|
int rc;
|
|
|
|
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
+
|
|
rc = try_handle_skey(vcpu);
|
|
if (rc)
|
|
return rc != -EAGAIN ? rc : 0;
|
|
@@ -273,6 +277,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
|
|
int reg1, reg2;
|
|
int rc;
|
|
|
|
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
+
|
|
rc = try_handle_skey(vcpu);
|
|
if (rc)
|
|
return rc != -EAGAIN ? rc : 0;
|
|
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
|
|
index 57154c638e71..0f183ffe3416 100644
|
|
--- a/arch/sparc/mm/init_64.c
|
|
+++ b/arch/sparc/mm/init_64.c
|
|
@@ -2391,9 +2391,16 @@ void __init mem_init(void)
|
|
{
|
|
high_memory = __va(last_valid_pfn << PAGE_SHIFT);
|
|
|
|
- register_page_bootmem_info();
|
|
free_all_bootmem();
|
|
|
|
+ /*
|
|
+ * Must be done after boot memory is put on freelist, because here we
|
|
+ * might set fields in deferred struct pages that have not yet been
|
|
+ * initialized, and free_all_bootmem() initializes all the reserved
|
|
+ * deferred pages for us.
|
|
+ */
|
|
+ register_page_bootmem_info();
|
|
+
|
|
/*
|
|
* Set up the zero page, mark it reserved, so that page count
|
|
* is not manipulated when freeing the page from user ptes.
|
|
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
|
|
index bdde80731f49..cbd1d44da2d3 100644
|
|
--- a/arch/x86/include/asm/kvm_host.h
|
|
+++ b/arch/x86/include/asm/kvm_host.h
|
|
@@ -1397,4 +1397,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
|
|
#endif
|
|
}
|
|
|
|
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
|
|
+ unsigned long start, unsigned long end);
|
|
+
|
|
#endif /* _ASM_X86_KVM_HOST_H */
|
|
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
|
|
index 932348fbb6ea..9512529e8eab 100644
|
|
--- a/arch/x86/kernel/hpet.c
|
|
+++ b/arch/x86/kernel/hpet.c
|
|
@@ -354,7 +354,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
|
|
|
|
irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
|
|
irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
|
|
- disable_irq(hdev->irq);
|
|
+ disable_hardirq(hdev->irq);
|
|
irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
|
|
enable_irq(hdev->irq);
|
|
}
|
|
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
|
|
index f0d3de153e29..9aa62ab13ae8 100644
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -6413,12 +6413,7 @@ static __init int hardware_setup(void)
|
|
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
|
|
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
|
|
|
|
- /*
|
|
- * Allow direct access to the PC debug port (it is often used for I/O
|
|
- * delays, but the vmexits simply slow things down).
|
|
- */
|
|
memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
|
|
- clear_bit(0x80, vmx_io_bitmap_a);
|
|
|
|
memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
|
|
|
|
@@ -7208,9 +7203,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
|
|
static int handle_vmclear(struct kvm_vcpu *vcpu)
|
|
{
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
+ u32 zero = 0;
|
|
gpa_t vmptr;
|
|
- struct vmcs12 *vmcs12;
|
|
- struct page *page;
|
|
|
|
if (!nested_vmx_check_permission(vcpu))
|
|
return 1;
|
|
@@ -7221,22 +7215,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
|
|
if (vmptr == vmx->nested.current_vmptr)
|
|
nested_release_vmcs12(vmx);
|
|
|
|
- page = nested_get_page(vcpu, vmptr);
|
|
- if (page == NULL) {
|
|
- /*
|
|
- * For accurate processor emulation, VMCLEAR beyond available
|
|
- * physical memory should do nothing at all. However, it is
|
|
- * possible that a nested vmx bug, not a guest hypervisor bug,
|
|
- * resulted in this case, so let's shut down before doing any
|
|
- * more damage:
|
|
- */
|
|
- kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
|
|
- return 1;
|
|
- }
|
|
- vmcs12 = kmap(page);
|
|
- vmcs12->launch_state = 0;
|
|
- kunmap(page);
|
|
- nested_release_page(page);
|
|
+ kvm_vcpu_write_guest(vcpu,
|
|
+ vmptr + offsetof(struct vmcs12, launch_state),
|
|
+ &zero, sizeof(zero));
|
|
|
|
nested_free_vmcs02(vmx, vmptr);
|
|
|
|
@@ -10903,8 +10884,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
|
|
*/
|
|
static void vmx_leave_nested(struct kvm_vcpu *vcpu)
|
|
{
|
|
- if (is_guest_mode(vcpu))
|
|
+ if (is_guest_mode(vcpu)) {
|
|
+ to_vmx(vcpu)->nested.nested_run_pending = 0;
|
|
nested_vmx_vmexit(vcpu, -1, 0, 0);
|
|
+ }
|
|
free_nested(to_vmx(vcpu));
|
|
}
|
|
|
|
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
|
|
index 02d45296a97c..26b580ad268f 100644
|
|
--- a/arch/x86/kvm/x86.c
|
|
+++ b/arch/x86/kvm/x86.c
|
|
@@ -6526,6 +6526,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
|
|
kvm_x86_ops->tlb_flush(vcpu);
|
|
}
|
|
|
|
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
|
|
+ unsigned long start, unsigned long end)
|
|
+{
|
|
+ unsigned long apic_address;
|
|
+
|
|
+ /*
|
|
+ * The physical address of apic access page is stored in the VMCS.
|
|
+ * Update it when it becomes invalid.
|
|
+ */
|
|
+ apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
|
|
+ if (start <= apic_address && apic_address < end)
|
|
+ kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
|
|
+}
|
|
+
|
|
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
|
|
{
|
|
struct page *page = NULL;
|
|
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
|
|
index bb461cfd01ab..526536c81ddc 100644
|
|
--- a/arch/x86/pci/broadcom_bus.c
|
|
+++ b/arch/x86/pci/broadcom_bus.c
|
|
@@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
|
|
* We should get host bridge information from ACPI unless the BIOS
|
|
* doesn't support it.
|
|
*/
|
|
- if (acpi_os_get_root_pointer())
|
|
+ if (!acpi_disabled && acpi_os_get_root_pointer())
|
|
return 0;
|
|
#endif
|
|
|
|
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
|
|
index 9e42842e924a..0f0175186f1b 100644
|
|
--- a/arch/x86/platform/uv/tlb_uv.c
|
|
+++ b/arch/x86/platform/uv/tlb_uv.c
|
|
@@ -1848,7 +1848,6 @@ static void pq_init(int node, int pnode)
|
|
|
|
ops.write_payload_first(pnode, first);
|
|
ops.write_payload_last(pnode, last);
|
|
- ops.write_g_sw_ack(pnode, 0xffffUL);
|
|
|
|
/* in effect, all msg_type's are set to MSG_NOOP */
|
|
memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
|
|
diff --git a/block/blk-core.c b/block/blk-core.c
|
|
index b1c76aa73492..23daf40be371 100644
|
|
--- a/block/blk-core.c
|
|
+++ b/block/blk-core.c
|
|
@@ -527,8 +527,8 @@ void blk_set_queue_dying(struct request_queue *q)
|
|
|
|
blk_queue_for_each_rl(rl, q) {
|
|
if (rl->rq_pool) {
|
|
- wake_up(&rl->wait[BLK_RW_SYNC]);
|
|
- wake_up(&rl->wait[BLK_RW_ASYNC]);
|
|
+ wake_up_all(&rl->wait[BLK_RW_SYNC]);
|
|
+ wake_up_all(&rl->wait[BLK_RW_ASYNC]);
|
|
}
|
|
}
|
|
}
|
|
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
|
|
index 01fb455d3377..8c0894e0713b 100644
|
|
--- a/block/blk-mq-sysfs.c
|
|
+++ b/block/blk-mq-sysfs.c
|
|
@@ -429,7 +429,7 @@ void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
|
|
kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
|
|
}
|
|
|
|
-static void blk_mq_sysfs_init(struct request_queue *q)
|
|
+void blk_mq_sysfs_init(struct request_queue *q)
|
|
{
|
|
struct blk_mq_ctx *ctx;
|
|
int cpu;
|
|
@@ -449,8 +449,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)
|
|
|
|
blk_mq_disable_hotplug();
|
|
|
|
- blk_mq_sysfs_init(q);
|
|
-
|
|
ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
|
|
if (ret < 0)
|
|
goto out;
|
|
diff --git a/block/blk-mq.c b/block/blk-mq.c
|
|
index 7b597ec4e9c5..10f8f94b7f20 100644
|
|
--- a/block/blk-mq.c
|
|
+++ b/block/blk-mq.c
|
|
@@ -1707,7 +1707,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
|
|
struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
|
|
struct blk_mq_hw_ctx *hctx;
|
|
|
|
- memset(__ctx, 0, sizeof(*__ctx));
|
|
__ctx->cpu = i;
|
|
spin_lock_init(&__ctx->lock);
|
|
INIT_LIST_HEAD(&__ctx->rq_list);
|
|
@@ -1970,6 +1969,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
|
if (!q->queue_ctx)
|
|
goto err_exit;
|
|
|
|
+ /* init q->mq_kobj and sw queues' kobjects */
|
|
+ blk_mq_sysfs_init(q);
|
|
+
|
|
q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
|
|
GFP_KERNEL, set->numa_node);
|
|
if (!q->queue_hw_ctx)
|
|
diff --git a/block/blk-mq.h b/block/blk-mq.h
|
|
index e5d25249028c..c55bcf67b956 100644
|
|
--- a/block/blk-mq.h
|
|
+++ b/block/blk-mq.h
|
|
@@ -50,6 +50,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
|
|
/*
|
|
* sysfs helpers
|
|
*/
|
|
+extern void blk_mq_sysfs_init(struct request_queue *q);
|
|
extern int blk_mq_sysfs_register(struct request_queue *q);
|
|
extern void blk_mq_sysfs_unregister(struct request_queue *q);
|
|
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
|
|
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
|
|
index 2ffd69769466..5a37962d2199 100644
|
|
--- a/crypto/asymmetric_keys/pkcs7_verify.c
|
|
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
|
|
@@ -150,7 +150,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
|
|
pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
|
|
sinfo->index, certix);
|
|
|
|
- if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) {
|
|
+ if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
|
|
pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
|
|
sinfo->index);
|
|
continue;
|
|
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
|
|
index c80765b211cf..029f7051f2be 100644
|
|
--- a/crypto/asymmetric_keys/x509_cert_parser.c
|
|
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
|
|
@@ -408,6 +408,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
|
|
ctx->cert->pub->pkey_algo = "rsa";
|
|
|
|
/* Discard the BIT STRING metadata */
|
|
+ if (vlen < 1 || *(const u8 *)value != 0)
|
|
+ return -EBADMSG;
|
|
ctx->key = value + 1;
|
|
ctx->key_size = vlen - 1;
|
|
return 0;
|
|
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
|
|
index fb732296cd36..e16009a8da9c 100644
|
|
--- a/crypto/asymmetric_keys/x509_public_key.c
|
|
+++ b/crypto/asymmetric_keys/x509_public_key.c
|
|
@@ -125,7 +125,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
|
|
}
|
|
|
|
ret = -EKEYREJECTED;
|
|
- if (cert->pub->pkey_algo != cert->sig->pkey_algo)
|
|
+ if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
|
|
goto out;
|
|
|
|
ret = public_key_verify_signature(cert->pub, cert->sig);
|
|
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
|
|
index 051b6158d1b7..8d22acdf90f0 100644
|
|
--- a/drivers/ata/libata-sff.c
|
|
+++ b/drivers/ata/libata-sff.c
|
|
@@ -1481,7 +1481,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
|
|
break;
|
|
|
|
default:
|
|
- WARN_ON_ONCE(1);
|
|
return AC_ERR_SYSTEM;
|
|
}
|
|
|
|
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
|
|
index 5fc81e240c24..e55f418d6ab9 100644
|
|
--- a/drivers/atm/horizon.c
|
|
+++ b/drivers/atm/horizon.c
|
|
@@ -2802,7 +2802,7 @@ static int hrz_probe(struct pci_dev *pci_dev,
|
|
return err;
|
|
|
|
out_free_irq:
|
|
- free_irq(dev->irq, dev);
|
|
+ free_irq(irq, dev);
|
|
out_free:
|
|
kfree(dev);
|
|
out_release:
|
|
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
|
|
index cd6ccdcf9df0..372d10af2600 100644
|
|
--- a/drivers/base/isa.c
|
|
+++ b/drivers/base/isa.c
|
|
@@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
|
|
{
|
|
struct isa_driver *isa_driver = dev->platform_data;
|
|
|
|
- if (isa_driver->probe)
|
|
+ if (isa_driver && isa_driver->probe)
|
|
return isa_driver->probe(dev, to_isa_dev(dev)->id);
|
|
|
|
return 0;
|
|
@@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
|
|
{
|
|
struct isa_driver *isa_driver = dev->platform_data;
|
|
|
|
- if (isa_driver->remove)
|
|
+ if (isa_driver && isa_driver->remove)
|
|
return isa_driver->remove(dev, to_isa_dev(dev)->id);
|
|
|
|
return 0;
|
|
@@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
|
|
{
|
|
struct isa_driver *isa_driver = dev->platform_data;
|
|
|
|
- if (isa_driver->shutdown)
|
|
+ if (isa_driver && isa_driver->shutdown)
|
|
isa_driver->shutdown(dev, to_isa_dev(dev)->id);
|
|
}
|
|
|
|
@@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
|
|
{
|
|
struct isa_driver *isa_driver = dev->platform_data;
|
|
|
|
- if (isa_driver->suspend)
|
|
+ if (isa_driver && isa_driver->suspend)
|
|
return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
|
|
|
|
return 0;
|
|
@@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
|
|
{
|
|
struct isa_driver *isa_driver = dev->platform_data;
|
|
|
|
- if (isa_driver->resume)
|
|
+ if (isa_driver && isa_driver->resume)
|
|
return isa_driver->resume(dev, to_isa_dev(dev)->id);
|
|
|
|
return 0;
|
|
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
|
|
index c9914d653968..b7c0b69a02f5 100644
|
|
--- a/drivers/block/zram/zram_drv.c
|
|
+++ b/drivers/block/zram/zram_drv.c
|
|
@@ -1286,6 +1286,8 @@ static int zram_add(void)
|
|
blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
|
|
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
|
|
zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
|
|
+ zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
|
|
+ zram->disk->queue->limits.chunk_sectors = 0;
|
|
blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
|
|
/*
|
|
* zram_bio_discard() will clear all logical blocks if logical block
|
|
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
|
|
index 890082315054..10f56133b281 100644
|
|
--- a/drivers/bus/arm-cci.c
|
|
+++ b/drivers/bus/arm-cci.c
|
|
@@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev)
|
|
raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
|
|
mutex_init(&cci_pmu->reserve_mutex);
|
|
atomic_set(&cci_pmu->active_events, 0);
|
|
- cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
|
|
+ cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
|
|
|
|
ret = cci_pmu_init(cci_pmu, pdev);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ put_cpu();
|
|
return ret;
|
|
+ }
|
|
|
|
cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
|
|
&cci_pmu->node);
|
|
+ put_cpu();
|
|
pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
|
|
index aee83462b796..f0249899fc96 100644
|
|
--- a/drivers/bus/arm-ccn.c
|
|
+++ b/drivers/bus/arm-ccn.c
|
|
@@ -1271,6 +1271,10 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
|
|
int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
|
|
|
|
name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
|
|
+ if (!name) {
|
|
+ err = -ENOMEM;
|
|
+ goto error_choose_name;
|
|
+ }
|
|
snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
|
|
}
|
|
|
|
@@ -1297,7 +1301,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
|
|
}
|
|
|
|
/* Pick one CPU which we will use to collect data from CCN... */
|
|
- cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
|
|
+ cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
|
|
|
|
/* Also make sure that the overflow interrupt is handled by this CPU */
|
|
if (ccn->irq) {
|
|
@@ -1314,10 +1318,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
|
|
|
|
cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
|
|
&ccn->dt.node);
|
|
+ put_cpu();
|
|
return 0;
|
|
|
|
error_pmu_register:
|
|
error_set_affinity:
|
|
+ put_cpu();
|
|
+error_choose_name:
|
|
ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
|
|
for (i = 0; i < ccn->num_xps; i++)
|
|
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
|
|
@@ -1578,8 +1585,8 @@ static int __init arm_ccn_init(void)
|
|
|
|
static void __exit arm_ccn_exit(void)
|
|
{
|
|
- cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
|
|
platform_driver_unregister(&arm_ccn_driver);
|
|
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
|
|
}
|
|
|
|
module_init(arm_ccn_init);
|
|
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
|
|
index 5d029991047d..481225adef87 100644
|
|
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
|
|
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
|
|
@@ -98,7 +98,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
|
|
const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
|
|
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1), /* 2400 MHz */
|
|
UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1), /* 2560 MHz */
|
|
- UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */
|
|
+ UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */
|
|
UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
|
|
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
|
|
UNIPHIER_PRO5_SYS_CLK_SD,
|
|
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
|
|
index dce1af0ce85c..a668286d62cb 100644
|
|
--- a/drivers/crypto/s5p-sss.c
|
|
+++ b/drivers/crypto/s5p-sss.c
|
|
@@ -805,8 +805,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
|
|
dev_warn(dev, "feed control interrupt is not available.\n");
|
|
goto err_irq;
|
|
}
|
|
- err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
|
|
- IRQF_SHARED, pdev->name, pdev);
|
|
+ err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
|
|
+ s5p_aes_interrupt, IRQF_ONESHOT,
|
|
+ pdev->name, pdev);
|
|
if (err < 0) {
|
|
dev_warn(dev, "feed control interrupt is not available.\n");
|
|
goto err_irq;
|
|
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
|
|
index e2d323fa2437..1c8d79d93098 100644
|
|
--- a/drivers/crypto/talitos.c
|
|
+++ b/drivers/crypto/talitos.c
|
|
@@ -1232,12 +1232,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
|
sg_link_tbl_len += authsize;
|
|
}
|
|
|
|
- sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
|
|
- &desc->ptr[4], sg_count, areq->assoclen,
|
|
- tbl_off);
|
|
+ ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
|
|
+ &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
|
|
|
|
- if (sg_count > 1) {
|
|
- tbl_off += sg_count;
|
|
+ if (ret > 1) {
|
|
+ tbl_off += ret;
|
|
sync_needed = true;
|
|
}
|
|
|
|
@@ -1248,14 +1247,15 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
|
dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
|
|
}
|
|
|
|
- sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
|
|
- &desc->ptr[5], sg_count, areq->assoclen,
|
|
- tbl_off);
|
|
+ ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
|
|
+ sg_count, areq->assoclen, tbl_off);
|
|
|
|
if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
|
|
to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
|
|
|
|
- if (sg_count > 1) {
|
|
+ /* ICV data */
|
|
+ if (ret > 1) {
|
|
+ tbl_off += ret;
|
|
edesc->icv_ool = true;
|
|
sync_needed = true;
|
|
|
|
@@ -1265,9 +1265,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
|
sizeof(struct talitos_ptr) + authsize;
|
|
|
|
/* Add an entry to the link table for ICV data */
|
|
- tbl_ptr += sg_count - 1;
|
|
- to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
|
|
- tbl_ptr++;
|
|
+ to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
|
|
to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
|
|
is_sec1);
|
|
to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
|
|
@@ -1275,18 +1273,33 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
|
/* icv data follows link tables */
|
|
to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
|
|
is_sec1);
|
|
+ } else {
|
|
+ dma_addr_t addr = edesc->dma_link_tbl;
|
|
+
|
|
+ if (is_sec1)
|
|
+ addr += areq->assoclen + cryptlen;
|
|
+ else
|
|
+ addr += sizeof(struct talitos_ptr) * tbl_off;
|
|
+
|
|
+ to_talitos_ptr(&desc->ptr[6], addr, is_sec1);
|
|
+ to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
|
|
+ }
|
|
+ } else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
|
|
+ ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
|
|
+ &desc->ptr[6], sg_count, areq->assoclen +
|
|
+ cryptlen,
|
|
+ tbl_off);
|
|
+ if (ret > 1) {
|
|
+ tbl_off += ret;
|
|
+ edesc->icv_ool = true;
|
|
+ sync_needed = true;
|
|
+ } else {
|
|
+ edesc->icv_ool = false;
|
|
}
|
|
} else {
|
|
edesc->icv_ool = false;
|
|
}
|
|
|
|
- /* ICV data */
|
|
- if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
|
|
- to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
|
|
- to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
|
|
- areq->assoclen + cryptlen, is_sec1);
|
|
- }
|
|
-
|
|
/* iv out */
|
|
if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
|
|
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
|
|
@@ -1494,12 +1507,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
|
|
const u8 *key, unsigned int keylen)
|
|
{
|
|
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
|
|
+ u32 tmp[DES_EXPKEY_WORDS];
|
|
|
|
if (keylen > TALITOS_MAX_KEY_SIZE) {
|
|
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
|
return -EINVAL;
|
|
}
|
|
|
|
+ if (unlikely(crypto_ablkcipher_get_flags(cipher) &
|
|
+ CRYPTO_TFM_REQ_WEAK_KEY) &&
|
|
+ !des_ekey(tmp, key)) {
|
|
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
memcpy(&ctx->key, key, keylen);
|
|
ctx->keylen = keylen;
|
|
|
|
@@ -2614,7 +2635,7 @@ static struct talitos_alg_template driver_algs[] = {
|
|
.ivsize = AES_BLOCK_SIZE,
|
|
}
|
|
},
|
|
- .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
|
|
+ .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
|
|
DESC_HDR_SEL0_AESU |
|
|
DESC_HDR_MODE0_AESU_CTR,
|
|
},
|
|
@@ -3047,6 +3068,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
|
|
t_alg->algt.alg.aead.setkey = aead_setkey;
|
|
t_alg->algt.alg.aead.encrypt = aead_encrypt;
|
|
t_alg->algt.alg.aead.decrypt = aead_decrypt;
|
|
+ if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
|
|
+ !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
|
|
+ kfree(t_alg);
|
|
+ return ERR_PTR(-ENOTSUPP);
|
|
+ }
|
|
break;
|
|
case CRYPTO_ALG_TYPE_AHASH:
|
|
alg = &t_alg->algt.alg.hash.halg.base;
|
|
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
|
|
index 72e07e3cf718..16e0eb523439 100644
|
|
--- a/drivers/edac/i5000_edac.c
|
|
+++ b/drivers/edac/i5000_edac.c
|
|
@@ -227,7 +227,7 @@
|
|
#define NREC_RDWR(x) (((x)>>11) & 1)
|
|
#define NREC_RANK(x) (((x)>>8) & 0x7)
|
|
#define NRECMEMB 0xC0
|
|
-#define NREC_CAS(x) (((x)>>16) & 0xFFFFFF)
|
|
+#define NREC_CAS(x) (((x)>>16) & 0xFFF)
|
|
#define NREC_RAS(x) ((x) & 0x7FFF)
|
|
#define NRECFGLOG 0xC4
|
|
#define NREEECFBDA 0xC8
|
|
@@ -371,7 +371,7 @@ struct i5000_error_info {
|
|
/* These registers are input ONLY if there was a
|
|
* Non-Recoverable Error */
|
|
u16 nrecmema; /* Non-Recoverable Mem log A */
|
|
- u16 nrecmemb; /* Non-Recoverable Mem log B */
|
|
+ u32 nrecmemb; /* Non-Recoverable Mem log B */
|
|
|
|
};
|
|
|
|
@@ -407,7 +407,7 @@ static void i5000_get_error_info(struct mem_ctl_info *mci,
|
|
NERR_FAT_FBD, &info->nerr_fat_fbd);
|
|
pci_read_config_word(pvt->branchmap_werrors,
|
|
NRECMEMA, &info->nrecmema);
|
|
- pci_read_config_word(pvt->branchmap_werrors,
|
|
+ pci_read_config_dword(pvt->branchmap_werrors,
|
|
NRECMEMB, &info->nrecmemb);
|
|
|
|
/* Clear the error bits, by writing them back */
|
|
@@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
|
|
dimm->mtype = MEM_FB_DDR2;
|
|
|
|
/* ask what device type on this row */
|
|
- if (MTR_DRAM_WIDTH(mtr))
|
|
+ if (MTR_DRAM_WIDTH(mtr) == 8)
|
|
dimm->dtype = DEV_X8;
|
|
else
|
|
dimm->dtype = DEV_X4;
|
|
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
|
|
index 6ef6ad1ba16e..2ea2f32e608b 100644
|
|
--- a/drivers/edac/i5400_edac.c
|
|
+++ b/drivers/edac/i5400_edac.c
|
|
@@ -368,7 +368,7 @@ struct i5400_error_info {
|
|
|
|
/* These registers are input ONLY if there was a Non-Rec Error */
|
|
u16 nrecmema; /* Non-Recoverable Mem log A */
|
|
- u16 nrecmemb; /* Non-Recoverable Mem log B */
|
|
+ u32 nrecmemb; /* Non-Recoverable Mem log B */
|
|
|
|
};
|
|
|
|
@@ -458,7 +458,7 @@ static void i5400_get_error_info(struct mem_ctl_info *mci,
|
|
NERR_FAT_FBD, &info->nerr_fat_fbd);
|
|
pci_read_config_word(pvt->branchmap_werrors,
|
|
NRECMEMA, &info->nrecmema);
|
|
- pci_read_config_word(pvt->branchmap_werrors,
|
|
+ pci_read_config_dword(pvt->branchmap_werrors,
|
|
NRECMEMB, &info->nrecmemb);
|
|
|
|
/* Clear the error bits, by writing them back */
|
|
@@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
|
|
|
|
dimm->nr_pages = size_mb << 8;
|
|
dimm->grain = 8;
|
|
- dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
|
|
+ dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
|
|
+ DEV_X8 : DEV_X4;
|
|
dimm->mtype = MEM_FB_DDR2;
|
|
/*
|
|
* The eccc mechanism is SDDC (aka SECC), with
|
|
* is similar to Chipkill.
|
|
*/
|
|
- dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
|
|
+ dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
|
|
EDAC_S8ECD8ED : EDAC_S4ECD4ED;
|
|
ndimms++;
|
|
}
|
|
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
|
|
index a4944e22f294..2f48f848865f 100644
|
|
--- a/drivers/firmware/efi/efi.c
|
|
+++ b/drivers/firmware/efi/efi.c
|
|
@@ -120,8 +120,7 @@ static ssize_t systab_show(struct kobject *kobj,
|
|
return str - buf;
|
|
}
|
|
|
|
-static struct kobj_attribute efi_attr_systab =
|
|
- __ATTR(systab, 0400, systab_show, NULL);
|
|
+static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
|
|
|
|
#define EFI_FIELD(var) efi.var
|
|
|
|
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
|
|
index 14914074f716..307ec1c11276 100644
|
|
--- a/drivers/firmware/efi/esrt.c
|
|
+++ b/drivers/firmware/efi/esrt.c
|
|
@@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = {
|
|
};
|
|
|
|
/* Generic ESRT Entry ("ESRE") support. */
|
|
-static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
|
|
+static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
|
|
{
|
|
char *str = buf;
|
|
|
|
@@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
|
|
return str - buf;
|
|
}
|
|
|
|
-static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
|
|
- esre_fw_class_show, NULL);
|
|
+static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
|
|
|
|
#define esre_attr_decl(name, size, fmt) \
|
|
-static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
|
|
+static ssize_t name##_show(struct esre_entry *entry, char *buf) \
|
|
{ \
|
|
return sprintf(buf, fmt "\n", \
|
|
le##size##_to_cpu(entry->esre.esre1->name)); \
|
|
} \
|
|
\
|
|
-static struct esre_attribute esre_##name = __ATTR(name, 0400, \
|
|
- esre_##name##_show, NULL)
|
|
+static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
|
|
|
|
esre_attr_decl(fw_type, 32, "%u");
|
|
esre_attr_decl(fw_version, 32, "%u");
|
|
@@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
|
|
|
|
/* support for displaying ESRT fields at the top level */
|
|
#define esrt_attr_decl(name, size, fmt) \
|
|
-static ssize_t esrt_##name##_show(struct kobject *kobj, \
|
|
+static ssize_t name##_show(struct kobject *kobj, \
|
|
struct kobj_attribute *attr, char *buf)\
|
|
{ \
|
|
return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
|
|
} \
|
|
\
|
|
-static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
|
|
- esrt_##name##_show, NULL)
|
|
+static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
|
|
|
|
esrt_attr_decl(fw_resource_count, 32, "%u");
|
|
esrt_attr_decl(fw_resource_count_max, 32, "%u");
|
|
@@ -431,7 +428,7 @@ static int __init esrt_sysfs_init(void)
|
|
err_remove_esrt:
|
|
kobject_put(esrt_kobj);
|
|
err:
|
|
- kfree(esrt);
|
|
+ memunmap(esrt);
|
|
esrt = NULL;
|
|
return error;
|
|
}
|
|
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
|
|
index 8e64b77aeac9..f377609ff141 100644
|
|
--- a/drivers/firmware/efi/runtime-map.c
|
|
+++ b/drivers/firmware/efi/runtime-map.c
|
|
@@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
|
|
return map_attr->show(entry, buf);
|
|
}
|
|
|
|
-static struct map_attribute map_type_attr = __ATTR_RO(type);
|
|
-static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr);
|
|
-static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr);
|
|
-static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages);
|
|
-static struct map_attribute map_attribute_attr = __ATTR_RO(attribute);
|
|
+static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
|
|
+static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
|
|
+static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
|
|
+static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
|
|
+static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
|
|
|
|
/*
|
|
* These are default attributes that are added for every memmap entry.
|
|
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
|
|
index 5bddbd507ca9..3fe6a21e05a5 100644
|
|
--- a/drivers/gpio/gpio-altera.c
|
|
+++ b/drivers/gpio/gpio-altera.c
|
|
@@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
|
|
|
|
altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
|
|
|
|
- if (type == IRQ_TYPE_NONE)
|
|
+ if (type == IRQ_TYPE_NONE) {
|
|
+ irq_set_handler_locked(d, handle_bad_irq);
|
|
return 0;
|
|
- if (type == IRQ_TYPE_LEVEL_HIGH &&
|
|
- altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
|
|
- return 0;
|
|
- if (type == IRQ_TYPE_EDGE_RISING &&
|
|
- altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
|
|
- return 0;
|
|
- if (type == IRQ_TYPE_EDGE_FALLING &&
|
|
- altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
|
|
- return 0;
|
|
- if (type == IRQ_TYPE_EDGE_BOTH &&
|
|
- altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
|
|
+ }
|
|
+ if (type == altera_gc->interrupt_trigger) {
|
|
+ if (type == IRQ_TYPE_LEVEL_HIGH)
|
|
+ irq_set_handler_locked(d, handle_level_irq);
|
|
+ else
|
|
+ irq_set_handler_locked(d, handle_simple_irq);
|
|
return 0;
|
|
-
|
|
+ }
|
|
+ irq_set_handler_locked(d, handle_bad_irq);
|
|
return -EINVAL;
|
|
}
|
|
|
|
@@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
|
|
chained_irq_exit(chip, desc);
|
|
}
|
|
|
|
-
|
|
static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
|
|
{
|
|
struct altera_gpio_chip *altera_gc;
|
|
@@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
|
|
altera_gc->interrupt_trigger = reg;
|
|
|
|
ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
|
|
- handle_simple_irq, IRQ_TYPE_NONE);
|
|
+ handle_bad_irq, IRQ_TYPE_NONE);
|
|
|
|
if (ret) {
|
|
dev_err(&pdev->dev, "could not add irqchip\n");
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
|
index e41d4baebf86..ce9797b6f9c7 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
|
@@ -2020,8 +2020,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
|
|
}
|
|
|
|
r = amdgpu_late_init(adev);
|
|
- if (r)
|
|
+ if (r) {
|
|
+ if (fbcon)
|
|
+ console_unlock();
|
|
return r;
|
|
+ }
|
|
|
|
/* pin cursors */
|
|
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
|
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
|
|
index 26412d2f8c98..ffd673615772 100644
|
|
--- a/drivers/gpu/drm/armada/Makefile
|
|
+++ b/drivers/gpu/drm/armada/Makefile
|
|
@@ -4,5 +4,3 @@ armada-y += armada_510.o
|
|
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
|
|
|
|
obj-$(CONFIG_DRM_ARMADA) := armada.o
|
|
-
|
|
-CFLAGS_armada_trace.o := -I$(src)
|
|
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
|
|
index f2ae72ba7d5a..2abc47b554ab 100644
|
|
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
|
|
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
|
|
@@ -246,6 +246,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
|
|
if (IS_ERR(exynos_gem))
|
|
return exynos_gem;
|
|
|
|
+ if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
|
|
+ /*
|
|
+ * when no IOMMU is available, all allocated buffers are
|
|
+ * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
|
|
+ */
|
|
+ flags &= ~EXYNOS_BO_NONCONTIG;
|
|
+ DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
|
|
+ }
|
|
+
|
|
/* set memory type and cache attribute from user side. */
|
|
exynos_gem->flags = flags;
|
|
|
|
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
|
|
index cd4599c0523b..db607d51ee2b 100644
|
|
--- a/drivers/hid/Kconfig
|
|
+++ b/drivers/hid/Kconfig
|
|
@@ -175,11 +175,11 @@ config HID_CHERRY
|
|
Support for Cherry Cymotion keyboard.
|
|
|
|
config HID_CHICONY
|
|
- tristate "Chicony Tactical pad"
|
|
+ tristate "Chicony devices"
|
|
depends on HID
|
|
default !EXPERT
|
|
---help---
|
|
- Support for Chicony Tactical pad.
|
|
+ Support for Chicony Tactical pad and special keys on Chicony keyboards.
|
|
|
|
config HID_CORSAIR
|
|
tristate "Corsair devices"
|
|
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
|
|
index bc3cec199fee..f04ed9aabc3f 100644
|
|
--- a/drivers/hid/hid-chicony.c
|
|
+++ b/drivers/hid/hid-chicony.c
|
|
@@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = {
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
|
|
{ }
|
|
};
|
|
MODULE_DEVICE_TABLE(hid, ch_devices);
|
|
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
|
|
index 4f3f5749b0c1..bdde8859e191 100644
|
|
--- a/drivers/hid/hid-core.c
|
|
+++ b/drivers/hid/hid-core.c
|
|
@@ -1906,6 +1906,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
|
|
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
|
|
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
|
|
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
|
|
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
|
|
index 08fd3f831d62..433d5f675c03 100644
|
|
--- a/drivers/hid/hid-ids.h
|
|
+++ b/drivers/hid/hid-ids.h
|
|
@@ -558,6 +558,7 @@
|
|
|
|
#define USB_VENDOR_ID_JESS 0x0c45
|
|
#define USB_DEVICE_ID_JESS_YUREX 0x1010
|
|
+#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112
|
|
|
|
#define USB_VENDOR_ID_JESS2 0x0f30
|
|
#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
|
|
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 8f11d347b3ec..c811af4c8d81 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
}

if (riic->is_last || riic->err) {
- riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
+ riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+ } else {
+ /* Transfer is complete, but do not send STOP */
+ riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
+ complete(&riic->msg_done);
}

return IRQ_HANDLED;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c22454383976..709d6491d243 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1669,7 +1669,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->mtu_msgmax = (IB_MTU_4096 << 5) |
ilog2(dev->dev->caps.max_gso_sz);
else
- context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+ context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
pr_err("path MTU (%u) is invalid\n",
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 786f640fc462..a2120ff0ef4c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2514,6 +2514,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = MLX5_IB_QPT_REG_UMR;
+ qp->send_cq = init_attr->send_cq;
+ qp->recv_cq = init_attr->recv_cq;

attr->qp_state = IB_QPS_INIT;
attr->port_num = 1;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 002f8a421efa..88bbc8ccc5e3 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2245,10 +2245,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
uint64_t tmp;

if (!sg_res) {
+ unsigned int pgoff = sg->offset & ~PAGE_MASK;
+
sg_res = aligned_nrpages(sg->offset, sg->length);
- sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+ sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
sg->dma_length = sg->length;
- pteval = page_to_phys(sg_page(sg)) | prot;
+ pteval = (sg_phys(sg) - pgoff) | prot;
phys_pfn = pteval >> VTD_PAGE_SHIFT;
}

@@ -3894,7 +3896,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,

for_each_sg(sglist, sg, nelems, i) {
BUG_ON(!sg_page(sg));
- sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+ sg->dma_address = sg_phys(sg);
sg->dma_length = sg->length;
}
return nelems;
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 05bbf171df37..1070b7b959f2 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -199,7 +199,7 @@ static const struct irq_domain_ops crossbar_domain_ops = {
static int __init crossbar_of_init(struct device_node *node)
{
int i, size, reserved = 0;
- u32 max = 0, entry;
+ u32 max = 0, entry, reg_size;
const __be32 *irqsr;
int ret = -ENOMEM;

@@ -276,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node)
if (!cb->register_offsets)
goto err_irq_map;

- of_property_read_u32(node, "ti,reg-size", &size);
+ of_property_read_u32(node, "ti,reg-size", &reg_size);

- switch (size) {
+ switch (reg_size) {
case 1:
cb->write = crossbar_writeb;
break;
@@ -304,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node)
continue;

cb->register_offsets[i] = reserved;
- reserved += size;
+ reserved += reg_size;
}

of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 6ebe89551961..f4509ef9922b 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -446,6 +446,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
return -ERESTARTSYS;

ir = irctls[iminor(inode)];
+ mutex_unlock(&lirc_dev_lock);
+
if (!ir) {
retval = -ENODEV;
goto error;
@@ -486,8 +488,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
}

error:
- mutex_unlock(&lirc_dev_lock);
-
nonseekable_open(inode, file);

return retval;
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 8207e6900656..bcacb0f22028 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo);

int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
{
- u8 wbuf[1] = { offs };
- return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
+ u8 *buf;
+ int rc;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = offs;
+
+ rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
+ *val = buf[1];
+ kfree(buf);
+
+ return rc;
}
EXPORT_SYMBOL(dibusb_read_eeprom_byte);

diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 5457c361ad58..bf0fe0137dfe 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev,
if (!of_property_read_u32(child, "dma-channel", &val))
gpmc_onenand_data->dma_channel = val;

- gpmc_onenand_init(gpmc_onenand_data);
-
- return 0;
+ return gpmc_onenand_init(gpmc_onenand_data);
}
#else
static int gpmc_probe_onenand_child(struct platform_device *pdev,
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 6749b1829469..4d01d7bc24ef 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -652,6 +652,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
mbx_mask = hecc_read(priv, HECC_CANMIM);
mbx_mask |= HECC_TX_MBOX_MASK;
hecc_write(priv, HECC_CANMIM, mbx_mask);
+ } else {
+ /* repoll is done only if whole budget is used */
+ num_pkts = quota;
}

return num_pkts;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b3d02759c226..b00358297424 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)

case -ECONNRESET: /* unlink */
case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
case -ESHUTDOWN:
return;

diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 9fdb0f0bfa06..c6dcf93675c0 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
break;

case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
case -ESHUTDOWN:
return;

diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4224e066cb16..c9d61a6dfb7a 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
}

if (pos + tmp->len > actual_len) {
- dev_err(dev->udev->dev.parent,
- "Format error\n");
+ dev_err_ratelimited(dev->udev->dev.parent,
+ "Format error\n");
break;
}

@@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
if (err) {
netdev_err(netdev, "Error transmitting URB\n");
usb_unanchor_urb(urb);
+ kfree(buf);
usb_free_urb(urb);
return err;
}
@@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
case 0:
break;
case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
case -ESHUTDOWN:
return;
default:
@@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
goto resubmit_urb;
}

- while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+ while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
msg = urb->transfer_buffer + pos;

/* The Kvaser firmware can only read and write messages that
@@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
}

if (pos + msg->len > urb->actual_length) {
- dev_err(dev->udev->dev.parent, "Format error\n");
+ dev_err_ratelimited(dev->udev->dev.parent,
+ "Format error\n");
break;
}

@@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);

usb_unanchor_urb(urb);
+ kfree(buf);

stats->tx_dropped++;

diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d000cb62d6ae..27861c417c94 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
break;

case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
case -ESHUTDOWN:
return;

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
index 4febe60eadc2..5d958b5bb8b1 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
|
|
@@ -13293,17 +13293,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
|
|
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
|
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
|
|
|
|
- /* VF with OLD Hypervisor or old PF do not support filtering */
|
|
if (IS_PF(bp)) {
|
|
if (chip_is_e1x)
|
|
bp->accept_any_vlan = true;
|
|
else
|
|
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
|
-#ifdef CONFIG_BNX2X_SRIOV
|
|
- } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
|
|
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
|
-#endif
|
|
}
|
|
+ /* For VF we'll know whether to enable VLAN filtering after
|
|
+ * getting a response to CHANNEL_TLV_ACQUIRE from PF.
|
|
+ */
|
|
|
|
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
|
|
dev->features |= NETIF_F_HIGHDMA;
|
|
@@ -13735,7 +13733,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
|
|
if (!netif_running(bp->dev)) {
|
|
DP(BNX2X_MSG_PTP,
|
|
"PTP adjfreq called while the interface is down\n");
|
|
- return -EFAULT;
|
|
+ return -ENETDOWN;
|
|
}
|
|
|
|
if (ppb < 0) {
|
|
@@ -13794,6 +13792,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
|
|
{
|
|
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
|
|
|
|
+ if (!netif_running(bp->dev)) {
|
|
+ DP(BNX2X_MSG_PTP,
|
|
+ "PTP adjtime called while the interface is down\n");
|
|
+ return -ENETDOWN;
|
|
+ }
|
|
+
|
|
DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
|
|
|
|
timecounter_adjtime(&bp->timecounter, delta);
|
|
@@ -13806,6 +13810,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
|
|
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
|
|
u64 ns;
|
|
|
|
+ if (!netif_running(bp->dev)) {
|
|
+ DP(BNX2X_MSG_PTP,
|
|
+ "PTP gettime called while the interface is down\n");
|
|
+ return -ENETDOWN;
|
|
+ }
|
|
+
|
|
ns = timecounter_read(&bp->timecounter);
|
|
|
|
DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
|
|
@@ -13821,6 +13831,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
|
|
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
|
|
u64 ns;
|
|
|
|
+ if (!netif_running(bp->dev)) {
|
|
+ DP(BNX2X_MSG_PTP,
|
|
+ "PTP settime called while the interface is down\n");
|
|
+ return -ENETDOWN;
|
|
+ }
|
|
+
|
|
ns = timespec64_to_ns(ts);
|
|
|
|
DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
|
|
@@ -13988,6 +14004,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
|
|
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
|
|
if (rc)
|
|
goto init_one_freemem;
|
|
+
|
|
+#ifdef CONFIG_BNX2X_SRIOV
|
|
+ /* VF with OLD Hypervisor or old PF do not support filtering */
|
|
+ if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
|
|
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
|
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
|
+ }
|
|
+#endif
|
|
}
|
|
|
|
/* Enable SRIOV if capability found in configuration space */
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
|
|
index 3f77d0863543..c6e059119b22 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
|
|
@@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
|
|
|
|
/* Add/Remove the filter */
|
|
rc = bnx2x_config_vlan_mac(bp, &ramrod);
|
|
- if (rc && rc != -EEXIST) {
|
|
+ if (rc == -EEXIST)
|
|
+ return 0;
|
|
+ if (rc) {
|
|
BNX2X_ERR("Failed to %s %s\n",
|
|
filter->add ? "add" : "delete",
|
|
(filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
|
|
@@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
|
|
return rc;
|
|
}
|
|
|
|
+ filter->applied = true;
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -471,6 +475,8 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
|
|
BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
|
|
i, filters->count + 1);
|
|
while (--i >= 0) {
|
|
+ if (!filters->filters[i].applied)
|
|
+ continue;
|
|
filters->filters[i].add = !filters->filters[i].add;
|
|
bnx2x_vf_mac_vlan_config(bp, vf, qid,
|
|
&filters->filters[i],
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
|
|
index 7a6d406f4c11..888d0b6632e8 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
|
|
@@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
|
|
(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
|
|
|
|
bool add;
|
|
+ bool applied;
|
|
u8 *mac;
|
|
u16 vid;
|
|
};
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
|
|
index bfae300cf25f..c2d327d9dff0 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
|
|
@@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
|
|
struct bnx2x *bp = netdev_priv(dev);
|
|
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
|
|
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
|
|
- int rc, i = 0;
|
|
+ int rc = 0, i = 0;
|
|
struct netdev_hw_addr *ha;
|
|
|
|
if (bp->state != BNX2X_STATE_OPEN) {
|
|
@@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
|
|
/* Get Rx mode requested */
|
|
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
|
|
|
|
+ /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
|
|
+ if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
|
|
+ DP(NETIF_MSG_IFUP,
|
|
+ "VF supports not more than %d multicast MAC addresses\n",
|
|
+ PFVF_MAX_MULTICAST_PER_VF);
|
|
+ rc = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
netdev_for_each_mc_addr(ha, dev) {
|
|
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
|
|
bnx2x_mc_addr(ha));
|
|
@@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
|
|
i++;
|
|
}
|
|
|
|
- /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
|
|
- * addresses tops
|
|
- */
|
|
- if (i >= PFVF_MAX_MULTICAST_PER_VF) {
|
|
- DP(NETIF_MSG_IFUP,
|
|
- "VF supports not more than %d multicast MAC addresses\n",
|
|
- PFVF_MAX_MULTICAST_PER_VF);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
req->n_multicast = i;
|
|
req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
|
|
req->vf_qid = 0;
|
|
@@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
|
|
out:
|
|
bnx2x_vfpf_finalize(bp, &req->first_tlv);
|
|
|
|
- return 0;
|
|
+ return rc;
|
|
}
|
|
|
|
/* request pf to add a vlan for the vf */
|
|
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
|
|
index b8778e7b1f79..7c6c1468628b 100644
|
|
--- a/drivers/net/ethernet/ibm/ibmvnic.c
|
|
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
|
|
@@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev)
|
|
send_map_query(adapter);
|
|
for (i = 0; i < rxadd_subcrqs; i++) {
|
|
init_rx_pool(adapter, &adapter->rx_pool[i],
|
|
- IBMVNIC_BUFFS_PER_POOL, i,
|
|
+ adapter->req_rx_add_entries_per_subcrq, i,
|
|
be64_to_cpu(size_array[i]), 1);
|
|
if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
|
|
dev_err(dev, "Couldn't alloc rx pool\n");
|
|
@@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev)
|
|
for (i = 0; i < tx_subcrqs; i++) {
|
|
tx_pool = &adapter->tx_pool[i];
|
|
tx_pool->tx_buff =
|
|
- kcalloc(adapter->max_tx_entries_per_subcrq,
|
|
+ kcalloc(adapter->req_tx_entries_per_subcrq,
|
|
sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
|
|
if (!tx_pool->tx_buff)
|
|
goto tx_pool_alloc_failed;
|
|
|
|
if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
|
|
- adapter->max_tx_entries_per_subcrq *
|
|
+ adapter->req_tx_entries_per_subcrq *
|
|
adapter->req_mtu))
|
|
goto tx_ltb_alloc_failed;
|
|
|
|
tx_pool->free_map =
|
|
- kcalloc(adapter->max_tx_entries_per_subcrq,
|
|
+ kcalloc(adapter->req_tx_entries_per_subcrq,
|
|
sizeof(int), GFP_KERNEL);
|
|
if (!tx_pool->free_map)
|
|
goto tx_fm_alloc_failed;
|
|
|
|
- for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
|
|
+ for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
|
|
tx_pool->free_map[j] = j;
|
|
|
|
tx_pool->consumer_index = 0;
|
|
@@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
|
|
struct device *dev = &adapter->vdev->dev;
|
|
struct ibmvnic_tx_buff *tx_buff = NULL;
|
|
+ struct ibmvnic_sub_crq_queue *tx_scrq;
|
|
struct ibmvnic_tx_pool *tx_pool;
|
|
unsigned int tx_send_failed = 0;
|
|
unsigned int tx_map_failed = 0;
|
|
@@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
int ret = 0;
|
|
|
|
tx_pool = &adapter->tx_pool[queue_num];
|
|
+ tx_scrq = adapter->tx_scrq[queue_num];
|
|
txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
|
|
handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
|
|
be32_to_cpu(adapter->login_rsp_buf->
|
|
@@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
|
|
tx_pool->consumer_index =
|
|
(tx_pool->consumer_index + 1) %
|
|
- adapter->max_tx_entries_per_subcrq;
|
|
+ adapter->req_tx_entries_per_subcrq;
|
|
|
|
tx_buff = &tx_pool->tx_buff[index];
|
|
tx_buff->skb = skb;
|
|
@@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
|
|
if (tx_pool->consumer_index == 0)
|
|
tx_pool->consumer_index =
|
|
- adapter->max_tx_entries_per_subcrq - 1;
|
|
+ adapter->req_tx_entries_per_subcrq - 1;
|
|
else
|
|
tx_pool->consumer_index--;
|
|
|
|
@@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|
ret = NETDEV_TX_BUSY;
|
|
goto out;
|
|
}
|
|
+
|
|
+ atomic_inc(&tx_scrq->used);
|
|
+
|
|
+ if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
|
|
+ netdev_info(netdev, "Stopping queue %d\n", queue_num);
|
|
+ netif_stop_subqueue(netdev, queue_num);
|
|
+ }
|
|
+
|
|
tx_packets++;
|
|
tx_bytes += skb->len;
|
|
txq->trans_start = jiffies;
|
|
@@ -1220,6 +1230,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
|
|
scrq->adapter = adapter;
|
|
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
|
|
scrq->cur = 0;
|
|
+ atomic_set(&scrq->used, 0);
|
|
scrq->rx_skb_top = NULL;
|
|
spin_lock_init(&scrq->lock);
|
|
|
|
@@ -1368,14 +1379,28 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
|
|
DMA_TO_DEVICE);
|
|
}
|
|
|
|
- if (txbuff->last_frag)
|
|
+ if (txbuff->last_frag) {
|
|
+ atomic_dec(&scrq->used);
|
|
+
|
|
+ if (atomic_read(&scrq->used) <=
|
|
+ (adapter->req_tx_entries_per_subcrq / 2) &&
|
|
+ netif_subqueue_stopped(adapter->netdev,
|
|
+ txbuff->skb)) {
|
|
+ netif_wake_subqueue(adapter->netdev,
|
|
+ scrq->pool_index);
|
|
+ netdev_dbg(adapter->netdev,
|
|
+ "Started queue %d\n",
|
|
+ scrq->pool_index);
|
|
+ }
|
|
+
|
|
dev_kfree_skb_any(txbuff->skb);
|
|
+ }
|
|
|
|
adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
|
|
producer_index] = index;
|
|
adapter->tx_pool[pool].producer_index =
|
|
(adapter->tx_pool[pool].producer_index + 1) %
|
|
- adapter->max_tx_entries_per_subcrq;
|
|
+ adapter->req_tx_entries_per_subcrq;
|
|
}
|
|
/* remove tx_comp scrq*/
|
|
next->tx_comp.first = 0;
|
|
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
|
|
index dd775d951b73..892eda346e54 100644
|
|
--- a/drivers/net/ethernet/ibm/ibmvnic.h
|
|
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
|
|
@@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue {
|
|
spinlock_t lock;
|
|
struct sk_buff *rx_skb_top;
|
|
struct ibmvnic_adapter *adapter;
|
|
+ atomic_t used;
|
|
};
|
|
|
|
struct ibmvnic_long_term_buff {
|
|
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
|
|
index 93ffedfa2994..1e2d4f1179da 100644
|
|
--- a/drivers/net/phy/spi_ks8995.c
|
|
+++ b/drivers/net/phy/spi_ks8995.c
|
|
@@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
|
|
if (err)
|
|
return err;
|
|
|
|
- ks->regs_attr.size = ks->chip->regs_size;
|
|
memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
|
|
+ ks->regs_attr.size = ks->chip->regs_size;
|
|
|
|
err = ks8995_reset(ks);
|
|
if (err)
|
|
return err;
|
|
|
|
+ sysfs_attr_init(&ks->regs_attr.attr);
|
|
err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
|
|
if (err) {
|
|
dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
|
|
index 8e3c6f4bdaa0..edffe5aeeeb1 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
|
|
@@ -4080,8 +4080,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
|
|
sdio_release_host(sdiodev->func[1]);
|
|
fail:
|
|
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
|
|
- device_release_driver(dev);
|
|
device_release_driver(&sdiodev->func[2]->dev);
|
|
+ device_release_driver(dev);
|
|
}
|
|
|
|
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
|
|
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d2a28a9d3209..4b462dc21c41 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3047,6 +3047,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
const char *hwname = NULL;
+ int ret;

param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -3086,7 +3087,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.regd = hwsim_world_regdom_custom[idx];
}

- return mac80211_hwsim_new_radio(info, &param);
+ ret = mac80211_hwsim_new_radio(info, &param);
+ kfree(hwname);
+ return ret;
}

static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
|
|
index 9013a585507e..f32fc704cb7e 100644
|
|
--- a/drivers/rapidio/devices/rio_mport_cdev.c
|
|
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
|
|
@@ -964,7 +964,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
|
|
req->sgt.sgl, req->sgt.nents, dir);
|
|
if (nents == -EFAULT) {
|
|
rmcd_error("Failed to map SG list");
|
|
- return -EFAULT;
|
|
+ ret = -EFAULT;
|
|
+ goto err_pg;
|
|
}
|
|
|
|
ret = do_dma_request(req, xfer, sync, nents);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
|
|
index 4df3cdcf88ce..9c9563312a3d 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_els.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_els.c
|
|
@@ -8185,11 +8185,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|
spin_lock_irq(shost->host_lock);
|
|
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
|
|
spin_unlock_irq(shost->host_lock);
|
|
- if (vport->port_type == LPFC_PHYSICAL_PORT
|
|
- && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
|
|
- lpfc_issue_init_vfi(vport);
|
|
- else
|
|
+ if (mb->mbxStatus == MBX_NOT_FINISHED)
|
|
+ break;
|
|
+ if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
|
|
+ !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
|
|
+ if (phba->sli_rev == LPFC_SLI_REV4)
|
|
+ lpfc_issue_init_vfi(vport);
|
|
+ else
|
|
+ lpfc_initial_flogi(vport);
|
|
+ } else {
|
|
lpfc_initial_fdisc(vport);
|
|
+ }
|
|
break;
|
|
}
|
|
} else {
|
|
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
|
|
index 658e4d15cb71..ce4ac769a9a2 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_dbg.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
|
|
@@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
|
|
"%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
|
|
ql_dbg(level, vha, id,
|
|
"----- -----------------------------------------------\n");
|
|
- for (cnt = 0; cnt < size; cnt++, buf++) {
|
|
- if (cnt % 16 == 0)
|
|
- ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
|
|
- printk(" %02x", *buf);
|
|
- if (cnt % 16 == 15)
|
|
- printk("\n");
|
|
+ for (cnt = 0; cnt < size; cnt += 16) {
|
|
+ ql_dbg(level, vha, id, "%04x: ", cnt);
|
|
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
|
|
+ buf + cnt, min(16U, size - cnt), false);
|
|
}
|
|
- if (cnt % 16 != 0)
|
|
- printk("\n");
|
|
}
|
|
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
|
|
index d8099c7cab00..c7b770075caa 100644
|
|
--- a/drivers/scsi/scsi_lib.c
|
|
+++ b/drivers/scsi/scsi_lib.c
|
|
@@ -2041,11 +2041,13 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
|
|
q->limits.cluster = 0;
|
|
|
|
/*
|
|
- * set a reasonable default alignment on word boundaries: the
|
|
- * host and device may alter it using
|
|
- * blk_queue_update_dma_alignment() later.
|
|
+ * Set a reasonable default alignment: The larger of 32-byte (dword),
|
|
+ * which is a common minimum for HBAs, and the minimum DMA alignment,
|
|
+ * which is set by the platform.
|
|
+ *
|
|
+ * Devices that require a bigger alignment can increase it later.
|
|
*/
|
|
- blk_queue_dma_alignment(q, 0x03);
|
|
+ blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
|
|
}
|
|
|
|
struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
|
|
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
|
|
index 8e281e47afec..b7995474148c 100644
|
|
--- a/drivers/spi/Kconfig
|
|
+++ b/drivers/spi/Kconfig
|
|
@@ -365,7 +365,6 @@ config SPI_FSL_SPI
|
|
config SPI_FSL_DSPI
|
|
tristate "Freescale DSPI controller"
|
|
select REGMAP_MMIO
|
|
- depends on HAS_DMA
|
|
depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
|
|
help
|
|
This enables support for the Freescale DSPI controller in master
|
|
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
|
|
index c61ddbf94bc7..16c67120d72b 100644
|
|
--- a/drivers/usb/dwc3/gadget.c
|
|
+++ b/drivers/usb/dwc3/gadget.c
|
|
@@ -3092,15 +3092,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
|
|
|
|
int dwc3_gadget_suspend(struct dwc3 *dwc)
|
|
{
|
|
- int ret;
|
|
-
|
|
if (!dwc->gadget_driver)
|
|
return 0;
|
|
|
|
- ret = dwc3_gadget_run_stop(dwc, false, false);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
-
|
|
+ dwc3_gadget_run_stop(dwc, false, false);
|
|
dwc3_disconnect_gadget(dwc);
|
|
__dwc3_gadget_stop(dwc);
|
|
|
|
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
|
|
index 502a096fc380..a5ca409dc97e 100644
|
|
--- a/drivers/usb/gadget/configfs.c
|
|
+++ b/drivers/usb/gadget/configfs.c
|
|
@@ -269,6 +269,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
|
|
ret = unregister_gadget(gi);
|
|
if (ret)
|
|
goto err;
|
|
+ kfree(name);
|
|
} else {
|
|
if (gi->composite.gadget_driver.udc_name) {
|
|
ret = -EBUSY;
|
|
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
|
|
index 4fce83266926..346a630cebd5 100644
|
|
--- a/drivers/usb/gadget/function/f_fs.c
|
|
+++ b/drivers/usb/gadget/function/f_fs.c
|
|
@@ -2262,9 +2262,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
|
|
int i;
|
|
|
|
if (len < sizeof(*d) ||
|
|
- d->bFirstInterfaceNumber >= ffs->interfaces_count ||
|
|
- d->Reserved1)
|
|
+ d->bFirstInterfaceNumber >= ffs->interfaces_count)
|
|
return -EINVAL;
|
|
+ if (d->Reserved1 != 1) {
|
|
+ /*
|
|
+ * According to the spec, Reserved1 must be set to 1
|
|
+ * but older kernels incorrectly rejected non-zero
|
|
+ * values. We fix it here to avoid returning EINVAL
|
|
+ * in response to values we used to accept.
|
|
+ */
|
|
+ pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
|
|
+ d->Reserved1 = 1;
|
|
+ }
|
|
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
|
|
if (d->Reserved2[i])
|
|
return -EINVAL;
|
|
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
|
|
index f69dbd4bcd18..b8534d3f8bb0 100644
|
|
--- a/drivers/usb/gadget/legacy/inode.c
|
|
+++ b/drivers/usb/gadget/legacy/inode.c
|
|
@@ -1819,8 +1819,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|
|
|
|
spin_lock_irq (&dev->lock);
|
|
value = -EINVAL;
|
|
- if (dev->buf)
|
|
+ if (dev->buf) {
|
|
+ kfree(kbuf);
|
|
goto fail;
|
|
+ }
|
|
dev->buf = kbuf;
|
|
|
|
/* full or low speed config */
|
|
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
|
|
index 33f3987218f7..d133252ef2c3 100644
|
|
--- a/drivers/usb/gadget/udc/net2280.c
|
|
+++ b/drivers/usb/gadget/udc/net2280.c
|
|
@@ -1146,15 +1146,15 @@ static int scan_dma_completions(struct net2280_ep *ep)
|
|
*/
|
|
while (!list_empty(&ep->queue)) {
|
|
struct net2280_request *req;
|
|
- u32 tmp;
|
|
+ u32 req_dma_count;
|
|
|
|
req = list_entry(ep->queue.next,
|
|
struct net2280_request, queue);
|
|
if (!req->valid)
|
|
break;
|
|
rmb();
|
|
- tmp = le32_to_cpup(&req->td->dmacount);
|
|
- if ((tmp & BIT(VALID_BIT)) != 0)
|
|
+ req_dma_count = le32_to_cpup(&req->td->dmacount);
|
|
+ if ((req_dma_count & BIT(VALID_BIT)) != 0)
|
|
break;
|
|
|
|
/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
|
|
@@ -1163,40 +1163,41 @@ static int scan_dma_completions(struct net2280_ep *ep)
|
|
*/
|
|
if (unlikely(req->td->dmadesc == 0)) {
|
|
/* paranoia */
|
|
- tmp = readl(&ep->dma->dmacount);
|
|
- if (tmp & DMA_BYTE_COUNT_MASK)
|
|
+ u32 const ep_dmacount = readl(&ep->dma->dmacount);
|
|
+
|
|
+ if (ep_dmacount & DMA_BYTE_COUNT_MASK)
|
|
break;
|
|
/* single transfer mode */
|
|
- dma_done(ep, req, tmp, 0);
|
|
+ dma_done(ep, req, req_dma_count, 0);
|
|
num_completed++;
|
|
break;
|
|
} else if (!ep->is_in &&
|
|
(req->req.length % ep->ep.maxpacket) &&
|
|
!(ep->dev->quirks & PLX_PCIE)) {
|
|
|
|
- tmp = readl(&ep->regs->ep_stat);
|
|
+ u32 const ep_stat = readl(&ep->regs->ep_stat);
|
|
/* AVOID TROUBLE HERE by not issuing short reads from
|
|
* your gadget driver. That helps avoids errata 0121,
|
|
* 0122, and 0124; not all cases trigger the warning.
|
|
*/
|
|
- if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
|
|
+ if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
|
|
ep_warn(ep->dev, "%s lost packet sync!\n",
|
|
ep->ep.name);
|
|
req->req.status = -EOVERFLOW;
|
|
} else {
|
|
- tmp = readl(&ep->regs->ep_avail);
|
|
- if (tmp) {
|
|
+ u32 const ep_avail = readl(&ep->regs->ep_avail);
|
|
+ if (ep_avail) {
|
|
/* fifo gets flushed later */
|
|
ep->out_overflow = 1;
|
|
ep_dbg(ep->dev,
|
|
"%s dma, discard %d len %d\n",
|
|
- ep->ep.name, tmp,
|
|
+ ep->ep.name, ep_avail,
|
|
req->req.length);
|
|
req->req.status = -EOVERFLOW;
|
|
}
|
|
}
|
|
}
|
|
- dma_done(ep, req, tmp, 0);
|
|
+ dma_done(ep, req, req_dma_count, 0);
|
|
num_completed++;
|
|
}
|
|
|
|
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
|
|
index 7fa60f5b7ae4..afd6b86458c5 100644
|
|
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
|
|
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
|
|
@@ -2534,9 +2534,10 @@ static int pxa_udc_remove(struct platform_device *_dev)
|
|
usb_del_gadget_udc(&udc->gadget);
|
|
pxa_cleanup_debugfs(udc);
|
|
|
|
- if (!IS_ERR_OR_NULL(udc->transceiver))
|
|
+ if (!IS_ERR_OR_NULL(udc->transceiver)) {
|
|
usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
|
|
- usb_put_phy(udc->transceiver);
|
|
+ usb_put_phy(udc->transceiver);
|
|
+ }
|
|
|
|
udc->transceiver = NULL;
|
|
the_controller = NULL;
|
|
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
|
|
index bb89e24c48b4..2197a50ed2ab 100644
|
|
--- a/drivers/usb/gadget/udc/renesas_usb3.c
|
|
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
|
|
@@ -222,7 +222,7 @@
|
|
#define USB3_EP0_SS_MAX_PACKET_SIZE 512
|
|
#define USB3_EP0_HSFS_MAX_PACKET_SIZE 64
|
|
#define USB3_EP0_BUF_SIZE 8
|
|
-#define USB3_MAX_NUM_PIPES 30
|
|
+#define USB3_MAX_NUM_PIPES 6 /* This includes PIPE 0 */
|
|
#define USB3_WAIT_US 3
|
|
|
|
struct renesas_usb3;
|
|
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
|
|
index 7062bb0975a5..462e183609b6 100644
|
|
--- a/drivers/virtio/virtio.c
|
|
+++ b/drivers/virtio/virtio.c
|
|
@@ -323,6 +323,8 @@ int register_virtio_device(struct virtio_device *dev)
|
|
/* device_register() causes the bus infrastructure to look for a
|
|
* matching driver. */
|
|
err = device_register(&dev->dev);
|
|
+ if (err)
|
|
+ ida_simple_remove(&virtio_index_ida, dev->index);
|
|
out:
|
|
if (err)
|
|
add_status(dev, VIRTIO_CONFIG_S_FAILED);
|
|
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
|
|
index d764236072b1..8d2c5180e015 100644
|
|
--- a/fs/afs/cmservice.c
|
|
+++ b/fs/afs/cmservice.c
|
|
@@ -106,6 +106,9 @@ bool afs_cm_incoming_call(struct afs_call *call)
|
|
case CBProbe:
|
|
call->type = &afs_SRXCBProbe;
|
|
return true;
|
|
+ case CBProbeUuid:
|
|
+ call->type = &afs_SRXCBProbeUuid;
|
|
+ return true;
|
|
case CBTellMeAboutYourself:
|
|
call->type = &afs_SRXCBTellMeAboutYourself;
|
|
return true;
|
|
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
|
|
index c4cff5cc9c93..a29730c44850 100644
|
|
--- a/fs/btrfs/extent-tree.c
|
|
+++ b/fs/btrfs/extent-tree.c
|
|
@@ -9362,6 +9362,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
|
|
ret = btrfs_del_root(trans, tree_root, &root->root_key);
|
|
if (ret) {
|
|
btrfs_abort_transaction(trans, ret);
|
|
+ err = ret;
|
|
goto out_end_trans;
|
|
}
|
|
|
|
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
|
|
index 65566d5fcf39..1e5321d1ed22 100644
|
|
--- a/fs/nfs/dir.c
|
|
+++ b/fs/nfs/dir.c
|
|
@@ -2098,7 +2098,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|
if (new_inode != NULL)
|
|
nfs_drop_nlink(new_inode);
|
|
d_move(old_dentry, new_dentry);
|
|
- nfs_set_verifier(new_dentry,
|
|
+ nfs_set_verifier(old_dentry,
|
|
nfs_save_change_attribute(new_dir));
|
|
} else if (error == -ENOENT)
|
|
nfs_dentry_handle_enoent(old_dentry);
|
|
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
|
|
index fe9a9a183b2d..98ca9f1b6a07 100644
|
|
--- a/fs/xfs/xfs_inode.c
|
|
+++ b/fs/xfs/xfs_inode.c
|
|
@@ -2386,6 +2386,7 @@ xfs_ifree_cluster(
|
|
*/
|
|
if (ip->i_ino != inum + i) {
|
|
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
|
+ rcu_read_unlock();
|
|
continue;
|
|
}
|
|
}
|
|
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
|
|
index 08528afdf58b..704caae69c42 100644
|
|
--- a/include/linux/dma-mapping.h
|
|
+++ b/include/linux/dma-mapping.h
|
|
@@ -659,7 +659,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
|
|
return ret;
|
|
}
|
|
|
|
-#ifdef CONFIG_HAS_DMA
|
|
static inline int dma_get_cache_alignment(void)
|
|
{
|
|
#ifdef ARCH_DMA_MINALIGN
|
|
@@ -667,7 +666,6 @@ static inline int dma_get_cache_alignment(void)
|
|
#endif
|
|
return 1;
|
|
}
|
|
-#endif
|
|
|
|
/* flags for the coherent memory api */
|
|
#define DMA_MEMORY_MAP 0x01
|
|
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
|
|
index 29d4385903d4..206fe3bccccc 100644
|
|
--- a/include/linux/genalloc.h
|
|
+++ b/include/linux/genalloc.h
|
|
@@ -32,6 +32,7 @@
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/spinlock_types.h>
|
|
+#include <linux/atomic.h>
|
|
|
|
struct device;
|
|
struct device_node;
|
|
@@ -70,7 +71,7 @@ struct gen_pool {
|
|
*/
|
|
struct gen_pool_chunk {
|
|
struct list_head next_chunk; /* next chunk in pool */
|
|
- atomic_t avail;
|
|
+ atomic_long_t avail;
|
|
phys_addr_t phys_addr; /* physical starting address of memory chunk */
|
|
unsigned long start_addr; /* start address of memory chunk */
|
|
unsigned long end_addr; /* end address of memory chunk (inclusive) */
|
|
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
|
|
index 25c0dc31f084..854dfa6fa6e3 100644
|
|
--- a/include/linux/mmu_notifier.h
|
|
+++ b/include/linux/mmu_notifier.h
|
|
@@ -381,18 +381,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
|
|
___pmd; \
|
|
})
|
|
|
|
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
|
|
-({ \
|
|
- unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
|
|
- pmd_t ___pmd; \
|
|
- \
|
|
- ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
|
|
- mmu_notifier_invalidate_range(__mm, ___haddr, \
|
|
- ___haddr + HPAGE_PMD_SIZE); \
|
|
- \
|
|
- ___pmd; \
|
|
-})
|
|
-
|
|
/*
|
|
* set_pte_at_notify() sets the pte _after_ running the notifier.
|
|
* This is safe to start by updating the secondary MMUs, because the primary MMU
|
|
@@ -480,7 +468,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
|
|
#define pmdp_clear_young_notify pmdp_test_and_clear_young
|
|
#define ptep_clear_flush_notify ptep_clear_flush
|
|
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
|
|
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
|
|
#define set_pte_at_notify set_pte_at
|
|
|
|
#endif /* CONFIG_MMU_NOTIFIER */
|
|
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
|
|
index 35d0fd7a4948..e821a3132a3e 100644
|
|
--- a/include/linux/omap-gpmc.h
|
|
+++ b/include/linux/omap-gpmc.h
|
|
@@ -88,10 +88,11 @@ static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
|
|
#endif
|
|
|
|
#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
|
|
-extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
|
|
+extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
|
|
#else
|
|
#define board_onenand_data NULL
|
|
-static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
|
|
+static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
|
|
{
|
|
+ return 0;
|
|
}
|
|
#endif
|
|
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
|
|
index c6f0f0d0e17e..00a1f330f93a 100644
|
|
--- a/include/linux/sysfs.h
|
|
+++ b/include/linux/sysfs.h
|
|
@@ -116,6 +116,12 @@ struct attribute_group {
|
|
.show = _name##_show, \
|
|
}
|
|
|
|
+#define __ATTR_RO_MODE(_name, _mode) { \
|
|
+ .attr = { .name = __stringify(_name), \
|
|
+ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
|
|
+ .show = _name##_show, \
|
|
+}
|
|
+
|
|
#define __ATTR_WO(_name) { \
|
|
.attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
|
|
.store = _name##_store, \
|
|
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
|
|
index dae99d7d2bc0..706a7017885c 100644
|
|
--- a/include/scsi/libsas.h
|
|
+++ b/include/scsi/libsas.h
|
|
@@ -165,11 +165,11 @@ struct expander_device {
|
|
|
|
struct sata_device {
|
|
unsigned int class;
|
|
- struct smp_resp rps_resp; /* report_phy_sata_resp */
|
|
u8 port_no; /* port number, if this is a PM (Port) */
|
|
|
|
struct ata_port *ap;
|
|
struct ata_host ata_host;
|
|
+ struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
|
|
u8 fis[ATA_RESP_FIS_SIZE];
|
|
};
|
|
|
|
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
|
|
index 5c51d1985b51..673fa6fe2d73 100644
|
|
--- a/kernel/bpf/percpu_freelist.c
|
|
+++ b/kernel/bpf/percpu_freelist.c
|
|
@@ -78,8 +78,10 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
|
|
{
|
|
struct pcpu_freelist_head *head;
|
|
struct pcpu_freelist_node *node;
|
|
+ unsigned long flags;
|
|
int orig_cpu, cpu;
|
|
|
|
+ local_irq_save(flags);
|
|
orig_cpu = cpu = raw_smp_processor_id();
|
|
while (1) {
|
|
head = per_cpu_ptr(s->freelist, cpu);
|
|
@@ -87,14 +89,16 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
|
|
node = head->first;
|
|
if (node) {
|
|
head->first = node->next;
|
|
- raw_spin_unlock(&head->lock);
|
|
+ raw_spin_unlock_irqrestore(&head->lock, flags);
|
|
return node;
|
|
}
|
|
raw_spin_unlock(&head->lock);
|
|
cpu = cpumask_next(cpu, cpu_possible_mask);
|
|
if (cpu >= nr_cpu_ids)
|
|
cpu = 0;
|
|
- if (cpu == orig_cpu)
|
|
+ if (cpu == orig_cpu) {
|
|
+ local_irq_restore(flags);
|
|
return NULL;
|
|
+ }
|
|
}
|
|
}
|
|
diff --git a/kernel/cpu.c b/kernel/cpu.c
|
|
index 26a4f74bff83..e1436ca4aed0 100644
|
|
--- a/kernel/cpu.c
|
|
+++ b/kernel/cpu.c
|
|
@@ -1321,11 +1321,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
|
|
.teardown.single = NULL,
|
|
.cant_stop = true,
|
|
},
|
|
- [CPUHP_AP_SMPCFD_DYING] = {
|
|
- .name = "smpcfd:dying",
|
|
- .startup.single = NULL,
|
|
- .teardown.single = smpcfd_dying_cpu,
|
|
- },
|
|
/*
|
|
* Handled on controll processor until the plugged processor manages
|
|
* this itself.
|
|
@@ -1367,6 +1362,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
|
|
.startup.single = NULL,
|
|
.teardown.single = rcutree_dying_cpu,
|
|
},
|
|
+ [CPUHP_AP_SMPCFD_DYING] = {
|
|
+ .name = "smpcfd:dying",
|
|
+ .startup.single = NULL,
|
|
+ .teardown.single = smpcfd_dying_cpu,
|
|
+ },
|
|
/* Entry state on starting. Interrupts enabled from here on. Transient
|
|
* state for synchronsization */
|
|
[CPUHP_AP_ONLINE] = {
|
|
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
|
|
index fc1ef736253c..77777d918676 100644
|
|
--- a/kernel/debug/kdb/kdb_io.c
|
|
+++ b/kernel/debug/kdb/kdb_io.c
|
|
@@ -349,7 +349,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
|
|
}
|
|
kdb_printf("\n");
|
|
for (i = 0; i < count; i++) {
|
|
- if (kallsyms_symbol_next(p_tmp, i) < 0)
|
|
+ if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
|
|
break;
|
|
kdb_printf("%s ", p_tmp);
|
|
*(p_tmp + len) = '\0';
|
|
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
|
|
index a9b8cf500591..def4548ea40c 100644
|
|
--- a/kernel/jump_label.c
|
|
+++ b/kernel/jump_label.c
|
|
@@ -612,7 +612,7 @@ static __init int jump_label_test(void)
|
|
|
|
return 0;
|
|
}
|
|
-late_initcall(jump_label_test);
|
|
+early_initcall(jump_label_test);
|
|
#endif /* STATIC_KEYS_SELFTEST */
|
|
|
|
#endif /* HAVE_JUMP_LABEL */
|
|
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
|
|
index 7a68c631d5b5..3d862f5b0331 100644
|
|
--- a/kernel/sched/fair.c
|
|
+++ b/kernel/sched/fair.c
|
|
@@ -5451,7 +5451,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
|
|
* Due to large variance we need a large fuzz factor; hackbench in
|
|
* particularly is sensitive here.
|
|
*/
|
|
- if ((avg_idle / 512) < avg_cost)
|
|
+ if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
|
|
return -1;
|
|
|
|
time = local_clock();
|
|
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
|
|
index 69631fa46c2f..1b3c8189b286 100644
|
|
--- a/kernel/sched/features.h
|
|
+++ b/kernel/sched/features.h
|
|
@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
|
|
*/
|
|
SCHED_FEAT(TTWU_QUEUE, true)
|
|
|
|
+/*
|
|
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
|
|
+ */
|
|
+SCHED_FEAT(SIS_AVG_CPU, false)
|
|
+
|
|
#ifdef HAVE_RT_PUSH_IPI
|
|
/*
|
|
* In order to avoid a thundering herd attack of CPUs that are
|
|
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
|
|
index 296dcca77f33..181c2ad0cb54 100644
|
|
--- a/kernel/workqueue.c
|
|
+++ b/kernel/workqueue.c
|
|
@@ -1506,6 +1506,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
|
|
struct timer_list *timer = &dwork->timer;
|
|
struct work_struct *work = &dwork->work;
|
|
|
|
+ WARN_ON_ONCE(!wq);
|
|
WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
|
|
timer->data != (unsigned long)dwork);
|
|
WARN_ON_ONCE(timer_pending(timer));
|
|
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
|
|
index 1ef0cec38d78..dc14beae2c9a 100644
|
|
--- a/lib/asn1_decoder.c
|
|
+++ b/lib/asn1_decoder.c
|
|
@@ -313,42 +313,47 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
|
|
|
|
/* Decide how to handle the operation */
|
|
switch (op) {
|
|
- case ASN1_OP_MATCH_ANY_ACT:
|
|
- case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
|
|
- case ASN1_OP_COND_MATCH_ANY_ACT:
|
|
- case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
|
|
- ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
- goto skip_data;
|
|
-
|
|
- case ASN1_OP_MATCH_ACT:
|
|
- case ASN1_OP_MATCH_ACT_OR_SKIP:
|
|
- case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
|
|
- ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
- goto skip_data;
|
|
-
|
|
case ASN1_OP_MATCH:
|
|
case ASN1_OP_MATCH_OR_SKIP:
|
|
+ case ASN1_OP_MATCH_ACT:
|
|
+ case ASN1_OP_MATCH_ACT_OR_SKIP:
|
|
case ASN1_OP_MATCH_ANY:
|
|
case ASN1_OP_MATCH_ANY_OR_SKIP:
|
|
+ case ASN1_OP_MATCH_ANY_ACT:
|
|
+ case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
|
|
case ASN1_OP_COND_MATCH_OR_SKIP:
|
|
+ case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
|
|
case ASN1_OP_COND_MATCH_ANY:
|
|
case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
|
|
- skip_data:
|
|
+ case ASN1_OP_COND_MATCH_ANY_ACT:
|
|
+ case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
|
|
+
|
|
if (!(flags & FLAG_CONS)) {
|
|
if (flags & FLAG_INDEFINITE_LENGTH) {
|
|
+ size_t tmp = dp;
|
|
+
|
|
ret = asn1_find_indefinite_length(
|
|
- data, datalen, &dp, &len, &errmsg);
|
|
+ data, datalen, &tmp, &len, &errmsg);
|
|
if (ret < 0)
|
|
goto error;
|
|
- } else {
|
|
- dp += len;
|
|
}
|
|
pr_debug("- LEAF: %zu\n", len);
|
|
}
|
|
+
|
|
+ if (op & ASN1_OP_MATCH__ACT) {
|
|
+ unsigned char act;
|
|
+
|
|
+ if (op & ASN1_OP_MATCH__ANY)
|
|
+ act = machine[pc + 1];
|
|
+ else
|
|
+ act = machine[pc + 2];
|
|
+ ret = actions[act](context, hdr, tag, data + dp, len);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (!(flags & FLAG_CONS))
|
|
+ dp += len;
|
|
pc += asn1_op_lengths[op];
|
|
goto next_op;
|
|
|
|
@@ -434,6 +439,8 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
|
|
else
|
|
act = machine[pc + 1];
|
|
ret = actions[act](context, hdr, 0, data + tdp, len);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
}
|
|
pc += asn1_op_lengths[op];
|
|
goto next_op;
|
|
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
|
|
index da796e2dc4f5..c7c96bc7654a 100644
|
|
--- a/lib/dynamic_debug.c
|
|
+++ b/lib/dynamic_debug.c
|
|
@@ -360,6 +360,10 @@ static int ddebug_parse_query(char *words[], int nwords,
|
|
if (parse_lineno(last, &query->last_lineno) < 0)
|
|
return -EINVAL;
|
|
|
|
+ /* special case for last lineno not specified */
|
|
+ if (query->last_lineno == 0)
|
|
+ query->last_lineno = UINT_MAX;
|
|
+
|
|
if (query->last_lineno < query->first_lineno) {
|
|
pr_err("last-line:%d < 1st-line:%d\n",
|
|
query->last_lineno,
|
|
diff --git a/lib/genalloc.c b/lib/genalloc.c
|
|
index 144fe6b1a03e..ca06adc4f445 100644
|
|
--- a/lib/genalloc.c
|
|
+++ b/lib/genalloc.c
|
|
@@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
|
|
chunk->phys_addr = phys;
|
|
chunk->start_addr = virt;
|
|
chunk->end_addr = virt + size - 1;
|
|
- atomic_set(&chunk->avail, size);
|
|
+ atomic_long_set(&chunk->avail, size);
|
|
|
|
spin_lock(&pool->lock);
|
|
list_add_rcu(&chunk->next_chunk, &pool->chunks);
|
|
@@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
|
|
nbits = (size + (1UL << order) - 1) >> order;
|
|
rcu_read_lock();
|
|
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
|
|
- if (size > atomic_read(&chunk->avail))
|
|
+ if (size > atomic_long_read(&chunk->avail))
|
|
continue;
|
|
|
|
start_bit = 0;
|
|
@@ -324,7 +324,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
|
|
|
|
addr = chunk->start_addr + ((unsigned long)start_bit << order);
|
|
size = nbits << order;
|
|
- atomic_sub(size, &chunk->avail);
|
|
+ atomic_long_sub(size, &chunk->avail);
|
|
break;
|
|
}
|
|
rcu_read_unlock();
|
|
@@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
|
|
remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
|
|
BUG_ON(remain);
|
|
size = nbits << order;
|
|
- atomic_add(size, &chunk->avail);
|
|
+ atomic_long_add(size, &chunk->avail);
|
|
rcu_read_unlock();
|
|
return;
|
|
}
|
|
@@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
|
|
|
|
rcu_read_lock();
|
|
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
|
|
- avail += atomic_read(&chunk->avail);
|
|
+ avail += atomic_long_read(&chunk->avail);
|
|
rcu_read_unlock();
|
|
return avail;
|
|
}
|
|
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
|
|
index 3cae1dcf069c..c234c078693c 100644
|
|
--- a/mm/huge_memory.c
|
|
+++ b/mm/huge_memory.c
|
|
@@ -1509,37 +1509,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
|
{
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
spinlock_t *ptl;
|
|
- int ret = 0;
|
|
+ pmd_t entry;
|
|
+ bool preserve_write;
|
|
+ int ret;
|
|
|
|
ptl = __pmd_trans_huge_lock(pmd, vma);
|
|
- if (ptl) {
|
|
- pmd_t entry;
|
|
- bool preserve_write = prot_numa && pmd_write(*pmd);
|
|
- ret = 1;
|
|
+ if (!ptl)
|
|
+ return 0;
|
|
|
|
- /*
|
|
- * Avoid trapping faults against the zero page. The read-only
|
|
- * data is likely to be read-cached on the local CPU and
|
|
- * local/remote hits to the zero page are not interesting.
|
|
- */
|
|
- if (prot_numa && is_huge_zero_pmd(*pmd)) {
|
|
- spin_unlock(ptl);
|
|
- return ret;
|
|
- }
|
|
+ preserve_write = prot_numa && pmd_write(*pmd);
|
|
+ ret = 1;
|
|
|
|
- if (!prot_numa || !pmd_protnone(*pmd)) {
|
|
- entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
|
|
- entry = pmd_modify(entry, newprot);
|
|
- if (preserve_write)
|
|
- entry = pmd_mkwrite(entry);
|
|
- ret = HPAGE_PMD_NR;
|
|
- set_pmd_at(mm, addr, pmd, entry);
|
|
- BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
|
|
- pmd_write(entry));
|
|
- }
|
|
- spin_unlock(ptl);
|
|
- }
|
|
+ /*
|
|
+ * Avoid trapping faults against the zero page. The read-only
|
|
+ * data is likely to be read-cached on the local CPU and
|
|
+ * local/remote hits to the zero page are not interesting.
|
|
+ */
|
|
+ if (prot_numa && is_huge_zero_pmd(*pmd))
|
|
+ goto unlock;
|
|
+
|
|
+ if (prot_numa && pmd_protnone(*pmd))
|
|
+ goto unlock;
|
|
+
|
|
+ /*
|
|
+ * In case prot_numa, we are under down_read(mmap_sem). It's critical
|
|
+ * to not clear pmd intermittently to avoid race with MADV_DONTNEED
|
|
+ * which is also under down_read(mmap_sem):
|
|
+ *
|
|
+ * CPU0: CPU1:
|
|
+ * change_huge_pmd(prot_numa=1)
|
|
+ * pmdp_huge_get_and_clear_notify()
|
|
+ * madvise_dontneed()
|
|
+ * zap_pmd_range()
|
|
+ * pmd_trans_huge(*pmd) == 0 (without ptl)
|
|
+ * // skip the pmd
|
|
+ * set_pmd_at();
|
|
+ * // pmd is re-established
|
|
+ *
|
|
+ * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
|
|
+ * which may break userspace.
|
|
+ *
|
|
+ * pmdp_invalidate() is required to make sure we don't miss
|
|
+ * dirty/young flags set by hardware.
|
|
+ */
|
|
+ entry = *pmd;
|
|
+ pmdp_invalidate(vma, addr, pmd);
|
|
|
|
+ /*
|
|
+ * Recover dirty/young flags. It relies on pmdp_invalidate to not
|
|
+ * corrupt them.
|
|
+ */
|
|
+ if (pmd_dirty(*pmd))
|
|
+ entry = pmd_mkdirty(entry);
|
|
+ if (pmd_young(*pmd))
|
|
+ entry = pmd_mkyoung(entry);
|
|
+
|
|
+ entry = pmd_modify(entry, newprot);
|
|
+ if (preserve_write)
|
|
+ entry = pmd_mkwrite(entry);
|
|
+ ret = HPAGE_PMD_NR;
|
|
+ set_pmd_at(mm, addr, pmd, entry);
|
|
+ BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
|
|
+unlock:
|
|
+ spin_unlock(ptl);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
|
|
index 1689bb58e0d1..d3548c48369f 100644
|
|
--- a/mm/zsmalloc.c
|
|
+++ b/mm/zsmalloc.c
|
|
@@ -1407,7 +1407,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
|
|
* pools/users, we can't allow mapping in interrupt context
|
|
* because it can corrupt another users mappings.
|
|
*/
|
|
- WARN_ON_ONCE(in_interrupt());
|
|
+ BUG_ON(in_interrupt());
|
|
|
|
/* From now on, migration cannot move the object */
|
|
pin_tag(handle);
|
|
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
|
|
index 713c09a74b90..0c9ded247ebb 100644
|
|
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
|
|
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
|
|
@@ -158,6 +158,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
|
|
if (skb->len < sizeof(struct iphdr) ||
|
|
ip_hdrlen(skb) < sizeof(struct iphdr))
|
|
return NF_ACCEPT;
|
|
+
|
|
+ if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
|
|
+ return NF_ACCEPT;
|
|
+
|
|
return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
|
|
}
|
|
|
|
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
|
|
index f8aad03d674b..6f5e8d01b876 100644
|
|
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
|
|
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
|
|
@@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
|
|
/* maniptype == SRC for postrouting. */
|
|
enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
|
|
|
|
- /* We never see fragments: conntrack defrags on pre-routing
|
|
- * and local-out, and nf_nat_out protects post-routing.
|
|
- */
|
|
- NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
|
|
-
|
|
ct = nf_ct_get(skb, &ctinfo);
|
|
/* Can't track? It's not due to stress, or conntrack would
|
|
* have dropped it. Hence it's the user's responsibilty to
|
|
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
|
|
index 6a5b7783932e..7ac319222558 100644
|
|
--- a/net/ipv4/route.c
|
|
+++ b/net/ipv4/route.c
|
|
@@ -630,9 +630,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
|
|
struct fnhe_hash_bucket *hash;
|
|
struct fib_nh_exception *fnhe;
|
|
struct rtable *rt;
|
|
+ u32 genid, hval;
|
|
unsigned int i;
|
|
int depth;
|
|
- u32 hval = fnhe_hashfun(daddr);
|
|
+
|
|
+ genid = fnhe_genid(dev_net(nh->nh_dev));
|
|
+ hval = fnhe_hashfun(daddr);
|
|
|
|
spin_lock_bh(&fnhe_lock);
|
|
|
|
@@ -655,12 +658,13 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
|
|
}
|
|
|
|
if (fnhe) {
|
|
+ if (fnhe->fnhe_genid != genid)
|
|
+ fnhe->fnhe_genid = genid;
|
|
if (gw)
|
|
fnhe->fnhe_gw = gw;
|
|
- if (pmtu) {
|
|
+ if (pmtu)
|
|
fnhe->fnhe_pmtu = pmtu;
|
|
- fnhe->fnhe_expires = max(1UL, expires);
|
|
- }
|
|
+ fnhe->fnhe_expires = max(1UL, expires);
|
|
/* Update all cached dsts too */
|
|
rt = rcu_dereference(fnhe->fnhe_rth_input);
|
|
if (rt)
|
|
@@ -679,7 +683,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
|
|
fnhe->fnhe_next = hash->chain;
|
|
rcu_assign_pointer(hash->chain, fnhe);
|
|
}
|
|
- fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
|
|
+ fnhe->fnhe_genid = genid;
|
|
fnhe->fnhe_daddr = daddr;
|
|
fnhe->fnhe_gw = gw;
|
|
fnhe->fnhe_pmtu = pmtu;
|
|
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
|
|
index 46ad699937fd..8285a1c108c9 100644
|
|
--- a/net/ipv6/af_inet6.c
|
|
+++ b/net/ipv6/af_inet6.c
|
|
@@ -909,12 +909,12 @@ static int __init inet6_init(void)
|
|
err = register_pernet_subsys(&inet6_net_ops);
|
|
if (err)
|
|
goto register_pernet_fail;
|
|
- err = icmpv6_init();
|
|
- if (err)
|
|
- goto icmp_fail;
|
|
err = ip6_mr_init();
|
|
if (err)
|
|
goto ipmr_fail;
|
|
+ err = icmpv6_init();
+ if (err)
+ goto icmp_fail;
err = ndisc_init();
if (err)
goto ndisc_fail;
@@ -1044,10 +1044,10 @@ static int __init inet6_init(void)
ndisc_cleanup();
ndisc_fail:
ip6_mr_cleanup();
-ipmr_fail:
- icmpv6_cleanup();
icmp_fail:
unregister_pernet_subsys(&inet6_net_ops);
+ipmr_fail:
+ icmpv6_cleanup();
register_pernet_fail:
sock_unregister(PF_INET6);
rtnl_unregister_all(PF_INET6);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
|
|
index e9b14e3493f2..c46066c5dc27 100644
|
|
--- a/net/ipv6/ip6_gre.c
|
|
+++ b/net/ipv6/ip6_gre.c
|
|
@@ -461,7 +461,7 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
|
|
&ipv6h->saddr, &ipv6h->daddr, tpi->key,
|
|
tpi->proto);
|
|
if (tunnel) {
|
|
- ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);
|
|
+ ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
|
|
|
|
return PACKET_RCVD;
|
|
}
|
|
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
|
|
index 67e882d49195..912333586de6 100644
|
|
--- a/net/ipv6/ip6_vti.c
|
|
+++ b/net/ipv6/ip6_vti.c
|
|
@@ -485,11 +485,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
|
|
if (!skb->ignore_df && skb->len > mtu) {
|
|
skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
|
|
|
|
- if (skb->protocol == htons(ETH_P_IPV6))
|
|
+ if (skb->protocol == htons(ETH_P_IPV6)) {
|
|
+ if (mtu < IPV6_MIN_MTU)
|
|
+ mtu = IPV6_MIN_MTU;
|
|
+
|
|
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
|
|
- else
|
|
+ } else {
|
|
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
|
|
htonl(mtu));
|
|
+ }
|
|
|
|
return -EMSGSIZE;
|
|
}
|
|
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
|
|
index 20e2923dc827..78f976d32018 100644
|
|
--- a/net/rds/tcp.c
|
|
+++ b/net/rds/tcp.c
|
|
@@ -478,9 +478,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
|
|
* we do need to clean up the listen socket here.
|
|
*/
|
|
if (rtn->rds_tcp_listen_sock) {
|
|
- rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
|
|
+ struct socket *lsock = rtn->rds_tcp_listen_sock;
|
|
+
|
|
rtn->rds_tcp_listen_sock = NULL;
|
|
- flush_work(&rtn->rds_tcp_accept_w);
|
|
+ rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
|
|
}
|
|
}
|
|
|
|
@@ -517,10 +518,10 @@ static void rds_tcp_kill_sock(struct net *net)
|
|
struct rds_tcp_connection *tc, *_tc;
|
|
LIST_HEAD(tmp_list);
|
|
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
|
|
+ struct socket *lsock = rtn->rds_tcp_listen_sock;
|
|
|
|
- rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
|
|
rtn->rds_tcp_listen_sock = NULL;
|
|
- flush_work(&rtn->rds_tcp_accept_w);
|
|
+ rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
|
|
spin_lock_irq(&rds_tcp_conn_lock);
|
|
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
|
|
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
|
|
@@ -540,8 +541,12 @@ static void rds_tcp_kill_sock(struct net *net)
|
|
void *rds_tcp_listen_sock_def_readable(struct net *net)
|
|
{
|
|
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
|
|
+ struct socket *lsock = rtn->rds_tcp_listen_sock;
|
|
+
|
|
+ if (!lsock)
|
|
+ return NULL;
|
|
|
|
- return rtn->rds_tcp_listen_sock->sk->sk_user_data;
|
|
+ return lsock->sk->sk_user_data;
|
|
}
|
|
|
|
static int rds_tcp_dev_event(struct notifier_block *this,
|
|
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
|
|
index 9a1cc8906576..56ea6620fcf9 100644
|
|
--- a/net/rds/tcp.h
|
|
+++ b/net/rds/tcp.h
|
|
@@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk);
|
|
|
|
/* tcp_listen.c */
|
|
struct socket *rds_tcp_listen_init(struct net *);
|
|
-void rds_tcp_listen_stop(struct socket *);
|
|
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
|
|
void rds_tcp_listen_data_ready(struct sock *sk);
|
|
int rds_tcp_accept_one(struct socket *sock);
|
|
int rds_tcp_keepalive(struct socket *sock);
|
|
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
|
|
index 525b624fec8b..185a56b1e29c 100644
|
|
--- a/net/rds/tcp_listen.c
|
|
+++ b/net/rds/tcp_listen.c
|
|
@@ -227,6 +227,9 @@ void rds_tcp_listen_data_ready(struct sock *sk)
|
|
* before it has been accepted and the accepter has set up their
|
|
* data_ready.. we only want to queue listen work for our listening
|
|
* socket
|
|
+ *
|
|
+ * (*ready)() may be null if we are racing with netns delete, and
|
|
+ * the listen socket is being torn down.
|
|
*/
|
|
if (sk->sk_state == TCP_LISTEN)
|
|
rds_tcp_accept_work(sk);
|
|
@@ -235,7 +238,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
|
|
|
|
out:
|
|
read_unlock_bh(&sk->sk_callback_lock);
|
|
- ready(sk);
|
|
+ if (ready)
|
|
+ ready(sk);
|
|
}
|
|
|
|
struct socket *rds_tcp_listen_init(struct net *net)
|
|
@@ -275,7 +279,7 @@ struct socket *rds_tcp_listen_init(struct net *net)
|
|
return NULL;
|
|
}
|
|
|
|
-void rds_tcp_listen_stop(struct socket *sock)
|
|
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
|
|
{
|
|
struct sock *sk;
|
|
|
|
@@ -296,5 +300,6 @@ void rds_tcp_listen_stop(struct socket *sock)
|
|
|
|
/* wait for accepts to stop and close the socket */
|
|
flush_workqueue(rds_wq);
|
|
+ flush_work(acceptor);
|
|
sock_release(sock);
|
|
}
|
|
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
|
|
index c062ceae19e6..c2ab864da50d 100644
|
|
--- a/net/sctp/socket.c
|
|
+++ b/net/sctp/socket.c
|
|
@@ -82,8 +82,8 @@
|
|
/* Forward declarations for internal helper functions. */
|
|
static int sctp_writeable(struct sock *sk);
|
|
static void sctp_wfree(struct sk_buff *skb);
|
|
-static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
|
|
- size_t msg_len);
|
|
+static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
|
|
+ size_t msg_len, struct sock **orig_sk);
|
|
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
|
|
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
|
|
static int sctp_wait_for_accept(struct sock *sk, long timeo);
|
|
@@ -1957,9 +1957,16 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
|
|
|
|
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
|
|
if (!sctp_wspace(asoc)) {
|
|
- err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
|
|
- if (err)
|
|
+ /* sk can be changed by peel off when waiting for buf. */
|
|
+ err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
|
|
+ if (err) {
|
|
+ if (err == -ESRCH) {
|
|
+ /* asoc is already dead. */
|
|
+ new_asoc = NULL;
|
|
+ err = -EPIPE;
|
|
+ }
|
|
goto out_free;
|
|
+ }
|
|
}
|
|
|
|
/* If an address is passed with the sendto/sendmsg call, it is used
|
|
@@ -4771,12 +4778,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
|
|
if (!asoc)
|
|
return -EINVAL;
|
|
|
|
- /* If there is a thread waiting on more sndbuf space for
|
|
- * sending on this asoc, it cannot be peeled.
|
|
- */
|
|
- if (waitqueue_active(&asoc->wait))
|
|
- return -EBUSY;
|
|
-
|
|
/* An association cannot be branched off from an already peeled-off
|
|
* socket, nor is this supported for tcp style sockets.
|
|
*/
|
|
@@ -7440,7 +7441,7 @@ void sctp_sock_rfree(struct sk_buff *skb)
|
|
|
|
/* Helper function to wait for space in the sndbuf. */
|
|
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
|
|
- size_t msg_len)
|
|
+ size_t msg_len, struct sock **orig_sk)
|
|
{
|
|
struct sock *sk = asoc->base.sk;
|
|
int err = 0;
|
|
@@ -7457,10 +7458,11 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
|
|
for (;;) {
|
|
prepare_to_wait_exclusive(&asoc->wait, &wait,
|
|
TASK_INTERRUPTIBLE);
|
|
+ if (asoc->base.dead)
|
|
+ goto do_dead;
|
|
if (!*timeo_p)
|
|
goto do_nonblock;
|
|
- if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
|
|
- asoc->base.dead)
|
|
+ if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
|
|
goto do_error;
|
|
if (signal_pending(current))
|
|
goto do_interrupted;
|
|
@@ -7473,11 +7475,17 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
|
|
release_sock(sk);
|
|
current_timeo = schedule_timeout(current_timeo);
|
|
lock_sock(sk);
|
|
+ if (sk != asoc->base.sk) {
|
|
+ release_sock(sk);
|
|
+ sk = asoc->base.sk;
|
|
+ lock_sock(sk);
|
|
+ }
|
|
|
|
*timeo_p = current_timeo;
|
|
}
|
|
|
|
out:
|
|
+ *orig_sk = sk;
|
|
finish_wait(&asoc->wait, &wait);
|
|
|
|
/* Release the association's refcnt. */
|
|
@@ -7485,6 +7493,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
|
|
|
|
return err;
|
|
|
|
+do_dead:
|
|
+ err = -ESRCH;
|
|
+ goto out;
|
|
+
|
|
do_error:
|
|
err = -EPIPE;
|
|
goto out;
|
|
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
|
|
index 5db68b371db2..600eacce653a 100644
|
|
--- a/net/sunrpc/sched.c
|
|
+++ b/net/sunrpc/sched.c
|
|
@@ -274,10 +274,9 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
|
|
|
|
static void rpc_set_active(struct rpc_task *task)
|
|
{
|
|
- trace_rpc_task_begin(task->tk_client, task, NULL);
|
|
-
|
|
rpc_task_set_debuginfo(task);
|
|
set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
|
|
+ trace_rpc_task_begin(task->tk_client, task, NULL);
|
|
}
|
|
|
|
/*
|
|
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
|
|
index 8ce5711ea21b..f19e6a57e118 100644
|
|
--- a/net/xfrm/xfrm_policy.c
|
|
+++ b/net/xfrm/xfrm_policy.c
|
|
@@ -1393,6 +1393,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
|
|
newp->xfrm_nr = old->xfrm_nr;
|
|
newp->index = old->index;
|
|
newp->type = old->type;
|
|
+ newp->family = old->family;
|
|
memcpy(newp->xfrm_vec, old->xfrm_vec,
|
|
newp->xfrm_nr*sizeof(struct xfrm_tmpl));
|
|
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
|
|
diff --git a/scripts/coccicheck b/scripts/coccicheck
|
|
index ec487b8e7051..c36b04b41686 100755
|
|
--- a/scripts/coccicheck
|
|
+++ b/scripts/coccicheck
|
|
@@ -29,12 +29,6 @@ else
|
|
VERBOSE=0
|
|
fi
|
|
|
|
-if [ -z "$J" ]; then
|
|
- NPROC=$(getconf _NPROCESSORS_ONLN)
|
|
-else
|
|
- NPROC="$J"
|
|
-fi
|
|
-
|
|
FLAGS="--very-quiet"
|
|
|
|
# You can use SPFLAGS to append extra arguments to coccicheck or override any
|
|
@@ -69,6 +63,9 @@ if [ "$C" = "1" -o "$C" = "2" ]; then
|
|
# Take only the last argument, which is the C file to test
|
|
shift $(( $# - 1 ))
|
|
OPTIONS="$COCCIINCLUDE $1"
|
|
+
|
|
+ # No need to parallelize Coccinelle since this mode takes one input file.
|
|
+ NPROC=1
|
|
else
|
|
ONLINE=0
|
|
if [ "$KBUILD_EXTMOD" = "" ] ; then
|
|
@@ -76,6 +73,12 @@ else
|
|
else
|
|
OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
|
|
fi
|
|
+
|
|
+ if [ -z "$J" ]; then
|
|
+ NPROC=$(getconf _NPROCESSORS_ONLN)
|
|
+ else
|
|
+ NPROC="$J"
|
|
+ fi
|
|
fi
|
|
|
|
if [ "$KBUILD_EXTMOD" != "" ] ; then
|
|
diff --git a/scripts/module-common.lds b/scripts/module-common.lds
|
|
index 73a2c7da0e55..53234e85192a 100644
|
|
--- a/scripts/module-common.lds
|
|
+++ b/scripts/module-common.lds
|
|
@@ -19,4 +19,6 @@ SECTIONS {
|
|
|
|
. = ALIGN(8);
|
|
.init_array 0 : { *(SORT(.init_array.*)) *(.init_array) }
|
|
+
|
|
+ __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) }
|
|
}
|
|
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
|
|
index 71b4a8af9d4d..7badec3498b8 100644
|
|
--- a/scripts/package/Makefile
|
|
+++ b/scripts/package/Makefile
|
|
@@ -39,10 +39,9 @@ if test "$(objtree)" != "$(srctree)"; then \
|
|
false; \
|
|
fi ; \
|
|
$(srctree)/scripts/setlocalversion --save-scmversion; \
|
|
-ln -sf $(srctree) $(2); \
|
|
tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
|
|
- $(addprefix $(2)/,$(TAR_CONTENT) $(3)); \
|
|
-rm -f $(2) $(objtree)/.scmversion
|
|
+ --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
|
|
+rm -f $(objtree)/.scmversion
|
|
|
|
# rpm-pkg
|
|
# ---------------------------------------------------------------------------
|
|
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
|
|
index 5030fcf23681..cb7f8f730c6d 100644
|
|
--- a/security/keys/request_key.c
|
|
+++ b/security/keys/request_key.c
|
|
@@ -250,11 +250,12 @@ static int construct_key(struct key *key, const void *callout_info,
|
|
* The keyring selected is returned with an extra reference upon it which the
|
|
* caller must release.
|
|
*/
|
|
-static void construct_get_dest_keyring(struct key **_dest_keyring)
|
|
+static int construct_get_dest_keyring(struct key **_dest_keyring)
|
|
{
|
|
struct request_key_auth *rka;
|
|
const struct cred *cred = current_cred();
|
|
struct key *dest_keyring = *_dest_keyring, *authkey;
|
|
+ int ret;
|
|
|
|
kenter("%p", dest_keyring);
|
|
|
|
@@ -263,6 +264,8 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
|
|
/* the caller supplied one */
|
|
key_get(dest_keyring);
|
|
} else {
|
|
+ bool do_perm_check = true;
|
|
+
|
|
/* use a default keyring; falling through the cases until we
|
|
* find one that we actually have */
|
|
switch (cred->jit_keyring) {
|
|
@@ -277,8 +280,10 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
|
|
dest_keyring =
|
|
key_get(rka->dest_keyring);
|
|
up_read(&authkey->sem);
|
|
- if (dest_keyring)
|
|
+ if (dest_keyring) {
|
|
+ do_perm_check = false;
|
|
break;
|
|
+ }
|
|
}
|
|
|
|
case KEY_REQKEY_DEFL_THREAD_KEYRING:
|
|
@@ -313,11 +318,29 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
|
|
default:
|
|
BUG();
|
|
}
|
|
+
|
|
+ /*
|
|
+ * Require Write permission on the keyring. This is essential
|
|
+ * because the default keyring may be the session keyring, and
|
|
+ * joining a keyring only requires Search permission.
|
|
+ *
|
|
+ * However, this check is skipped for the "requestor keyring" so
|
|
+ * that /sbin/request-key can itself use request_key() to add
|
|
+ * keys to the original requestor's destination keyring.
|
|
+ */
|
|
+ if (dest_keyring && do_perm_check) {
|
|
+ ret = key_permission(make_key_ref(dest_keyring, 1),
|
|
+ KEY_NEED_WRITE);
|
|
+ if (ret) {
|
|
+ key_put(dest_keyring);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
*_dest_keyring = dest_keyring;
|
|
kleave(" [dk %d]", key_serial(dest_keyring));
|
|
- return;
|
|
+ return 0;
|
|
}
|
|
|
|
/*
|
|
@@ -443,11 +466,15 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
|
|
if (ctx->index_key.type == &key_type_keyring)
|
|
return ERR_PTR(-EPERM);
|
|
|
|
- user = key_user_lookup(current_fsuid());
|
|
- if (!user)
|
|
- return ERR_PTR(-ENOMEM);
|
|
+ ret = construct_get_dest_keyring(&dest_keyring);
|
|
+ if (ret)
|
|
+ goto error;
|
|
|
|
- construct_get_dest_keyring(&dest_keyring);
|
|
+ user = key_user_lookup(current_fsuid());
|
|
+ if (!user) {
|
|
+ ret = -ENOMEM;
|
|
+ goto error_put_dest_keyring;
|
|
+ }
|
|
|
|
ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
|
|
key_user_put(user);
|
|
@@ -462,7 +489,7 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
|
|
} else if (ret == -EINPROGRESS) {
|
|
ret = 0;
|
|
} else {
|
|
- goto couldnt_alloc_key;
|
|
+ goto error_put_dest_keyring;
|
|
}
|
|
|
|
key_put(dest_keyring);
|
|
@@ -472,8 +499,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
|
|
construction_failed:
|
|
key_negate_and_link(key, key_negative_timeout, NULL, NULL);
|
|
key_put(key);
|
|
-couldnt_alloc_key:
|
|
+error_put_dest_keyring:
|
|
key_put(dest_keyring);
|
|
+error:
|
|
kleave(" = %d", ret);
|
|
return ERR_PTR(ret);
|
|
}
|
|
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
|
|
index 8e980aa678d0..074363b63cc4 100644
|
|
--- a/sound/core/pcm.c
|
|
+++ b/sound/core/pcm.c
|
|
@@ -149,7 +149,9 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
|
|
err = -ENXIO;
|
|
goto _error;
|
|
}
|
|
+ mutex_lock(&pcm->open_mutex);
|
|
err = snd_pcm_info_user(substream, info);
|
|
+ mutex_unlock(&pcm->open_mutex);
|
|
_error:
|
|
mutex_unlock(&register_mutex);
|
|
return err;
|
|
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
|
|
index 37d9cfbc29f9..b80985fbc334 100644
|
|
--- a/sound/core/seq/seq_timer.c
|
|
+++ b/sound/core/seq/seq_timer.c
|
|
@@ -355,7 +355,7 @@ static int initialize_timer(struct snd_seq_timer *tmr)
|
|
unsigned long freq;
|
|
|
|
t = tmr->timeri->timer;
|
|
- if (snd_BUG_ON(!t))
|
|
+ if (!t)
|
|
return -EINVAL;
|
|
|
|
freq = tmr->preferred_resolution;
|
|
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
|
|
index 6f9b388ec5a8..3f95d6b88f8c 100644
|
|
--- a/sound/soc/sh/rcar/ssiu.c
|
|
+++ b/sound/soc/sh/rcar/ssiu.c
|
|
@@ -44,7 +44,11 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
|
|
mask1 = (1 << 4) | (1 << 20); /* mask sync bit */
|
|
mask2 = (1 << 4); /* mask sync bit */
|
|
val1 = val2 = 0;
|
|
- if (rsnd_ssi_is_pin_sharing(io)) {
|
|
+ if (id == 8) {
|
|
+ /*
|
|
+ * SSI8 pin is sharing with SSI7, nothing to do.
|
|
+ */
|
|
+ } else if (rsnd_ssi_is_pin_sharing(io)) {
|
|
int shift = -1;
|
|
|
|
switch (id) {
|
|
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
|
|
index 9133d3e53d9d..24c897f0b571 100644
|
|
--- a/sound/usb/mixer.c
|
|
+++ b/sound/usb/mixer.c
|
|
@@ -204,6 +204,10 @@ static int snd_usb_copy_string_desc(struct mixer_build *state,
|
|
int index, char *buf, int maxlen)
|
|
{
|
|
int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
|
|
+
|
|
+ if (len < 0)
|
|
+ return 0;
|
|
+
|
|
buf[len] = 0;
|
|
return len;
|
|
}
|
|
@@ -2168,13 +2172,14 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
|
|
if (len)
|
|
;
|
|
else if (nameid)
|
|
- snd_usb_copy_string_desc(state, nameid, kctl->id.name,
|
|
+ len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
|
|
sizeof(kctl->id.name));
|
|
- else {
|
|
+ else
|
|
len = get_term_name(state, &state->oterm,
|
|
kctl->id.name, sizeof(kctl->id.name), 0);
|
|
- if (!len)
|
|
- strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
|
|
+
|
|
+ if (!len) {
|
|
+ strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
|
|
|
|
if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
|
|
append_ctl_name(kctl, " Clock Source");
|
|
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
|
|
index bc7adb84e679..60a94b3e532e 100644
|
|
--- a/tools/hv/hv_kvp_daemon.c
|
|
+++ b/tools/hv/hv_kvp_daemon.c
|
|
@@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)
|
|
for (;;) {
|
|
readp = &record[records_read];
|
|
records_read += fread(readp, sizeof(struct kvp_record),
|
|
- ENTRIES_PER_BLOCK * num_blocks,
|
|
- filep);
|
|
+ ENTRIES_PER_BLOCK * num_blocks - records_read,
|
|
+ filep);
|
|
|
|
if (ferror(filep)) {
|
|
- syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
|
|
+ syslog(LOG_ERR,
|
|
+ "Failed to read file, pool: %d; error: %d %s",
|
|
+ pool, errno, strerror(errno));
|
|
+ kvp_release_lock(pool);
|
|
exit(EXIT_FAILURE);
|
|
}
|
|
|
|
@@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)
|
|
|
|
if (record == NULL) {
|
|
syslog(LOG_ERR, "malloc failed");
|
|
+ kvp_release_lock(pool);
|
|
exit(EXIT_FAILURE);
|
|
}
|
|
continue;
|
|
@@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)
|
|
fclose(filep);
|
|
kvp_release_lock(pool);
|
|
}
|
|
+
|
|
static int kvp_file_init(void)
|
|
{
|
|
int fd;
|
|
- FILE *filep;
|
|
- size_t records_read;
|
|
char *fname;
|
|
- struct kvp_record *record;
|
|
- struct kvp_record *readp;
|
|
- int num_blocks;
|
|
int i;
|
|
int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
|
|
|
|
@@ -246,61 +246,19 @@ static int kvp_file_init(void)
|
|
|
|
for (i = 0; i < KVP_POOL_COUNT; i++) {
|
|
fname = kvp_file_info[i].fname;
|
|
- records_read = 0;
|
|
- num_blocks = 1;
|
|
sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
|
|
fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
|
|
|
|
if (fd == -1)
|
|
return 1;
|
|
|
|
-
|
|
- filep = fopen(fname, "re");
|
|
- if (!filep) {
|
|
- close(fd);
|
|
- return 1;
|
|
- }
|
|
-
|
|
- record = malloc(alloc_unit * num_blocks);
|
|
- if (record == NULL) {
|
|
- fclose(filep);
|
|
- close(fd);
|
|
- return 1;
|
|
- }
|
|
- for (;;) {
|
|
- readp = &record[records_read];
|
|
- records_read += fread(readp, sizeof(struct kvp_record),
|
|
- ENTRIES_PER_BLOCK,
|
|
- filep);
|
|
-
|
|
- if (ferror(filep)) {
|
|
- syslog(LOG_ERR, "Failed to read file, pool: %d",
|
|
- i);
|
|
- exit(EXIT_FAILURE);
|
|
- }
|
|
-
|
|
- if (!feof(filep)) {
|
|
- /*
|
|
- * We have more data to read.
|
|
- */
|
|
- num_blocks++;
|
|
- record = realloc(record, alloc_unit *
|
|
- num_blocks);
|
|
- if (record == NULL) {
|
|
- fclose(filep);
|
|
- close(fd);
|
|
- return 1;
|
|
- }
|
|
- continue;
|
|
- }
|
|
- break;
|
|
- }
|
|
kvp_file_info[i].fd = fd;
|
|
- kvp_file_info[i].num_blocks = num_blocks;
|
|
- kvp_file_info[i].records = record;
|
|
- kvp_file_info[i].num_records = records_read;
|
|
- fclose(filep);
|
|
-
|
|
+ kvp_file_info[i].num_blocks = 1;
|
|
+ kvp_file_info[i].records = malloc(alloc_unit);
|
|
+ if (kvp_file_info[i].records == NULL)
|
|
+ return 1;
|
|
+ kvp_file_info[i].num_records = 0;
|
|
+ kvp_update_mem_state(i);
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
|
|
index 248a820048df..66d31de60b9a 100644
|
|
--- a/tools/testing/selftests/powerpc/harness.c
|
|
+++ b/tools/testing/selftests/powerpc/harness.c
|
|
@@ -114,9 +114,11 @@ int test_harness(int (test_function)(void), char *name)
|
|
|
|
rc = run_test(test_function, name);
|
|
|
|
- if (rc == MAGIC_SKIP_RETURN_VALUE)
|
|
+ if (rc == MAGIC_SKIP_RETURN_VALUE) {
|
|
test_skip(name);
|
|
- else
|
|
+ /* so that skipped test is not marked as failed */
|
|
+ rc = 0;
|
|
+ } else
|
|
test_finish(name, rc);
|
|
|
|
return rc;
|
|
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
|
|
index 9b4610c6d3fb..f249e042b3b5 100644
|
|
--- a/tools/testing/selftests/x86/fsgsbase.c
|
|
+++ b/tools/testing/selftests/x86/fsgsbase.c
|
|
@@ -245,7 +245,7 @@ void do_unexpected_base(void)
|
|
long ret;
|
|
asm volatile ("int $0x80"
|
|
: "=a" (ret) : "a" (243), "b" (low_desc)
|
|
- : "flags");
|
|
+ : "r8", "r9", "r10", "r11");
|
|
memcpy(&desc, low_desc, sizeof(desc));
|
|
munmap(low_desc, sizeof(desc));
|
|
|
|
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
|
|
index f936a3cd3e35..ac1a7a3f87b2 100644
|
|
--- a/tools/testing/selftests/x86/ldt_gdt.c
|
|
+++ b/tools/testing/selftests/x86/ldt_gdt.c
|
|
@@ -45,6 +45,12 @@
|
|
#define AR_DB (1 << 22)
|
|
#define AR_G (1 << 23)
|
|
|
|
+#ifdef __x86_64__
|
|
+# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
|
|
+#else
|
|
+# define INT80_CLOBBERS
|
|
+#endif
|
|
+
|
|
static int nerrs;
|
|
|
|
/* Points to an array of 1024 ints, each holding its own index. */
|
|
@@ -649,7 +655,7 @@ static int invoke_set_thread_area(void)
|
|
asm volatile ("int $0x80"
|
|
: "=a" (ret), "+m" (low_user_desc) :
|
|
"a" (243), "b" (low_user_desc)
|
|
- : "flags");
|
|
+ : INT80_CLOBBERS);
|
|
return ret;
|
|
}
|
|
|
|
@@ -718,7 +724,7 @@ static void test_gdt_invalidation(void)
|
|
"+a" (eax)
|
|
: "m" (low_user_desc_clear),
|
|
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
|
|
- : "flags");
|
|
+ : INT80_CLOBBERS);
|
|
|
|
if (sel != 0) {
|
|
result = "FAIL";
|
|
@@ -749,7 +755,7 @@ static void test_gdt_invalidation(void)
|
|
"+a" (eax)
|
|
: "m" (low_user_desc_clear),
|
|
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
|
|
- : "flags");
|
|
+ : INT80_CLOBBERS);
|
|
|
|
if (sel != 0) {
|
|
result = "FAIL";
|
|
@@ -782,7 +788,7 @@ static void test_gdt_invalidation(void)
|
|
"+a" (eax)
|
|
: "m" (low_user_desc_clear),
|
|
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
|
|
- : "flags");
|
|
+ : INT80_CLOBBERS);
|
|
|
|
#ifdef __x86_64__
|
|
syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base);
|
|
@@ -835,7 +841,7 @@ static void test_gdt_invalidation(void)
|
|
"+a" (eax)
|
|
: "m" (low_user_desc_clear),
|
|
[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
|
|
- : "flags");
|
|
+ : INT80_CLOBBERS);
|
|
|
|
#ifdef __x86_64__
|
|
syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base);
|
|
diff --git a/tools/testing/selftests/x86/mpx-hw.h b/tools/testing/selftests/x86/mpx-hw.h
|
|
index 093c190178a9..28b3c7c553a4 100644
|
|
--- a/tools/testing/selftests/x86/mpx-hw.h
|
|
+++ b/tools/testing/selftests/x86/mpx-hw.h
|
|
@@ -51,14 +51,14 @@
|
|
struct mpx_bd_entry {
|
|
union {
|
|
char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
|
|
- void *contents[1];
|
|
+ void *contents[0];
|
|
};
|
|
} __attribute__((packed));
|
|
|
|
struct mpx_bt_entry {
|
|
union {
|
|
char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
|
|
- unsigned long contents[1];
|
|
+ unsigned long contents[0];
|
|
};
|
|
} __attribute__((packed));
|
|
|
|
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
|
|
index b037ce9cf116..eaea92439708 100644
|
|
--- a/tools/testing/selftests/x86/ptrace_syscall.c
|
|
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
|
|
@@ -58,7 +58,8 @@ static void do_full_int80(struct syscall_args32 *args)
|
|
asm volatile ("int $0x80"
|
|
: "+a" (args->nr),
|
|
"+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
|
|
- "+S" (args->arg3), "+D" (args->arg4), "+r" (bp));
|
|
+ "+S" (args->arg3), "+D" (args->arg4), "+r" (bp)
|
|
+ : : "r8", "r9", "r10", "r11");
|
|
args->arg5 = bp;
|
|
#else
|
|
sys32_helper(args, int80_and_ret);
|
|
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
|
|
index 50c26358e8b7..a48da95c18fd 100644
|
|
--- a/tools/testing/selftests/x86/single_step_syscall.c
|
|
+++ b/tools/testing/selftests/x86/single_step_syscall.c
|
|
@@ -56,9 +56,11 @@ static volatile sig_atomic_t sig_traps;
|
|
#ifdef __x86_64__
|
|
# define REG_IP REG_RIP
|
|
# define WIDTH "q"
|
|
+# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
|
|
#else
|
|
# define REG_IP REG_EIP
|
|
# define WIDTH "l"
|
|
+# define INT80_CLOBBERS
|
|
#endif
|
|
|
|
static unsigned long get_eflags(void)
|
|
@@ -140,7 +142,8 @@ int main()
|
|
|
|
printf("[RUN]\tSet TF and check int80\n");
|
|
set_eflags(get_eflags() | X86_EFLAGS_TF);
|
|
- asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid));
|
|
+ asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
|
|
+ : INT80_CLOBBERS);
|
|
check_result();
|
|
|
|
/*
|
|
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
|
|
index c8aeb7b91ec8..95021246ee26 100644
|
|
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
|
|
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
|
|
@@ -77,11 +77,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
|
|
else
|
|
elrsr1 = 0;
|
|
|
|
-#ifdef CONFIG_CPU_BIG_ENDIAN
|
|
- cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
|
|
-#else
|
|
cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
|
|
-#endif
|
|
}
|
|
|
|
static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
|
|
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
|
|
index f138ed2e9c63..a26c6773d6df 100644
|
|
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
|
|
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
|
|
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
|
|
u32 nr = dist->nr_spis;
|
|
int i, ret;
|
|
|
|
- entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
|
|
- GFP_KERNEL);
|
|
+ entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
|
|
if (!entries)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
|
|
index 4660a7d04eea..ebcaf4641d2b 100644
|
|
--- a/virt/kvm/arm/vgic/vgic-its.c
|
|
+++ b/virt/kvm/arm/vgic/vgic-its.c
|
|
@@ -360,29 +360,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
|
|
return ret;
|
|
}
|
|
|
|
-static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
|
|
- struct vgic_its *its,
|
|
- gpa_t addr, unsigned int len)
|
|
-{
|
|
- u32 reg = 0;
|
|
-
|
|
- mutex_lock(&its->cmd_lock);
|
|
- if (its->creadr == its->cwriter)
|
|
- reg |= GITS_CTLR_QUIESCENT;
|
|
- if (its->enabled)
|
|
- reg |= GITS_CTLR_ENABLE;
|
|
- mutex_unlock(&its->cmd_lock);
|
|
-
|
|
- return reg;
|
|
-}
|
|
-
|
|
-static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
|
|
- gpa_t addr, unsigned int len,
|
|
- unsigned long val)
|
|
-{
|
|
- its->enabled = !!(val & GITS_CTLR_ENABLE);
|
|
-}
|
|
-
|
|
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
|
|
struct vgic_its *its,
|
|
gpa_t addr, unsigned int len)
|
|
@@ -687,6 +664,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
|
|
return E_ITS_MAPC_COLLECTION_OOR;
|
|
|
|
collection = kzalloc(sizeof(*collection), GFP_KERNEL);
|
|
+ if (!collection)
|
|
+ return -ENOMEM;
|
|
|
|
collection->collection_id = coll_id;
|
|
collection->target_addr = COLLECTION_NOT_MAPPED;
|
|
@@ -1160,33 +1139,16 @@ static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
|
|
#define ITS_CMD_SIZE 32
|
|
#define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5))
|
|
|
|
-/*
|
|
- * By writing to CWRITER the guest announces new commands to be processed.
|
|
- * To avoid any races in the first place, we take the its_cmd lock, which
|
|
- * protects our ring buffer variables, so that there is only one user
|
|
- * per ITS handling commands at a given time.
|
|
- */
|
|
-static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
|
|
- gpa_t addr, unsigned int len,
|
|
- unsigned long val)
|
|
+/* Must be called with the cmd_lock held. */
|
|
+static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
|
|
{
|
|
gpa_t cbaser;
|
|
u64 cmd_buf[4];
|
|
- u32 reg;
|
|
-
|
|
- if (!its)
|
|
- return;
|
|
-
|
|
- mutex_lock(&its->cmd_lock);
|
|
|
|
- reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
|
|
- reg = ITS_CMD_OFFSET(reg);
|
|
- if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
|
|
- mutex_unlock(&its->cmd_lock);
|
|
+ /* Commands are only processed when the ITS is enabled. */
|
|
+ if (!its->enabled)
|
|
return;
|
|
- }
|
|
|
|
- its->cwriter = reg;
|
|
cbaser = CBASER_ADDRESS(its->cbaser);
|
|
|
|
while (its->cwriter != its->creadr) {
|
|
@@ -1206,6 +1168,34 @@ static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
|
|
if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
|
|
its->creadr = 0;
|
|
}
|
|
+}
|
|
+
|
|
+/*
|
|
+ * By writing to CWRITER the guest announces new commands to be processed.
|
|
+ * To avoid any races in the first place, we take the its_cmd lock, which
|
|
+ * protects our ring buffer variables, so that there is only one user
|
|
+ * per ITS handling commands at a given time.
|
|
+ */
|
|
+static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
|
|
+ gpa_t addr, unsigned int len,
|
|
+ unsigned long val)
|
|
+{
|
|
+ u64 reg;
|
|
+
|
|
+ if (!its)
|
|
+ return;
|
|
+
|
|
+ mutex_lock(&its->cmd_lock);
|
|
+
|
|
+ reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
|
|
+ reg = ITS_CMD_OFFSET(reg);
|
|
+ if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
|
|
+ mutex_unlock(&its->cmd_lock);
|
|
+ return;
|
|
+ }
|
|
+ its->cwriter = reg;
|
|
+
|
|
+ vgic_its_process_commands(kvm, its);
|
|
|
|
mutex_unlock(&its->cmd_lock);
|
|
}
|
|
@@ -1286,6 +1276,39 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
|
|
*regptr = reg;
|
|
}
|
|
|
|
+static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
|
|
+ struct vgic_its *its,
|
|
+ gpa_t addr, unsigned int len)
|
|
+{
|
|
+ u32 reg = 0;
|
|
+
|
|
+ mutex_lock(&its->cmd_lock);
|
|
+ if (its->creadr == its->cwriter)
|
|
+ reg |= GITS_CTLR_QUIESCENT;
|
|
+ if (its->enabled)
|
|
+ reg |= GITS_CTLR_ENABLE;
|
|
+ mutex_unlock(&its->cmd_lock);
|
|
+
|
|
+ return reg;
|
|
+}
|
|
+
|
|
+static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
|
|
+ gpa_t addr, unsigned int len,
|
|
+ unsigned long val)
|
|
+{
|
|
+ mutex_lock(&its->cmd_lock);
|
|
+
|
|
+ its->enabled = !!(val & GITS_CTLR_ENABLE);
|
|
+
|
|
+ /*
|
|
+ * Try to process any pending commands. This function bails out early
|
|
+ * if the ITS is disabled or no commands have been queued.
|
|
+ */
|
|
+ vgic_its_process_commands(kvm, its);
|
|
+
|
|
+ mutex_unlock(&its->cmd_lock);
|
|
+}
|
|
+
|
|
#define REGISTER_ITS_DESC(off, rd, wr, length, acc) \
|
|
{ \
|
|
.reg_offset = off, \
|
|
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
|
|
index f4c6d4f6d2e8..4569fdcab701 100644
|
|
--- a/virt/kvm/kvm_main.c
|
|
+++ b/virt/kvm/kvm_main.c
|
|
@@ -125,6 +125,11 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
|
|
|
|
static bool largepages_enabled = true;
|
|
|
|
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
|
|
+ unsigned long start, unsigned long end)
|
|
+{
|
|
+}
|
|
+
|
|
bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
|
|
{
|
|
if (pfn_valid(pfn))
|
|
@@ -361,6 +366,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
|
|
kvm_flush_remote_tlbs(kvm);
|
|
|
|
spin_unlock(&kvm->mmu_lock);
|
|
+
|
|
+ kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
|
|
+
|
|
srcu_read_unlock(&kvm->srcu, idx);
|
|
}
|
|
|