diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 77f4de59dc9c..d499676890d8 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -508,7 +508,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
min: Minimal size of receive buffer used by TCP sockets.
It is guaranteed to each TCP socket, even under moderate memory
pressure.
- Default: 1 page
+ Default: 4K

default: initial size of receive buffer used by TCP sockets.
This value overrides net.core.rmem_default used by other protocols.
@@ -666,7 +666,7 @@ tcp_window_scaling - BOOLEAN
tcp_wmem - vector of 3 INTEGERs: min, default, max
min: Amount of memory reserved for send buffers for TCP sockets.
Each TCP socket has rights to use it due to fact of its birth.
- Default: 1 page
+ Default: 4K

default: initial size of send buffer used by TCP sockets. This
value overrides net.core.wmem_default used by other protocols.
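
For reference, the tcp_rmem/tcp_wmem triples documented above can be inspected from userspace through procfs. A minimal sketch, assuming the standard /proc path and abbreviated error handling:

    /* Sketch: print the tcp_rmem triple whose minimum the hunk above
     * now documents as 4K (4096 bytes). */
    #include <stdio.h>

    int main(void)
    {
        unsigned long min, def, max;
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_rmem", "r");
        int ok = f && fscanf(f, "%lu %lu %lu", &min, &def, &max) == 3;

        if (f)
            fclose(f);
        if (!ok)
            return 1;
        printf("min=%lu default=%lu max=%lu\n", min, def, max);
        return 0;
    }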
diff --git a/Makefile b/Makefile
index 38acc6047d7d..0fdae0f455ef 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 24
+SUBLEVEL = 25
EXTRAVERSION =
NAME = Petit Gorille

diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 4f2c5ec75714..e262fa9ef334 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -97,6 +97,8 @@
};

&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
clock-frequency = <2600000>;

twl: twl@48 {
@@ -215,7 +217,12 @@
>;
};

-
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
};

&omap3_pmx_wkup {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 6d89736c7b44..cf22b35f0a28 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -104,6 +104,8 @@
};

&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
clock-frequency = <2600000>;

twl: twl@48 {
@@ -211,6 +213,12 @@
OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */
>;
};
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
};

&uart2 {
diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
index 99cfae875e12..5eae4776ffde 100644
--- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
@@ -110,26 +110,6 @@
};
};

-&cpu0 {
- cpu0-supply = <&vdd_cpu>;
- operating-points = <
- /* KHz uV */
- 1800000 1400000
- 1608000 1350000
- 1512000 1300000
- 1416000 1200000
- 1200000 1100000
- 1008000 1050000
- 816000 1000000
- 696000 950000
- 600000 900000
- 408000 900000
- 312000 900000
- 216000 900000
- 126000 900000
- >;
-};
-
&emmc {
status = "okay";
bus-width = <8>;
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 5638ce0c9524..63d6b404d88e 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING

KVM=../../../../virt/kvm

+CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
+
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
+
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
index 111bda8cdebd..be4b8b0a40ad 100644
--- a/arch/arm/kvm/hyp/banked-sr.c
+++ b/arch/arm/kvm/hyp/banked-sr.c
@@ -20,6 +20,10 @@

#include <asm/kvm_hyp.h>

+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
__asm__(".arch_extension virt");

void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 9b49867154bf..63fa79f9f121 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -42,7 +42,7 @@ config MACH_ARMADA_375
depends on ARCH_MULTI_V7
select ARMADA_370_XP_IRQ
select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_GIC
select ARMADA_375_CLK
select HAVE_ARM_SCU
@@ -58,7 +58,7 @@ config MACH_ARMADA_38X
bool "Marvell Armada 380/385 boards"
depends on ARCH_MULTI_V7
select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_GIC
select ARM_GLOBAL_TIMER
select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index aff6994950ba..a2399fd66e97 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
/*****************************************************************************
* Ethernet switch
****************************************************************************/
-static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii";
-static __initdata struct mdio_board_info
- orion_ge00_switch_board_info;
+static __initdata struct mdio_board_info orion_ge00_switch_board_info = {
+ .bus_id = "orion-mii",
+ .modalias = "mv88e6085",
+};

void __init orion_ge00_switch_init(struct dsa_chip_data *d)
{
- struct mdio_board_info *bd;
unsigned int i;

if (!IS_BUILTIN(CONFIG_PHYLIB))
return;

- for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
- if (!strcmp(d->port_names[i], "cpu"))
+ for (i = 0; i < ARRAY_SIZE(d->port_names); i++) {
+ if (!strcmp(d->port_names[i], "cpu")) {
+ d->netdev[i] = &orion_ge00.dev;
break;
+ }
+ }

- bd = &orion_ge00_switch_board_info;
- bd->bus_id = orion_ge00_mvmdio_bus_name;
- bd->mdio_addr = d->sw_addr;
- d->netdev[i] = &orion_ge00.dev;
- strcpy(bd->modalias, "mv88e6085");
- bd->platform_data = d;
+ orion_ge00_switch_board_info.mdio_addr = d->sw_addr;
+ orion_ge00_switch_board_info.platform_data = d;

mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
}
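
The rewrite works because struct mdio_board_info carries bus_id and modalias as fixed-size char arrays, so both strings can be filled in by a static initializer instead of strcpy() at run time; only the MDIO address and platform data still depend on runtime information. A reduced sketch of the pattern (the helper function is illustrative):

    /* Sketch of the static-initializer pattern used above; field names
     * follow struct mdio_board_info. */
    static struct mdio_board_info board_info = {
            .bus_id   = "orion-mii",        /* array member: copied at build time */
            .modalias = "mv88e6085",
    };

    static void register_switch(int sw_addr, void *pdata)
    {
            board_info.mdio_addr = sw_addr; /* only runtime-dependent fields left */
            board_info.platform_data = pdata;
            mdiobus_register_board_info(&board_info, 1);
    }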
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 3742508cc534..bd5ce31936f5 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);

diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 0e6ab6e4a4e9..2dbe5580a1a4 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -316,6 +316,8 @@ extern int _parisc_requires_coherency;
#define parisc_requires_coherency() (0)
#endif

+extern int running_on_qemu;
+
#endif /* __ASSEMBLY__ */

#endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 19c0c141bc3f..79089778725b 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page);
int __flush_tlb_range(unsigned long sid, unsigned long start,
unsigned long end)
{
- unsigned long flags, size;
+ unsigned long flags;

- size = (end - start);
- if (size >= parisc_tlb_flush_threshold) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_tlb_flush_threshold) {
flush_tlb_all();
return 1;
}
@@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm)
struct vm_area_struct *vma;
pgd_t *pgd;

- /* Flush the TLB to avoid speculation if coherency is required. */
- if (parisc_requires_coherency())
- flush_tlb_all();
-
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
- if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_tlb_all();
flush_cache_all();
return;
}
@@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm)
if (mm->context == mfsp(3)) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
- if ((vma->vm_flags & VM_EXEC) == 0)
- continue;
- flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}
return;
}
@@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- BUG_ON(!vma->vm_mm->context);
-
- /* Flush the TLB to avoid speculation if coherency is required. */
- if (parisc_requires_coherency())
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_cache_flush_threshold) {
flush_tlb_range(vma, start, end);
-
- if ((end - start) >= parisc_cache_flush_threshold
- || vma->vm_mm->context != mfsp(3)) {
flush_cache_all();
return;
}
@@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_struct *vma,
flush_user_dcache_range_asm(start, end);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(start, end);
+ flush_tlb_range(vma, start, end);
}

void
@@ -604,8 +598,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
BUG_ON(!vma->vm_mm->context);

if (pfn_valid(pfn)) {
- if (parisc_requires_coherency())
- flush_tlb_page(vma, vmaddr);
+ flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
@@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;

- if ((unsigned long)size > parisc_cache_flush_threshold)
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ (unsigned long)size >= parisc_cache_flush_threshold) {
+ flush_tlb_kernel_range(start, end);
flush_data_cache();
- else
- flush_kernel_dcache_range_asm(start, start + size);
+ return;
+ }
+
+ flush_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;

- if ((unsigned long)size > parisc_cache_flush_threshold)
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ (unsigned long)size >= parisc_cache_flush_threshold) {
+ flush_tlb_kernel_range(start, end);
flush_data_cache();
- else
- flush_kernel_dcache_range_asm(start, start + size);
+ return;
+ }
+
+ purge_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
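
The recurring guard in these cache.c hunks picks a whole-cache/TLB flush only when the range is large enough to make it cheaper, and only when flush_tlb_all() is safe to issue: it can send IPIs, so it must not run with interrupts disabled on SMP. A condensed sketch of the decision, with illustrative names:

    /* Sketch of the threshold guard used throughout cache.c above. */
    static inline int can_flush_all(void)
    {
            /* flush_tlb_all() may IPI other CPUs; with IRQs off on SMP
             * that could deadlock, so fall back to the ranged path. */
            return !IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled();
    }

    static void flush_range(unsigned long start, unsigned long end)
    {
            if (can_flush_all() &&
                end - start >= parisc_cache_flush_threshold) {
                    flush_tlb_all();        /* cheaper than stepping a huge range */
                    flush_cache_all();
                    return;
            }
            flush_kernel_dcache_range_asm(start, end);   /* ranged flush */
            flush_tlb_kernel_range(start, end);
    }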
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 2d40c4ff3f69..67b0f7532e83 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
.procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)

+ENTRY_CFI(purge_kernel_dcache_range_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: cmpb,COND(<<),n %r26, %r25,1b
+ pdc,m %r23(%r26)
+
+ sync
+ syncdma
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
ENTRY_CFI(flush_user_icache_range_asm)
.proc
.callinfo NO_CALLS
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 4b8fd6dc22da..f7e684560186 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
next_tick = cpuinfo->it_value;

/* Calculate how many ticks have elapsed. */
+ now = mfctl(16);
do {
++ticks_elapsed;
next_tick += cpt;
- now = mfctl(16);
} while (next_tick - now > cpt);

/* Store (in CR16 cycles) up to when we are accounting right now. */
@@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
* if one or the other wrapped. If "now" is "bigger" we'll end up
* with a very large unsigned number.
*/
- while (next_tick - mfctl(16) > cpt)
+ now = mfctl(16);
+ while (next_tick - now > cpt)
next_tick += cpt;

/* Program the IT when to deliver the next interrupt.
* Only bottom 32-bits of next_tick are writable in CR16!
* Timer interrupt will be delivered at least a few hundred cycles
- * after the IT fires, so if we are too close (<= 500 cycles) to the
+ * after the IT fires, so if we are too close (<= 8000 cycles) to the
* next cycle, simply skip it.
*/
- if (next_tick - mfctl(16) <= 500)
+ if (next_tick - now <= 8000)
next_tick += cpt;
mtctl(next_tick, 16);

@@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void)
* different sockets, so mark them unstable and lower rating on
* multi-socket SMP systems.
*/
- if (num_online_cpus() > 1) {
+ if (num_online_cpus() > 1 && !running_on_qemu) {
int cpu;
unsigned long cpu0_loc;
cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
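
The timer hunks read CR16 once and reuse that value, so next_tick is advanced against a stable "now"; unsigned subtraction makes the wrap-around case come out as a huge value, and the loop still terminates. A freestanding sketch of the catch-up logic, with illustrative names (cpt is "cycles per tick"):

    /* Sketch: advance next_tick past `now`, counting missed ticks.
     * The arithmetic is deliberately unsigned: if the counter wrapped,
     * (*next_tick - now) is huge and stepping simply continues. */
    static unsigned int catch_up(unsigned int now, unsigned int *next_tick,
                                 unsigned int cpt)
    {
            unsigned int ticks = 0;

            while (*next_tick - now > cpt) {
                    *next_tick += cpt;
                    ticks++;
            }
            /* Too close for the interrupt to arm reliably? Skip a tick. */
            if (*next_tick - now <= 8000)
                    *next_tick += cpt;
            return ticks;
    }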
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a832ad031cee..5185be314661 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -173,8 +173,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
+ const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+ const u64 ckc = vcpu->arch.sie_block->ckc;
+
+ if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+ if ((s64)ckc >= (s64)now)
+ return 0;
+ } else if (ckc >= now) {
return 0;
+ }
return ckc_interrupts_enabled(vcpu);
}

@@ -1004,13 +1011,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)

static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
- u64 now, cputm, sltime = 0;
+ const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+ const u64 ckc = vcpu->arch.sie_block->ckc;
+ u64 cputm, sltime = 0;

if (ckc_interrupts_enabled(vcpu)) {
- now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
- sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
- /* already expired or overflow? */
- if (!sltime || vcpu->arch.sie_block->ckc <= now)
+ if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+ if ((s64)now < (s64)ckc)
+ sltime = tod_to_ns((s64)ckc - (s64)now);
+ } else if (now < ckc) {
+ sltime = tod_to_ns(ckc - now);
+ }
+ /* already expired */
+ if (!sltime)
return 0;
if (cpu_timer_interrupts_enabled(vcpu)) {
cputm = kvm_s390_get_cpu_timer(vcpu);
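
The CR0 bit tested above (0x0020000000000000) selects the clock-comparator sign control: with it set, comparator and TOD are compared as signed values, so timestamps on either side of the 2^63 wrap still order correctly. A minimal sketch of just the comparison:

    /* Sketch: has the clock comparator fired? `signed_mode` models the
     * CR0 bit tested in the hunks above. */
    static bool ckc_expired(u64 now, u64 ckc, bool signed_mode)
    {
            if (signed_mode)
                    return (s64)ckc < (s64)now;  /* orders across the 2^63 wrap */
            return ckc < now;                    /* plain unsigned ordering */
    }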
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6e3d80b2048e..f4f12ecd0cec 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -169,6 +169,28 @@ int kvm_arch_hardware_enable(void)
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
unsigned long end);

+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+ u8 delta_idx = 0;
+
+ /*
+ * The TOD jumps by delta, we have to compensate this by adding
+ * -delta to the epoch.
+ */
+ delta = -delta;
+
+ /* sign-extension - we're adding to signed values below */
+ if ((s64)delta < 0)
+ delta_idx = -1;
+
+ scb->epoch += delta;
+ if (scb->ecd & ECD_MEF) {
+ scb->epdx += delta_idx;
+ if (scb->epoch < delta)
+ scb->epdx += 1;
+ }
+}
+
/*
* This callback is executed during stop_machine(). All CPUs are therefore
* temporarily stopped. In order not to change guest behavior, we have to
@@ -184,13 +206,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
unsigned long long *delta = v;

list_for_each_entry(kvm, &vm_list, vm_list) {
- kvm->arch.epoch -= *delta;
kvm_for_each_vcpu(i, vcpu, kvm) {
- vcpu->arch.sie_block->epoch -= *delta;
+ kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+ if (i == 0) {
+ kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+ kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+ }
if (vcpu->arch.cputm_enabled)
vcpu->arch.cputm_start += *delta;
if (vcpu->arch.vsie_block)
- vcpu->arch.vsie_block->epoch -= *delta;
+ kvm_clock_sync_scb(vcpu->arch.vsie_block,
+ *delta);
}
}
return NOTIFY_OK;
@@ -888,12 +914,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
return -EFAULT;

- if (test_kvm_facility(kvm, 139))
- kvm_s390_set_tod_clock_ext(kvm, &gtod);
- else if (gtod.epoch_idx == 0)
- kvm_s390_set_tod_clock(kvm, gtod.tod);
- else
+ if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
return -EINVAL;
+ kvm_s390_set_tod_clock(kvm, &gtod);

VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
gtod.epoch_idx, gtod.tod);
@@ -918,13 +941,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
- u64 gtod;
+ struct kvm_s390_vm_tod_clock gtod = { 0 };

- if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+ if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+ sizeof(gtod.tod)))
return -EFAULT;

- kvm_s390_set_tod_clock(kvm, gtod);
- VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+ kvm_s390_set_tod_clock(kvm, &gtod);
+ VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
return 0;
}

@@ -2359,6 +2383,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
mutex_lock(&vcpu->kvm->lock);
preempt_disable();
vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+ vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
preempt_enable();
mutex_unlock(&vcpu->kvm->lock);
if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -2945,8 +2970,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
return 0;
}

-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
- const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+ const struct kvm_s390_vm_tod_clock *gtod)
{
struct kvm_vcpu *vcpu;
struct kvm_s390_tod_clock_ext htod;
@@ -2958,10 +2983,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
get_tod_clock_ext((char *)&htod);

kvm->arch.epoch = gtod->tod - htod.tod;
- kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
- if (kvm->arch.epoch > gtod->tod)
- kvm->arch.epdx -= 1;
+ kvm->arch.epdx = 0;
+ if (test_kvm_facility(kvm, 139)) {
+ kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+ if (kvm->arch.epoch > gtod->tod)
+ kvm->arch.epdx -= 1;
+ }

kvm_s390_vcpu_block_all(kvm);
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -2974,22 +3001,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
mutex_unlock(&kvm->lock);
}

-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
- struct kvm_vcpu *vcpu;
- int i;
-
- mutex_lock(&kvm->lock);
- preempt_disable();
- kvm->arch.epoch = tod - get_tod_clock();
- kvm_s390_vcpu_block_all(kvm);
- kvm_for_each_vcpu(i, vcpu, kvm)
- vcpu->arch.sie_block->epoch = kvm->arch.epoch;
- kvm_s390_vcpu_unblock_all(kvm);
- preempt_enable();
- mutex_unlock(&kvm->lock);
-}
-
/**
* kvm_arch_fault_in_page - fault-in guest page if necessary
* @vcpu: The corresponding virtual cpu
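
kvm_clock_sync_scb() above is multi-word arithmetic by hand: with the multiple-epoch facility the guest epoch is the pair (epdx:epoch), so adding a 64-bit delta needs the delta's sign extended into the high byte plus a carry when the low word overflows. The same add as a standalone sketch:

    /* Sketch: the add performed by kvm_clock_sync_scb(). epdx is the
     * high (epoch index) byte, epoch the low 64 bits; delta has already
     * been negated by the caller, as in the hunk above. */
    static void epoch_add(u8 *epdx, u64 *epoch, u64 delta)
    {
            u8 high = ((s64)delta < 0) ? 0xff : 0x00; /* sign-extend delta */

            *epoch += delta;
            *epdx += high;
            if (*epoch < delta)     /* low word wrapped: carry into epdx */
                    *epdx += 1;
    }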
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 9f8fdd7b2311..e22d94f494a7 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -272,9 +272,8 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
- const struct kvm_s390_vm_tod_clock *gtod);
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+ const struct kvm_s390_vm_tod_clock *gtod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 7bd3a59232f0..734283a21677 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -84,9 +84,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_vm_tod_clock gtod = { 0 };
int rc;
u8 ar;
- u64 op2, val;
+ u64 op2;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -94,12 +95,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (op2 & 7) /* Operand must be on a doubleword boundary */
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
+ rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);

- VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
- kvm_s390_set_tod_clock(vcpu->kvm, val);
+ VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
+ kvm_s390_set_tod_clock(vcpu->kvm, &gtod);

kvm_s390_set_psw_cc(vcpu, 0);
return 0;
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 8b8f1f14a0bf..5c790e93657d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -350,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
pmdval_t v = native_pmd_val(pmd);

- return __pmd(v | set);
+ return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
pmdval_t v = native_pmd_val(pmd);

- return __pmd(v & ~clear);
+ return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
@@ -409,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
pudval_t v = native_pud_val(pud);

- return __pud(v | set);
+ return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
pudval_t v = native_pud_val(pud);

- return __pud(v & ~clear);
+ return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index e55466760ff8..b3ec519e3982 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[];
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);
+void sync_initial_page_table(void);

/*
* Define this if things work differently on an i386 and an i486:
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 81462e9a34f6..1149d2112b2e 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[];
#define swapper_pg_dir init_top_pgt

extern void paging_init(void);
+static inline void sync_initial_page_table(void) { }

#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %p(%016lx)\n", \
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 3696398a9475..246f15b4e64c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -323,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud)
#else
#include <asm-generic/pgtable-nopud.h>

+static inline pud_t native_make_pud(pudval_t val)
+{
+ return (pud_t) { .p4d.pgd = native_make_pgd(val) };
+}
+
static inline pudval_t native_pud_val(pud_t pud)
{
return native_pgd_val(pud.p4d.pgd);
@@ -344,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
#else
#include <asm-generic/pgtable-nopmd.h>

+static inline pmd_t native_make_pmd(pmdval_t val)
+{
+ return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
+}
+
static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return native_pgd_val(pmd.pud.p4d.pgd);
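
With folded page-table levels, pmd_t/pud_t are wrapper structs around the pgd, so a raw value has to be rebuilt through each layer; the generic __pmd()/__pud() constructors do not exist in the folded configuration, which is why native_make_pmd()/native_make_pud() are added above. A reduced model of the pattern:

    /* Sketch: folded page-table levels as nested single-member structs,
     * and a typed constructor that rebuilds the wrapper from a raw value.
     * Mirrors the native_make_pud() addition above. */
    typedef unsigned long pgdval_t;
    typedef struct { pgdval_t pgd; } pgd_t;
    typedef struct { pgd_t pgd; } p4d_t;     /* p4d folded into pgd */
    typedef struct { p4d_t p4d; } pud_t;     /* pud folded into p4d */

    static inline pgd_t make_pgd(pgdval_t val) { return (pgd_t){ val }; }

    static inline pud_t make_pud(pgdval_t val)
    {
            return (pud_t){ .p4d.pgd = make_pgd(val) };  /* through each layer */
    }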
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c54361a22f59..efbcf5283520 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1238,20 +1238,13 @@ void __init setup_arch(char **cmdline_p)

kasan_init();

-#ifdef CONFIG_X86_32
- /* sync back kernel address range */
- clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- KERNEL_PGD_PTRS);
-
/*
- * sync back low identity map too. It is used for example
- * in the 32-bit EFI stub.
+ * Sync back kernel address range.
+ *
+ * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+ * this call?
*/
- clone_pgd_range(initial_page_table,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+ sync_initial_page_table();

tboot_probe();

diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 497aa766fab3..ea554f812ee1 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void)
/* Setup cpu initialized, callin, callout masks */
setup_cpu_local_masks();

-#ifdef CONFIG_X86_32
/*
* Sync back kernel address range again. We already did this in
* setup_arch(), but percpu data also needs to be available in
* the smpboot asm. We can't reliably pick up percpu mappings
* using vmalloc_fault(), because exception dispatch needs
* percpu data.
+ *
+ * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+ * this call?
*/
- clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- KERNEL_PGD_PTRS);
-
- /*
- * sync back low identity map too. It is used for example
- * in the 32-bit EFI stub.
- */
- clone_pgd_range(initial_page_table,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+ sync_initial_page_table();
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ef03efba1c23..8cfdb6484fd0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1944,14 +1944,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)

void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
- struct kvm_lapic *apic;
+ struct kvm_lapic *apic = vcpu->arch.apic;
int i;

- apic_debug("%s\n", __func__);
+ if (!apic)
+ return;

- ASSERT(vcpu);
- apic = vcpu->arch.apic;
- ASSERT(apic != NULL);
+ apic_debug("%s\n", __func__);

/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);
@@ -2107,7 +2106,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
*/
vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
- kvm_lapic_reset(vcpu, false);
kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

return 0;
@@ -2511,7 +2509,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)

pe = xchg(&apic->pending_events, 0);
if (test_bit(KVM_APIC_INIT, &pe)) {
- kvm_lapic_reset(vcpu, true);
kvm_vcpu_reset(vcpu, true);
if (kvm_vcpu_is_bsp(apic->vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ca000fc644bc..2b6f8a4f2731 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -150,6 +150,20 @@ module_param(dbg, bool, 0644);
/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3

+/*
+ * Return values of handle_mmio_page_fault and mmu.page_fault:
+ * RET_PF_RETRY: let CPU fault again on the address.
+ * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
+ *
+ * For handle_mmio_page_fault only:
+ * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
+ */
+enum {
+ RET_PF_RETRY = 0,
+ RET_PF_EMULATE = 1,
+ RET_PF_INVALID = 2,
+};
+
struct pte_list_desc {
u64 *sptes[PTE_LIST_EXT];
struct pte_list_desc *more;
@@ -2794,13 +2808,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
return ret;
}

-static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
- int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
- bool speculative, bool host_writable)
+static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
+ int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
+ bool speculative, bool host_writable)
{
int was_rmapped = 0;
int rmap_count;
- bool emulate = false;
+ int ret = RET_PF_RETRY;

pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
*sptep, write_fault, gfn);
@@ -2830,12 +2844,12 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
true, host_writable)) {
if (write_fault)
- emulate = true;
+ ret = RET_PF_EMULATE;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}

if (unlikely(is_mmio_spte(*sptep)))
- emulate = true;
+ ret = RET_PF_EMULATE;

pgprintk("%s: setting spte %llx\n", __func__, *sptep);
pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
@@ -2855,7 +2869,7 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,

kvm_release_pfn_clean(pfn);

- return emulate;
+ return ret;
}

static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
@@ -2994,17 +3008,16 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
* Do not cache the mmio info caused by writing the readonly gfn
* into the spte otherwise read access on readonly gfn also can
* caused mmio page fault and treat it as mmio access.
- * Return 1 to tell kvm to emulate it.
*/
if (pfn == KVM_PFN_ERR_RO_FAULT)
- return 1;
+ return RET_PF_EMULATE;

if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
- return 0;
+ return RET_PF_RETRY;
}

- return -EFAULT;
+ return RET_PF_EMULATE;
}

static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
@@ -3286,13 +3299,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
}

if (fast_page_fault(vcpu, v, level, error_code))
- return 0;
+ return RET_PF_RETRY;

mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();

if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
- return 0;
+ return RET_PF_RETRY;

if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
return r;
@@ -3312,7 +3325,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
- return 0;
+ return RET_PF_RETRY;
}

@@ -3659,54 +3672,38 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
return reserved;
}

-/*
- * Return values of handle_mmio_page_fault:
- * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
- * directly.
- * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
- * fault path update the mmio spte.
- * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
- */
-enum {
- RET_MMIO_PF_EMULATE = 1,
- RET_MMIO_PF_INVALID = 2,
- RET_MMIO_PF_RETRY = 0,
- RET_MMIO_PF_BUG = -1
-};
-
static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
u64 spte;
bool reserved;

if (mmio_info_in_cache(vcpu, addr, direct))
- return RET_MMIO_PF_EMULATE;
+ return RET_PF_EMULATE;

reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
if (WARN_ON(reserved))
- return RET_MMIO_PF_BUG;
+ return -EINVAL;

if (is_mmio_spte(spte)) {
gfn_t gfn = get_mmio_spte_gfn(spte);
unsigned access = get_mmio_spte_access(spte);

if (!check_mmio_spte(vcpu, spte))
- return RET_MMIO_PF_INVALID;
+ return RET_PF_INVALID;

if (direct)
addr = 0;

trace_handle_mmio_page_fault(addr, gfn, access);
vcpu_cache_mmio_info(vcpu, addr, gfn, access);
- return RET_MMIO_PF_EMULATE;
+ return RET_PF_EMULATE;
}

/*
* If the page table is zapped by other cpus, let CPU fault again on
* the address.
*/
- return RET_MMIO_PF_RETRY;
+ return RET_PF_RETRY;
}
EXPORT_SYMBOL_GPL(handle_mmio_page_fault);

@@ -3756,7 +3753,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);

if (page_fault_handle_page_track(vcpu, error_code, gfn))
- return 1;
+ return RET_PF_EMULATE;

r = mmu_topup_memory_caches(vcpu);
if (r)
@@ -3877,7 +3874,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

if (page_fault_handle_page_track(vcpu, error_code, gfn))
- return 1;
+ return RET_PF_EMULATE;

r = mmu_topup_memory_caches(vcpu);
if (r)
@@ -3894,13 +3891,13 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
}

if (fast_page_fault(vcpu, gpa, level, error_code))
- return 0;
+ return RET_PF_RETRY;

mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();

if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
- return 0;
+ return RET_PF_RETRY;

if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
return r;
@@ -3920,7 +3917,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
- return 0;
+ return RET_PF_RETRY;
}

static void nonpaging_init_context(struct kvm_vcpu *vcpu,
@@ -4919,25 +4916,25 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
vcpu->arch.gpa_val = cr2;
}

+ r = RET_PF_INVALID;
if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, cr2, direct);
- if (r == RET_MMIO_PF_EMULATE) {
+ if (r == RET_PF_EMULATE) {
emulation_type = 0;
goto emulate;
}
- if (r == RET_MMIO_PF_RETRY)
- return 1;
- if (r < 0)
- return r;
- /* Must be RET_MMIO_PF_INVALID. */
}

- r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
- false);
+ if (r == RET_PF_INVALID) {
+ r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
+ false);
+ WARN_ON(r == RET_PF_INVALID);
+ }
+
+ if (r == RET_PF_RETRY)
+ return 1;
if (r < 0)
return r;
- if (!r)
- return 1;

/*
* Before emulating the instruction, check if the error code
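
The net effect of the enum refactor: every fault path now reports one of three outcomes, and only the top level translates them into KVM's historical calling convention (1 = resume the guest, negative = hard error, otherwise emulate). A condensed model of that translation, with the emulator call passed in as a stand-in:

    /* Sketch: how kvm_mmu_page_fault() acts on a RET_PF_* result.
     * `emulate` stands in for the instruction-emulation path. */
    enum { RET_PF_RETRY = 0, RET_PF_EMULATE = 1, RET_PF_INVALID = 2 };

    static int translate(int r, int (*emulate)(void))
    {
            if (r == RET_PF_RETRY)
                    return 1;       /* re-enter the guest; CPU faults again */
            if (r < 0)
                    return r;       /* hard error (e.g. -EINVAL), propagate */
            return emulate();       /* RET_PF_EMULATE */
    }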
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index f18d1f8d332b..5abae72266b7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -593,7 +593,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct kvm_mmu_page *sp = NULL;
struct kvm_shadow_walk_iterator it;
unsigned direct_access, access = gw->pt_access;
- int top_level, emulate;
+ int top_level, ret;

direct_access = gw->pte_access;

@@ -659,15 +659,15 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
}

clear_sp_write_flooding_count(it.sptep);
- emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
- it.level, gw->gfn, pfn, prefault, map_writable);
+ ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
+ it.level, gw->gfn, pfn, prefault, map_writable);
FNAME(pte_prefetch)(vcpu, gw, it.sptep);

- return emulate;
+ return ret;

out_gpte_changed:
kvm_release_pfn_clean(pfn);
- return 0;
+ return RET_PF_RETRY;
}

/*
@@ -762,12 +762,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
if (!prefault)
inject_page_fault(vcpu, &walker.fault);

- return 0;
+ return RET_PF_RETRY;
}

if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
shadow_page_table_clear_flood(vcpu, addr);
- return 1;
+ return RET_PF_EMULATE;
}

vcpu->arch.write_fault_to_shadow_pgtable = false;
@@ -789,7 +789,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,

if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
&map_writable))
- return 0;
+ return RET_PF_RETRY;

if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
return r;
@@ -834,7 +834,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
- return 0;
+ return RET_PF_RETRY;
}

static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e0bc3ad0f6cd..9fb0daf628cb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
+#include <asm/microcode.h>
#include <asm/nospec-branch.h>

#include <asm/virtext.h>
@@ -5015,7 +5016,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
* being speculatively taken.
*/
if (svm->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

asm volatile (
"push %%" _ASM_BP "; \n\t"
@@ -5124,11 +5125,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
- rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

if (svm->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5ffde16253cb..315fccb2684b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,6 +51,7 @@
#include <asm/apic.h>
#include <asm/irq_remapping.h>
#include <asm/mmu_context.h>
+#include <asm/microcode.h>
#include <asm/nospec-branch.h>

#include "trace.h"
@@ -9431,7 +9432,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* being speculatively taken.
*/
if (vmx->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);

vmx->__launched = vmx->loaded_vmcs->launched;
asm(
@@ -9566,11 +9567,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
- rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

if (vmx->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
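
Both the SVM and VMX hunks follow the same save/restore discipline around guest entry: install the guest's SPEC_CTRL value just before VM-enter, and on exit re-read the (possibly guest-modified) MSR before clearing it, using the native_* accessors so no paravirt or tracing code runs in the unsafe window. In outline (a sketch only; enter_guest() and msr_is_intercepted() are stand-ins for the VM-enter sequence and msr_write_intercepted()):

    static void run_guest_with_spec_ctrl(u64 *guest_spec_ctrl,
                                         void (*enter_guest)(void),
                                         bool (*msr_is_intercepted)(void))
    {
            if (*guest_spec_ctrl)
                    native_wrmsrl(MSR_IA32_SPEC_CTRL, *guest_spec_ctrl);

            enter_guest();          /* VM-enter ... VM-exit */

            /* The guest may have written the MSR directly; re-read it
             * before stomping on it, or its value would be lost. */
            if (unlikely(!msr_is_intercepted()))
                    *guest_spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

            if (*guest_spec_ctrl)
                    native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
    }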
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0dcd7bf45dc1..b9afb4784d12 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7482,13 +7482,13 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);

int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
- if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+ if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
/*
* When EFER.LME and CR0.PG are set, the processor is in
* 64-bit mode (though maybe in a 32-bit code segment).
* CR4.PAE and EFER.LMA must be set.
*/
- if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+ if (!(sregs->cr4 & X86_CR4_PAE)
|| !(sregs->efer & EFER_LMA))
return -EINVAL;
} else {
@@ -7821,6 +7821,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)

void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
+ kvm_lapic_reset(vcpu, init_event);
+
vcpu->arch.hflags = 0;

vcpu->arch.smi_pending = 0;
@@ -8249,10 +8251,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
return r;
}

- if (!size) {
- r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
- WARN_ON(r < 0);
- }
+ if (!size)
+ vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);

return 0;
}
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index b9283cc27622..476d810639a8 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)

for_each_possible_cpu(cpu)
setup_cpu_entry_area(cpu);
+
+ /*
+ * This is the last essential update to swapper_pgdir which needs
+ * to be synchronized to initial_page_table on 32bit.
+ */
+ sync_initial_page_table();
}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 135c9a7898c7..3141e67ec24c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base)
}
#endif /* CONFIG_HIGHMEM */

+void __init sync_initial_page_table(void)
+{
+ clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ KERNEL_PGD_PTRS);
+
+ /*
+ * sync back low identity map too. It is used for example
+ * in the 32-bit EFI stub.
+ */
+ clone_pgd_range(initial_page_table,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+}
+
void __init native_pagetable_init(void)
{
unsigned long pfn, va;
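
sync_initial_page_table() simply copies PGD slots: the kernel half of swapper_pg_dir into the same slots of initial_page_table, plus the low identity-map slots (aliased from the kernel range) used by the 32-bit EFI stub. clone_pgd_range() itself is essentially a typed memcpy:

    /* Sketch: clone_pgd_range() as it appears in asm/pgtable.h is
     * effectively this one-line copy of PGD entries. */
    static inline void clone_pgd_range_sketch(pgd_t *dst, const pgd_t *src,
                                              int count)
    {
            memcpy(dst, src, count * sizeof(pgd_t));
    }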
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 86676cec99a1..09dd7f3cf621 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -79,7 +79,7 @@ static void intel_mid_power_off(void)

static void intel_mid_reboot(void)
{
- intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
}

static unsigned long __init intel_mid_calibrate_tsc(void)
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 92bf5ecb6baf..3e3a58ea669e 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,12 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/tick.h>
+#include <linux/percpu-defs.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/events.h>

+#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/fixmap.h>
@@ -15,6 +18,8 @@
#include "mmu.h"
#include "pmu.h"

+static DEFINE_PER_CPU(u64, spec_ctrl);
+
void xen_arch_pre_suspend(void)
{
if (xen_pv_domain())
@@ -31,6 +36,9 @@ void xen_arch_post_suspend(int cancelled)

static void xen_vcpu_notify_restore(void *data)
{
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+ wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+
/* Boot processor notified via generic timekeeping_resume() */
if (smp_processor_id() == 0)
return;
@@ -40,7 +48,15 @@ static void xen_vcpu_notify_restore(void *data)

static void xen_vcpu_notify_suspend(void *data)
{
+ u64 tmp;
+
tick_suspend_local();
+
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
+ rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+ this_cpu_write(spec_ctrl, tmp);
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ }
}

void xen_arch_resume(void)
diff --git a/block/blk-core.c b/block/blk-core.c
index 95b7ea996ac2..c01f4907dbbc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2277,7 +2277,7 @@ blk_qc_t submit_bio(struct bio *bio)
unsigned int count;

if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
- count = queue_logical_block_size(bio->bi_disk->queue);
+ count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
else
count = bio_sectors(bio);
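
The accounting fix converts bytes to sectors: queue_logical_block_size() returns bytes, while the counter expects 512-byte sectors, hence the >> 9. For example, a 4096-byte logical block is 4096 >> 9 = 8 sectors; the unpatched code counted it as 4096.

    /* Sketch: bytes -> 512-byte sectors, as in the submit_bio() fix. */
    static inline unsigned int bytes_to_sectors(unsigned int bytes)
    {
            return bytes >> 9;
    }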
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b60798a30ea2..f1fb126a3be5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -638,7 +638,6 @@ static void __blk_mq_requeue_request(struct request *rq)

trace_block_rq_requeue(q, rq);
wbt_requeue(q->rq_wb, &rq->issue_stat);
- blk_mq_sched_requeue_request(rq);

if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -650,6 +649,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
__blk_mq_requeue_request(rq);

+ /* this request will be re-inserted to io scheduler queue */
+ blk_mq_sched_requeue_request(rq);
+
BUG_ON(blk_queued_rq(rq));
blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index f58cab82105b..09cd5cf2e459 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -814,6 +814,7 @@ static struct elevator_type kyber_sched = {
.limit_depth = kyber_limit_depth,
.prepare_request = kyber_prepare_request,
.finish_request = kyber_finish_request,
+ .requeue_request = kyber_finish_request,
.completed_request = kyber_completed_request,
.dispatch_request = kyber_dispatch_request,
.has_work = kyber_has_work,
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 4d0979e02a28..b6d58cc58f5f 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -66,10 +66,37 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
return 0;
}
#endif
+static int set_gbl_term_list(const struct dmi_system_id *id)
+{
+ acpi_gbl_parse_table_as_term_list = 1;
+ return 0;
+}

-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
+static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
+ /*
+ * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C
+ * mode.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=198515
+ */
+ {
+ .callback = set_gbl_term_list,
+ .ident = "Dell Precision M5530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"),
+ },
+ },
+ {
+ .callback = set_gbl_term_list,
+ .ident = "Dell XPS 15 9570",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
+ },
+ },
/*
* Invoke DSDT corruption work-around on all Toshiba Satellite.
+ * DSDT will be copied to memory.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
*/
{
@@ -83,7 +110,7 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
{}
};
#else
-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
+static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
{}
};
#endif
@@ -1001,11 +1028,8 @@ void __init acpi_early_init(void)

acpi_permanent_mmap = true;

- /*
- * If the machine falls into the DMI check table,
- * DSDT will be copied to memory
- */
- dmi_check_system(dsdt_dmi_table);
+ /* Check machine-specific quirks */
+ dmi_check_system(acpi_quirks_dmi_table);

status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d54c3f6f728c..673698c7b143 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,6 +21,7 @@
*
*/

+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
@@ -381,6 +382,21 @@ static const struct usb_device_id blacklist_table[] = {
{ } /* Terminating entry */
};

+/* The Bluetooth USB module build into some devices needs to be reset on resume,
+ * this is a problem with the platform (likely shutting off all power) not with
+ * the module itself. So we use a DMI list to match known broken platforms.
+ */
+static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+ {
+ /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+ },
+ },
+ {}
+};
+
#define BTUSB_MAX_ISOC_FRAMES 10

#define BTUSB_INTR_RUNNING 0
@@ -3013,6 +3029,9 @@ static int btusb_probe(struct usb_interface *intf,
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;

+ if (dmi_check_system(btusb_needs_reset_resume_table))
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
if (err)
@@ -3099,12 +3118,6 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
-
- /* QCA Rome devices lose their updated firmware over suspend,
- * but the USB hub doesn't notice any status change.
- * explicitly request a device reset on resume.
- */
- interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}

#ifdef CONFIG_BT_HCIBTUSB_RTL
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 4d1dc8b46877..f95b9c75175b 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
size_t count)
{
int size = 0;
- int expected;
+ u32 expected;

if (!chip)
return -EBUSY;
@@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
}

expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if (expected > count) {
+ if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1d6729be4cd6..3cec403a80b3 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -1228,6 +1228,10 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
break;

recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+ if (recd > num_bytes) {
+ total = -EFAULT;
+ break;
+ }

rlength = be32_to_cpu(tpm_cmd.header.out.length);
if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
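
The TPM fixes in this batch share one theme: length fields in a TPM response come from the device and must be bounds-checked before use, as unsigned values rather than int, so a hostile or buggy chip cannot trigger an under- or overflow. The check pattern, condensed (TPM_HEADER_SIZE is 10 bytes in the kernel's TPM core):

    /* Sketch: validate a device-supplied response length against the
     * caller's buffer. u32, not int, so a huge value cannot wrap negative. */
    static bool tpm_len_ok(u32 expected, size_t count)
    {
            return expected >= TPM_HEADER_SIZE && expected <= count;
    }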
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
|
|
index e1a41b788f08..44a3d16231f6 100644
|
|
--- a/drivers/char/tpm/tpm2-cmd.c
|
|
+++ b/drivers/char/tpm/tpm2-cmd.c
|
|
@@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
|
|
if (!rc) {
|
|
data_len = be16_to_cpup(
|
|
(__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
|
|
+ if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
|
|
+ rc = -EFAULT;
|
|
+ goto out;
|
|
+ }
|
|
|
|
rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
|
|
->header.out.length);
|
|
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
|
|
index 79d6bbb58e39..d5b44cadac56 100644
|
|
--- a/drivers/char/tpm/tpm_i2c_infineon.c
|
|
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
|
|
@@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
{
|
|
int size = 0;
|
|
- int expected, status;
|
|
+ int status;
|
|
+ u32 expected;
|
|
|
|
if (count < TPM_HEADER_SIZE) {
|
|
size = -EIO;
|
|
@@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
}
|
|
|
|
expected = be32_to_cpu(*(__be32 *)(buf + 2));
|
|
- if ((size_t) expected > count) {
|
|
+ if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
|
|
size = -EIO;
|
|
goto out;
|
|
}
|
|
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
index c6428771841f..caa86b19c76d 100644
|
|
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
|
|
@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
struct device *dev = chip->dev.parent;
|
|
struct i2c_client *client = to_i2c_client(dev);
|
|
s32 rc;
|
|
- int expected, status, burst_count, retries, size = 0;
|
|
+ int status;
|
|
+ int burst_count;
|
|
+ int retries;
|
|
+ int size = 0;
|
|
+ u32 expected;
|
|
|
|
if (count < TPM_HEADER_SIZE) {
|
|
i2c_nuvoton_ready(chip); /* return to idle */
|
|
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
|
|
* to machine native
|
|
*/
|
|
expected = be32_to_cpu(*(__be32 *) (buf + 2));
|
|
- if (expected > count) {
|
|
+ if (expected > count || expected < size) {
|
|
dev_err(dev, "%s() expected > count\n", __func__);
|
|
size = -EIO;
|
|
continue;
|
|
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7e55aa9ce680..ebd0e75a3e4d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -223,7 +223,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
 }

 static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
-			u8 *value)
+			const u8 *value)
 {
 	struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);

diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 63bc6c3b949e..083578b2517e 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -202,7 +202,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
 	int size = 0;
-	int expected, status;
+	int status;
+	u32 expected;

 	if (count < TPM_HEADER_SIZE) {
 		size = -EIO;
@@ -217,7 +218,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	}

 	expected = be32_to_cpu(*(__be32 *) (buf + 2));
-	if (expected > count) {
+	if (expected > count || expected < TPM_HEADER_SIZE) {
 		size = -EIO;
 		goto out;
 	}
@@ -252,7 +253,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
-static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
 {
 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
 	int rc, status, burstcnt;
@@ -343,7 +344,7 @@ static void disable_interrupts(struct tpm_chip *chip)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
-static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
 {
 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
 	int rc;
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index e2212f021a02..6bbac319ff3b 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -98,7 +98,7 @@ struct tpm_tis_phy_ops {
 	int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
 			  u8 *result);
 	int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
-			   u8 *value);
+			   const u8 *value);
 	int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
 	int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
 	int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
@@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
 }

 static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
-				      u16 len, u8 *value)
+				      u16 len, const u8 *value)
 {
 	return data->phy_ops->write_bytes(data, addr, len, value);
 }
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 88fe72ae967f..8ab0bd8445f6 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -46,9 +46,7 @@
 struct tpm_tis_spi_phy {
 	struct tpm_tis_data priv;
 	struct spi_device *spi_device;
-
-	u8 tx_buf[4];
-	u8 rx_buf[4];
+	u8 *iobuf;
 };

 static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -57,7 +55,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
 }

 static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
-				u8 *buffer, u8 direction)
+				u8 *in, const u8 *out)
 {
 	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
 	int ret = 0;
@@ -71,14 +69,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 	while (len) {
 		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);

-		phy->tx_buf[0] = direction | (transfer_len - 1);
-		phy->tx_buf[1] = 0xd4;
-		phy->tx_buf[2] = addr >> 8;
-		phy->tx_buf[3] = addr;
+		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+		phy->iobuf[1] = 0xd4;
+		phy->iobuf[2] = addr >> 8;
+		phy->iobuf[3] = addr;

 		memset(&spi_xfer, 0, sizeof(spi_xfer));
-		spi_xfer.tx_buf = phy->tx_buf;
-		spi_xfer.rx_buf = phy->rx_buf;
+		spi_xfer.tx_buf = phy->iobuf;
+		spi_xfer.rx_buf = phy->iobuf;
 		spi_xfer.len = 4;
 		spi_xfer.cs_change = 1;

@@ -88,9 +86,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 		if (ret < 0)
 			goto exit;

-		if ((phy->rx_buf[3] & 0x01) == 0) {
+		if ((phy->iobuf[3] & 0x01) == 0) {
 			// handle SPI wait states
-			phy->tx_buf[0] = 0;
+			phy->iobuf[0] = 0;

 			for (i = 0; i < TPM_RETRY; i++) {
 				spi_xfer.len = 1;
@@ -99,7 +97,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 				ret = spi_sync_locked(phy->spi_device, &m);
 				if (ret < 0)
 					goto exit;
-				if (phy->rx_buf[0] & 0x01)
+				if (phy->iobuf[0] & 0x01)
 					break;
 			}

@@ -113,12 +111,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 		spi_xfer.len = transfer_len;
 		spi_xfer.delay_usecs = 5;

-		if (direction) {
+		if (in) {
 			spi_xfer.tx_buf = NULL;
-			spi_xfer.rx_buf = buffer;
-		} else {
-			spi_xfer.tx_buf = buffer;
+		} else if (out) {
 			spi_xfer.rx_buf = NULL;
+			memcpy(phy->iobuf, out, transfer_len);
+			out += transfer_len;
 		}

 		spi_message_init(&m);
@@ -127,8 +125,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 		if (ret < 0)
 			goto exit;

+		if (in) {
+			memcpy(in, phy->iobuf, transfer_len);
+			in += transfer_len;
+		}
+
 		len -= transfer_len;
-		buffer += transfer_len;
 	}

 exit:
@@ -139,13 +141,13 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
 				  u16 len, u8 *result)
 {
-	return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+	return tpm_tis_spi_transfer(data, addr, len, result, NULL);
 }

 static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
-				   u16 len, u8 *value)
+				   u16 len, const u8 *value)
 {
-	return tpm_tis_spi_transfer(data, addr, len, value, 0);
+	return tpm_tis_spi_transfer(data, addr, len, NULL, value);
 }

 static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
@@ -194,6 +196,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev)

 	phy->spi_device = dev;

+	phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+	if (!phy->iobuf)
+		return -ENOMEM;
+
 	return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
 				 NULL);
 }
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 7b596fa38ad2..6bebc1f9f55a 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
 static int s3c_cpufreq_init(struct cpufreq_policy *policy)
 {
 	policy->clk = clk_arm;
-	return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
+
+	policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+
+	if (ftab)
+		return cpufreq_table_validate_and_show(policy, ftab);
+
+	return 0;
 }

 static int __init s3c_cpufreq_initclks(void)
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index cd9d6ba03579..0dc0d595c47c 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
 * sbridge structs
 */

-#define NUM_CHANNELS	4	/* Max channels per MC */
+#define NUM_CHANNELS	6	/* Max channels per MC */
 #define MAX_DIMMS	3	/* Max DIMMS per channel */
 #define KNL_MAX_CHAS	38	/* KNL max num. of Cache Home Agents */
 #define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6bf093cef958..e058c209bbcf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8522,6 +8522,10 @@ static int remove_and_add_spares(struct mddev *mddev,
 	int removed = 0;
 	bool remove_some = false;

+	if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+		/* Mustn't remove devices when resync thread is running */
+		return 0;
+
 	rdev_for_each(rdev, mddev) {
 		if ((this == NULL || rdev == this) &&
 		    rdev->raid_disk >= 0 &&
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 50bce68ffd66..65d157fe76d1 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
 * New users must use I2C client binding directly!
 */
 struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
-		struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
+				      struct i2c_adapter *i2c,
+				      struct i2c_adapter **tuner_i2c_adapter)
 {
 	struct i2c_client *client;
 	struct i2c_board_info board_info;
-	struct m88ds3103_platform_data pdata;
+	struct m88ds3103_platform_data pdata = {};

 	pdata.clk = cfg->clock;
 	pdata.i2c_wr_max = cfg->i2c_wr_max;
@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
 	case M88DS3103_CHIP_ID:
 		break;
 	default:
+		ret = -ENODEV;
+		dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
 		goto err_kfree;
 	}

diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 35026795be28..fa41d9422d57 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = {

 static const struct dw_mci_drv_data exynos_drv_data = {
 	.caps = exynos_dwmmc_caps,
+	.num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
 	.init = dw_mci_exynos_priv_init,
 	.set_ios = dw_mci_exynos_set_ios,
 	.parse_dt = dw_mci_exynos_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 64cda84b2302..864e7fcaffaf 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
 	if (priv->ctrl_id < 0)
 		priv->ctrl_id = 0;

+	if (priv->ctrl_id >= TIMING_MODE)
+		return -EINVAL;
+
 	host->priv = priv;
 	return 0;
 }
@@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)

 static const struct dw_mci_drv_data hi6220_data = {
 	.caps = dw_mci_hi6220_caps,
+	.num_caps = ARRAY_SIZE(dw_mci_hi6220_caps),
 	.switch_voltage = dw_mci_hi6220_switch_voltage,
 	.set_ios = dw_mci_hi6220_set_ios,
 	.parse_dt = dw_mci_hi6220_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index a3f1c2b30145..339295212935 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = {

 static const struct dw_mci_drv_data rk3288_drv_data = {
 	.caps = dw_mci_rk3288_dwmmc_caps,
+	.num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
 	.set_ios = dw_mci_rk3288_set_ios,
 	.execute_tuning = dw_mci_rk3288_execute_tuning,
 	.parse_dt = dw_mci_rk3288_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c
index d38e94ae2b85..c06b5393312f 100644
--- a/drivers/mmc/host/dw_mmc-zx.c
+++ b/drivers/mmc/host/dw_mmc-zx.c
@@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = {

 static const struct dw_mci_drv_data zx_drv_data = {
 	.caps = zx_dwmmc_caps,
+	.num_caps = ARRAY_SIZE(zx_dwmmc_caps),
 	.execute_tuning = dw_mci_zx_execute_tuning,
 	.prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning,
 	.parse_dt = dw_mci_zx_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 4f2806720c5c..60341a814055 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
 {
 	struct dw_mci *host = s->private;

+	pm_runtime_get_sync(host->dev);
+
 	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
 	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
 	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
@@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
 	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
 	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

+	pm_runtime_put_autosuspend(host->dev);
+
 	return 0;
 }

@@ -2758,12 +2762,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }

+static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
+{
+	struct dw_mci *host = slot->host;
+	const struct dw_mci_drv_data *drv_data = host->drv_data;
+	struct mmc_host *mmc = slot->mmc;
+	int ctrl_id;
+
+	if (host->pdata->caps)
+		mmc->caps = host->pdata->caps;
+
+	/*
+	 * Support MMC_CAP_ERASE by default.
+	 * It needs to use trim/discard/erase commands.
+	 */
+	mmc->caps |= MMC_CAP_ERASE;
+
+	if (host->pdata->pm_caps)
+		mmc->pm_caps = host->pdata->pm_caps;
+
+	if (host->dev->of_node) {
+		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
+		if (ctrl_id < 0)
+			ctrl_id = 0;
+	} else {
+		ctrl_id = to_platform_device(host->dev)->id;
+	}
+
+	if (drv_data && drv_data->caps) {
+		if (ctrl_id >= drv_data->num_caps) {
+			dev_err(host->dev, "invalid controller id %d\n",
+				ctrl_id);
+			return -EINVAL;
+		}
+		mmc->caps |= drv_data->caps[ctrl_id];
+	}
+
+	if (host->pdata->caps2)
+		mmc->caps2 = host->pdata->caps2;
+
+	/* Process SDIO IRQs through the sdio_irq_work. */
+	if (mmc->caps & MMC_CAP_SDIO_IRQ)
+		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
+	return 0;
+}
+
 static int dw_mci_init_slot(struct dw_mci *host)
 {
 	struct mmc_host *mmc;
 	struct dw_mci_slot *slot;
-	const struct dw_mci_drv_data *drv_data = host->drv_data;
-	int ctrl_id, ret;
+	int ret;
 	u32 freq[2];

 	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
@@ -2797,38 +2846,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
 	if (!mmc->ocr_avail)
 		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

-	if (host->pdata->caps)
-		mmc->caps = host->pdata->caps;
-
-	/*
-	 * Support MMC_CAP_ERASE by default.
-	 * It needs to use trim/discard/erase commands.
-	 */
-	mmc->caps |= MMC_CAP_ERASE;
-
-	if (host->pdata->pm_caps)
-		mmc->pm_caps = host->pdata->pm_caps;
-
-	if (host->dev->of_node) {
-		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
-		if (ctrl_id < 0)
-			ctrl_id = 0;
-	} else {
-		ctrl_id = to_platform_device(host->dev)->id;
-	}
-	if (drv_data && drv_data->caps)
-		mmc->caps |= drv_data->caps[ctrl_id];
-
-	if (host->pdata->caps2)
-		mmc->caps2 = host->pdata->caps2;
-
 	ret = mmc_of_parse(mmc);
 	if (ret)
 		goto err_host_allocated;

-	/* Process SDIO IRQs through the sdio_irq_work. */
-	if (mmc->caps & MMC_CAP_SDIO_IRQ)
-		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+	ret = dw_mci_init_slot_caps(slot);
+	if (ret)
+		goto err_host_allocated;

 	/* Useful defaults if platform data is unset. */
 	if (host->use_dma == TRANS_MODE_IDMAC) {
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 34474ad731aa..044c87ce6725 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -542,6 +542,7 @@ struct dw_mci_slot {
 /**
 * dw_mci driver data - dw-mshc implementation specific driver data.
 * @caps: mmc subsystem specified capabilities of the controller(s).
+ * @num_caps: number of capabilities specified by @caps.
 * @init: early implementation specific initialization.
 * @set_ios: handle bus specific extensions.
 * @parse_dt: parse implementation specific device tree properties.
@@ -553,6 +554,7 @@ struct dw_mci_slot {
 */
 struct dw_mci_drv_data {
 	unsigned long *caps;
+	u32 num_caps;
 	int (*init)(struct dw_mci *host);
 	void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
 	int (*parse_dt)(struct dw_mci *host);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 67d787fa3306..070f5da06fd2 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -594,9 +594,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot)
 	slot->chip->rpm_retune = intel_host->d3_retune;
 }

-static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+	int err = sdhci_execute_tuning(mmc, opcode);
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (err)
+		return err;
+
+	/*
+	 * Tuning can leave the IP in an active state (Buffer Read Enable bit
+	 * set) which prevents the entry to low power states (i.e. S0i3). Data
+	 * reset will clear it.
+	 */
+	sdhci_reset(host, SDHCI_RESET_DATA);
+
+	return 0;
+}
+
+static void byt_probe_slot(struct sdhci_pci_slot *slot)
 {
+	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
+
 	byt_read_dsm(slot);
+
+	ops->execute_tuning = intel_execute_tuning;
+}
+
+static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+	byt_probe_slot(slot);
 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
 				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
 				 MMC_CAP_CMD_DURING_TFR |
@@ -651,7 +678,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 {
 	int err;

-	byt_read_dsm(slot);
+	byt_probe_slot(slot);

 	err = ni_set_max_freq(slot);
 	if (err)
@@ -664,7 +691,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)

 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 {
-	byt_read_dsm(slot);
+	byt_probe_slot(slot);
 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
 				 MMC_CAP_WAIT_WHILE_BUSY;
 	return 0;
@@ -672,7 +699,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)

 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
 {
-	byt_read_dsm(slot);
+	byt_probe_slot(slot);
 	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
 				 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
 	slot->cd_idx = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 608693d11bd7..75c4455e2271 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -595,7 +595,7 @@ static void xgbe_isr_task(unsigned long data)

 		reissue_mask = 1 << 0;
 		if (!pdata->per_channel_irq)
-			reissue_mask |= 0xffff < 4;
+			reissue_mask |= 0xffff << 4;

 		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
 	}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 3e5833cf1fab..eb23f9ba1a9a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
 	struct net_device *netdev = pdata->netdev;
 	int ret = 0;

+	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
 	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
 	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 879a9c4cef59..29f600fd6977 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1877,6 +1877,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 				      ixgbe_rx_pg_size(rx_ring),
 				      DMA_FROM_DEVICE,
 				      IXGBE_RX_DMA_ATTR);
+	} else if (ring_uses_build_skb(rx_ring)) {
+		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      IXGBE_CB(skb)->dma,
+					      offset,
+					      skb_headlen(skb),
+					      DMA_FROM_DEVICE);
 	} else {
 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 3cdb932cae76..a863572882b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1918,13 +1918,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 	param->wq.linear = 1;
 }

-static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+				      struct mlx5e_rq_param *param)
 {
 	void *rqc = param->rqc;
 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+
+	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
 }

 static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2778,6 +2781,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
 			       struct mlx5e_cq *cq,
 			       struct mlx5e_cq_param *param)
 {
+	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+	param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
+
 	return mlx5e_alloc_cq_common(mdev, param, cq);
 }

@@ -2789,7 +2795,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
 	struct mlx5e_cq *cq = &drop_rq->cq;
 	int err;

-	mlx5e_build_drop_rq_param(&rq_param);
+	mlx5e_build_drop_rq_param(mdev, &rq_param);

 	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 91b1b0938931..3476f594c195 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,6 +36,7 @@
 #include <linux/tcp.h>
 #include <linux/bpf_trace.h>
 #include <net/busy_poll.h>
+#include <net/ip6_checksum.h>
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
@@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 	return true;
 }

+static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
+{
+	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
+		     (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
+
+	tcp->check = 0;
+	tcp->psh = get_cqe_lro_tcppsh(cqe);
+
+	if (tcp_ack) {
+		tcp->ack = 1;
+		tcp->ack_seq = cqe->lro_ack_seq_num;
+		tcp->window = cqe->lro_tcp_win;
+	}
+}
+
 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 				 u32 cqe_bcnt)
 {
 	struct ethhdr *eth = (struct ethhdr *)(skb->data);
 	struct tcphdr *tcp;
 	int network_depth = 0;
+	__wsum check;
 	__be16 proto;
 	u16 tot_len;
 	void *ip_p;

-	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
-	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
-		(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
-
 	skb->mac_len = ETH_HLEN;
 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);

@@ -577,23 +591,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 		ipv4->check = 0;
 		ipv4->check = ip_fast_csum((unsigned char *)ipv4,
 					   ipv4->ihl);
+
+		mlx5e_lro_update_tcp_hdr(cqe, tcp);
+		check = csum_partial(tcp, tcp->doff * 4,
+				     csum_unfold((__force __sum16)cqe->check_sum));
+		/* Almost done, don't forget the pseudo header */
+		tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
+					       tot_len - sizeof(struct iphdr),
+					       IPPROTO_TCP, check);
 	} else {
+		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
 		struct ipv6hdr *ipv6 = ip_p;

 		tcp = ip_p + sizeof(struct ipv6hdr);
 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

 		ipv6->hop_limit = cqe->lro_min_ttl;
-		ipv6->payload_len = cpu_to_be16(tot_len -
-						sizeof(struct ipv6hdr));
-	}
-
-	tcp->psh = get_cqe_lro_tcppsh(cqe);
-
-	if (tcp_ack) {
-		tcp->ack = 1;
-		tcp->ack_seq = cqe->lro_ack_seq_num;
-		tcp->window = cqe->lro_tcp_win;
+		ipv6->payload_len = cpu_to_be16(payload_len);
+
+		mlx5e_lro_update_tcp_hdr(cqe, tcp);
+		check = csum_partial(tcp, tcp->doff * 4,
+				     csum_unfold((__force __sum16)cqe->check_sum));
+		/* Almost done, don't forget the pseudo header */
+		tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
+					     IPPROTO_TCP, check);
 	}
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5a4608281f38..707976482c09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
 	if (iph->protocol != IPPROTO_UDP)
 		goto out;

-	udph = udp_hdr(skb);
+	/* Don't assume skb_transport_header() was set */
+	udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
 	if (udph->dest != htons(9))
 		goto out;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 1d6925d4369a..eea7f931cad3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -155,7 +155,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
 	default:
 		hlen = mlx5e_skb_l2_header_offset(skb);
 	}
-	return min_t(u16, hlen, skb->len);
+	return min_t(u16, hlen, skb_headlen(skb));
 }

 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7bef80676464..516e63244606 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -729,26 +729,29 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
 					      u32 tb_id)
 {
+	struct mlxsw_sp_fib *fib4;
+	struct mlxsw_sp_fib *fib6;
 	struct mlxsw_sp_vr *vr;
 	int err;

 	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
 	if (!vr)
 		return ERR_PTR(-EBUSY);
-	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
-	if (IS_ERR(vr->fib4))
-		return ERR_CAST(vr->fib4);
-	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
-	if (IS_ERR(vr->fib6)) {
-		err = PTR_ERR(vr->fib6);
+	fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+	if (IS_ERR(fib4))
+		return ERR_CAST(fib4);
+	fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
+	if (IS_ERR(fib6)) {
+		err = PTR_ERR(fib6);
 		goto err_fib6_create;
 	}
+	vr->fib4 = fib4;
+	vr->fib6 = fib6;
 	vr->tb_id = tb_id;
 	return vr;

 err_fib6_create:
-	mlxsw_sp_fib_destroy(vr->fib4);
-	vr->fib4 = NULL;
+	mlxsw_sp_fib_destroy(fib4);
 	return ERR_PTR(err);
 }

@@ -3029,6 +3032,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
 	int i;

+	if (!list_is_singular(&nh_grp->fib_list))
+		return;
+
 	for (i = 0; i < nh_grp->count; i++) {
 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f5863e5bec81..42a6afcaae03 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1098,6 +1098,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				     bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;

 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1107,9 +1108,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 			      mac, fid, action, local_port);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;

+out:
+	kfree(sfd_pl);
 	return err;
 }

@@ -1134,6 +1142,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 				       bool adding, bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;

 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1144,9 +1153,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
 				  lag_vid, lag_id);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;

+out:
+	kfree(sfd_pl);
 	return err;
 }

@@ -1191,6 +1207,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 				u16 fid, u16 mid, bool adding)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;

 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1200,7 +1217,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
+
+out:
 	kfree(sfd_pl);
 	return err;
 }
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index db8a4bcfc6c7..14b646b3b084 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1618,6 +1618,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 		q_idx = q_idx % cpsw->tx_ch_num;

 	txch = cpsw->txv[q_idx].ch;
+	txq = netdev_get_tx_queue(ndev, q_idx);
 	ret = cpsw_tx_packet_submit(priv, skb, txch);
 	if (unlikely(ret != 0)) {
 		cpsw_err(priv, tx_err, "desc submit failed\n");
@@ -1628,15 +1629,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 	 * tell the kernel to stop sending us tx frames.
 	 */
 	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
-		txq = netdev_get_tx_queue(ndev, q_idx);
 		netif_tx_stop_queue(txq);
+
+		/* Barrier, so that stop_queue visible to other cpus */
+		smp_mb__after_atomic();
+
+		if (cpdma_check_free_tx_desc(txch))
+			netif_tx_wake_queue(txq);
 	}

 	return NETDEV_TX_OK;
 fail:
 	ndev->stats.tx_dropped++;
-	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
 	netif_tx_stop_queue(txq);
+
+	/* Barrier, so that stop_queue visible to other cpus */
+	smp_mb__after_atomic();
+
+	if (cpdma_check_free_tx_desc(txch))
+		netif_tx_wake_queue(txq);
+
 	return NETDEV_TX_BUSY;
 }

diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 2b1e67bc1e73..3d860de5e342 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -842,7 +842,7 @@ void phy_start(struct phy_device *phydev)
 		break;
 	case PHY_HALTED:
 		/* make sure interrupts are re-enabled for the PHY */
-		if (phydev->irq != PHY_POLL) {
+		if (phy_interrupt_is_valid(phydev)) {
 			err = phy_enable_interrupts(phydev);
 			if (err < 0)
 				break;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 8c6b8918ec31..38cd2e8fae23 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3158,6 +3158,15 @@ ppp_connect_channel(struct channel *pch, int unit)
 		goto outl;

 	ppp_lock(ppp);
+	spin_lock_bh(&pch->downl);
+	if (!pch->chan) {
+		/* Don't connect unregistered channels */
+		spin_unlock_bh(&pch->downl);
+		ppp_unlock(ppp);
+		ret = -ENOTCONN;
+		goto outl;
+	}
+	spin_unlock_bh(&pch->downl);
 	if (pch->file.hdrlen > ppp->file.hdrlen)
 		ppp->file.hdrlen = pch->file.hdrlen;
 	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fa51b7b0e9ea..bc38d54e37b9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1315,6 +1315,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	else
 		*skb_xdp = 0;

+	preempt_disable();
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(tun->xdp_prog);
 	if (xdp_prog && !*skb_xdp) {
@@ -1333,9 +1334,11 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 			get_page(alloc_frag->page);
 			alloc_frag->offset += buflen;
 			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
+			xdp_do_flush_map();
 			if (err)
 				goto err_redirect;
 			rcu_read_unlock();
+			preempt_enable();
 			return NULL;
 		case XDP_TX:
 			xdp_xmit = true;
@@ -1357,6 +1360,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	skb = build_skb(buf, buflen);
 	if (!skb) {
 		rcu_read_unlock();
+		preempt_enable();
 		return ERR_PTR(-ENOMEM);
 	}

@@ -1369,10 +1373,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		skb->dev = tun->dev;
 		generic_xdp_tx(skb, xdp_prog);
 		rcu_read_unlock();
+		preempt_enable();
 		return NULL;
 	}

 	rcu_read_unlock();
+	preempt_enable();

 	return skb;

@@ -1380,6 +1386,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	put_page(alloc_frag->page);
 err_xdp:
 	rcu_read_unlock();
+	preempt_enable();
 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
 	return NULL;
 }
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7927e28f5336..6a785595b9b8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1995,8 +1995,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	}

 	/* Make sure NAPI is not using any XDP TX queues for RX. */
-	for (i = 0; i < vi->max_queue_pairs; i++)
-		napi_disable(&vi->rq[i].napi);
+	if (netif_running(dev))
+		for (i = 0; i < vi->max_queue_pairs; i++)
+			napi_disable(&vi->rq[i].napi);

 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
 	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
@@ -2015,7 +2016,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		}
 		if (old_prog)
 			bpf_prog_put(old_prog);
-		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+		if (netif_running(dev))
+			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
 	}

 	return 0;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 0d2e00ece804..f3c1d5245978 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg)
 			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
 				     0, NULL);
 			proto->restart_counter--;
-		} else
+		} else if (netif_carrier_ok(proto->dev))
+			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+				     0, NULL);
+		else
 			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
 				     0, NULL);
 		break;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 33d4431c2b4b..93a082e0bdd4 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -88,7 +88,6 @@ enum nvme_rdma_queue_flags {

 struct nvme_rdma_queue {
 	struct nvme_rdma_qe *rsp_ring;
-	atomic_t sig_count;
 	int queue_size;
 	size_t cmnd_capsule_len;
 	struct nvme_rdma_ctrl *ctrl;
@@ -521,7 +520,6 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
 		queue->cmnd_capsule_len = sizeof(struct nvme_command);

 	queue->queue_size = queue_size;
-	atomic_set(&queue->sig_count, 0);

 	queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
 			RDMA_PS_TCP, IB_QPT_RC);
@@ -1232,21 +1230,9 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 	nvme_end_request(rq, req->status, req->result);
 }

-/*
- * We want to signal completion at least every queue depth/2. This returns the
- * largest power of two that is not above half of (queue size + 1) to optimize
- * (avoid divisions).
- */
-static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
-{
-	int limit = 1 << ilog2((queue->queue_size + 1) / 2);
-
-	return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
-}
-
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
 		struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
-		struct ib_send_wr *first, bool flush)
+		struct ib_send_wr *first)
 {
 	struct ib_send_wr wr, *bad_wr;
 	int ret;
@@ -1255,31 +1241,12 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
 	sge->length = sizeof(struct nvme_command),
 	sge->lkey = queue->device->pd->local_dma_lkey;

-	qe->cqe.done = nvme_rdma_send_done;
-
 	wr.next = NULL;
 	wr.wr_cqe = &qe->cqe;
 	wr.sg_list = sge;
 	wr.num_sge = num_sge;
 	wr.opcode = IB_WR_SEND;
-	wr.send_flags = 0;
-
-	/*
-	 * Unsignalled send completions are another giant desaster in the
-	 * IB Verbs spec: If we don't regularly post signalled sends
-	 * the send queue will fill up and only a QP reset will rescue us.
-	 * Would have been way to obvious to handle this in hardware or
-	 * at least the RDMA stack..
-	 *
-	 * Always signal the flushes. The magic request used for the flush
-	 * sequencer is not allocated in our driver's tagset and it's
-	 * triggered to be freed by blk_cleanup_queue(). So we need to
-	 * always mark it as signaled to ensure that the "wr_cqe", which is
-	 * embedded in request's payload, is not freed when __ib_process_cq()
-	 * calls wr_cqe->done().
-	 */
-	if (nvme_rdma_queue_sig_limit(queue) || flush)
-		wr.send_flags |= IB_SEND_SIGNALED;
+	wr.send_flags = IB_SEND_SIGNALED;

 	if (first)
 		first->next = &wr;
@@ -1329,6 +1296,12 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
 	return queue->ctrl->tag_set.tags[queue_idx - 1];
 }

+static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	if (unlikely(wc->status != IB_WC_SUCCESS))
+		nvme_rdma_wr_error(cq, wc, "ASYNC");
+}
+
 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
@@ -1350,10 +1323,12 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	cmd->common.flags |= NVME_CMD_SGL_METABUF;
 	nvme_rdma_set_sg_null(cmd);

+	sqe->cqe.done = nvme_rdma_async_done;
+
 	ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
 			DMA_TO_DEVICE);

-	ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+	ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
 	WARN_ON_ONCE(ret);
 }

@@ -1639,7 +1614,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_rdma_qe *sqe = &req->sqe;
 	struct nvme_command *c = sqe->data;
-	bool flush = false;
 	struct ib_device *dev;
 	blk_status_t ret;
 	int err;
@@ -1668,13 +1642,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 		goto err;
 	}

+	sqe->cqe.done = nvme_rdma_send_done;
+
 	ib_dma_sync_single_for_device(dev, sqe->dma,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);

-	if (req_op(rq) == REQ_OP_FLUSH)
-		flush = true;
 	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+			req->mr->need_inval ? &req->reg_wr.wr : NULL);
 	if (unlikely(err)) {
 		nvme_rdma_unmap_data(queue, rq);
 		goto err;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index cae54f8320be..633e55c57b13 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -803,10 +803,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)

 	/*
 	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
-	 * hierarchies.
+	 * hierarchies. Note that some PCIe host implementations omit
+	 * the root ports entirely, in which case a downstream port on
+	 * a switch may become the root of the link state chain for all
+	 * its subordinate endpoints.
 	 */
 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
-	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
+	    !pdev->bus->parent->self) {
 		link->root = link;
 	} else {
 		struct pcie_link_state *parent;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 92dd4aef21a3..6b1e83539a9d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -580,6 +580,11 @@ struct qeth_cmd_buffer {
 	void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
 };

+static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
+{
+	return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
+}
+
 /**
 * definition of a qeth channel, used for read and write
 */
@@ -834,7 +839,7 @@ struct qeth_trap_id {
 */
 static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
 {
	return PFN_UP(end - 1) - PFN_DOWN(start);
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index e5833837b799..8727b9517de8 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -40,8 +40,40 @@ struct qeth_ipaddr {
 			unsigned int pfxlen;
 		} a6;
 	} u;
-
 };
+
+static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
+					 struct qeth_ipaddr *a2)
+{
+	if (a1->proto != a2->proto)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
+	return a1->u.a4.addr == a2->u.a4.addr;
+}
+
+static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
+					  struct qeth_ipaddr *a2)
+{
+	/* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
+	 * so 'proto' and 'addr' match for sure.
+	 *
+	 * For ucast:
+	 * - 'mac' is always 0.
+	 * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
+	 *   values are required to avoid mixups in takeover eligibility.
+	 *
+	 * For mcast,
+	 * - 'mac' is mapped from the IP, and thus always matches.
+	 * - 'mask'/'pfxlen' is always 0.
+	 */
+	if (a1->type != a2->type)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
+	return a1->u.a4.mask == a2->u.a4.mask;
+}
+
 static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
 {
 	u64 ret = 0;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 36dee176f8e2..96576e729222 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -149,6 +149,24 @@ int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
 	return -EINVAL;
 }

+static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
+						   struct qeth_ipaddr *query)
+{
+	u64 key = qeth_l3_ipaddr_hash(query);
+	struct qeth_ipaddr *addr;
+
+	if (query->is_multicast) {
+		hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
+			if (qeth_l3_addr_match_ip(addr, query))
+				return addr;
+	} else {
+		hash_for_each_possible(card->ip_htable, addr, hnode, key)
+			if (qeth_l3_addr_match_ip(addr, query))
+				return addr;
+	}
+	return NULL;
+}
+
 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
 {
 	int i, j;
@@ -202,34 +220,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
 	return rc;
 }

-inline int
-qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
-{
-	return addr1->proto == addr2->proto &&
-	       !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
-	       !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
-}
-
-static struct qeth_ipaddr *
-qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
-{
-	struct qeth_ipaddr *addr;
-
-	if (tmp_addr->is_multicast) {
-		hash_for_each_possible(card->ip_mc_htable, addr,
-				hnode, qeth_l3_ipaddr_hash(tmp_addr))
-			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
-				return addr;
-	} else {
-		hash_for_each_possible(card->ip_htable, addr,
-				hnode, qeth_l3_ipaddr_hash(tmp_addr))
-			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
-				return addr;
-	}
-
-	return NULL;
-}
-
 int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
 	int rc = 0;
@@ -244,23 +234,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}

-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
-	if (!addr)
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
 		return -ENOENT;

 	addr->ref_counter--;
-	if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
-				      addr->type == QETH_IP_TYPE_RXIP))
+	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
 		return rc;
 	if (addr->in_progress)
 		return -EINPROGRESS;

-	if (!qeth_card_hw_is_reachable(card)) {
-		addr->disp_flag = QETH_DISP_ADDR_DELETE;
-		return 0;
-	}
-
-	rc = qeth_l3_deregister_addr_entry(card, addr);
+	if (qeth_card_hw_is_reachable(card))
+		rc = qeth_l3_deregister_addr_entry(card, addr);

 	hash_del(&addr->hnode);
 	kfree(addr);
@@ -272,6 +257,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
 	int rc = 0;
 	struct qeth_ipaddr *addr;
+	char buf[40];

 	QETH_CARD_TEXT(card, 4, "addip");

@@ -282,8 +268,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}

-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
-	if (!addr) {
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (addr) {
+		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
+			return -EADDRINUSE;
+		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
+			addr->ref_counter++;
+			return 0;
+		}
+		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
+					 buf);
+		dev_warn(&card->gdev->dev,
+			 "Registering IP address %s failed\n", buf);
+		return -EADDRINUSE;
+	} else {
 		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
 		if (!addr)
 			return -ENOMEM;
@@ -323,19 +321,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		    (rc == IPA_RC_LAN_OFFLINE)) {
 			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			if (addr->ref_counter < 1) {
-				qeth_l3_delete_ip(card, addr);
+				qeth_l3_deregister_addr_entry(card, addr);
+				hash_del(&addr->hnode);
 				kfree(addr);
 			}
 		} else {
 			hash_del(&addr->hnode);
 			kfree(addr);
 		}
-	} else {
-		if (addr->type == QETH_IP_TYPE_NORMAL ||
-		    addr->type == QETH_IP_TYPE_RXIP)
-			addr->ref_counter++;
 	}
-
 	return rc;
 }

@@ -403,11 +397,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
 	spin_lock_bh(&card->ip_lock);

 	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
-		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-			qeth_l3_deregister_addr_entry(card, addr);
-			hash_del(&addr->hnode);
-			kfree(addr);
-		} else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
 			if (addr->proto == QETH_PROT_IPV4) {
 				addr->in_progress = 1;
 				spin_unlock_bh(&card->ip_lock);
@@ -723,12 +713,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;

 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);

 	kfree(ipaddr);
@@ -791,12 +776,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;

 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);

 	kfree(ipaddr);
@@ -1404,8 +1384,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 		memcpy(tmp->mac, buf, sizeof(tmp->mac));
 		tmp->is_multicast = 1;

-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 		} else {
 			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1488,8 +1469,9 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
 		       sizeof(struct in6_addr));
 		tmp->is_multicast = 1;

-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			continue;
 		}
@@ -2633,11 +2615,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
 			struct sk_buff *skb, int extra_elems)
 {
-	addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
-	int elements = qeth_get_elements_for_range(
-				tcpdptr,
-				(addr_t)skb->data + skb_headlen(skb)) +
-				qeth_get_elements_for_frags(skb);
+	addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);

 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2,
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 92155cce926d..fb4e6a7ee521 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
{
	struct page *page[1];
	struct vm_area_struct *vma;
+	struct vm_area_struct *vmas[1];
	int ret;

	if (mm == current->mm) {
-		ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
-					  page);
+		ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
+					      page, vmas);
	} else {
		unsigned int flags = 0;

@@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,

		down_read(&mm->mmap_sem);
		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
-					    NULL, NULL);
+					    vmas, NULL);
+		/*
+		 * The lifetime of a vaddr_get_pfn() page pin is
+		 * userspace-controlled. In the fs-dax case this could
+		 * lead to indefinite stalls in filesystem operations.
+		 * Disallow attempts to pin fs-dax pages via this
+		 * interface.
+		 */
+		if (ret > 0 && vma_is_fsdax(vmas[0])) {
+			ret = -EOPNOTSUPP;
+			put_page(page[0]);
+		}
		up_read(&mm->mmap_sem);
	}

diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 883881b16c86..4447e0fe9b55 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -422,7 +422,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);

-	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
+	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->nodesize);
}

BTRFS_ATTR(nodesize, btrfs_nodesize_show);
@@ -432,8 +432,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);

-	return snprintf(buf, PAGE_SIZE, "%u\n",
-			fs_info->super_copy->sectorsize);
+	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
}

BTRFS_ATTR(sectorsize, btrfs_sectorsize_show);
@@ -443,8 +442,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);

-	return snprintf(buf, PAGE_SIZE, "%u\n",
-			fs_info->super_copy->sectorsize);
+	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
}

BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f615d59b0489..46bda13e5727 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1722,19 +1722,23 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)

	super = fs_info->super_copy;

+	/* update latest btrfs_super_block::chunk_root refs */
	root_item = &fs_info->chunk_root->root_item;
-	super->chunk_root = root_item->bytenr;
-	super->chunk_root_generation = root_item->generation;
-	super->chunk_root_level = root_item->level;
+	btrfs_set_super_chunk_root(super, root_item->bytenr);
+	btrfs_set_super_chunk_root_generation(super, root_item->generation);
+	btrfs_set_super_chunk_root_level(super, root_item->level);

+	/* update latest btrfs_super_block::root refs */
	root_item = &fs_info->tree_root->root_item;
-	super->root = root_item->bytenr;
-	super->generation = root_item->generation;
-	super->root_level = root_item->level;
+	btrfs_set_super_root(super, root_item->bytenr);
+	btrfs_set_super_generation(super, root_item->generation);
+	btrfs_set_super_root_level(super, root_item->level);
+
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
-		super->cache_generation = root_item->generation;
+		btrfs_set_super_cache_generation(super, root_item->generation);
	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
-		super->uuid_tree_generation = root_item->generation;
+		btrfs_set_super_uuid_tree_generation(super,
+						     root_item->generation);
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b53e66d9abd7..625a84aa6484 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1252,8 +1252,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
	 */
	if (dio->is_async && iov_iter_rw(iter) == WRITE) {
		retval = 0;
-		if ((iocb->ki_filp->f_flags & O_DSYNC) ||
-		    IS_SYNC(iocb->ki_filp->f_mapping->host))
+		if (iocb->ki_flags & IOCB_DSYNC)
			retval = dio_set_defer_completion(dio);
		else if (!dio->inode->i_sb->s_dio_done_wq) {
			/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 440281f8564d..d54f41a63dbf 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3185,7 +3185,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
	if (!vma_is_dax(vma))
		return false;
	inode = file_inode(vma->vm_file);
-	if (inode->i_mode == S_IFCHR)
+	if (S_ISCHR(inode->i_mode))
		return false; /* device-dax */
	return true;
}
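The one-line fix above matters because i_mode carries permission bits as well as the file-type bits, so a direct comparison against S_IFCHR only matches when every other bit happens to be zero. A minimal userspace sketch (illustrative only, not kernel code) showing the difference:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* A character-device mode with 0644 permission bits set: the
	 * direct comparison misses it, while S_ISCHR() masks with
	 * S_IFMT first and matches. */
	mode_t mode = S_IFCHR | 0644;

	printf("mode == S_IFCHR: %d\n", mode == S_IFCHR); /* prints 0 */
	printf("S_ISCHR(mode):   %d\n", !!S_ISCHR(mode)); /* prints 1 */
	return 0;
}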
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index fbc98e2c8228..132e3f5a2e0d 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -72,7 +72,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
	BUILD_BUG_ON(sizeof(_i) > sizeof(long));		\
	BUILD_BUG_ON(sizeof(_s) > sizeof(long));		\
							\
-	_i &= _mask;					\
-	_i;						\
+	(typeof(_i)) (_i & _mask);			\
})
#endif /* _LINUX_NOSPEC_H */
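For context, array_index_mask_nospec() yields an all-ones mask when index < size and zero otherwise, so the AND above clamps a mispredicted out-of-bounds index to 0 without a branch; the hunk additionally casts the result back to the type of _i so the macro preserves the caller's type. A rough userspace re-creation of the masking idea (illustrative only; the kernel helper also hides the value from the optimizer):

#include <stdio.h>

/* All-ones when index < size, zero otherwise, computed without a
 * conditional branch. Relies on arithmetic right shift of a negative
 * signed value, as the kernel's generic helper does. */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	unsigned long table[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	unsigned long idx = 5;		/* attacker-influenced in the real case */

	idx &= index_mask(idx, 8);	/* in range: unchanged; out of range: 0 */
	printf("%lu\n", table[idx]);
	return 0;
}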
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 81bdbf97319b..9185e45b997f 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
		UDP_SKB_CB(skb)->cscov = cscov;
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;
+		skb->csum_valid = 0;
	}

	return 0;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index db5e6daadd94..9fe525f410bf 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1834,6 +1834,12 @@ int timers_dead_cpu(unsigned int cpu)
	raw_spin_lock_irq(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

+	/*
+	 * The current CPU's base clock might be stale. Update it
+	 * before moving the timers over.
+	 */
+	forward_timer_base(new_base);
+
	BUG_ON(old_base->running_timer);

	for (i = 0; i < WHEEL_SIZE; i++)
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 5d5d413a6cf8..a097a8613a02 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -235,6 +235,9 @@ static ssize_t brport_show(struct kobject *kobj,
	struct brport_attribute *brport_attr = to_brport_attr(attr);
	struct net_bridge_port *p = to_brport(kobj);

+	if (!brport_attr->show)
+		return -EINVAL;
+
	return brport_attr->show(p, buf);
}

diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 233a30040c91..9b8a53568b0f 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -157,6 +157,8 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
+		refcount_set(&masterv->refcnt, 1);
+		return masterv;
	}
	refcount_inc(&masterv->refcnt);

diff --git a/net/core/dev.c b/net/core/dev.c
index d33bbed640b1..c75ef9d8105a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2343,8 +2343,11 @@ EXPORT_SYMBOL(netdev_set_num_tc);
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
+	bool disabling;
	int rc;

+	disabling = txq < dev->real_num_tx_queues;
+
	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

@@ -2360,15 +2363,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
		if (dev->num_tc)
			netif_setup_tc(dev, txq);

-		if (txq < dev->real_num_tx_queues) {
+		dev->real_num_tx_queues = txq;
+
+		if (disabling) {
+			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
+	} else {
+		dev->real_num_tx_queues = txq;
	}

-	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 00ecec4891f3..7f980bd7426e 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,6 +66,7 @@ struct net_rate_estimator {
static void est_fetch_counters(struct net_rate_estimator *e,
			       struct gnet_stats_basic_packed *b)
{
+	memset(b, 0, sizeof(*b));
	if (e->stats_lock)
		spin_lock(e->stats_lock);

diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index aff3751df950..1ee6c0d8dde4 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -654,6 +654,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
				    fi->fib_nh, cfg, extack))
			return 1;
	}
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	if (cfg->fc_flow &&
+	    cfg->fc_flow != fi->fib_nh->nh_tclassid)
+		return 1;
+#endif
	if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
	    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
		return 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0ba88efca7ad..9ff06c5051ae 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -128,10 +128,13 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
-static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
+static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;

static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+
+static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
+
/*
 * Interface to generic destination cache.
 */
@@ -1829,6 +1832,8 @@ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
			return skb_get_hash_raw(skb) >> 1;
		memset(&hash_keys, 0, sizeof(hash_keys));
		skb_flow_dissect_flow_keys(skb, &keys, flag);
+
+		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
		hash_keys.ports.src = keys.ports.src;
@@ -2934,7 +2939,8 @@ static struct ctl_table ipv4_route_table[] = {
		.data = &ip_rt_min_pmtu,
		.maxlen = sizeof(int),
		.mode = 0644,
-		.proc_handler = proc_dointvec,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = &ip_min_valid_pmtu,
	},
	{
		.procname = "min_adv_mss",
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d9d215e27b8a..14474acea0bb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2013,11 +2013,6 @@ void tcp_enter_loss(struct sock *sk)
	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
	 * loss recovery is underway except recurring timeout(s) on
	 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
-	 *
-	 * In theory F-RTO can be used repeatedly during loss recovery.
-	 * In practice this interacts badly with broken middle-boxes that
-	 * falsely raise the receive window, which results in repeated
-	 * timeouts and stop-and-go behavior.
	 */
	tp->frto = sysctl_tcp_frto &&
		   (new_recovery || icsk->icsk_retransmits) &&
@@ -2699,18 +2694,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
	    tcp_try_undo_loss(sk, false))
		return;

-	/* The ACK (s)acks some never-retransmitted data meaning not all
-	 * the data packets before the timeout were lost. Therefore we
-	 * undo the congestion window and state. This is essentially
-	 * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
-	 * a retransmitted skb is permantly marked, we can apply such an
-	 * operation even if F-RTO was not used.
-	 */
-	if ((flag & FLAG_ORIG_SACK_ACKED) &&
-	    tcp_try_undo_loss(sk, tp->undo_marker))
-		return;
-
	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
+		/* Step 3.b. A timeout is spurious if not all data are
+		 * lost, i.e., never-retransmitted data are (s)acked.
+		 */
+		if ((flag & FLAG_ORIG_SACK_ACKED) &&
+		    tcp_try_undo_loss(sk, true))
+			return;
+
		if (after(tp->snd_nxt, tp->high_seq)) {
			if (flag & FLAG_DATA_SACKED || is_dupack)
				tp->frto = 0; /* Step 3.a. loss was real */
@@ -4020,6 +4011,7 @@ void tcp_reset(struct sock *sk)
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();

+	tcp_write_queue_purge(sk);
	tcp_done(sk);

	if (!sock_flag(sk, SOCK_DEAD))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cd3d60bb7cc8..83d11cd2eb65 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1681,7 +1681,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
	 */
	segs = max_t(u32, bytes / mss_now, min_tso_segs);

-	return min_t(u32, segs, sk->sk_gso_max_segs);
+	return segs;
}
EXPORT_SYMBOL(tcp_tso_autosize);

@@ -1693,8 +1693,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;

-	return tso_segs ? :
-		tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs);
+	if (!tso_segs)
+		tso_segs = tcp_tso_autosize(sk, mss_now,
+					    sysctl_tcp_min_tso_segs);
+	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
}

/* Returns the portion of skb which can be sent right away */
@@ -1973,6 +1975,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
	}
}

+static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+{
+	struct sk_buff *skb, *next;
+
+	skb = tcp_send_head(sk);
+	tcp_for_write_queue_from_safe(skb, next, sk) {
+		if (len <= skb->len)
+			break;
+
+		if (unlikely(TCP_SKB_CB(skb)->eor))
+			return false;
+
+		len -= skb->len;
+	}
+
+	return true;
+}
+
/* Create a new MTU probe if we are ready.
 * MTU probe is regularly attempting to increase the path MTU by
 * deliberately sending larger packets. This discovers routing
@@ -2045,6 +2065,9 @@ static int tcp_mtu_probe(struct sock *sk)
		return 0;
	}

+	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
+		return -1;
+
	/* We're allowed to probe. Build it now. */
	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
	if (!nskb)
@@ -2080,6 +2103,10 @@ static int tcp_mtu_probe(struct sock *sk)
			/* We've eaten all the data from this skb.
			 * Throw it away. */
			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+			/* If this is the last SKB we copy and eor is set
+			 * we need to propagate it to the new skb.
+			 */
+			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
			tcp_unlink_write_queue(skb, sk);
			sk_wmem_free_skb(sk, skb);
		} else {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ebfbccae62fd..c79fa6f6b758 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2032,6 +2032,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
+
+		if (UDP_SKB_CB(skb)->partial_cov) {
+			skb->csum = inet_compute_pseudo(skb, proto);
+			return 0;
+		}
	}

	/* Note, we are only interested in != 0 or == 0, thus the
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index ec43d18b5ff9..547515e8450a 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
+
+		if (UDP_SKB_CB(skb)->partial_cov) {
+			skb->csum = ip6_compute_pseudo(skb, proto);
+			return 0;
+		}
	}

/* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
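Both this hunk and the udp.c hunk above precompute the pseudo-header checksum for UDP-Lite packets with partial coverage, since a full-packet checksum can no longer be trusted once coverage is partial. The primitive underneath inet_compute_pseudo()/ip6_compute_pseudo() is the usual 16-bit one's-complement sum; a small standalone sketch of that folding step (illustrative, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

/* RFC 1071 style one's-complement checksum over a byte buffer,
 * folding carries back into the low 16 bits. */
static uint16_t csum16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += (uint32_t)buf[0] << 8;
	while (sum >> 16)		/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	const uint8_t hdr[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };

	printf("0x%04x\n", csum16(hdr, sizeof(hdr)));
	return 0;
}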
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e79854cc5790..cac815cc8600 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -176,7 +176,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel *t = netdev_priv(dev);

-	if (t->dev == sitn->fb_tunnel_dev) {
+	if (dev == sitn->fb_tunnel_dev) {
		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
		t->ip6rd.relay_prefix = 0;
		t->ip6rd.prefixlen = 16;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 533fd0503ba0..9219bc134109 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2276,7 +2276,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
	if (cb->start) {
		ret = cb->start(cb);
		if (ret)
-			goto error_unlock;
+			goto error_put;
	}

	nlk->cb_running = true;
@@ -2296,6 +2296,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
	 */
	return -EINTR;

+error_put:
+	module_put(control->module);
error_unlock:
	sock_put(sk);
	mutex_unlock(nlk->cb_mutex);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index d444daf1ac04..6f02499ef007 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1081,6 +1081,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
{
	struct sk_buff *tmp;
	struct net *net, *prev = NULL;
+	bool delivered = false;
	int err;

	for_each_net_rcu(net) {
@@ -1092,14 +1093,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
			}
			err = nlmsg_multicast(prev->genl_sock, tmp,
					      portid, group, flags);
-			if (err)
+			if (!err)
+				delivered = true;
+			else if (err != -ESRCH)
				goto error;
		}

		prev = net;
	}

-	return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+	if (!err)
+		delivered = true;
+	else if (err != -ESRCH)
+		goto error;
+	return delivered ? 0 : -ESRCH;
error:
	kfree_skb(skb);
return err;
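The rewrite above changes genlmsg_mcast() so that -ESRCH ("no listener in this namespace") no longer aborts the loop; only a hard delivery failure does, and -ESRCH is returned solely when no namespace accepted the message. A compact userspace sketch of that aggregation pattern (the hypothetical send_one() stands in for nlmsg_multicast()):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-namespace send: 0 on success, -ESRCH when that
 * namespace has no listener, another negative errno on hard failure. */
static int send_one(int ns)
{
	return (ns == 2) ? 0 : -ESRCH;	/* only namespace 2 listens here */
}

int main(void)
{
	bool delivered = false;
	int ns, err;

	for (ns = 0; ns < 4; ns++) {
		err = send_one(ns);
		if (!err)
			delivered = true;	/* at least one listener reached */
		else if (err != -ESRCH)
			return 1;		/* real error: abort immediately */
	}
	/* -ESRCH only if *no* namespace had a listener */
	printf("result: %d\n", delivered ? 0 : -ESRCH);
	return 0;
}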
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 71e6f713fbe7..5b67cb5d47f0 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -395,7 +395,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
					    (char *)&opt, sizeof(opt));
		if (ret == 0) {
			ret = kernel_sendmsg(conn->params.local->socket, &msg,
-					     iov, 1, iov[0].iov_len);
+					     iov, 2, len);

			opt = IPV6_PMTUDISC_DO;
			kernel_setsockopt(conn->params.local->socket,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 934c239cf98d..c2fab4bcb8be 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -871,13 +871,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
-		if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
+		if (!tcf_chain_dump(chain, skb, cb, index_start, &index)) {
+			err = -EMSGSIZE;
			break;
+		}
	}

	cb->args[0] = index;

out:
+	/* If we made no progress, the error (EMSGSIZE) is real */
+	if (skb->len == 0 && err)
+		return err;
	return skb->len;
}
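The fix above restores the netlink dump convention: a dump callback normally returns skb->len so userspace keeps reading, and should surface an error only when it made no progress at all; otherwise a too-small buffer would look like a silently empty dump. A small sketch of that progress-versus-error decision (names are illustrative, not the kernel API):

#include <stdio.h>

#define EMSGSIZE 90

/* Decide what a dump pass should return: the number of bytes written
 * when any progress was made, the real error only on zero progress. */
static int dump_result(int bytes_filled, int soft_err)
{
	if (bytes_filled == 0 && soft_err)
		return -soft_err;	/* nothing fit: report the error */
	return bytes_filled;		/* partial dump: caller continues */
}

int main(void)
{
	printf("%d\n", dump_result(0, EMSGSIZE));	/* prints -90 */
	printf("%d\n", dump_result(512, EMSGSIZE));	/* prints 512 */
	return 0;
}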
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index b58eccb21f03..ba37d8f57e68 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -398,10 +398,12 @@ static int u32_init(struct tcf_proto *tp)
static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
			   bool free_pf)
{
+	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
+
	tcf_exts_destroy(&n->exts);
	tcf_exts_put_net(&n->exts);
-	if (n->ht_down)
-		n->ht_down->refcnt--;
+	if (ht && --ht->refcnt == 0)
+		kfree(ht);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
@@ -649,16 +651,15 @@ static void u32_destroy(struct tcf_proto *tp)

	hlist_del(&tp_c->hnode);

-	for (ht = rtnl_dereference(tp_c->hlist);
-	     ht;
-	     ht = rtnl_dereference(ht->next)) {
-		ht->refcnt--;
-		u32_clear_hnode(tp, ht);
-	}
-
	while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
+		u32_clear_hnode(tp, ht);
		RCU_INIT_POINTER(tp_c->hlist, ht->next);
-		kfree_rcu(ht, rcu);
+
+		/* u32_destroy_key() will later free ht for us, if it's
+		 * still referenced by some knode
+		 */
+		if (--ht->refcnt == 0)
+			kfree_rcu(ht, rcu);
	}

	kfree(tp_c);
@@ -927,7 +928,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
	if (TC_U32_KEY(n->handle) == 0)
		return -EINVAL;

-	if (n->flags != flags)
+	if ((n->flags ^ flags) &
+	    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW))
		return -EINVAL;

	new = u32_init_knode(tp, n);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 141c9c466ec1..0247cc432e02 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -897,15 +897,12 @@ int sctp_hash_transport(struct sctp_transport *t)
	rhl_for_each_entry_rcu(transport, tmp, list, node)
		if (transport->asoc->ep == t->asoc->ep) {
			rcu_read_unlock();
-			err = -EEXIST;
-			goto out;
+			return -EEXIST;
		}
	rcu_read_unlock();

	err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
				  &t->node, sctp_hash_params);
-
-out:
	if (err)
		pr_err_once("insert transport fail, errno %d\n", err);

diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 3b18085e3b10..f27a9718554c 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -326,8 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
		final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
		bdst = ip6_dst_lookup_flow(sk, fl6, final_p);

-		if (!IS_ERR(bdst) &&
-		    ipv6_chk_addr(dev_net(bdst->dev),
+		if (IS_ERR(bdst))
+			continue;
+
+		if (ipv6_chk_addr(dev_net(bdst->dev),
				  &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
			if (!IS_ERR_OR_NULL(dst))
				dst_release(dst);
@@ -336,8 +338,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
		}

		bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
-		if (matchlen > bmatchlen)
+		if (matchlen > bmatchlen) {
+			dst_release(bdst);
			continue;
+		}

		if (!IS_ERR_OR_NULL(dst))
			dst_release(dst);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index fcd80feb293f..df22a9c352ad 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -514,22 +514,20 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
		if (IS_ERR(rt))
			continue;

-		if (!dst)
-			dst = &rt->dst;
-
		/* Ensure the src address belongs to the output
		 * interface.
		 */
		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
				     false);
		if (!odev || odev->ifindex != fl4->flowi4_oif) {
-			if (&rt->dst != dst)
+			if (!dst)
+				dst = &rt->dst;
+			else
				dst_release(&rt->dst);
			continue;
		}

-		if (dst != &rt->dst)
-			dst_release(dst);
+		dst_release(dst);
		dst = &rt->dst;
		break;
	}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 514465b03829..e4a400f88168 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1378,9 +1378,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
	struct sctp_chunk *retval;
	struct sk_buff *skb;
	struct sock *sk;
+	int chunklen;
+
+	chunklen = SCTP_PAD4(sizeof(*chunk_hdr) + paylen);
+	if (chunklen > SCTP_MAX_CHUNK_LEN)
+		goto nodata;

	/* No need to allocate LL here, as this is only a chunk. */
-	skb = alloc_skb(SCTP_PAD4(sizeof(*chunk_hdr) + paylen), gfp);
+	skb = alloc_skb(chunklen, gfp);
	if (!skb)
		goto nodata;

diff --git a/sound/core/control.c b/sound/core/control.c
index 56b3e2d49c82..af7e6165e21e 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -888,7 +888,7 @@ static int snd_ctl_elem_read(struct snd_card *card,

	index_offset = snd_ctl_get_ioff(kctl, &control->id);
	vd = &kctl->vd[index_offset];
-	if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get == NULL)
+	if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
		return -EPERM;

	snd_ctl_build_ioff(&control->id, kctl, index_offset);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c71dcacea807..96143df19b21 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -181,7 +181,7 @@ static const struct kernel_param_ops param_ops_xint = {
};
#define param_check_xint param_check_int

-static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+static int power_save = -1;
module_param(power_save, xint, 0644);
MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
		 "(in second, 0 = disable).");
@@ -2186,6 +2186,24 @@ static int azx_probe(struct pci_dev *pci,
	return err;
}

+#ifdef CONFIG_PM
+/* On some boards setting power_save to a non-0 value leads to clicking /
+ * popping sounds whenever we enter/leave powersaving mode. Ideally we would
+ * figure out how to avoid these sounds, but that is not always feasible.
+ * So we keep a list of devices where we disable powersaving, as it's known
+ * to cause problems on these devices.
+ */
+static struct snd_pci_quirk power_save_blacklist[] = {
+	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+	SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+	SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+	SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+	{}
+};
+#endif /* CONFIG_PM */
+
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
	[AZX_DRIVER_NVIDIA] = 8,
@@ -2198,6 +2216,7 @@ static int azx_probe_continue(struct azx *chip)
	struct hdac_bus *bus = azx_bus(chip);
	struct pci_dev *pci = chip->pci;
	int dev = chip->dev_index;
+	int val;
	int err;

	hda->probe_continued = 1;
@@ -2278,7 +2297,22 @@ static int azx_probe_continue(struct azx *chip)

	chip->running = 1;
	azx_add_card_list(chip);
-	snd_hda_set_power_save(&chip->bus, power_save * 1000);
+
+	val = power_save;
+#ifdef CONFIG_PM
+	if (val == -1) {
+		const struct snd_pci_quirk *q;
+
+		val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+		q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+		if (q && val) {
+			dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+				 q->subvendor, q->subdevice);
+			val = 0;
+		}
+	}
+#endif /* CONFIG_PM */
+	snd_hda_set_power_save(&chip->bus, val * 1000);
	if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
		pm_runtime_put_autosuspend(&pci->dev);

diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b7acffdf16a4..454476b47b79 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4852,13 +4852,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+		snd_hda_apply_pincfgs(codec, pincfgs);
+	} else if (action == HDA_FIXUP_ACT_INIT) {
		/* Enable DOCK device */
		snd_hda_codec_write(codec, 0x17, 0,
			    AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
		/* Enable DOCK device */
		snd_hda_codec_write(codec, 0x19, 0,
			    AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
-		snd_hda_apply_pincfgs(codec, pincfgs);
	}
}

diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 8a59d4782a0f..69bf5cf1e91e 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3277,4 +3277,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
	}
},

+{
+	/*
+	 * Bowers & Wilkins PX headphones only support the 48 kHz sample rate
+	 * even though they advertise more. The capture interface doesn't work
+	 * even on Windows.
+	 */
+	USB_DEVICE(0x19b5, 0x0021),
+	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+		.ifnum = QUIRK_ANY_INTERFACE,
+		.type = QUIRK_COMPOSITE,
+		.data = (const struct snd_usb_audio_quirk[]) {
+			{
+				.ifnum = 0,
+				.type = QUIRK_AUDIO_STANDARD_MIXER,
+			},
+			/* Capture */
+			{
+				.ifnum = 1,
+				.type = QUIRK_IGNORE_INTERFACE,
+			},
+			/* Playback */
+			{
+				.ifnum = 2,
+				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
+				.data = &(const struct audioformat) {
+					.formats = SNDRV_PCM_FMTBIT_S16_LE,
+					.channels = 2,
+					.iface = 2,
+					.altsetting = 1,
+					.altset_idx = 1,
+					.attributes = UAC_EP_CS_ATTR_FILL_MAX |
+						UAC_EP_CS_ATTR_SAMPLE_RATE,
+					.endpoint = 0x03,
+					.ep_attr = USB_ENDPOINT_XFER_ISOC,
+					.rates = SNDRV_PCM_RATE_48000,
+					.rate_min = 48000,
+					.rate_max = 48000,
+					.nr_rates = 1,
+					.rate_table = (unsigned int[]) {
+						48000
+					}
+				}
+			},
+		}
+	}
+},
+
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index a0951505c7f5..697872d8308e 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1827,6 +1827,8 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
	ctx->port = port;
	ctx->pipe = -1;

+	spin_lock_init(&ctx->had_spinlock);
+	mutex_init(&ctx->mutex);
	INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);

	ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8401774f5aeb..d81af263f50b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -975,8 +975,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
	/* Check for overlaps */
	r = -EEXIST;
	kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
-		if ((slot->id >= KVM_USER_MEM_SLOTS) ||
-		    (slot->id == id))
+		if (slot->id == id)
			continue;
		if (!((base_gfn + npages <= slot->base_gfn) ||
		      (base_gfn >= slot->base_gfn + slot->npages)))