mirror of
https://github.com/Fishwaldo/build.git
synced 2025-03-27 09:11:49 +00:00
5005 lines
162 KiB
Text
5005 lines
162 KiB
Text
diff --git a/Makefile b/Makefile
|
|
index 8717f34464d5..36d9de42def3 100644
|
|
--- a/Makefile
|
|
+++ b/Makefile
|
|
@@ -1,7 +1,7 @@
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
VERSION = 4
|
|
PATCHLEVEL = 19
|
|
-SUBLEVEL = 9
|
|
+SUBLEVEL = 10
|
|
EXTRAVERSION =
|
|
NAME = "People's Front"
|
|
|
|
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
|
|
index 1d158cfda15f..c45aef806870 100644
|
|
--- a/arch/arm/boot/dts/am3517-evm.dts
|
|
+++ b/arch/arm/boot/dts/am3517-evm.dts
|
|
@@ -227,7 +227,7 @@
|
|
vmmc-supply = <&vmmc_fixed>;
|
|
bus-width = <4>;
|
|
wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
|
|
- cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */
|
|
+ cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */
|
|
};
|
|
|
|
&mmc3 {
|
|
diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi
|
|
index dae6e458e59f..b1c988eed87c 100644
|
|
--- a/arch/arm/boot/dts/am3517-som.dtsi
|
|
+++ b/arch/arm/boot/dts/am3517-som.dtsi
|
|
@@ -163,7 +163,7 @@
|
|
compatible = "ti,wl1271";
|
|
reg = <2>;
|
|
interrupt-parent = <&gpio6>;
|
|
- interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */
|
|
+ interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */
|
|
ref-clock-frequency = <26000000>;
|
|
tcxo-clock-frequency = <26000000>;
|
|
};
|
|
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
|
|
index ac343330d0c8..98b682a8080c 100644
|
|
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
|
|
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
|
|
@@ -129,7 +129,7 @@
|
|
};
|
|
|
|
&mmc3 {
|
|
- interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
|
|
+ interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
|
|
pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
|
|
pinctrl-names = "default";
|
|
vmmc-supply = <&wl12xx_vmmc>;
|
|
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
|
|
index 9d5d53fbe9c0..c39cf2ca54da 100644
|
|
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
|
|
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
|
|
@@ -35,7 +35,7 @@
|
|
* jumpering combinations for the long run.
|
|
*/
|
|
&mmc3 {
|
|
- interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
|
|
+ interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
|
|
pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>;
|
|
pinctrl-names = "default";
|
|
vmmc-supply = <&wl12xx_vmmc>;
|
|
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
|
|
index 61f68e5c48e9..b405992eb601 100644
|
|
--- a/arch/arm/boot/dts/sama5d2.dtsi
|
|
+++ b/arch/arm/boot/dts/sama5d2.dtsi
|
|
@@ -308,7 +308,7 @@
|
|
0x1 0x0 0x60000000 0x10000000
|
|
0x2 0x0 0x70000000 0x10000000
|
|
0x3 0x0 0x80000000 0x10000000>;
|
|
- clocks = <&mck>;
|
|
+ clocks = <&h32ck>;
|
|
status = "disabled";
|
|
|
|
nand_controller: nand-controller {
|
|
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
|
|
index dd28d2614d7f..d10d8831f527 100644
|
|
--- a/arch/arm/mach-omap1/board-ams-delta.c
|
|
+++ b/arch/arm/mach-omap1/board-ams-delta.c
|
|
@@ -726,6 +726,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
|
|
struct modem_private_data *priv = port->private_data;
|
|
int ret;
|
|
|
|
+ if (!priv)
|
|
+ return;
|
|
+
|
|
if (IS_ERR(priv->regulator))
|
|
return;
|
|
|
|
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
|
|
index 7b95729e8359..38a1be6c3694 100644
|
|
--- a/arch/arm/mach-omap2/prm44xx.c
|
|
+++ b/arch/arm/mach-omap2/prm44xx.c
|
|
@@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
|
|
* to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
|
|
* omap44xx_prm_reconfigure_io_chain() must be called. No return value.
|
|
*/
|
|
-static void __init omap44xx_prm_enable_io_wakeup(void)
|
|
+static void omap44xx_prm_enable_io_wakeup(void)
|
|
{
|
|
s32 inst = omap4_prmst_get_prm_dev_inst();
|
|
|
|
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
|
|
index 6d651f314193..6921f8dc5ebb 100644
|
|
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
|
|
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
|
|
@@ -31,6 +31,10 @@
|
|
status = "okay";
|
|
};
|
|
|
|
+&tlmm {
|
|
+ gpio-reserved-ranges = <0 4>, <81 4>;
|
|
+};
|
|
+
|
|
&uart9 {
|
|
status = "okay";
|
|
};
|
|
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
|
|
index 74091fd3101e..d5523adeddbf 100644
|
|
--- a/arch/s390/kernel/perf_cpum_cf.c
|
|
+++ b/arch/s390/kernel/perf_cpum_cf.c
|
|
@@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event)
|
|
break;
|
|
|
|
case PERF_TYPE_HARDWARE:
|
|
+ if (is_sampling_event(event)) /* No sampling support */
|
|
+ return -ENOENT;
|
|
ev = attr->config;
|
|
/* Count user space (problem-state) only */
|
|
if (!attr->exclude_user && attr->exclude_kernel) {
|
|
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
|
|
index 2216d21e955d..3692de84c420 100644
|
|
--- a/arch/x86/kvm/lapic.c
|
|
+++ b/arch/x86/kvm/lapic.c
|
|
@@ -55,7 +55,7 @@
|
|
#define PRIo64 "o"
|
|
|
|
/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
|
|
-#define apic_debug(fmt, arg...)
|
|
+#define apic_debug(fmt, arg...) do {} while (0)
|
|
|
|
/* 14 is the version for Xeon and Pentium 8.4.8*/
|
|
#define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
|
|
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
|
|
index e55f7a90d4b2..c97a9d60d305 100644
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -962,6 +962,7 @@ struct vcpu_vmx {
|
|
struct shared_msr_entry *guest_msrs;
|
|
int nmsrs;
|
|
int save_nmsrs;
|
|
+ bool guest_msrs_dirty;
|
|
unsigned long host_idt_base;
|
|
#ifdef CONFIG_X86_64
|
|
u64 msr_host_kernel_gs_base;
|
|
@@ -1284,7 +1285,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
|
|
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
|
|
u16 error_code);
|
|
static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
|
|
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
|
|
+static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
|
|
u32 msr, int type);
|
|
|
|
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
|
|
@@ -2874,6 +2875,20 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
|
|
|
|
vmx->req_immediate_exit = false;
|
|
|
|
+ /*
|
|
+ * Note that guest MSRs to be saved/restored can also be changed
|
|
+ * when guest state is loaded. This happens when guest transitions
|
|
+ * to/from long-mode by setting MSR_EFER.LMA.
|
|
+ */
|
|
+ if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) {
|
|
+ vmx->guest_msrs_dirty = false;
|
|
+ for (i = 0; i < vmx->save_nmsrs; ++i)
|
|
+ kvm_set_shared_msr(vmx->guest_msrs[i].index,
|
|
+ vmx->guest_msrs[i].data,
|
|
+ vmx->guest_msrs[i].mask);
|
|
+
|
|
+ }
|
|
+
|
|
if (vmx->loaded_cpu_state)
|
|
return;
|
|
|
|
@@ -2934,11 +2949,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
|
|
vmcs_writel(HOST_GS_BASE, gs_base);
|
|
host_state->gs_base = gs_base;
|
|
}
|
|
-
|
|
- for (i = 0; i < vmx->save_nmsrs; ++i)
|
|
- kvm_set_shared_msr(vmx->guest_msrs[i].index,
|
|
- vmx->guest_msrs[i].data,
|
|
- vmx->guest_msrs[i].mask);
|
|
}
|
|
|
|
static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
|
|
@@ -3418,6 +3428,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
|
|
move_msr_up(vmx, index, save_nmsrs++);
|
|
|
|
vmx->save_nmsrs = save_nmsrs;
|
|
+ vmx->guest_msrs_dirty = true;
|
|
|
|
if (cpu_has_vmx_msr_bitmap())
|
|
vmx_update_msr_bitmap(&vmx->vcpu);
|
|
@@ -5924,7 +5935,7 @@ static void free_vpid(int vpid)
|
|
spin_unlock(&vmx_vpid_lock);
|
|
}
|
|
|
|
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
|
|
+static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
|
|
u32 msr, int type)
|
|
{
|
|
int f = sizeof(unsigned long);
|
|
@@ -5962,7 +5973,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit
|
|
}
|
|
}
|
|
|
|
-static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
|
|
+static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
|
|
u32 msr, int type)
|
|
{
|
|
int f = sizeof(unsigned long);
|
|
@@ -6000,7 +6011,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm
|
|
}
|
|
}
|
|
|
|
-static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
|
|
+static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
|
|
u32 msr, int type, bool value)
|
|
{
|
|
if (value)
|
|
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
|
|
index 2eeddd814653..c6c7c9b7b5c1 100644
|
|
--- a/arch/x86/xen/enlighten.c
|
|
+++ b/arch/x86/xen/enlighten.c
|
|
@@ -7,7 +7,6 @@
|
|
|
|
#include <xen/features.h>
|
|
#include <xen/page.h>
|
|
-#include <xen/interface/memory.h>
|
|
|
|
#include <asm/xen/hypercall.h>
|
|
#include <asm/xen/hypervisor.h>
|
|
@@ -343,80 +342,3 @@ void xen_arch_unregister_cpu(int num)
|
|
}
|
|
EXPORT_SYMBOL(xen_arch_unregister_cpu);
|
|
#endif
|
|
-
|
|
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
|
|
-void __init arch_xen_balloon_init(struct resource *hostmem_resource)
|
|
-{
|
|
- struct xen_memory_map memmap;
|
|
- int rc;
|
|
- unsigned int i, last_guest_ram;
|
|
- phys_addr_t max_addr = PFN_PHYS(max_pfn);
|
|
- struct e820_table *xen_e820_table;
|
|
- const struct e820_entry *entry;
|
|
- struct resource *res;
|
|
-
|
|
- if (!xen_initial_domain())
|
|
- return;
|
|
-
|
|
- xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
|
|
- if (!xen_e820_table)
|
|
- return;
|
|
-
|
|
- memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
|
|
- set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
|
|
- rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
|
|
- if (rc) {
|
|
- pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
|
|
- goto out;
|
|
- }
|
|
-
|
|
- last_guest_ram = 0;
|
|
- for (i = 0; i < memmap.nr_entries; i++) {
|
|
- if (xen_e820_table->entries[i].addr >= max_addr)
|
|
- break;
|
|
- if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
|
|
- last_guest_ram = i;
|
|
- }
|
|
-
|
|
- entry = &xen_e820_table->entries[last_guest_ram];
|
|
- if (max_addr >= entry->addr + entry->size)
|
|
- goto out; /* No unallocated host RAM. */
|
|
-
|
|
- hostmem_resource->start = max_addr;
|
|
- hostmem_resource->end = entry->addr + entry->size;
|
|
-
|
|
- /*
|
|
- * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
|
|
- * as unavailable. The rest of that region can be used for hotplug-based
|
|
- * ballooning.
|
|
- */
|
|
- for (; i < memmap.nr_entries; i++) {
|
|
- entry = &xen_e820_table->entries[i];
|
|
-
|
|
- if (entry->type == E820_TYPE_RAM)
|
|
- continue;
|
|
-
|
|
- if (entry->addr >= hostmem_resource->end)
|
|
- break;
|
|
-
|
|
- res = kzalloc(sizeof(*res), GFP_KERNEL);
|
|
- if (!res)
|
|
- goto out;
|
|
-
|
|
- res->name = "Unavailable host RAM";
|
|
- res->start = entry->addr;
|
|
- res->end = (entry->addr + entry->size < hostmem_resource->end) ?
|
|
- entry->addr + entry->size : hostmem_resource->end;
|
|
- rc = insert_resource(hostmem_resource, res);
|
|
- if (rc) {
|
|
- pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
|
|
- __func__, res->start, res->end, rc);
|
|
- kfree(res);
|
|
- goto out;
|
|
- }
|
|
- }
|
|
-
|
|
- out:
|
|
- kfree(xen_e820_table);
|
|
-}
|
|
-#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
|
|
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
|
|
index 1163e33121fb..075ed47993bb 100644
|
|
--- a/arch/x86/xen/setup.c
|
|
+++ b/arch/x86/xen/setup.c
|
|
@@ -808,6 +808,7 @@ char * __init xen_memory_setup(void)
|
|
addr = xen_e820_table.entries[0].addr;
|
|
size = xen_e820_table.entries[0].size;
|
|
while (i < xen_e820_table.nr_entries) {
|
|
+ bool discard = false;
|
|
|
|
chunk_size = size;
|
|
type = xen_e820_table.entries[i].type;
|
|
@@ -823,10 +824,11 @@ char * __init xen_memory_setup(void)
|
|
xen_add_extra_mem(pfn_s, n_pfns);
|
|
xen_max_p2m_pfn = pfn_s + n_pfns;
|
|
} else
|
|
- type = E820_TYPE_UNUSABLE;
|
|
+ discard = true;
|
|
}
|
|
|
|
- xen_align_and_add_e820_region(addr, chunk_size, type);
|
|
+ if (!discard)
|
|
+ xen_align_and_add_e820_region(addr, chunk_size, type);
|
|
|
|
addr += chunk_size;
|
|
size -= chunk_size;
|
|
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
|
|
index 08f26db2da7e..e938576e58cb 100644
|
|
--- a/drivers/acpi/arm64/iort.c
|
|
+++ b/drivers/acpi/arm64/iort.c
|
|
@@ -700,7 +700,7 @@ static void iort_set_device_domain(struct device *dev,
|
|
*/
|
|
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
|
|
{
|
|
- struct acpi_iort_node *node, *msi_parent;
|
|
+ struct acpi_iort_node *node, *msi_parent = NULL;
|
|
struct fwnode_handle *iort_fwnode;
|
|
struct acpi_iort_its_group *its;
|
|
int i;
|
|
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
|
|
index af3a20dd5aa4..99c99a5d57fe 100644
|
|
--- a/drivers/fsi/Kconfig
|
|
+++ b/drivers/fsi/Kconfig
|
|
@@ -46,6 +46,7 @@ config FSI_MASTER_AST_CF
|
|
tristate "FSI master based on Aspeed ColdFire coprocessor"
|
|
depends on GPIOLIB
|
|
depends on GPIO_ASPEED
|
|
+ select GENERIC_ALLOCATOR
|
|
---help---
|
|
This option enables a FSI master using the AST2400 and AST2500 GPIO
|
|
lines driven by the internal ColdFire coprocessor. This requires
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
|
|
index ef00d14f8645..325e2213cac5 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
|
|
@@ -2243,12 +2243,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
|
|
#endif
|
|
|
|
WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
|
|
+ udelay(50);
|
|
|
|
/* carrizo do enable cp interrupt after cp inited */
|
|
- if (!(adev->flags & AMD_IS_APU))
|
|
+ if (!(adev->flags & AMD_IS_APU)) {
|
|
gfx_v9_0_enable_gui_idle_interrupt(adev, true);
|
|
-
|
|
- udelay(50);
|
|
+ udelay(50);
|
|
+ }
|
|
|
|
#ifdef AMDGPU_RLC_DEBUG_RETRY
|
|
/* RLC_GPM_GENERAL_6 : RLC Ucode version */
|
|
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
|
|
index 7c6ac3cadb6b..8bb355d5d43d 100644
|
|
--- a/drivers/gpu/drm/ast/ast_mode.c
|
|
+++ b/drivers/gpu/drm/ast/ast_mode.c
|
|
@@ -973,9 +973,21 @@ static int get_clock(void *i2c_priv)
|
|
{
|
|
struct ast_i2c_chan *i2c = i2c_priv;
|
|
struct ast_private *ast = i2c->dev->dev_private;
|
|
- uint32_t val;
|
|
+ uint32_t val, val2, count, pass;
|
|
+
|
|
+ count = 0;
|
|
+ pass = 0;
|
|
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
|
|
+ do {
|
|
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
|
|
+ if (val == val2) {
|
|
+ pass++;
|
|
+ } else {
|
|
+ pass = 0;
|
|
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
|
|
+ }
|
|
+ } while ((pass < 5) && (count++ < 0x10000));
|
|
|
|
- val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
|
|
return val & 1 ? 1 : 0;
|
|
}
|
|
|
|
@@ -983,9 +995,21 @@ static int get_data(void *i2c_priv)
|
|
{
|
|
struct ast_i2c_chan *i2c = i2c_priv;
|
|
struct ast_private *ast = i2c->dev->dev_private;
|
|
- uint32_t val;
|
|
+ uint32_t val, val2, count, pass;
|
|
+
|
|
+ count = 0;
|
|
+ pass = 0;
|
|
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
|
|
+ do {
|
|
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
|
|
+ if (val == val2) {
|
|
+ pass++;
|
|
+ } else {
|
|
+ pass = 0;
|
|
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
|
|
+ }
|
|
+ } while ((pass < 5) && (count++ < 0x10000));
|
|
|
|
- val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
|
|
return val & 1 ? 1 : 0;
|
|
}
|
|
|
|
@@ -998,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
|
|
|
|
for (i = 0; i < 0x10000; i++) {
|
|
ujcrb7 = ((clock & 0x01) ? 0 : 1);
|
|
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
|
|
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
|
|
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
|
|
if (ujcrb7 == jtemp)
|
|
break;
|
|
@@ -1014,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
|
|
|
|
for (i = 0; i < 0x10000; i++) {
|
|
ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
|
|
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
|
|
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
|
|
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
|
|
if (ujcrb7 == jtemp)
|
|
break;
|
|
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
|
|
index 14aac661f38b..7a3a6ed9f27b 100644
|
|
--- a/drivers/gpu/drm/meson/meson_venc.c
|
|
+++ b/drivers/gpu/drm/meson/meson_venc.c
|
|
@@ -715,6 +715,7 @@ struct meson_hdmi_venc_vic_mode {
|
|
{ 5, &meson_hdmi_encp_mode_1080i60 },
|
|
{ 20, &meson_hdmi_encp_mode_1080i50 },
|
|
{ 32, &meson_hdmi_encp_mode_1080p24 },
|
|
+ { 33, &meson_hdmi_encp_mode_1080p50 },
|
|
{ 34, &meson_hdmi_encp_mode_1080p30 },
|
|
{ 31, &meson_hdmi_encp_mode_1080p50 },
|
|
{ 16, &meson_hdmi_encp_mode_1080p60 },
|
|
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
|
|
index 71d3445ba869..07ee19573b3f 100644
|
|
--- a/drivers/hwmon/ina2xx.c
|
|
+++ b/drivers/hwmon/ina2xx.c
|
|
@@ -274,7 +274,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
|
|
break;
|
|
case INA2XX_CURRENT:
|
|
/* signed register, result in mA */
|
|
- val = regval * data->current_lsb_uA;
|
|
+ val = (s16)regval * data->current_lsb_uA;
|
|
val = DIV_ROUND_CLOSEST(val, 1000);
|
|
break;
|
|
case INA2XX_CALIBRATION:
|
|
@@ -491,7 +491,7 @@ static int ina2xx_probe(struct i2c_client *client,
|
|
}
|
|
|
|
data->groups[group++] = &ina2xx_group;
|
|
- if (id->driver_data == ina226)
|
|
+ if (chip == ina226)
|
|
data->groups[group++] = &ina226_group;
|
|
|
|
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
|
|
@@ -500,7 +500,7 @@ static int ina2xx_probe(struct i2c_client *client,
|
|
return PTR_ERR(hwmon_dev);
|
|
|
|
dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
|
|
- id->name, data->rshunt);
|
|
+ client->name, data->rshunt);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
|
|
index de46577c7d5a..d8fa4bea4bc8 100644
|
|
--- a/drivers/hwmon/mlxreg-fan.c
|
|
+++ b/drivers/hwmon/mlxreg-fan.c
|
|
@@ -51,7 +51,7 @@
|
|
*/
|
|
#define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \
|
|
((rval) + (s)) * (d)))
|
|
-#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask)))
|
|
+#define MLXREG_FAN_GET_FAULT(val, mask) (!((val) ^ (mask)))
|
|
#define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \
|
|
MLXREG_FAN_MAX_STATE, \
|
|
MLXREG_FAN_MAX_DUTY))
|
|
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
|
|
index be5ba4690895..0d0457245e7d 100644
|
|
--- a/drivers/hwmon/raspberrypi-hwmon.c
|
|
+++ b/drivers/hwmon/raspberrypi-hwmon.c
|
|
@@ -115,7 +115,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
|
|
{
|
|
struct device *dev = &pdev->dev;
|
|
struct rpi_hwmon_data *data;
|
|
- int ret;
|
|
|
|
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
|
|
if (!data)
|
|
@@ -124,11 +123,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
|
|
/* Parent driver assure that firmware is correct */
|
|
data->fw = dev_get_drvdata(dev->parent);
|
|
|
|
- /* Init throttled */
|
|
- ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED,
|
|
- &data->last_throttled,
|
|
- sizeof(data->last_throttled));
|
|
-
|
|
data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt",
|
|
data,
|
|
&rpi_chip_info,
|
|
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
|
|
index 49276bbdac3d..1bb80f992aa8 100644
|
|
--- a/drivers/hwmon/w83795.c
|
|
+++ b/drivers/hwmon/w83795.c
|
|
@@ -1691,7 +1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
|
|
* somewhere else in the code
|
|
*/
|
|
#define SENSOR_ATTR_TEMP(index) { \
|
|
- SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \
|
|
+ SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \
|
|
show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
|
|
SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
|
|
NULL, TEMP_READ, index - 1), \
|
|
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
|
|
index ee366199b169..25d43c8f1c2a 100644
|
|
--- a/drivers/infiniband/core/roce_gid_mgmt.c
|
|
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
|
|
@@ -767,8 +767,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
|
|
|
|
case NETDEV_CHANGEADDR:
|
|
cmds[0] = netdev_del_cmd;
|
|
- cmds[1] = add_default_gid_cmd;
|
|
- cmds[2] = add_cmd;
|
|
+ if (ndev->reg_state == NETREG_REGISTERED) {
|
|
+ cmds[1] = add_default_gid_cmd;
|
|
+ cmds[2] = add_cmd;
|
|
+ }
|
|
break;
|
|
|
|
case NETDEV_CHANGEUPPER:
|
|
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
|
|
index 85cd1a3593d6..22bd9784fa2e 100644
|
|
--- a/drivers/infiniband/hw/bnxt_re/main.c
|
|
+++ b/drivers/infiniband/hw/bnxt_re/main.c
|
|
@@ -1252,6 +1252,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
|
|
/* Registered a new RoCE device instance to netdev */
|
|
rc = bnxt_re_register_netdev(rdev);
|
|
if (rc) {
|
|
+ rtnl_unlock();
|
|
pr_err("Failed to register with netedev: %#x\n", rc);
|
|
return -EINVAL;
|
|
}
|
|
@@ -1461,6 +1462,7 @@ static void bnxt_re_task(struct work_struct *work)
|
|
"Failed to register with IB: %#x", rc);
|
|
bnxt_re_remove_one(rdev);
|
|
bnxt_re_dev_unreg(rdev);
|
|
+ goto exit;
|
|
}
|
|
break;
|
|
case NETDEV_UP:
|
|
@@ -1484,6 +1486,7 @@ static void bnxt_re_task(struct work_struct *work)
|
|
}
|
|
smp_mb__before_atomic();
|
|
atomic_dec(&rdev->sched_count);
|
|
+exit:
|
|
kfree(re_work);
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
|
|
index e1668bcc2d13..902d12d6d88b 100644
|
|
--- a/drivers/infiniband/hw/hfi1/chip.c
|
|
+++ b/drivers/infiniband/hw/hfi1/chip.c
|
|
@@ -12485,7 +12485,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
|
|
}
|
|
|
|
/* allocate space for the counter values */
|
|
- dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
|
|
+ dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
|
|
+ GFP_KERNEL);
|
|
if (!dd->cntrs)
|
|
goto bail;
|
|
|
|
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
|
|
index d9470317983f..cfd252386356 100644
|
|
--- a/drivers/infiniband/hw/hfi1/hfi.h
|
|
+++ b/drivers/infiniband/hw/hfi1/hfi.h
|
|
@@ -154,6 +154,8 @@ struct hfi1_ib_stats {
|
|
extern struct hfi1_ib_stats hfi1_stats;
|
|
extern const struct pci_error_handlers hfi1_pci_err_handler;
|
|
|
|
+extern int num_driver_cntrs;
|
|
+
|
|
/*
|
|
* First-cut criterion for "device is active" is
|
|
* two thousand dwords combined Tx, Rx traffic per
|
|
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
|
|
index a7c586a5589d..3dfb4cf2f8c9 100644
|
|
--- a/drivers/infiniband/hw/hfi1/verbs.c
|
|
+++ b/drivers/infiniband/hw/hfi1/verbs.c
|
|
@@ -1701,7 +1701,7 @@ static const char * const driver_cntr_names[] = {
|
|
static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
|
|
static const char **dev_cntr_names;
|
|
static const char **port_cntr_names;
|
|
-static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
|
|
+int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
|
|
static int num_dev_cntrs;
|
|
static int num_port_cntrs;
|
|
static int cntr_names_initialized;
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
index 0218c0f8c2a7..a442b29e7611 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
@@ -1661,10 +1661,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
|
|
return hns_roce_cmq_send(hr_dev, &desc, 1);
|
|
}
|
|
|
|
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
|
|
- unsigned long mtpt_idx)
|
|
+static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
|
|
+ struct hns_roce_mr *mr)
|
|
{
|
|
- struct hns_roce_v2_mpt_entry *mpt_entry;
|
|
struct scatterlist *sg;
|
|
u64 page_addr;
|
|
u64 *pages;
|
|
@@ -1672,6 +1671,53 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
|
|
int len;
|
|
int entry;
|
|
|
|
+ mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
|
|
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
|
|
+ roce_set_field(mpt_entry->byte_48_mode_ba,
|
|
+ V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
|
|
+ upper_32_bits(mr->pbl_ba >> 3));
|
|
+
|
|
+ pages = (u64 *)__get_free_page(GFP_KERNEL);
|
|
+ if (!pages)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ i = 0;
|
|
+ for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
|
|
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
|
|
+ for (j = 0; j < len; ++j) {
|
|
+ page_addr = sg_dma_address(sg) +
|
|
+ (j << mr->umem->page_shift);
|
|
+ pages[i] = page_addr >> 6;
|
|
+ /* Record the first 2 entry directly to MTPT table */
|
|
+ if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
|
|
+ goto found;
|
|
+ i++;
|
|
+ }
|
|
+ }
|
|
+found:
|
|
+ mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
|
|
+ roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
|
|
+ V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
|
|
+
|
|
+ mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
|
|
+ roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
|
|
+ V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
|
|
+ roce_set_field(mpt_entry->byte_64_buf_pa1,
|
|
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
|
|
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
|
|
+ mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
|
|
+
|
|
+ free_page((unsigned long)pages);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
|
|
+ unsigned long mtpt_idx)
|
|
+{
|
|
+ struct hns_roce_v2_mpt_entry *mpt_entry;
|
|
+ int ret;
|
|
+
|
|
mpt_entry = mb_buf;
|
|
memset(mpt_entry, 0, sizeof(*mpt_entry));
|
|
|
|
@@ -1686,7 +1732,6 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
|
|
mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
|
|
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
|
|
V2_MPT_BYTE_4_PD_S, mr->pd);
|
|
- mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
|
|
|
|
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
|
|
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
|
|
@@ -1700,13 +1745,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
|
|
(mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
|
|
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
|
|
(mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
|
|
- mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
|
|
|
|
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
|
|
mr->type == MR_TYPE_MR ? 0 : 1);
|
|
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
|
|
1);
|
|
- mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
|
|
|
|
mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
|
|
mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
|
|
@@ -1717,53 +1760,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
|
|
if (mr->type == MR_TYPE_DMA)
|
|
return 0;
|
|
|
|
- mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
|
|
-
|
|
- mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
|
|
- roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
|
|
- V2_MPT_BYTE_48_PBL_BA_H_S,
|
|
- upper_32_bits(mr->pbl_ba >> 3));
|
|
- mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
|
|
-
|
|
- pages = (u64 *)__get_free_page(GFP_KERNEL);
|
|
- if (!pages)
|
|
- return -ENOMEM;
|
|
-
|
|
- i = 0;
|
|
- for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
|
|
- len = sg_dma_len(sg) >> PAGE_SHIFT;
|
|
- for (j = 0; j < len; ++j) {
|
|
- page_addr = sg_dma_address(sg) +
|
|
- (j << mr->umem->page_shift);
|
|
- pages[i] = page_addr >> 6;
|
|
-
|
|
- /* Record the first 2 entry directly to MTPT table */
|
|
- if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
|
|
- goto found;
|
|
- i++;
|
|
- }
|
|
- }
|
|
+ ret = set_mtpt_pbl(mpt_entry, mr);
|
|
|
|
-found:
|
|
- mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
|
|
- roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
|
|
- V2_MPT_BYTE_56_PA0_H_S,
|
|
- upper_32_bits(pages[0]));
|
|
- mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
|
|
-
|
|
- mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
|
|
- roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
|
|
- V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
|
|
-
|
|
- free_page((unsigned long)pages);
|
|
-
|
|
- roce_set_field(mpt_entry->byte_64_buf_pa1,
|
|
- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
|
|
- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
|
|
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
|
|
- mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
|
|
-
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
|
|
@@ -1772,6 +1771,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
|
|
u64 size, void *mb_buf)
|
|
{
|
|
struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
|
|
+ int ret = 0;
|
|
|
|
if (flags & IB_MR_REREG_PD) {
|
|
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
|
|
@@ -1784,14 +1784,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
|
|
V2_MPT_BYTE_8_BIND_EN_S,
|
|
(mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
|
|
roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
|
|
- V2_MPT_BYTE_8_ATOMIC_EN_S,
|
|
- (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
|
|
+ V2_MPT_BYTE_8_ATOMIC_EN_S,
|
|
+ mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
|
|
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
|
|
- (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
|
|
+ mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
|
|
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
|
|
- (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
|
|
+ mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
|
|
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
|
|
- (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
|
|
+ mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
|
|
}
|
|
|
|
if (flags & IB_MR_REREG_TRANS) {
|
|
@@ -1800,21 +1800,13 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
|
|
mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
|
|
mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
|
|
|
|
- mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
|
|
- mpt_entry->pbl_ba_l =
|
|
- cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
|
|
- roce_set_field(mpt_entry->byte_48_mode_ba,
|
|
- V2_MPT_BYTE_48_PBL_BA_H_M,
|
|
- V2_MPT_BYTE_48_PBL_BA_H_S,
|
|
- upper_32_bits(mr->pbl_ba >> 3));
|
|
- mpt_entry->byte_48_mode_ba =
|
|
- cpu_to_le32(mpt_entry->byte_48_mode_ba);
|
|
-
|
|
mr->iova = iova;
|
|
mr->size = size;
|
|
+
|
|
+ ret = set_mtpt_pbl(mpt_entry, mr);
|
|
}
|
|
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
|
|
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
|
|
index d216e0d2921d..9e1cac8cb260 100644
|
|
--- a/drivers/infiniband/hw/mlx5/odp.c
|
|
+++ b/drivers/infiniband/hw/mlx5/odp.c
|
|
@@ -724,6 +724,7 @@ next_mr:
|
|
head = frame;
|
|
|
|
bcnt -= frame->bcnt;
|
|
+ offset = 0;
|
|
}
|
|
break;
|
|
|
|
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
|
|
index d53d954ac8af..183fe5c8ceb7 100644
|
|
--- a/drivers/infiniband/hw/mlx5/qp.c
|
|
+++ b/drivers/infiniband/hw/mlx5/qp.c
|
|
@@ -4413,17 +4413,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
|
|
goto out;
|
|
}
|
|
|
|
- if (wr->opcode == IB_WR_LOCAL_INV ||
|
|
- wr->opcode == IB_WR_REG_MR) {
|
|
+ if (wr->opcode == IB_WR_REG_MR) {
|
|
fence = dev->umr_fence;
|
|
next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
|
|
- } else if (wr->send_flags & IB_SEND_FENCE) {
|
|
- if (qp->next_fence)
|
|
- fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
|
|
- else
|
|
- fence = MLX5_FENCE_MODE_FENCE;
|
|
- } else {
|
|
- fence = qp->next_fence;
|
|
+ } else {
|
|
+ if (wr->send_flags & IB_SEND_FENCE) {
|
|
+ if (qp->next_fence)
|
|
+ fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
|
|
+ else
|
|
+ fence = MLX5_FENCE_MODE_FENCE;
|
|
+ } else {
|
|
+ fence = qp->next_fence;
|
|
+ }
|
|
}
|
|
|
|
switch (ibqp->qp_type) {
|
|
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
|
|
index 89ec0f64abfc..084bb4baebb5 100644
|
|
--- a/drivers/infiniband/sw/rdmavt/ah.c
|
|
+++ b/drivers/infiniband/sw/rdmavt/ah.c
|
|
@@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah);
|
|
* rvt_create_ah - create an address handle
|
|
* @pd: the protection domain
|
|
* @ah_attr: the attributes of the AH
|
|
+ * @udata: pointer to user's input output buffer information.
|
|
*
|
|
* This may be called from interrupt context.
|
|
*
|
|
* Return: newly allocated ah
|
|
*/
|
|
struct ib_ah *rvt_create_ah(struct ib_pd *pd,
|
|
- struct rdma_ah_attr *ah_attr)
|
|
+ struct rdma_ah_attr *ah_attr,
|
|
+ struct ib_udata *udata)
|
|
{
|
|
struct rvt_ah *ah;
|
|
struct rvt_dev_info *dev = ib_to_rvt(pd->device);
|
|
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
|
|
index 16105af99189..25271b48a683 100644
|
|
--- a/drivers/infiniband/sw/rdmavt/ah.h
|
|
+++ b/drivers/infiniband/sw/rdmavt/ah.h
|
|
@@ -51,7 +51,8 @@
|
|
#include <rdma/rdma_vt.h>
|
|
|
|
struct ib_ah *rvt_create_ah(struct ib_pd *pd,
|
|
- struct rdma_ah_attr *ah_attr);
|
|
+ struct rdma_ah_attr *ah_attr,
|
|
+ struct ib_udata *udata);
|
|
int rvt_destroy_ah(struct ib_ah *ibah);
|
|
int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
|
|
int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
|
|
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
|
|
index 55af04fa03a7..6c8dcb65ff03 100644
|
|
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
|
|
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
|
|
@@ -1441,6 +1441,9 @@ static void nic_remove(struct pci_dev *pdev)
|
|
{
|
|
struct nicpf *nic = pci_get_drvdata(pdev);
|
|
|
|
+ if (!nic)
|
|
+ return;
|
|
+
|
|
if (nic->flags & NIC_SRIOV_ENABLED)
|
|
pci_disable_sriov(pdev);
|
|
|
|
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
|
|
index 14374a856d30..6127697ede12 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
|
|
@@ -914,10 +914,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
|
|
}
|
|
|
|
ret = register_netdev(ndev);
|
|
- if (ret) {
|
|
- free_netdev(ndev);
|
|
+ if (ret)
|
|
goto alloc_fail;
|
|
- }
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
|
|
index e2f80cca9bed..0d2de6f67676 100644
|
|
--- a/drivers/net/ethernet/ibm/emac/emac.h
|
|
+++ b/drivers/net/ethernet/ibm/emac/emac.h
|
|
@@ -231,7 +231,7 @@ struct emac_regs {
|
|
#define EMAC_STACR_PHYE 0x00004000
|
|
#define EMAC_STACR_STAC_MASK 0x00003000
|
|
#define EMAC_STACR_STAC_READ 0x00001000
|
|
-#define EMAC_STACR_STAC_WRITE 0x00000800
|
|
+#define EMAC_STACR_STAC_WRITE 0x00002000
|
|
#define EMAC_STACR_OPBC_MASK 0x00000C00
|
|
#define EMAC_STACR_OPBC_50 0x00000000
|
|
#define EMAC_STACR_OPBC_66 0x00000400
|
|
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
|
|
index c54ebedca6da..c393cb2c0f16 100644
|
|
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
|
|
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
|
|
@@ -842,6 +842,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
|
|
nvm_word = E1000_INVM_DEFAULT_AL;
|
|
tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
|
|
igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
|
|
+ phy_word = E1000_PHY_PLL_UNCONF;
|
|
for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
|
|
/* check current state directly from internal PHY */
|
|
igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
|
|
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
|
|
index a8148c7126e5..9772016222c3 100644
|
|
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
|
|
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
|
|
@@ -2248,7 +2248,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
|
|
*autoneg = false;
|
|
|
|
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
|
|
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
|
|
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
|
|
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
|
|
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
|
|
*speed = IXGBE_LINK_SPEED_1GB_FULL;
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
|
|
index f11b45001cad..d290f0787dfb 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
|
|
@@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
|
|
|
|
tx_pause = !!(pause->tx_pause);
|
|
rx_pause = !!(pause->rx_pause);
|
|
- rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
|
|
- tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
|
|
+ rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
|
|
+ tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
|
|
|
|
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
|
|
priv->rx_skb_size + ETH_FCS_LEN,
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
|
|
index fe49384eba48..0d7fd3f043cf 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
|
|
@@ -3494,8 +3494,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
|
|
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
|
|
}
|
|
|
|
- /* MTU range: 46 - hw-specific max */
|
|
- dev->min_mtu = MLX4_EN_MIN_MTU;
|
|
+ /* MTU range: 68 - hw-specific max */
|
|
+ dev->min_mtu = ETH_MIN_MTU;
|
|
dev->max_mtu = priv->max_mtu;
|
|
|
|
mdev->pndev[port] = dev;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
|
|
index c3228b89df46..240f9c9ca943 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
|
|
@@ -161,7 +161,6 @@
|
|
#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
|
|
ETH_HLEN + PREAMBLE_LEN)
|
|
|
|
-#define MLX4_EN_MIN_MTU 46
|
|
/* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
|
|
* headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
|
|
*/
|
|
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
|
|
index 81045dfa1cd8..44f6e4873aad 100644
|
|
--- a/drivers/net/ethernet/realtek/8139cp.c
|
|
+++ b/drivers/net/ethernet/realtek/8139cp.c
|
|
@@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
|
|
struct cp_private *cp;
|
|
int handled = 0;
|
|
u16 status;
|
|
+ u16 mask;
|
|
|
|
if (unlikely(dev == NULL))
|
|
return IRQ_NONE;
|
|
@@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
|
|
|
|
spin_lock(&cp->lock);
|
|
|
|
+ mask = cpr16(IntrMask);
|
|
+ if (!mask)
|
|
+ goto out_unlock;
|
|
+
|
|
status = cpr16(IntrStatus);
|
|
if (!status || (status == 0xFFFF))
|
|
goto out_unlock;
|
|
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
|
|
index 733e35b7c4bb..20d1be2b070b 100644
|
|
--- a/drivers/net/phy/phy_device.c
|
|
+++ b/drivers/net/phy/phy_device.c
|
|
@@ -1738,20 +1738,17 @@ EXPORT_SYMBOL(genphy_loopback);
|
|
|
|
static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
|
|
{
|
|
- phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
|
|
- PHY_10BT_FEATURES);
|
|
-
|
|
switch (max_speed) {
|
|
- default:
|
|
- return -ENOTSUPP;
|
|
- case SPEED_1000:
|
|
- phydev->supported |= PHY_1000BT_FEATURES;
|
|
+ case SPEED_10:
|
|
+ phydev->supported &= ~PHY_100BT_FEATURES;
|
|
/* fall through */
|
|
case SPEED_100:
|
|
- phydev->supported |= PHY_100BT_FEATURES;
|
|
- /* fall through */
|
|
- case SPEED_10:
|
|
- phydev->supported |= PHY_10BT_FEATURES;
|
|
+ phydev->supported &= ~PHY_1000BT_FEATURES;
|
|
+ break;
|
|
+ case SPEED_1000:
|
|
+ break;
|
|
+ default:
|
|
+ return -ENOTSUPP;
|
|
}
|
|
|
|
return 0;
|
|
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
|
|
index 83060fb349f4..ad9db652874d 100644
|
|
--- a/drivers/net/phy/sfp-bus.c
|
|
+++ b/drivers/net/phy/sfp-bus.c
|
|
@@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
|
|
/* 1000Base-PX or 1000Base-BX10 */
|
|
if ((id->base.e_base_px || id->base.e_base_bx10) &&
|
|
br_min <= 1300 && br_max >= 1200)
|
|
- phylink_set(support, 1000baseX_Full);
|
|
+ phylink_set(modes, 1000baseX_Full);
|
|
|
|
/* For active or passive cables, select the link modes
|
|
* based on the bit rates and the cable compliance bytes.
|
|
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
|
|
index 573620771154..8c1abcba4cbd 100644
|
|
--- a/drivers/net/tun.c
|
|
+++ b/drivers/net/tun.c
|
|
@@ -2268,9 +2268,9 @@ static void tun_setup(struct net_device *dev)
|
|
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
|
|
struct netlink_ext_ack *extack)
|
|
{
|
|
- if (!data)
|
|
- return 0;
|
|
- return -EINVAL;
|
|
+ NL_SET_ERR_MSG(extack,
|
|
+ "tun/tap creation via rtnetlink is not supported.");
|
|
+ return -EOPNOTSUPP;
|
|
}
|
|
|
|
static size_t tun_get_size(const struct net_device *dev)
|
|
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
|
|
index c2ca6cd3fbe0..ad14fbfa1864 100644
|
|
--- a/drivers/net/virtio_net.c
|
|
+++ b/drivers/net/virtio_net.c
|
|
@@ -365,7 +365,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
|
|
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
|
|
struct receive_queue *rq,
|
|
struct page *page, unsigned int offset,
|
|
- unsigned int len, unsigned int truesize)
|
|
+ unsigned int len, unsigned int truesize,
|
|
+ bool hdr_valid)
|
|
{
|
|
struct sk_buff *skb;
|
|
struct virtio_net_hdr_mrg_rxbuf *hdr;
|
|
@@ -387,7 +388,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
|
|
else
|
|
hdr_padded_len = sizeof(struct padded_vnet_hdr);
|
|
|
|
- memcpy(hdr, p, hdr_len);
|
|
+ if (hdr_valid)
|
|
+ memcpy(hdr, p, hdr_len);
|
|
|
|
len -= hdr_len;
|
|
offset += hdr_padded_len;
|
|
@@ -739,7 +741,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
|
|
struct virtnet_rq_stats *stats)
|
|
{
|
|
struct page *page = buf;
|
|
- struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
|
|
+ struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
|
|
+ PAGE_SIZE, true);
|
|
|
|
stats->bytes += len - vi->hdr_len;
|
|
if (unlikely(!skb))
|
|
@@ -842,7 +845,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
|
|
rcu_read_unlock();
|
|
put_page(page);
|
|
head_skb = page_to_skb(vi, rq, xdp_page,
|
|
- offset, len, PAGE_SIZE);
|
|
+ offset, len,
|
|
+ PAGE_SIZE, false);
|
|
return head_skb;
|
|
}
|
|
break;
|
|
@@ -898,7 +902,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
|
|
goto err_skb;
|
|
}
|
|
|
|
- head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
|
|
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
|
|
curr_skb = head_skb;
|
|
|
|
if (unlikely(!curr_skb))
|
|
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
|
|
index 0ba301f7e8b4..b7b2659e02fa 100644
|
|
--- a/drivers/nvme/host/core.c
|
|
+++ b/drivers/nvme/host/core.c
|
|
@@ -3308,6 +3308,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
|
|
struct nvme_ns *ns, *next;
|
|
LIST_HEAD(ns_list);
|
|
|
|
+ /* prevent racing with ns scanning */
|
|
+ flush_work(&ctrl->scan_work);
|
|
+
|
|
/*
|
|
* The dead states indicates the controller was not gracefully
|
|
* disconnected. In that case, we won't be able to flush any data while
|
|
@@ -3463,7 +3466,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
|
|
nvme_mpath_stop(ctrl);
|
|
nvme_stop_keep_alive(ctrl);
|
|
flush_work(&ctrl->async_event_work);
|
|
- flush_work(&ctrl->scan_work);
|
|
cancel_work_sync(&ctrl->fw_act_work);
|
|
if (ctrl->ops->stop_ctrl)
|
|
ctrl->ops->stop_ctrl(ctrl);
|
|
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
|
|
index bb4a2003c097..60220de2db52 100644
|
|
--- a/drivers/nvme/host/nvme.h
|
|
+++ b/drivers/nvme/host/nvme.h
|
|
@@ -537,6 +537,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
|
|
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
|
|
struct nvme_id_ctrl *id)
|
|
{
|
|
+ if (ctrl->subsys->cmic & (1 << 3))
|
|
+ dev_warn(ctrl->device,
|
|
+"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
|
|
return 0;
|
|
}
|
|
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
|
|
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
|
|
index dc042017c293..b6a28de682e8 100644
|
|
--- a/drivers/nvme/host/rdma.c
|
|
+++ b/drivers/nvme/host/rdma.c
|
|
@@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
|
|
qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
|
|
if (ib_dma_mapping_error(ibdev, qe->dma)) {
|
|
kfree(qe->data);
|
|
+ qe->data = NULL;
|
|
return -ENOMEM;
|
|
}
|
|
|
|
@@ -816,6 +817,7 @@ out_free_tagset:
|
|
out_free_async_qe:
|
|
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
|
|
sizeof(struct nvme_command), DMA_TO_DEVICE);
|
|
+ ctrl->async_event_sqe.data = NULL;
|
|
out_free_queue:
|
|
nvme_rdma_free_queue(&ctrl->queues[0]);
|
|
return error;
|
|
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
|
|
index 4a9a673b4777..975050a69494 100644
|
|
--- a/drivers/pci/controller/dwc/pci-imx6.c
|
|
+++ b/drivers/pci/controller/dwc/pci-imx6.c
|
|
@@ -80,8 +80,6 @@ struct imx6_pcie {
|
|
#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
|
|
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
|
|
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
|
|
-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
|
|
-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
|
|
|
|
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
|
|
#define PCIE_PHY_CTRL_DATA_LOC 0
|
|
@@ -641,12 +639,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
|
|
return 0;
|
|
}
|
|
|
|
-static int imx6_pcie_link_up(struct dw_pcie *pci)
|
|
-{
|
|
- return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
|
|
- PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
|
|
-}
|
|
-
|
|
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
|
|
.host_init = imx6_pcie_host_init,
|
|
};
|
|
@@ -679,7 +671,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
|
|
}
|
|
|
|
static const struct dw_pcie_ops dw_pcie_ops = {
|
|
- .link_up = imx6_pcie_link_up,
|
|
+ /* No special ops needed, but pcie-designware still expects this struct */
|
|
};
|
|
|
|
static int imx6_pcie_probe(struct platform_device *pdev)
|
|
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
|
|
index e70e425f26f5..69c92843eb3b 100644
|
|
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
|
|
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
|
|
@@ -231,6 +231,7 @@ static const struct qusb2_phy_cfg sdm845_phy_cfg = {
|
|
.mask_core_ready = CORE_READY_STATUS,
|
|
.has_pll_override = true,
|
|
.autoresume_en = BIT(0),
|
|
+ .update_tune1_with_efuse = true,
|
|
};
|
|
|
|
static const char * const qusb2_phy_vreg_names[] = {
|
|
@@ -402,10 +403,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
|
|
|
|
/*
|
|
* Read efuse register having TUNE2/1 parameter's high nibble.
|
|
- * If efuse register shows value as 0x0, or if we fail to find
|
|
- * a valid efuse register settings, then use default value
|
|
- * as 0xB for high nibble that we have already set while
|
|
- * configuring phy.
|
|
+ * If efuse register shows value as 0x0 (indicating value is not
|
|
+ * fused), or if we fail to find a valid efuse register setting,
|
|
+ * then use default value for high nibble that we have already
|
|
+ * set while configuring the phy.
|
|
*/
|
|
val = nvmem_cell_read(qphy->cell, NULL);
|
|
if (IS_ERR(val) || !val[0]) {
|
|
@@ -415,12 +416,13 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
|
|
|
|
/* Fused TUNE1/2 value is the higher nibble only */
|
|
if (cfg->update_tune1_with_efuse)
|
|
- qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
|
|
- val[0] << 0x4);
|
|
+ qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
|
|
+ val[0] << HSTX_TRIM_SHIFT,
|
|
+ HSTX_TRIM_MASK);
|
|
else
|
|
- qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
|
|
- val[0] << 0x4);
|
|
-
|
|
+ qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
|
|
+ val[0] << HSTX_TRIM_SHIFT,
|
|
+ HSTX_TRIM_MASK);
|
|
}
|
|
|
|
static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode)
|
|
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
|
|
index fd77e46eb3b2..70a006ba4d05 100644
|
|
--- a/drivers/s390/cio/vfio_ccw_cp.c
|
|
+++ b/drivers/s390/cio/vfio_ccw_cp.c
|
|
@@ -387,8 +387,10 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
|
|
* orb specified one of the unsupported formats, we defer
|
|
* checking for IDAWs in unsupported formats to here.
|
|
*/
|
|
- if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
|
|
+ if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
|
|
+ kfree(p);
|
|
return -EOPNOTSUPP;
|
|
+ }
|
|
|
|
if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
|
|
break;
|
|
@@ -528,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
|
|
|
|
ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
|
|
if (ret < 0)
|
|
- goto out_init;
|
|
+ goto out_unpin;
|
|
|
|
/* Translate this direct ccw to a idal ccw. */
|
|
idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
|
|
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
|
|
index 508c61c669e7..e2be7da74343 100644
|
|
--- a/drivers/spi/spi-omap2-mcspi.c
|
|
+++ b/drivers/spi/spi-omap2-mcspi.c
|
|
@@ -1455,13 +1455,26 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
|
|
/* work with hotplug and coldplug */
|
|
MODULE_ALIAS("platform:omap2_mcspi");
|
|
|
|
-#ifdef CONFIG_SUSPEND
|
|
-static int omap2_mcspi_suspend_noirq(struct device *dev)
|
|
+static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
|
|
{
|
|
- return pinctrl_pm_select_sleep_state(dev);
|
|
+ struct spi_master *master = dev_get_drvdata(dev);
|
|
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
|
|
+ int error;
|
|
+
|
|
+ error = pinctrl_pm_select_sleep_state(dev);
|
|
+ if (error)
|
|
+ dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
|
|
+ __func__, error);
|
|
+
|
|
+ error = spi_master_suspend(master);
|
|
+ if (error)
|
|
+ dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
|
|
+ __func__, error);
|
|
+
|
|
+ return pm_runtime_force_suspend(dev);
|
|
}
|
|
|
|
-static int omap2_mcspi_resume_noirq(struct device *dev)
|
|
+static int __maybe_unused omap2_mcspi_resume(struct device *dev)
|
|
{
|
|
struct spi_master *master = dev_get_drvdata(dev);
|
|
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
|
|
@@ -1472,17 +1485,17 @@ static int omap2_mcspi_resume_noirq(struct device *dev)
|
|
dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
|
|
__func__, error);
|
|
|
|
- return 0;
|
|
-}
|
|
+ error = spi_master_resume(master);
|
|
+ if (error)
|
|
+ dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
|
|
+ __func__, error);
|
|
|
|
-#else
|
|
-#define omap2_mcspi_suspend_noirq NULL
|
|
-#define omap2_mcspi_resume_noirq NULL
|
|
-#endif
|
|
+ return pm_runtime_force_resume(dev);
|
|
+}
|
|
|
|
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
|
|
- .suspend_noirq = omap2_mcspi_suspend_noirq,
|
|
- .resume_noirq = omap2_mcspi_resume_noirq,
|
|
+ SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
|
|
+ omap2_mcspi_resume)
|
|
.runtime_resume = omap_mcspi_runtime_resume,
|
|
};
|
|
|
|
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
|
|
index c38298d960ff..4f120e72c7d2 100644
|
|
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
|
|
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
|
|
@@ -2289,7 +2289,7 @@ static int rtw_wx_read32(struct net_device *dev,
|
|
exit:
|
|
kfree(ptmp);
|
|
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
static int rtw_wx_write32(struct net_device *dev,
|
|
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
|
|
index 7442bc4c6433..dd9ae6f5d19c 100644
|
|
--- a/drivers/thunderbolt/switch.c
|
|
+++ b/drivers/thunderbolt/switch.c
|
|
@@ -864,6 +864,30 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
|
|
}
|
|
static DEVICE_ATTR(key, 0600, key_show, key_store);
|
|
|
|
+static void nvm_authenticate_start(struct tb_switch *sw)
|
|
+{
|
|
+ struct pci_dev *root_port;
|
|
+
|
|
+ /*
|
|
+ * During host router NVM upgrade we should not allow root port to
|
|
+ * go into D3cold because some root ports cannot trigger PME
|
|
+ * itself. To be on the safe side keep the root port in D0 during
|
|
+ * the whole upgrade process.
|
|
+ */
|
|
+ root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
|
|
+ if (root_port)
|
|
+ pm_runtime_get_noresume(&root_port->dev);
|
|
+}
|
|
+
|
|
+static void nvm_authenticate_complete(struct tb_switch *sw)
|
|
+{
|
|
+ struct pci_dev *root_port;
|
|
+
|
|
+ root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
|
|
+ if (root_port)
|
|
+ pm_runtime_put(&root_port->dev);
|
|
+}
|
|
+
|
|
static ssize_t nvm_authenticate_show(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
@@ -913,10 +937,18 @@ static ssize_t nvm_authenticate_store(struct device *dev,
|
|
|
|
sw->nvm->authenticating = true;
|
|
|
|
- if (!tb_route(sw))
|
|
+ if (!tb_route(sw)) {
|
|
+ /*
|
|
+ * Keep root port from suspending as long as the
|
|
+ * NVM upgrade process is running.
|
|
+ */
|
|
+ nvm_authenticate_start(sw);
|
|
ret = nvm_authenticate_host(sw);
|
|
- else
|
|
+ if (ret)
|
|
+ nvm_authenticate_complete(sw);
|
|
+ } else {
|
|
ret = nvm_authenticate_device(sw);
|
|
+ }
|
|
pm_runtime_mark_last_busy(&sw->dev);
|
|
pm_runtime_put_autosuspend(&sw->dev);
|
|
}
|
|
@@ -1336,6 +1368,10 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
|
|
if (ret <= 0)
|
|
return ret;
|
|
|
|
+ /* Now we can allow root port to suspend again */
|
|
+ if (!tb_route(sw))
|
|
+ nvm_authenticate_complete(sw);
|
|
+
|
|
if (status) {
|
|
tb_sw_info(sw, "switch flash authentication failed\n");
|
|
tb_switch_set_uuid(sw);
|
|
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 1000d864929c..0f026d445e31 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -401,12 +401,12 @@ done:
 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
 	struct usb_request	*req;
-	struct usb_request	*tmp;
 	unsigned long		flags;
 
 	/* fill unused rxq slots with some skb */
 	spin_lock_irqsave(&dev->req_lock, flags);
-	list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
+	while (!list_empty(&dev->rx_reqs)) {
+		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
 		list_del_init(&req->list);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
 
@@ -1125,7 +1125,6 @@ void gether_disconnect(struct gether *link)
 {
 	struct eth_dev		*dev = link->ioport;
 	struct usb_request	*req;
-	struct usb_request	*tmp;
 
 	WARN_ON(!dev);
 	if (!dev)
@@ -1142,7 +1141,8 @@ void gether_disconnect(struct gether *link)
 	 */
 	usb_ep_disable(link->in_ep);
 	spin_lock(&dev->req_lock);
-	list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) {
+	while (!list_empty(&dev->tx_reqs)) {
+		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
 		list_del(&req->list);
 
 		spin_unlock(&dev->req_lock);
@@ -1154,7 +1154,8 @@ void gether_disconnect(struct gether *link)
 
 	usb_ep_disable(link->out_ep);
 	spin_lock(&dev->req_lock);
-	list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
+	while (!list_empty(&dev->rx_reqs)) {
+		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
 		list_del(&req->list);
 
 		spin_unlock(&dev->req_lock);
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 3a16431da321..fcf13ef33b31 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2033,6 +2033,7 @@ static inline int machine_without_vbus_sense(void)
 {
 	return machine_is_omap_innovator()
 		|| machine_is_omap_osk()
+		|| machine_is_omap_palmte()
 		|| machine_is_sx1()
 		/* No known omap7xx boards with vbus sense */
 		|| cpu_is_omap7xx();
@@ -2041,7 +2042,7 @@ static inline int machine_without_vbus_sense(void)
 static int omap_udc_start(struct usb_gadget *g,
 		struct usb_gadget_driver *driver)
 {
-	int		status = -ENODEV;
+	int		status;
 	struct omap_ep	*ep;
 	unsigned long	flags;
 
@@ -2079,6 +2080,7 @@ static int omap_udc_start(struct usb_gadget *g,
 			goto done;
 		}
 	} else {
+		status = 0;
 		if (can_pullup(udc))
 			pullup_enable(udc);
 		else
@@ -2593,9 +2595,22 @@ omap_ep_setup(char *name, u8 addr, u8 type,
 
 static void omap_udc_release(struct device *dev)
 {
-	complete(udc->done);
+	pullup_disable(udc);
+	if (!IS_ERR_OR_NULL(udc->transceiver)) {
+		usb_put_phy(udc->transceiver);
+		udc->transceiver = NULL;
+	}
+	omap_writew(0, UDC_SYSCON1);
+	remove_proc_file();
+	if (udc->dc_clk) {
+		if (udc->clk_requested)
+			omap_udc_enable_clock(0);
+		clk_put(udc->hhc_clk);
+		clk_put(udc->dc_clk);
+	}
+	if (udc->done)
+		complete(udc->done);
 	kfree(udc);
-	udc = NULL;
 }
 
 static int
@@ -2627,6 +2642,7 @@ omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
 	udc->gadget.speed = USB_SPEED_UNKNOWN;
 	udc->gadget.max_speed = USB_SPEED_FULL;
 	udc->gadget.name = driver_name;
+	udc->gadget.quirk_ep_out_aligned_size = 1;
 	udc->transceiver = xceiv;
 
 	/* ep0 is special; put it right after the SETUP buffer */
@@ -2867,8 +2883,8 @@ bad_on_1710:
 	udc->clr_halt = UDC_RESET_EP;
 
 	/* USB general purpose IRQ:  ep0, state changes, dma, etc */
-	status = request_irq(pdev->resource[1].start, omap_udc_irq,
-			0, driver_name, udc);
+	status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
+				  omap_udc_irq, 0, driver_name, udc);
 	if (status != 0) {
 		ERR("can't get irq %d, err %d\n",
 			(int) pdev->resource[1].start, status);
@@ -2876,20 +2892,20 @@ bad_on_1710:
 	}
 
 	/* USB "non-iso" IRQ (PIO for all but ep0) */
-	status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
-			0, "omap_udc pio", udc);
+	status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
+				  omap_udc_pio_irq, 0, "omap_udc pio", udc);
 	if (status != 0) {
 		ERR("can't get irq %d, err %d\n",
 			(int) pdev->resource[2].start, status);
-		goto cleanup2;
+		goto cleanup1;
 	}
 #ifdef	USE_ISO
-	status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
-			0, "omap_udc iso", udc);
+	status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
+				  omap_udc_iso_irq, 0, "omap_udc iso", udc);
 	if (status != 0) {
 		ERR("can't get irq %d, err %d\n",
 			(int) pdev->resource[3].start, status);
-		goto cleanup3;
+		goto cleanup1;
 	}
 #endif
 	if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
@@ -2900,23 +2916,8 @@ bad_on_1710:
 	}
 
 	create_proc_file();
-	status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
-			omap_udc_release);
-	if (status)
-		goto cleanup4;
-
-	return 0;
-
-cleanup4:
-	remove_proc_file();
-
-#ifdef	USE_ISO
-cleanup3:
-	free_irq(pdev->resource[2].start, udc);
-#endif
-
-cleanup2:
-	free_irq(pdev->resource[1].start, udc);
+	return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
+					  omap_udc_release);
 
 cleanup1:
 	kfree(udc);
@@ -2943,42 +2944,15 @@ static int omap_udc_remove(struct platform_device *pdev)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 
-	if (!udc)
-		return -ENODEV;
-
-	usb_del_gadget_udc(&udc->gadget);
-	if (udc->driver)
-		return -EBUSY;
-
 	udc->done = &done;
 
-	pullup_disable(udc);
-	if (!IS_ERR_OR_NULL(udc->transceiver)) {
-		usb_put_phy(udc->transceiver);
-		udc->transceiver = NULL;
-	}
-	omap_writew(0, UDC_SYSCON1);
-
-	remove_proc_file();
-
-#ifdef	USE_ISO
-	free_irq(pdev->resource[3].start, udc);
-#endif
-	free_irq(pdev->resource[2].start, udc);
-	free_irq(pdev->resource[1].start, udc);
+	usb_del_gadget_udc(&udc->gadget);
 
-	if (udc->dc_clk) {
-		if (udc->clk_requested)
-			omap_udc_enable_clock(0);
-		clk_put(udc->hhc_clk);
-		clk_put(udc->dc_clk);
-	}
+	wait_for_completion(&done);
 
 	release_mem_region(pdev->resource[0].start,
 			pdev->resource[0].end - pdev->resource[0].start + 1);
 
-	wait_for_completion(&done);
-
 	return 0;
 }
 
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e12bb256036f..7ab6caef599c 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -251,25 +251,10 @@ static void release_memory_resource(struct resource *resource)
 	kfree(resource);
 }
 
-/*
- * Host memory not allocated to dom0. We can use this range for hotplug-based
- * ballooning.
- *
- * It's a type-less resource. Setting IORESOURCE_MEM will make resource
- * management algorithms (arch_remove_reservations()) look into guest e820,
- * which we don't want.
- */
-static struct resource hostmem_resource = {
-	.name   = "Host RAM",
-};
-
-void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
-{}
-
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
-	struct resource *res, *res_hostmem;
-	int ret = -ENOMEM;
+	struct resource *res;
+	int ret;
 
 	res = kzalloc(sizeof(*res), GFP_KERNEL);
 	if (!res)
@@ -278,42 +263,13 @@ static struct resource *additional_memory_resource(phys_addr_t size)
 	res->name = "System RAM";
 	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-	res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
-	if (res_hostmem) {
-		/* Try to grab a range from hostmem */
-		res_hostmem->name = "Host memory";
-		ret = allocate_resource(&hostmem_resource, res_hostmem,
-					size, 0, -1,
-					PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-	}
-
-	if (!ret) {
-		/*
-		 * Insert this resource into iomem. Because hostmem_resource
-		 * tracks portion of guest e820 marked as UNUSABLE noone else
-		 * should try to use it.
-		 */
-		res->start = res_hostmem->start;
-		res->end = res_hostmem->end;
-		ret = insert_resource(&iomem_resource, res);
-		if (ret < 0) {
-			pr_err("Can't insert iomem_resource [%llx - %llx]\n",
-				res->start, res->end);
-			release_memory_resource(res_hostmem);
-			res_hostmem = NULL;
-			res->start = res->end = 0;
-		}
-	}
-
-	if (ret) {
-		ret = allocate_resource(&iomem_resource, res,
-					size, 0, -1,
-					PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-		if (ret < 0) {
-			pr_err("Cannot allocate new System RAM resource\n");
-			kfree(res);
-			return NULL;
-		}
+	ret = allocate_resource(&iomem_resource, res,
+				size, 0, -1,
+				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+	if (ret < 0) {
+		pr_err("Cannot allocate new System RAM resource\n");
+		kfree(res);
+		return NULL;
 	}
 
 #ifdef CONFIG_SPARSEMEM
@@ -325,7 +281,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
 			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
 			       pfn, limit);
 			release_memory_resource(res);
-			release_memory_resource(res_hostmem);
 			return NULL;
 		}
 	}
@@ -747,8 +702,6 @@ static int __init balloon_init(void)
 	set_online_page_callback(&xen_online_page);
 	register_memory_notifier(&xen_memory_nb);
 	register_sysctl_table(xen_root);
-
-	arch_xen_balloon_init(&hostmem_resource);
 #endif
 
 #ifdef CONFIG_XEN_PV
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 2f11ca72a281..77224d8f3e6f 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -385,8 +385,8 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 out_error:
 	if (*evtchn >= 0)
 		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
-	kfree(map->active.data.in);
-	kfree(map->active.ring);
+	free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
+	free_page((unsigned long)map->active.ring);
 	return ret;
 }
 
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 23f1387b3ef7..e7df65d32c91 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -36,6 +36,7 @@
 #include <asm/xen/hypervisor.h>
 
 #include <xen/xen.h>
+#include <xen/xen-ops.h>
 #include <xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 479b7fdda124..071075d775a9 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -379,7 +379,7 @@ void afs_zap_data(struct afs_vnode *vnode)
 int afs_validate(struct afs_vnode *vnode, struct key *key)
 {
 	time64_t now = ktime_get_real_seconds();
-	bool valid = false;
+	bool valid;
 	int ret;
 
 	_enter("{v={%x:%u} fl=%lx},%x",
@@ -399,15 +399,21 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
 			vnode->cb_v_break = vnode->volume->cb_v_break;
 			valid = false;
 		} else if (vnode->status.type == AFS_FTYPE_DIR &&
-			   test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) &&
-			   vnode->cb_expires_at - 10 > now) {
-			valid = true;
-		} else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
-			   vnode->cb_expires_at - 10 > now) {
+			   (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) ||
+			    vnode->cb_expires_at - 10 <= now)) {
+			valid = false;
+		} else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
+			   vnode->cb_expires_at - 10 <= now) {
+			valid = false;
+		} else {
 			valid = true;
 		}
 	} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
 		valid = true;
+	} else {
+		vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
+		vnode->cb_v_break = vnode->volume->cb_v_break;
+		valid = false;
 	}
 
 	read_sequnlock_excl(&vnode->cb_lock);
diff --git a/fs/aio.c b/fs/aio.c
index b9350f3360c6..04c4d6218978 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
 		ret = ioprio_check_cap(iocb->aio_reqprio);
 		if (ret) {
 			pr_debug("aio ioprio check cap error: %d\n", ret);
+			fput(req->ki_filp);
 			return ret;
 		}
 
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index ba8950bfd9c7..84cb6e5ef36c 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -3344,7 +3344,8 @@ static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
 	kfree(m);
 }
 
-static void tail_append_pending_moves(struct pending_dir_move *moves,
+static void tail_append_pending_moves(struct send_ctx *sctx,
+				      struct pending_dir_move *moves,
 				      struct list_head *stack)
 {
 	if (list_empty(&moves->list)) {
@@ -3355,6 +3356,10 @@ static void tail_append_pending_moves(struct pending_dir_move *moves,
 		list_add_tail(&moves->list, stack);
 		list_splice_tail(&list, stack);
 	}
+	if (!RB_EMPTY_NODE(&moves->node)) {
+		rb_erase(&moves->node, &sctx->pending_dir_moves);
+		RB_CLEAR_NODE(&moves->node);
+	}
 }
 
 static int apply_children_dir_moves(struct send_ctx *sctx)
@@ -3369,7 +3374,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
 		return 0;
 
 	INIT_LIST_HEAD(&stack);
-	tail_append_pending_moves(pm, &stack);
+	tail_append_pending_moves(sctx, pm, &stack);
 
 	while (!list_empty(&stack)) {
 		pm = list_first_entry(&stack, struct pending_dir_move, list);
@@ -3380,7 +3385,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
 			goto out;
 		pm = get_pending_dir_moves(sctx, parent_ino);
 		if (pm)
-			tail_append_pending_moves(pm, &stack);
+			tail_append_pending_moves(sctx, pm, &stack);
 	}
 	return 0;
 
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
|
|
index 95983c744164..5ab411d4bc59 100644
|
|
--- a/fs/cachefiles/namei.c
|
|
+++ b/fs/cachefiles/namei.c
|
|
@@ -244,11 +244,13 @@ wait_for_old_object:
|
|
|
|
ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
|
|
|
|
- cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_retry);
|
|
+ cache->cache.ops->put_object(&xobject->fscache,
|
|
+ (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
|
|
goto try_again;
|
|
|
|
requeue:
|
|
- cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
|
|
+ cache->cache.ops->put_object(&xobject->fscache,
|
|
+ (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
|
|
_leave(" = -ETIMEDOUT");
|
|
return -ETIMEDOUT;
|
|
}
|
|
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
|
|
index 40f7595aad10..8a577409d030 100644
|
|
--- a/fs/cachefiles/rdwr.c
|
|
+++ b/fs/cachefiles/rdwr.c
|
|
@@ -535,7 +535,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
|
|
netpage->index, cachefiles_gfp);
|
|
if (ret < 0) {
|
|
if (ret == -EEXIST) {
|
|
+ put_page(backpage);
|
|
+ backpage = NULL;
|
|
put_page(netpage);
|
|
+ netpage = NULL;
|
|
fscache_retrieval_complete(op, 1);
|
|
continue;
|
|
}
|
|
@@ -608,7 +611,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
|
|
netpage->index, cachefiles_gfp);
|
|
if (ret < 0) {
|
|
if (ret == -EEXIST) {
|
|
+ put_page(backpage);
|
|
+ backpage = NULL;
|
|
put_page(netpage);
|
|
+ netpage = NULL;
|
|
fscache_retrieval_complete(op, 1);
|
|
continue;
|
|
}
|
|
@@ -962,11 +968,8 @@ void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
|
|
__releases(&object->fscache.cookie->lock)
|
|
{
|
|
struct cachefiles_object *object;
|
|
- struct cachefiles_cache *cache;
|
|
|
|
object = container_of(_object, struct cachefiles_object, fscache);
|
|
- cache = container_of(object->fscache.cache,
|
|
- struct cachefiles_cache, cache);
|
|
|
|
_enter("%p,{%lu}", object, page->index);
|
|
|
|
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
|
|
index 0a29a00aed2e..511e6c68156a 100644
|
|
--- a/fs/cachefiles/xattr.c
|
|
+++ b/fs/cachefiles/xattr.c
|
|
@@ -135,7 +135,8 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
|
|
struct dentry *dentry = object->dentry;
|
|
int ret;
|
|
|
|
- ASSERT(dentry);
|
|
+ if (!dentry)
|
|
+ return -ESTALE;
|
|
|
|
_enter("%p,#%d", object, auxdata->len);
|
|
|
|
diff --git a/fs/dax.c b/fs/dax.c
|
|
index b0cd1364c68f..3a2682a6c832 100644
|
|
--- a/fs/dax.c
|
|
+++ b/fs/dax.c
|
|
@@ -423,7 +423,7 @@ bool dax_lock_mapping_entry(struct page *page)
|
|
for (;;) {
|
|
mapping = READ_ONCE(page->mapping);
|
|
|
|
- if (!dax_mapping(mapping))
|
|
+ if (!mapping || !dax_mapping(mapping))
|
|
break;
|
|
|
|
/*
|
|
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
|
|
index 645158dc33f1..63707abcbeb3 100644
|
|
--- a/fs/exportfs/expfs.c
|
|
+++ b/fs/exportfs/expfs.c
|
|
@@ -77,7 +77,7 @@ static bool dentry_connected(struct dentry *dentry)
|
|
struct dentry *parent = dget_parent(dentry);
|
|
|
|
dput(dentry);
|
|
- if (IS_ROOT(dentry)) {
|
|
+ if (dentry == parent) {
|
|
dput(parent);
|
|
return false;
|
|
}
|
|
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
|
|
index 9edc920f651f..6d9cb1719de5 100644
|
|
--- a/fs/fscache/object.c
|
|
+++ b/fs/fscache/object.c
|
|
@@ -730,6 +730,9 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
|
|
|
|
if (awaken)
|
|
wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
|
|
+ if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
|
|
+ wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
|
|
+
|
|
|
|
/* Prevent a race with our last child, which has to signal EV_CLEARED
|
|
* before dropping our spinlock.
|
|
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
|
|
index 374b5688e29e..9bdff5e40626 100644
|
|
--- a/fs/hfs/btree.c
|
|
+++ b/fs/hfs/btree.c
|
|
@@ -329,13 +329,14 @@ void hfs_bmap_free(struct hfs_bnode *node)
|
|
|
|
nidx -= len * 8;
|
|
i = node->next;
|
|
- hfs_bnode_put(node);
|
|
if (!i) {
|
|
/* panic */;
|
|
pr_crit("unable to free bnode %u. bmap not found!\n",
|
|
node->this);
|
|
+ hfs_bnode_put(node);
|
|
return;
|
|
}
|
|
+ hfs_bnode_put(node);
|
|
node = hfs_bnode_find(tree, i);
|
|
if (IS_ERR(node))
|
|
return;
|
|
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
|
|
index de14b2b6881b..3de3bc4918b5 100644
|
|
--- a/fs/hfsplus/btree.c
|
|
+++ b/fs/hfsplus/btree.c
|
|
@@ -454,14 +454,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
|
|
|
|
nidx -= len * 8;
|
|
i = node->next;
|
|
- hfs_bnode_put(node);
|
|
if (!i) {
|
|
/* panic */;
|
|
pr_crit("unable to free bnode %u. "
|
|
"bmap not found!\n",
|
|
node->this);
|
|
+ hfs_bnode_put(node);
|
|
return;
|
|
}
|
|
+ hfs_bnode_put(node);
|
|
node = hfs_bnode_find(tree, i);
|
|
if (IS_ERR(node))
|
|
return;
|
|
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
index 86ac2c5b93fe..e0fe9a0f1bf1 100644
|
|
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
@@ -1733,7 +1733,8 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
|
|
if (fh)
|
|
hdr->args.fh = fh;
|
|
|
|
- if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
|
|
+ if (vers == 4 &&
|
|
+ !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
|
|
goto out_failed;
|
|
|
|
/*
|
|
@@ -1798,7 +1799,8 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
|
|
if (fh)
|
|
hdr->args.fh = fh;
|
|
|
|
- if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
|
|
+ if (vers == 4 &&
|
|
+ !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
|
|
goto out_failed;
|
|
|
|
/*
|
|
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
|
|
index 9f88188060db..4bf8d5854b27 100644
|
|
--- a/fs/ocfs2/export.c
|
|
+++ b/fs/ocfs2/export.c
|
|
@@ -125,10 +125,10 @@ check_err:
|
|
|
|
check_gen:
|
|
if (handle->ih_generation != inode->i_generation) {
|
|
- iput(inode);
|
|
trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
|
|
handle->ih_generation,
|
|
inode->i_generation);
|
|
+ iput(inode);
|
|
result = ERR_PTR(-ESTALE);
|
|
goto bail;
|
|
}
|
|
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
|
|
index 7eb3b0a6347e..f55f82ca3425 100644
|
|
--- a/fs/ocfs2/move_extents.c
|
|
+++ b/fs/ocfs2/move_extents.c
|
|
@@ -156,18 +156,14 @@ out:
|
|
}
|
|
|
|
/*
|
|
- * lock allocators, and reserving appropriate number of bits for
|
|
- * meta blocks and data clusters.
|
|
- *
|
|
- * in some cases, we don't need to reserve clusters, just let data_ac
|
|
- * be NULL.
|
|
+ * lock allocator, and reserve appropriate number of bits for
|
|
+ * meta blocks.
|
|
*/
|
|
-static int ocfs2_lock_allocators_move_extents(struct inode *inode,
|
|
+static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
|
|
struct ocfs2_extent_tree *et,
|
|
u32 clusters_to_move,
|
|
u32 extents_to_split,
|
|
struct ocfs2_alloc_context **meta_ac,
|
|
- struct ocfs2_alloc_context **data_ac,
|
|
int extra_blocks,
|
|
int *credits)
|
|
{
|
|
@@ -192,13 +188,6 @@ static int ocfs2_lock_allocators_move_extents(struct inode *inode,
|
|
goto out;
|
|
}
|
|
|
|
- if (data_ac) {
|
|
- ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
|
|
- if (ret) {
|
|
- mlog_errno(ret);
|
|
- goto out;
|
|
- }
|
|
- }
|
|
|
|
*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
|
|
|
|
@@ -257,10 +246,10 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
|
|
}
|
|
}
|
|
|
|
- ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
|
|
- &context->meta_ac,
|
|
- &context->data_ac,
|
|
- extra_blocks, &credits);
|
|
+ ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
|
|
+ *len, 1,
|
|
+ &context->meta_ac,
|
|
+ extra_blocks, &credits);
|
|
if (ret) {
|
|
mlog_errno(ret);
|
|
goto out;
|
|
@@ -283,6 +272,21 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
|
|
}
|
|
}
|
|
|
|
+ /*
|
|
+ * Make sure ocfs2_reserve_cluster is called after
|
|
+ * __ocfs2_flush_truncate_log, otherwise, dead lock may happen.
|
|
+ *
|
|
+ * If ocfs2_reserve_cluster is called
|
|
+ * before __ocfs2_flush_truncate_log, dead lock on global bitmap
|
|
+ * may happen.
|
|
+ *
|
|
+ */
|
|
+ ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
|
|
+ if (ret) {
|
|
+ mlog_errno(ret);
|
|
+ goto out_unlock_mutex;
|
|
+ }
|
|
+
|
|
handle = ocfs2_start_trans(osb, credits);
|
|
if (IS_ERR(handle)) {
|
|
ret = PTR_ERR(handle);
|
|
@@ -600,9 +604,10 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
|
|
}
|
|
}
|
|
|
|
- ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
|
|
- &context->meta_ac,
|
|
- NULL, extra_blocks, &credits);
|
|
+ ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
|
|
+ len, 1,
|
|
+ &context->meta_ac,
|
|
+ extra_blocks, &credits);
|
|
if (ret) {
|
|
mlog_errno(ret);
|
|
goto out;
|
|
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
|
|
index f4fd2e72add4..03cd59375abe 100644
|
|
--- a/fs/pstore/ram.c
|
|
+++ b/fs/pstore/ram.c
|
|
@@ -806,17 +806,14 @@ static int ramoops_probe(struct platform_device *pdev)
|
|
|
|
cxt->pstore.data = cxt;
|
|
/*
|
|
- * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we
|
|
- * have to handle dumps, we must have at least record_size buffer. And
|
|
- * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be
|
|
- * ZERO_SIZE_PTR).
|
|
+ * Since bufsize is only used for dmesg crash dumps, it
|
|
+ * must match the size of the dprz record (after PRZ header
|
|
+ * and ECC bytes have been accounted for).
|
|
*/
|
|
- if (cxt->console_size)
|
|
- cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
|
|
- cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
|
|
- cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
|
|
+ cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
|
|
+ cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
|
|
if (!cxt->pstore.buf) {
|
|
- pr_err("cannot allocate pstore buffer\n");
|
|
+ pr_err("cannot allocate pstore crash dump buffer\n");
|
|
err = -ENOMEM;
|
|
goto fail_clear;
|
|
}
|
|
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
|
|
index 499a20a5a010..273736f41be3 100644
|
|
--- a/fs/sysv/inode.c
|
|
+++ b/fs/sysv/inode.c
|
|
@@ -275,7 +275,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
|
|
}
|
|
}
|
|
brelse(bh);
|
|
- return 0;
|
|
+ return err;
|
|
}
|
|
|
|
int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
|
|
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
|
|
index 34cf0fdd7dc7..610815e3f1aa 100644
|
|
--- a/include/linux/fscache-cache.h
|
|
+++ b/include/linux/fscache-cache.h
|
|
@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
|
|
static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
|
|
int n_pages)
|
|
{
|
|
- atomic_sub(n_pages, &op->n_pages);
|
|
- if (atomic_read(&op->n_pages) <= 0)
|
|
+ if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
|
|
fscache_op_complete(&op->op, false);
|
|
}
|
|
|
|
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
|
|
index a15bc4d48752..30fcec375a3a 100644
|
|
--- a/include/linux/pstore.h
|
|
+++ b/include/linux/pstore.h
|
|
@@ -90,7 +90,10 @@ struct pstore_record {
|
|
*
|
|
* @buf_lock: spinlock to serialize access to @buf
|
|
* @buf: preallocated crash dump buffer
|
|
- * @bufsize: size of @buf available for crash dump writes
|
|
+ * @bufsize: size of @buf available for crash dump bytes (must match
|
|
+ * smallest number of bytes available for writing to a
|
|
+ * backend entry, since compressed bytes don't take kindly
|
|
+ * to being truncated)
|
|
*
|
|
* @read_mutex: serializes @open, @read, @close, and @erase callbacks
|
|
* @flags: bitfield of frontends the backend can accept writes for
|
|
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
|
|
index e6ef9cc05e60..60a2e7646985 100644
|
|
--- a/include/linux/skbuff.h
|
|
+++ b/include/linux/skbuff.h
|
|
@@ -1355,6 +1355,17 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
|
|
}
|
|
}
|
|
|
|
+static inline void skb_mark_not_on_list(struct sk_buff *skb)
|
|
+{
|
|
+ skb->next = NULL;
|
|
+}
|
|
+
|
|
+static inline void skb_list_del_init(struct sk_buff *skb)
|
|
+{
|
|
+ __list_del_entry(&skb->list);
|
|
+ skb_mark_not_on_list(skb);
|
|
+}
|
|
+
|
|
/**
|
|
* skb_queue_empty - check if a queue is empty
|
|
* @list: queue head
|
|
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
|
|
index 6c1eecd56a4d..beeeed126872 100644
|
|
--- a/include/net/neighbour.h
|
|
+++ b/include/net/neighbour.h
|
|
@@ -453,6 +453,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
|
|
|
|
static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
|
|
{
|
|
+ unsigned int hh_alen = 0;
|
|
unsigned int seq;
|
|
unsigned int hh_len;
|
|
|
|
@@ -460,16 +461,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
|
|
seq = read_seqbegin(&hh->hh_lock);
|
|
hh_len = hh->hh_len;
|
|
if (likely(hh_len <= HH_DATA_MOD)) {
|
|
- /* this is inlined by gcc */
|
|
- memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
|
|
+ hh_alen = HH_DATA_MOD;
|
|
+
|
|
+ /* skb_push() would proceed silently if we have room for
|
|
+ * the unaligned size but not for the aligned size:
|
|
+ * check headroom explicitly.
|
|
+ */
|
|
+ if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
|
|
+ /* this is inlined by gcc */
|
|
+ memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
|
|
+ HH_DATA_MOD);
|
|
+ }
|
|
} else {
|
|
- unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
|
|
+ hh_alen = HH_DATA_ALIGN(hh_len);
|
|
|
|
- memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
|
|
+ if (likely(skb_headroom(skb) >= hh_alen)) {
|
|
+ memcpy(skb->data - hh_alen, hh->hh_data,
|
|
+ hh_alen);
|
|
+ }
|
|
}
|
|
} while (read_seqretry(&hh->hh_lock, seq));
|
|
|
|
- skb_push(skb, hh_len);
|
|
+ if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
|
|
+ kfree_skb(skb);
|
|
+ return NET_XMIT_DROP;
|
|
+ }
|
|
+
|
|
+ __skb_push(skb, hh_len);
|
|
return dev_queue_xmit(skb);
|
|
}
|
|
|
|
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
|
|
index cd24be4c4a99..13d55206bb9f 100644
|
|
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
|
|
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
|
|
@@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
|
|
const struct nf_nat_range2 *range,
|
|
const struct net_device *out);
|
|
|
|
-void nf_nat_masquerade_ipv4_register_notifier(void);
|
|
+int nf_nat_masquerade_ipv4_register_notifier(void);
|
|
void nf_nat_masquerade_ipv4_unregister_notifier(void);
|
|
|
|
#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
|
|
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
|
|
index 0c3b5ebf0bb8..2917bf95c437 100644
|
|
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
|
|
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
|
|
@@ -5,7 +5,7 @@
|
|
unsigned int
|
|
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
|
|
const struct net_device *out);
|
|
-void nf_nat_masquerade_ipv6_register_notifier(void);
|
|
+int nf_nat_masquerade_ipv6_register_notifier(void);
|
|
void nf_nat_masquerade_ipv6_unregister_notifier(void);
|
|
|
|
#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
|
|
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
|
|
index a11f93790476..feada358d872 100644
|
|
--- a/include/net/sctp/structs.h
|
|
+++ b/include/net/sctp/structs.h
|
|
@@ -2075,6 +2075,8 @@ struct sctp_association {
|
|
|
|
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
|
|
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
|
|
+
|
|
+ struct rcu_head rcu;
|
|
};
|
|
|
|
|
|
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
|
|
index 61f410fd74e4..4914b93a23f2 100644
|
|
--- a/include/xen/balloon.h
|
|
+++ b/include/xen/balloon.h
|
|
@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
|
|
{
|
|
}
|
|
#endif
|
|
-
|
|
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
|
|
-struct resource;
|
|
-void arch_xen_balloon_init(struct resource *hostmem_resource);
|
|
-#endif
|
|
diff --git a/init/initramfs.c b/init/initramfs.c
|
|
index 640557788026..f6f4a1e4cd54 100644
|
|
--- a/init/initramfs.c
|
|
+++ b/init/initramfs.c
|
|
@@ -291,16 +291,6 @@ static int __init do_reset(void)
|
|
return 1;
|
|
}
|
|
|
|
-static int __init maybe_link(void)
|
|
-{
|
|
- if (nlink >= 2) {
|
|
- char *old = find_link(major, minor, ino, mode, collected);
|
|
- if (old)
|
|
- return (ksys_link(old, collected) < 0) ? -1 : 1;
|
|
- }
|
|
- return 0;
|
|
-}
|
|
-
|
|
static void __init clean_path(char *path, umode_t fmode)
|
|
{
|
|
struct kstat st;
|
|
@@ -313,6 +303,18 @@ static void __init clean_path(char *path, umode_t fmode)
|
|
}
|
|
}
|
|
|
|
+static int __init maybe_link(void)
|
|
+{
|
|
+ if (nlink >= 2) {
|
|
+ char *old = find_link(major, minor, ino, mode, collected);
|
|
+ if (old) {
|
|
+ clean_path(collected, 0);
|
|
+ return (ksys_link(old, collected) < 0) ? -1 : 1;
|
|
+ }
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static __initdata int wfd;
|
|
|
|
static int __init do_name(void)
|
|
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
|
|
index 830d7f095748..fc1605aee5ea 100644
|
|
--- a/kernel/bpf/local_storage.c
|
|
+++ b/kernel/bpf/local_storage.c
|
|
@@ -138,7 +138,8 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
|
|
return -ENOENT;
|
|
|
|
new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
|
|
- map->value_size, __GFP_ZERO | GFP_USER,
|
|
+ map->value_size,
|
|
+ __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
|
|
map->numa_node);
|
|
if (!new)
|
|
return -ENOMEM;
|
|
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
|
|
index 5780876ac81a..56acfbb80104 100644
|
|
--- a/kernel/bpf/verifier.c
|
|
+++ b/kernel/bpf/verifier.c
|
|
@@ -5283,7 +5283,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
|
|
return;
|
|
/* NOTE: fake 'exit' subprog should be updated as well. */
|
|
for (i = 0; i <= env->subprog_cnt; i++) {
|
|
- if (env->subprog_info[i].start < off)
|
|
+ if (env->subprog_info[i].start <= off)
|
|
continue;
|
|
env->subprog_info[i].start += len - 1;
|
|
}
|
|
diff --git a/kernel/kcov.c b/kernel/kcov.c
|
|
index 3ebd09efe72a..97959d7b77e2 100644
|
|
--- a/kernel/kcov.c
|
|
+++ b/kernel/kcov.c
|
|
@@ -56,7 +56,7 @@ struct kcov {
|
|
struct task_struct *t;
|
|
};
|
|
|
|
-static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
|
|
+static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
|
|
{
|
|
unsigned int mode;
|
|
|
|
@@ -78,7 +78,7 @@ static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
|
|
return mode == needed_mode;
|
|
}
|
|
|
|
-static unsigned long canonicalize_ip(unsigned long ip)
|
|
+static notrace unsigned long canonicalize_ip(unsigned long ip)
|
|
{
|
|
#ifdef CONFIG_RANDOMIZE_BASE
|
|
ip -= kaslr_offset();
|
|
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
|
|
index 08fcfe440c63..9864a35c8bb5 100644
|
|
--- a/kernel/trace/bpf_trace.c
|
|
+++ b/kernel/trace/bpf_trace.c
|
|
@@ -196,11 +196,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
|
|
i++;
|
|
} else if (fmt[i] == 'p' || fmt[i] == 's') {
|
|
mod[fmt_cnt]++;
|
|
- i++;
|
|
- if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
|
|
+ /* disallow any further format extensions */
|
|
+ if (fmt[i + 1] != 0 &&
|
|
+ !isspace(fmt[i + 1]) &&
|
|
+ !ispunct(fmt[i + 1]))
|
|
return -EINVAL;
|
|
fmt_cnt++;
|
|
- if (fmt[i - 1] == 's') {
|
|
+ if (fmt[i] == 's') {
|
|
if (str_seen)
|
|
/* allow only one '%s' per fmt string */
|
|
return -EINVAL;
|
|
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
|
|
index 70935ed91125..14afeeb7d6ef 100644
|
|
--- a/lib/debugobjects.c
|
|
+++ b/lib/debugobjects.c
|
|
@@ -135,7 +135,6 @@ static void fill_pool(void)
|
|
if (!new)
|
|
return;
|
|
|
|
- kmemleak_ignore(new);
|
|
raw_spin_lock_irqsave(&pool_lock, flags);
|
|
hlist_add_head(&new->node, &obj_pool);
|
|
debug_objects_allocated++;
|
|
@@ -1128,7 +1127,6 @@ static int __init debug_objects_replace_static_objects(void)
|
|
obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
|
|
if (!obj)
|
|
goto free;
|
|
- kmemleak_ignore(obj);
|
|
hlist_add_head(&obj->node, &objects);
|
|
}
|
|
|
|
@@ -1184,7 +1182,8 @@ void __init debug_objects_mem_init(void)
|
|
|
|
obj_cache = kmem_cache_create("debug_objects_cache",
|
|
sizeof (struct debug_obj), 0,
|
|
- SLAB_DEBUG_OBJECTS, NULL);
|
|
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
|
|
+ NULL);
|
|
|
|
if (!obj_cache || debug_objects_replace_static_objects()) {
|
|
debug_objects_enabled = 0;
|
|
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
|
|
index b721631d78ab..6a62b2421cdf 100644
|
|
--- a/mm/page_alloc.c
|
|
+++ b/mm/page_alloc.c
|
|
@@ -5733,8 +5733,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
|
|
unsigned long size)
|
|
{
|
|
struct pglist_data *pgdat = zone->zone_pgdat;
|
|
+ int zone_idx = zone_idx(zone) + 1;
|
|
|
|
- pgdat->nr_zones = zone_idx(zone) + 1;
|
|
+ if (zone_idx > pgdat->nr_zones)
|
|
+ pgdat->nr_zones = zone_idx;
|
|
|
|
zone->zone_start_pfn = zone_start_pfn;
|
|
|
|
diff --git a/net/core/dev.c b/net/core/dev.c
|
|
index 22af88c47756..1f1aae27d41f 100644
|
|
--- a/net/core/dev.c
|
|
+++ b/net/core/dev.c
|
|
@@ -2161,6 +2161,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
|
|
return active;
|
|
}
|
|
|
|
+static void reset_xps_maps(struct net_device *dev,
|
|
+ struct xps_dev_maps *dev_maps,
|
|
+ bool is_rxqs_map)
|
|
+{
|
|
+ if (is_rxqs_map) {
|
|
+ static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
|
|
+ RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
|
|
+ } else {
|
|
+ RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
|
|
+ }
|
|
+ static_key_slow_dec_cpuslocked(&xps_needed);
|
|
+ kfree_rcu(dev_maps, rcu);
|
|
+}
|
|
+
|
|
static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
|
|
struct xps_dev_maps *dev_maps, unsigned int nr_ids,
|
|
u16 offset, u16 count, bool is_rxqs_map)
|
|
@@ -2172,18 +2186,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
|
|
j < nr_ids;)
|
|
active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
|
|
count);
|
|
- if (!active) {
|
|
- if (is_rxqs_map) {
|
|
- RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
|
|
- } else {
|
|
- RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
|
|
+ if (!active)
|
|
+ reset_xps_maps(dev, dev_maps, is_rxqs_map);
|
|
|
|
- for (i = offset + (count - 1); count--; i--)
|
|
- netdev_queue_numa_node_write(
|
|
- netdev_get_tx_queue(dev, i),
|
|
- NUMA_NO_NODE);
|
|
+ if (!is_rxqs_map) {
|
|
+ for (i = offset + (count - 1); count--; i--) {
|
|
+ netdev_queue_numa_node_write(
|
|
+ netdev_get_tx_queue(dev, i),
|
|
+ NUMA_NO_NODE);
|
|
}
|
|
- kfree_rcu(dev_maps, rcu);
|
|
}
|
|
}
|
|
|
|
@@ -2220,10 +2231,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
|
|
false);
|
|
|
|
out_no_maps:
|
|
- if (static_key_enabled(&xps_rxqs_needed))
|
|
- static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
|
|
-
|
|
- static_key_slow_dec_cpuslocked(&xps_needed);
|
|
mutex_unlock(&xps_map_mutex);
|
|
cpus_read_unlock();
|
|
}
|
|
@@ -2341,9 +2348,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
|
|
if (!new_dev_maps)
|
|
goto out_no_new_maps;
|
|
|
|
- static_key_slow_inc_cpuslocked(&xps_needed);
|
|
- if (is_rxqs_map)
|
|
- static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
|
|
+ if (!dev_maps) {
|
|
+ /* Increment static keys at most once per type */
|
|
+ static_key_slow_inc_cpuslocked(&xps_needed);
|
|
+ if (is_rxqs_map)
|
|
+ static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
|
|
+ }
|
|
|
|
for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
|
|
j < nr_ids;) {
|
|
@@ -2441,13 +2451,8 @@ out_no_new_maps:
|
|
}
|
|
|
|
/* free map if not active */
|
|
- if (!active) {
|
|
- if (is_rxqs_map)
|
|
- RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
|
|
- else
|
|
- RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
|
|
- kfree_rcu(dev_maps, rcu);
|
|
- }
|
|
+ if (!active)
|
|
+ reset_xps_maps(dev, dev_maps, is_rxqs_map);
|
|
|
|
out_no_maps:
|
|
mutex_unlock(&xps_map_mutex);
|
|
@@ -4981,7 +4986,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
|
|
struct net_device *orig_dev = skb->dev;
|
|
struct packet_type *pt_prev = NULL;
|
|
|
|
- list_del(&skb->list);
|
|
+ skb_list_del_init(skb);
|
|
__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
|
|
if (!pt_prev)
|
|
continue;
|
|
@@ -5137,7 +5142,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
|
|
INIT_LIST_HEAD(&sublist);
|
|
list_for_each_entry_safe(skb, next, head, list) {
|
|
net_timestamp_check(netdev_tstamp_prequeue, skb);
|
|
- list_del(&skb->list);
|
|
+ skb_list_del_init(skb);
|
|
if (!skb_defer_rx_timestamp(skb))
|
|
list_add_tail(&skb->list, &sublist);
|
|
}
|
|
@@ -5148,7 +5153,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
|
|
rcu_read_lock();
|
|
list_for_each_entry_safe(skb, next, head, list) {
|
|
xdp_prog = rcu_dereference(skb->dev->xdp_prog);
|
|
- list_del(&skb->list);
|
|
+ skb_list_del_init(skb);
|
|
if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
|
|
list_add_tail(&skb->list, &sublist);
|
|
}
|
|
@@ -5167,7 +5172,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
|
|
|
|
if (cpu >= 0) {
|
|
/* Will be handled, remove from list */
|
|
- list_del(&skb->list);
|
|
+ skb_list_del_init(skb);
|
|
enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
|
|
}
|
|
}
|
|
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
|
|
index 6e5d61a20a70..ebde98b565e9 100644
|
|
--- a/net/core/rtnetlink.c
|
|
+++ b/net/core/rtnetlink.c
|
|
@@ -3730,6 +3730,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
|
|
{
|
|
int err;
|
|
|
|
+ if (dev->type != ARPHRD_ETHER)
|
|
+ return -EINVAL;
|
|
+
|
|
netif_addr_lock_bh(dev);
|
|
err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
|
|
if (err)
|
|
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
|
|
index cb8fa5d7afe1..f686d7761acb 100644
|
|
--- a/net/ipv4/ip_fragment.c
|
|
+++ b/net/ipv4/ip_fragment.c
|
|
@@ -513,6 +513,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
|
|
struct rb_node *rbn;
|
|
int len;
|
|
int ihlen;
|
|
+ int delta;
|
|
int err;
|
|
u8 ecn;
|
|
|
|
@@ -554,10 +555,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
|
|
if (len > 65535)
|
|
goto out_oversize;
|
|
|
|
+ delta = - head->truesize;
|
|
+
|
|
/* Head of list must not be cloned. */
|
|
if (skb_unclone(head, GFP_ATOMIC))
|
|
goto out_nomem;
|
|
|
|
+ delta += head->truesize;
|
|
+ if (delta)
|
|
+ add_frag_mem_limit(qp->q.net, delta);
|
|
+
|
|
/* If the first fragment is fragmented itself, we split
|
|
* it to two chunks: the first with data and paged part
|
|
* and the second, holding only fragments. */
|
|
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
|
|
index 3196cf58f418..27c863f6dd83 100644
|
|
--- a/net/ipv4/ip_input.c
|
|
+++ b/net/ipv4/ip_input.c
|
|
@@ -551,7 +551,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
|
|
list_for_each_entry_safe(skb, next, head, list) {
|
|
struct dst_entry *dst;
|
|
|
|
- list_del(&skb->list);
|
|
+ skb_list_del_init(skb);
|
|
/* if ingress device is enslaved to an L3 master device pass the
|
|
* skb to its handler for processing
|
|
*/
|
|
@@ -598,7 +598,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
|
|
struct net_device *dev = skb->dev;
|
|
struct net *net = dev_net(dev);
|
|
|
|
- list_del(&skb->list);
|
|
+ skb_list_del_init(skb);
|
|
skb = ip_rcv_core(skb, net);
|
|
if (skb == NULL)
|
|
continue;
|
|
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
|
|
index ce1512b02cb2..fd3f9e8a74da 100644
|
|
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
|
|
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
|
|
@@ -81,9 +81,12 @@ static int __init masquerade_tg_init(void)
|
|
int ret;
|
|
|
|
ret = xt_register_target(&masquerade_tg_reg);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
- if (ret == 0)
|
|
- nf_nat_masquerade_ipv4_register_notifier();
|
|
+ ret = nf_nat_masquerade_ipv4_register_notifier();
|
|
+ if (ret)
|
|
+ xt_unregister_target(&masquerade_tg_reg);
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
|
|
index ad3aeff152ed..4c7fcd32f8e6 100644
|
|
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
|
|
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
|
|
@@ -131,28 +131,50 @@ static struct notifier_block masq_inet_notifier = {
|
|
.notifier_call = masq_inet_event,
|
|
};
|
|
|
|
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
|
|
+static int masq_refcnt;
|
|
+static DEFINE_MUTEX(masq_mutex);
|
|
|
|
-void nf_nat_masquerade_ipv4_register_notifier(void)
|
|
+int nf_nat_masquerade_ipv4_register_notifier(void)
|
|
{
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&masq_mutex);
|
|
/* check if the notifier was already set */
|
|
- if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
|
|
- return;
|
|
+ if (++masq_refcnt > 1)
|
|
+ goto out_unlock;
|
|
|
|
/* Register for device down reports */
|
|
- register_netdevice_notifier(&masq_dev_notifier);
|
|
+ ret = register_netdevice_notifier(&masq_dev_notifier);
|
|
+ if (ret)
|
|
+ goto err_dec;
|
|
/* Register IP address change reports */
|
|
- register_inetaddr_notifier(&masq_inet_notifier);
|
|
+ ret = register_inetaddr_notifier(&masq_inet_notifier);
|
|
+ if (ret)
|
|
+ goto err_unregister;
|
|
+
|
|
+ mutex_unlock(&masq_mutex);
|
|
+ return ret;
|
|
+
|
|
+err_unregister:
|
|
+ unregister_netdevice_notifier(&masq_dev_notifier);
|
|
+err_dec:
|
|
+ masq_refcnt--;
|
|
+out_unlock:
|
|
+ mutex_unlock(&masq_mutex);
|
|
+ return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
|
|
|
|
void nf_nat_masquerade_ipv4_unregister_notifier(void)
|
|
{
|
|
+ mutex_lock(&masq_mutex);
|
|
/* check if the notifier still has clients */
|
|
- if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
|
|
- return;
|
|
+ if (--masq_refcnt > 0)
|
|
+ goto out_unlock;
|
|
|
|
unregister_netdevice_notifier(&masq_dev_notifier);
|
|
unregister_inetaddr_notifier(&masq_inet_notifier);
|
|
+out_unlock:
|
|
+ mutex_unlock(&masq_mutex);
|
|
}
|
|
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
|
|
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
|
|
index f1193e1e928a..6847de1d1db8 100644
|
|
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
|
|
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
|
|
@@ -69,7 +69,9 @@ static int __init nft_masq_ipv4_module_init(void)
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
- nf_nat_masquerade_ipv4_register_notifier();
|
|
+ ret = nf_nat_masquerade_ipv4_register_notifier();
|
|
+ if (ret)
|
|
+ nft_unregister_expr(&nft_masq_ipv4_type);
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
|
|
index 68f65ddf9e3c..bd134e3a0473 100644
|
|
--- a/net/ipv4/tcp_output.c
|
|
+++ b/net/ipv4/tcp_output.c
|
|
@@ -1902,7 +1902,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
|
|
* This algorithm is from John Heffner.
|
|
*/
|
|
static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
|
|
- bool *is_cwnd_limited, u32 max_segs)
|
|
+ bool *is_cwnd_limited,
|
|
+ bool *is_rwnd_limited,
|
|
+ u32 max_segs)
|
|
{
|
|
const struct inet_connection_sock *icsk = inet_csk(sk);
|
|
u32 age, send_win, cong_win, limit, in_flight;
|
|
@@ -1910,9 +1912,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
|
|
struct sk_buff *head;
|
|
int win_divisor;
|
|
|
|
- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
|
|
- goto send_now;
|
|
-
|
|
if (icsk->icsk_ca_state >= TCP_CA_Recovery)
|
|
goto send_now;
|
|
|
|
@@ -1971,10 +1970,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
|
|
if (age < (tp->srtt_us >> 4))
|
|
goto send_now;
|
|
|
|
- /* Ok, it looks like it is advisable to defer. */
|
|
+ /* Ok, it looks like it is advisable to defer.
|
|
+ * Three cases are tracked :
|
|
+ * 1) We are cwnd-limited
|
|
+ * 2) We are rwnd-limited
|
|
+ * 3) We are application limited.
|
|
+ */
|
|
+ if (cong_win < send_win) {
|
|
+ if (cong_win <= skb->len) {
|
|
+ *is_cwnd_limited = true;
|
|
+ return true;
|
|
+ }
|
|
+ } else {
|
|
+ if (send_win <= skb->len) {
|
|
+ *is_rwnd_limited = true;
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
|
|
- if (cong_win < send_win && cong_win <= skb->len)
|
|
- *is_cwnd_limited = true;
|
|
+ /* If this packet won't get more data, do not wait. */
|
|
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
|
|
+ goto send_now;
|
|
|
|
return true;
|
|
|
|
@@ -2338,7 +2354,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
|
|
} else {
|
|
if (!push_one &&
|
|
tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
|
|
- max_segs))
|
|
+ &is_rwnd_limited, max_segs))
|
|
break;
|
|
}
|
|
|
|
@@ -2476,15 +2492,18 @@ void tcp_send_loss_probe(struct sock *sk)
|
|
goto rearm_timer;
|
|
}
|
|
skb = skb_rb_last(&sk->tcp_rtx_queue);
|
|
+ if (unlikely(!skb)) {
|
|
+ WARN_ONCE(tp->packets_out,
|
|
+ "invalid inflight: %u state %u cwnd %u mss %d\n",
|
|
+ tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
|
|
+ inet_csk(sk)->icsk_pending = 0;
|
|
+ return;
|
|
+ }
|
|
|
|
/* At most one outstanding TLP retransmission. */
|
|
if (tp->tlp_high_seq)
|
|
goto rearm_timer;
|
|
|
|
- /* Retransmit last segment. */
|
|
- if (WARN_ON(!skb))
|
|
- goto rearm_timer;
|
|
-
|
|
if (skb_still_in_host_queue(sk, skb))
|
|
goto rearm_timer;
|
|
|
|
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
|
|
index 6242682be876..6b74523fc1c4 100644
|
|
--- a/net/ipv6/ip6_input.c
|
|
+++ b/net/ipv6/ip6_input.c
|
|
@@ -95,7 +95,7 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
|
|
list_for_each_entry_safe(skb, next, head, list) {
|
|
struct dst_entry *dst;
|
|
|
|
- list_del(&skb->list);
|
|
+ skb_list_del_init(skb);
|
|
/* if ingress device is enslaved to an L3 master device pass the
|
|
* skb to its handler for processing
|
|
*/
|
|
@@ -295,7 +295,7 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
|
|
struct net_device *dev = skb->dev;
|
|
struct net *net = dev_net(dev);
|
|
|
|
- list_del(&skb->list);
|
|
+ skb_list_del_init(skb);
|
|
skb = ip6_rcv_core(skb, dev, net);
|
|
if (skb == NULL)
|
|
continue;
|
|
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
|
|
index f9f8f554d141..2694def1e72c 100644
|
|
--- a/net/ipv6/ip6_output.c
|
|
+++ b/net/ipv6/ip6_output.c
|
|
@@ -195,37 +195,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
|
|
const struct ipv6_pinfo *np = inet6_sk(sk);
|
|
struct in6_addr *first_hop = &fl6->daddr;
|
|
struct dst_entry *dst = skb_dst(skb);
|
|
+ unsigned int head_room;
|
|
struct ipv6hdr *hdr;
|
|
u8 proto = fl6->flowi6_proto;
|
|
int seg_len = skb->len;
|
|
int hlimit = -1;
|
|
u32 mtu;
|
|
|
|
- if (opt) {
|
|
- unsigned int head_room;
|
|
+ head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
|
|
+ if (opt)
|
|
+ head_room += opt->opt_nflen + opt->opt_flen;
|
|
|
|
- /* First: exthdrs may take lots of space (~8K for now)
|
|
- MAX_HEADER is not enough.
|
|
- */
|
|
- head_room = opt->opt_nflen + opt->opt_flen;
|
|
- seg_len += head_room;
|
|
- head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
|
|
-
|
|
- if (skb_headroom(skb) < head_room) {
|
|
- struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
|
|
- if (!skb2) {
|
|
- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
|
|
- IPSTATS_MIB_OUTDISCARDS);
|
|
- kfree_skb(skb);
|
|
- return -ENOBUFS;
|
|
- }
|
|
- if (skb->sk)
|
|
- skb_set_owner_w(skb2, skb->sk);
|
|
- consume_skb(skb);
|
|
- skb = skb2;
|
|
+ if (unlikely(skb_headroom(skb) < head_room)) {
|
|
+ struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
|
|
+ if (!skb2) {
|
|
+ IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
|
|
+ IPSTATS_MIB_OUTDISCARDS);
|
|
+ kfree_skb(skb);
|
|
+ return -ENOBUFS;
|
|
}
|
|
+ if (skb->sk)
|
|
+ skb_set_owner_w(skb2, skb->sk);
|
|
+ consume_skb(skb);
|
|
+ skb = skb2;
|
|
+ }
|
|
+
|
|
+ if (opt) {
|
|
+ seg_len += opt->opt_nflen + opt->opt_flen;
|
|
+
|
|
if (opt->opt_flen)
|
|
ipv6_push_frag_opts(skb, opt, &proto);
|
|
+
|
|
if (opt->opt_nflen)
|
|
ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
|
|
&fl6->saddr);
|
|
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
|
|
index 5ae8e1c51079..8b075f0bc351 100644
|
|
--- a/net/ipv6/netfilter.c
|
|
+++ b/net/ipv6/netfilter.c
|
|
@@ -24,7 +24,8 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
|
|
unsigned int hh_len;
|
|
struct dst_entry *dst;
|
|
struct flowi6 fl6 = {
|
|
- .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
|
|
+ .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
|
|
+ rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
|
|
.flowi6_mark = skb->mark,
|
|
.flowi6_uid = sock_net_uid(net, sk),
|
|
.daddr = iph->daddr,
|
|
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
|
|
index 491f808e356a..29c7f1915a96 100644
|
|
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
|
|
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
|
|
@@ -58,8 +58,12 @@ static int __init masquerade_tg6_init(void)
|
|
int err;
|
|
|
|
err = xt_register_target(&masquerade_tg6_reg);
|
|
- if (err == 0)
|
|
- nf_nat_masquerade_ipv6_register_notifier();
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ err = nf_nat_masquerade_ipv6_register_notifier();
|
|
+ if (err)
|
|
+ xt_unregister_target(&masquerade_tg6_reg);
|
|
|
|
return err;
|
|
}
|
|
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
|
|
index f76bd4d15704..043ed8eb0ab9 100644
|
|
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
|
|
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
|
|
@@ -341,7 +341,7 @@ static bool
|
|
nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
|
|
{
|
|
struct sk_buff *fp, *head = fq->q.fragments;
|
|
- int payload_len;
|
|
+ int payload_len, delta;
|
|
u8 ecn;
|
|
|
|
inet_frag_kill(&fq->q);
|
|
@@ -363,10 +363,16 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
|
|
return false;
|
|
}
|
|
|
|
+ delta = - head->truesize;
|
|
+
|
|
/* Head of list must not be cloned. */
|
|
if (skb_unclone(head, GFP_ATOMIC))
|
|
return false;
|
|
|
|
+ delta += head->truesize;
|
|
+ if (delta)
|
|
+ add_frag_mem_limit(fq->q.net, delta);
|
|
+
|
|
/* If the first fragment is fragmented itself, we split
|
|
* it to two chunks: the first with data and paged part
|
|
* and the second, holding only fragments. */
|
|
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
|
|
index e6eb7cf9b54f..37b1d413c825 100644
|
|
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
|
|
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
|
|
@@ -120,8 +120,8 @@ static void iterate_cleanup_work(struct work_struct *work)
|
|
* of ipv6 addresses being deleted), we also need to add an upper
|
|
* limit to the number of queued work items.
|
|
*/
|
|
-static int masq_inet_event(struct notifier_block *this,
|
|
- unsigned long event, void *ptr)
|
|
+static int masq_inet6_event(struct notifier_block *this,
|
|
+ unsigned long event, void *ptr)
|
|
{
|
|
struct inet6_ifaddr *ifa = ptr;
|
|
const struct net_device *dev;
|
|
@@ -158,30 +158,53 @@ static int masq_inet_event(struct notifier_block *this,
|
|
return NOTIFY_DONE;
|
|
}
|
|
|
|
-static struct notifier_block masq_inet_notifier = {
|
|
- .notifier_call = masq_inet_event,
|
|
+static struct notifier_block masq_inet6_notifier = {
|
|
+ .notifier_call = masq_inet6_event,
|
|
};
|
|
|
|
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
|
|
+static int masq_refcnt;
|
|
+static DEFINE_MUTEX(masq_mutex);
|
|
|
|
-void nf_nat_masquerade_ipv6_register_notifier(void)
|
|
+int nf_nat_masquerade_ipv6_register_notifier(void)
|
|
{
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&masq_mutex);
|
|
/* check if the notifier is already set */
|
|
- if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
|
|
- return;
|
|
+ if (++masq_refcnt > 1)
|
|
+ goto out_unlock;
|
|
+
|
|
+ ret = register_netdevice_notifier(&masq_dev_notifier);
|
|
+ if (ret)
|
|
+ goto err_dec;
|
|
+
|
|
+ ret = register_inet6addr_notifier(&masq_inet6_notifier);
|
|
+ if (ret)
|
|
+ goto err_unregister;
|
|
|
|
- register_netdevice_notifier(&masq_dev_notifier);
|
|
- register_inet6addr_notifier(&masq_inet_notifier);
|
|
+ mutex_unlock(&masq_mutex);
|
|
+ return ret;
|
|
+
|
|
+err_unregister:
|
|
+ unregister_netdevice_notifier(&masq_dev_notifier);
|
|
+err_dec:
|
|
+ masq_refcnt--;
|
|
+out_unlock:
|
|
+ mutex_unlock(&masq_mutex);
|
|
+ return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
|
|
|
|
void nf_nat_masquerade_ipv6_unregister_notifier(void)
|
|
{
|
|
+ mutex_lock(&masq_mutex);
|
|
/* check if the notifier still has clients */
|
|
- if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
|
|
- return;
|
|
+ if (--masq_refcnt > 0)
|
|
+ goto out_unlock;
|
|
|
|
- unregister_inet6addr_notifier(&masq_inet_notifier);
|
|
+ unregister_inet6addr_notifier(&masq_inet6_notifier);
|
|
unregister_netdevice_notifier(&masq_dev_notifier);
|
|
+out_unlock:
|
|
+ mutex_unlock(&masq_mutex);
|
|
}
|
|
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
|
|
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index dd0122f3cffe..e06c82e9dfcd 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -70,7 +70,9 @@ static int __init nft_masq_ipv6_module_init(void)
if (ret < 0)
return ret;

- nf_nat_masquerade_ipv6_register_notifier();
+ ret = nf_nat_masquerade_ipv6_register_notifier();
+ if (ret)
+ nft_unregister_expr(&nft_masq_ipv6_type);

return ret;
}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5c5b4f79296e..d3fd2d7e5aa4 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -281,7 +281,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
{
struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
struct sk_buff *fp, *head = fq->q.fragments;
- int payload_len;
+ int payload_len, delta;
unsigned int nhoff;
int sum_truesize;
u8 ecn;
@@ -322,10 +322,16 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
if (payload_len > IPV6_MAXPLEN)
goto out_oversize;

+ delta = - head->truesize;
+
/* Head of list must not be cloned. */
if (skb_unclone(head, GFP_ATOMIC))
goto out_oom;

+ delta += head->truesize;
+ if (delta)
+ add_frag_mem_limit(fq->q.net, delta);
+
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index a8854dd3e9c5..8181ee7e1e27 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -347,6 +347,7 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct flowi6 fl6;

+ memset(&fl6, 0, sizeof(fl6));
fl6.daddr = hdr->daddr;
fl6.saddr = hdr->saddr;
fl6.flowlabel = ip6_flowinfo(hdr);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 62eefea48973..518364f4abcc 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3980,6 +3980,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)

static struct notifier_block ip_vs_dst_notifier = {
.notifier_call = ip_vs_dst_event,
+#ifdef CONFIG_IP_VS_IPV6
+ .priority = ADDRCONF_NOTIFY_PRIORITY + 5,
+#endif
};

int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 02ca7df793f5..b6d0f6deea86 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -49,6 +49,7 @@ struct nf_conncount_tuple {
struct nf_conntrack_zone zone;
int cpu;
u32 jiffies32;
+ bool dead;
struct rcu_head rcu_head;
};

@@ -106,15 +107,16 @@ nf_conncount_add(struct nf_conncount_list *list,
conn->zone = *zone;
conn->cpu = raw_smp_processor_id();
conn->jiffies32 = (u32)jiffies;
- spin_lock(&list->list_lock);
+ conn->dead = false;
+ spin_lock_bh(&list->list_lock);
if (list->dead == true) {
kmem_cache_free(conncount_conn_cachep, conn);
- spin_unlock(&list->list_lock);
+ spin_unlock_bh(&list->list_lock);
return NF_CONNCOUNT_SKIP;
}
list_add_tail(&conn->node, &list->head);
list->count++;
- spin_unlock(&list->list_lock);
+ spin_unlock_bh(&list->list_lock);
return NF_CONNCOUNT_ADDED;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);
@@ -132,19 +134,22 @@ static bool conn_free(struct nf_conncount_list *list,
{
bool free_entry = false;

- spin_lock(&list->list_lock);
+ spin_lock_bh(&list->list_lock);

- if (list->count == 0) {
- spin_unlock(&list->list_lock);
- return free_entry;
+ if (conn->dead) {
+ spin_unlock_bh(&list->list_lock);
+ return free_entry;
}

list->count--;
+ conn->dead = true;
list_del_rcu(&conn->node);
- if (list->count == 0)
+ if (list->count == 0) {
+ list->dead = true;
free_entry = true;
+ }

- spin_unlock(&list->list_lock);
+ spin_unlock_bh(&list->list_lock);
call_rcu(&conn->rcu_head, __conn_free);
return free_entry;
}
@@ -245,7 +250,7 @@ void nf_conncount_list_init(struct nf_conncount_list *list)
{
spin_lock_init(&list->list_lock);
INIT_LIST_HEAD(&list->head);
- list->count = 1;
+ list->count = 0;
list->dead = false;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);
@@ -259,6 +264,7 @@ bool nf_conncount_gc_list(struct net *net,
struct nf_conn *found_ct;
unsigned int collected = 0;
bool free_entry = false;
+ bool ret = false;

list_for_each_entry_safe(conn, conn_n, &list->head, node) {
found = find_or_evict(net, list, conn, &free_entry);
@@ -288,7 +294,15 @@ bool nf_conncount_gc_list(struct net *net,
if (collected > CONNCOUNT_GC_MAX_NODES)
return false;
}
- return false;
+
+ spin_lock_bh(&list->list_lock);
+ if (!list->count) {
+ list->dead = true;
+ ret = true;
+ }
+ spin_unlock_bh(&list->list_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);

@@ -309,11 +323,8 @@ static void tree_nodes_free(struct rb_root *root,
while (gc_count) {
rbconn = gc_nodes[--gc_count];
spin_lock(&rbconn->list.list_lock);
- if (rbconn->list.count == 0 && rbconn->list.dead == false) {
- rbconn->list.dead = true;
- rb_erase(&rbconn->node, root);
- call_rcu(&rbconn->rcu_head, __tree_nodes_free);
- }
+ rb_erase(&rbconn->node, root);
+ call_rcu(&rbconn->rcu_head, __tree_nodes_free);
spin_unlock(&rbconn->list.list_lock);
}
}
@@ -414,6 +425,7 @@ insert_tree(struct net *net,
nf_conncount_list_init(&rbconn->list);
list_add(&conn->node, &rbconn->list.head);
count = 1;
+ rbconn->list.count = count;

rb_link_node(&rbconn->node, parent, rbnode);
rb_insert_color(&rbconn->node, root);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2cfb173cd0b2..fe0558b15fd3 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2432,7 +2432,7 @@ err:
static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
struct nft_rule *rule)
{
- struct nft_expr *expr;
+ struct nft_expr *expr, *next;

lockdep_assert_held(&ctx->net->nft.commit_mutex);
/*
@@ -2441,8 +2441,9 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
*/
expr = nft_expr_first(rule);
while (expr != nft_expr_last(rule) && expr->ops) {
+ next = nft_expr_next(expr);
nf_tables_expr_destroy(ctx, expr);
- expr = nft_expr_next(expr);
+ expr = next;
}
kfree(rule);
}
@@ -2645,21 +2646,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
}

if (nlh->nlmsg_flags & NLM_F_REPLACE) {
- if (!nft_is_active_next(net, old_rule)) {
- err = -ENOENT;
- goto err2;
- }
- trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
- old_rule);
+ trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (trans == NULL) {
err = -ENOMEM;
goto err2;
}
- nft_deactivate_next(net, old_rule);
- chain->use--;
-
- if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
- err = -ENOMEM;
+ err = nft_delrule(&ctx, old_rule);
+ if (err < 0) {
+ nft_trans_destroy(trans);
goto err2;
}

@@ -6277,7 +6271,7 @@ static void nf_tables_commit_chain_free_rules_old(struct nft_rule **rules)
call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old);
}

-static void nf_tables_commit_chain_active(struct net *net, struct nft_chain *chain)
+static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
{
struct nft_rule **g0, **g1;
bool next_genbit;
@@ -6363,11 +6357,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)

/* step 2. Make rules_gen_X visible to packet path */
list_for_each_entry(table, &net->nft.tables, list) {
- list_for_each_entry(chain, &table->chains, list) {
- if (!nft_is_active_next(net, chain))
- continue;
- nf_tables_commit_chain_active(net, chain);
- }
+ list_for_each_entry(chain, &table->chains, list)
+ nf_tables_commit_chain(net, chain);
}

/*
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index ad2fe6a7e47d..29d6fc73caf9 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -501,6 +501,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
void *info)
{
struct xt_match *match = expr->ops->data;
+ struct module *me = match->me;
struct xt_mtdtor_param par;

par.net = ctx->net;
@@ -511,7 +512,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
par.match->destroy(&par);

if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
- module_put(match->me);
+ module_put(me);
}

static void
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index d6bab8c3cbb0..5fd4c57c79cc 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -214,7 +214,9 @@ static int __init nft_flow_offload_module_init(void)
{
int err;

- register_netdevice_notifier(&flow_offload_netdev_notifier);
+ err = register_netdevice_notifier(&flow_offload_netdev_notifier);
+ if (err)
+ goto err;

err = nft_register_expr(&nft_flow_offload_type);
if (err < 0)
@@ -224,6 +226,7 @@ static int __init nft_flow_offload_module_init(void)

register_expr:
unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+err:
return err;
}

diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index dec843cadf46..9e05c86ba5c4 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -201,18 +201,8 @@ static __net_init int xt_rateest_net_init(struct net *net)
return 0;
}

-static void __net_exit xt_rateest_net_exit(struct net *net)
-{
- struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
- WARN_ON_ONCE(!hlist_empty(&xn->hash[i]));
-}
-
static struct pernet_operations xt_rateest_net_ops = {
.init = xt_rateest_net_init,
- .exit = xt_rateest_net_exit,
.id = &xt_rateest_id,
.size = sizeof(struct xt_rateest_net),
};
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 3e7d259e5d8d..1ad4017f9b73 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -295,9 +295,10 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,

/* copy match config into hashtable config */
ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
-
- if (ret)
+ if (ret) {
+ vfree(hinfo);
return ret;
+ }

hinfo->cfg.size = size;
if (hinfo->cfg.max == 0)
@@ -814,7 +815,6 @@ hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
int ret;

ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
-
if (ret)
return ret;

@@ -830,7 +830,6 @@ hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
int ret;

ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
-
if (ret)
return ret;

@@ -921,7 +920,6 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
return ret;

ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
-
if (ret)
return ret;

@@ -940,7 +938,6 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
return ret;

ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
-
if (ret)
return ret;

diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index ad18a2052416..74c0f656f28c 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -441,6 +441,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
int count = 1;
int rc = NET_XMIT_SUCCESS;

+ /* Do not fool qdisc_drop_all() */
+ skb->prev = NULL;
+
/* Random duplication */
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
++count;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 6a28b96e779e..914750b819b2 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -118,9 +118,6 @@ static struct sctp_association *sctp_association_init(
asoc->flowlabel = sp->flowlabel;
asoc->dscp = sp->dscp;

- /* Initialize default path MTU. */
- asoc->pathmtu = sp->pathmtu;
-
/* Set association default SACK delay */
asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
asoc->sackfreq = sp->sackfreq;
@@ -252,6 +249,10 @@ static struct sctp_association *sctp_association_init(
0, gfp))
goto fail_init;

+ /* Initialize default path MTU. */
+ asoc->pathmtu = sp->pathmtu;
+ sctp_assoc_update_frag_point(asoc);
+
/* Assume that peer would support both address types unless we are
* told otherwise.
*/
@@ -434,7 +435,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)

WARN_ON(atomic_read(&asoc->rmem_alloc));

- kfree(asoc);
+ kfree_rcu(asoc, rcu);
SCTP_DBG_OBJCNT_DEC(assoc);
}

diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 4a4fd1971255..f4ac6c592e13 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2462,6 +2462,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
asoc->c.sinit_max_instreams, gfp))
goto clean_up;

+ /* Update frag_point when stream_interleave may get changed. */
+ sctp_assoc_update_frag_point(asoc);
+
if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
goto clean_up;

diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index 64c3cb0fb926..654a50319198 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -30,7 +30,7 @@ static int ff400_get_clock(struct snd_ff *ff, unsigned int *rate,
int err;

err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
- FF400_SYNC_STATUS, &reg, sizeof(reg), 0);
+ FF400_CLOCK_CONFIG, &reg, sizeof(reg), 0);
if (err < 0)
return err;
data = le32_to_cpu(reg);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 22ca1f0a858f..8a3d0694d2e5 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5520,6 +5520,9 @@ enum {
ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
ALC295_FIXUP_HP_AUTO_MUTE,
ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
+ ALC294_FIXUP_ASUS_MIC,
+ ALC294_FIXUP_ASUS_HEADSET_MIC,
+ ALC294_FIXUP_ASUS_SPK,
};

static const struct hda_fixup alc269_fixups[] = {
@@ -6392,6 +6395,8 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_invalidate_dacs,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
},
[ALC295_FIXUP_HP_AUTO_MUTE] = {
.type = HDA_FIXUP_FUNC,
@@ -6406,6 +6411,36 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MIC
},
+ [ALC294_FIXUP_ASUS_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x13, 0x90a60160 }, /* use as internal mic */
+ { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
+ [ALC294_FIXUP_ASUS_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
+ [ALC294_FIXUP_ASUS_SPK] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Set EAPD high */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+ },
};

static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6548,6 +6583,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -7155,6 +7191,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC292_STANDARD_PINS,
{0x13, 0x90a60140}),
+ SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
+ {0x14, 0x90170110},
+ {0x1b, 0x90a70130},
+ {0x21, 0x04211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
+ {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_STANDARD_PINS,
{0x17, 0x21014020},
@@ -7227,6 +7271,37 @@ static void alc269_fill_coef(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x4, 0, 1<<11);
}

+static void alc294_hp_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ int i, val;
+
+ if (!hp_pin)
+ return;
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ msleep(100);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
+ alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+ /* Wait for depop procedure finish */
+ val = alc_read_coefex_idx(codec, 0x58, 0x01);
+ for (i = 0; i < 20 && val & 0x0080; i++) {
+ msleep(50);
+ val = alc_read_coefex_idx(codec, 0x58, 0x01);
+ }
+ /* Set HP depop to auto mode */
+ alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+ msleep(50);
+}
+
/*
*/
static int patch_alc269(struct hda_codec *codec)
@@ -7352,6 +7427,7 @@ static int patch_alc269(struct hda_codec *codec)
spec->codec_variant = ALC269_TYPE_ALC294;
spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
+ alc294_hp_init(codec);
break;
case 0x10ec0300:
spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7363,6 +7439,7 @@ static int patch_alc269(struct hda_codec *codec)
spec->codec_variant = ALC269_TYPE_ALC700;
spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
+ alc294_hp_init(codec);
break;

}
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 7b8533abf637..b61d518f4fef 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -2184,11 +2184,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
*/
snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D3);
- err = snd_hdac_display_power(bus, false);
- if (err < 0) {
- dev_err(dev, "Cannot turn on display power on i915\n");
- return err;
- }

hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev));
if (!hlink) {
@@ -2198,7 +2193,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)

snd_hdac_ext_bus_link_put(bus, hlink);

- return 0;
+ err = snd_hdac_display_power(bus, false);
+ if (err < 0)
+ dev_err(dev, "Cannot turn off display power on i915\n");
+
+ return err;
}

static int hdac_hdmi_runtime_resume(struct device *dev)
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index f61656070225..4d3ec295679d 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -765,38 +765,41 @@ static unsigned int wm_adsp_region_to_reg(struct wm_adsp_region const *mem,

static void wm_adsp2_show_fw_status(struct wm_adsp *dsp)
{
- u16 scratch[4];
+ unsigned int scratch[4];
+ unsigned int addr = dsp->base + ADSP2_SCRATCH0;
+ unsigned int i;
int ret;

- ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2_SCRATCH0,
- scratch, sizeof(scratch));
- if (ret) {
- adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
- return;
+ for (i = 0; i < ARRAY_SIZE(scratch); ++i) {
+ ret = regmap_read(dsp->regmap, addr + i, &scratch[i]);
+ if (ret) {
+ adsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
+ return;
+ }
}

adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
- be16_to_cpu(scratch[0]),
- be16_to_cpu(scratch[1]),
- be16_to_cpu(scratch[2]),
- be16_to_cpu(scratch[3]));
+ scratch[0], scratch[1], scratch[2], scratch[3]);
}

static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp)
{
- u32 scratch[2];
+ unsigned int scratch[2];
int ret;

- ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
- scratch, sizeof(scratch));
-
+ ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
+ &scratch[0]);
if (ret) {
- adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
+ adsp_err(dsp, "Failed to read SCRATCH0_1: %d\n", ret);
return;
}

- scratch[0] = be32_to_cpu(scratch[0]);
- scratch[1] = be32_to_cpu(scratch[1]);
+ ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH2_3,
+ &scratch[1]);
+ if (ret) {
+ adsp_err(dsp, "Failed to read SCRATCH2_3: %d\n", ret);
+ return;
+ }

adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
scratch[0] & 0xFFFF,
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 1d17be0f78a0..50f16a0f6535 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -752,6 +752,12 @@ static void skl_probe_work(struct work_struct *work)
}
}

+ /*
+ * we are done probing so decrement link counts
+ */
+ list_for_each_entry(hlink, &bus->hlink_list, list)
+ snd_hdac_ext_bus_link_put(bus, hlink);
+
if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
err = snd_hdac_display_power(bus, false);
if (err < 0) {
@@ -761,12 +767,6 @@ static void skl_probe_work(struct work_struct *work)
}
}

- /*
- * we are done probing so decrement link counts
- */
- list_for_each_entry(hlink, &bus->hlink_list, list)
- snd_hdac_ext_bus_link_put(bus, hlink);
-
/* configure PM */
pm_runtime_put_noidle(bus->dev);
pm_runtime_allow(bus->dev);
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index d5ae9eb8c756..fed45b41f9d3 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -36,6 +36,8 @@
#include "../codecs/twl6040.h"

struct abe_twl6040 {
+ struct snd_soc_card card;
+ struct snd_soc_dai_link dai_links[2];
int jack_detection; /* board can detect jack events */
int mclk_freq; /* MCLK frequency speed for twl6040 */
};
@@ -208,40 +210,10 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
ARRAY_SIZE(dmic_audio_map));
}

-/* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
- {
- .name = "TWL6040",
- .stream_name = "TWL6040",
- .codec_dai_name = "twl6040-legacy",
- .codec_name = "twl6040-codec",
- .init = omap_abe_twl6040_init,
- .ops = &omap_abe_ops,
- },
- {
- .name = "DMIC",
- .stream_name = "DMIC Capture",
- .codec_dai_name = "dmic-hifi",
- .codec_name = "dmic-codec",
- .init = omap_abe_dmic_init,
- .ops = &omap_abe_dmic_ops,
- },
-};
-
-/* Audio machine driver */
-static struct snd_soc_card omap_abe_card = {
- .owner = THIS_MODULE,
-
- .dapm_widgets = twl6040_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets),
- .dapm_routes = audio_map,
- .num_dapm_routes = ARRAY_SIZE(audio_map),
-};
-
static int omap_abe_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct snd_soc_card *card = &omap_abe_card;
+ struct snd_soc_card *card;
struct device_node *dai_node;
struct abe_twl6040 *priv;
int num_links = 0;
@@ -252,12 +224,18 @@ static int omap_abe_probe(struct platform_device *pdev)
return -ENODEV;
}

- card->dev = &pdev->dev;
-
priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;

+ card = &priv->card;
+ card->dev = &pdev->dev;
+ card->owner = THIS_MODULE;
+ card->dapm_widgets = twl6040_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets);
+ card->dapm_routes = audio_map;
+ card->num_dapm_routes = ARRAY_SIZE(audio_map);
+
if (snd_soc_of_parse_card_name(card, "ti,model")) {
dev_err(&pdev->dev, "Card name is not provided\n");
return -ENODEV;
@@ -274,14 +252,27 @@ static int omap_abe_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "McPDM node is not provided\n");
return -EINVAL;
}
- abe_twl6040_dai_links[0].cpu_of_node = dai_node;
- abe_twl6040_dai_links[0].platform_of_node = dai_node;
+
+ priv->dai_links[0].name = "DMIC";
+ priv->dai_links[0].stream_name = "TWL6040";
+ priv->dai_links[0].cpu_of_node = dai_node;
+ priv->dai_links[0].platform_of_node = dai_node;
+ priv->dai_links[0].codec_dai_name = "twl6040-legacy";
+ priv->dai_links[0].codec_name = "twl6040-codec";
+ priv->dai_links[0].init = omap_abe_twl6040_init;
+ priv->dai_links[0].ops = &omap_abe_ops;

dai_node = of_parse_phandle(node, "ti,dmic", 0);
if (dai_node) {
num_links = 2;
- abe_twl6040_dai_links[1].cpu_of_node = dai_node;
- abe_twl6040_dai_links[1].platform_of_node = dai_node;
+ priv->dai_links[1].name = "TWL6040";
+ priv->dai_links[1].stream_name = "DMIC Capture";
+ priv->dai_links[1].cpu_of_node = dai_node;
+ priv->dai_links[1].platform_of_node = dai_node;
+ priv->dai_links[1].codec_dai_name = "dmic-hifi";
+ priv->dai_links[1].codec_name = "dmic-codec";
+ priv->dai_links[1].init = omap_abe_dmic_init;
+ priv->dai_links[1].ops = &omap_abe_dmic_ops;
} else {
num_links = 1;
}
@@ -300,7 +291,7 @@ static int omap_abe_probe(struct platform_device *pdev)
return -ENODEV;
}

- card->dai_link = abe_twl6040_dai_links;
+ card->dai_link = priv->dai_links;
card->num_links = num_links;

snd_soc_card_set_drvdata(card, priv);
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index fe966272bd0c..cba9645b6487 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -48,6 +48,8 @@ struct omap_dmic {
struct device *dev;
void __iomem *io_base;
struct clk *fclk;
+ struct pm_qos_request pm_qos_req;
+ int latency;
int fclk_freq;
int out_freq;
int clk_div;
@@ -124,6 +126,8 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,

mutex_lock(&dmic->mutex);

+ pm_qos_remove_request(&dmic->pm_qos_req);
+
if (!dai->active)
dmic->active = 0;

@@ -228,6 +232,8 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
/* packet size is threshold * channels */
dma_data = snd_soc_dai_get_dma_data(dai, substream);
dma_data->maxburst = dmic->threshold * channels;
+ dmic->latency = (OMAP_DMIC_THRES_MAX - dmic->threshold) * USEC_PER_SEC /
+ params_rate(params);

return 0;
}
@@ -238,6 +244,9 @@ static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
u32 ctrl;

+ if (pm_qos_request_active(&dmic->pm_qos_req))
+ pm_qos_update_request(&dmic->pm_qos_req, dmic->latency);
+
/* Configure uplink threshold */
omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);

diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index d0ebb6b9bfac..2d6decbfc99e 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -308,9 +308,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
pkt_size = channels;
}

- latency = ((((buffer_size - pkt_size) / channels) * 1000)
- / (params->rate_num / params->rate_den));
-
+ latency = (buffer_size - pkt_size) / channels;
+ latency = latency * USEC_PER_SEC /
+ (params->rate_num / params->rate_den);
mcbsp->latency[substream->stream] = latency;

omap_mcbsp_set_threshold(substream, pkt_size);
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 4c1be36c2207..7d5bdc5a2890 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -54,6 +54,8 @@ struct omap_mcpdm {
unsigned long phys_base;
void __iomem *io_base;
int irq;
+ struct pm_qos_request pm_qos_req;
+ int latency[2];

struct mutex mutex;

@@ -277,6 +279,9 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+ int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;

mutex_lock(&mcpdm->mutex);

@@ -289,6 +294,14 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
}
}

+ if (mcpdm->latency[stream2])
+ pm_qos_update_request(&mcpdm->pm_qos_req,
+ mcpdm->latency[stream2]);
+ else if (mcpdm->latency[stream1])
+ pm_qos_remove_request(&mcpdm->pm_qos_req);
+
+ mcpdm->latency[stream1] = 0;
+
mutex_unlock(&mcpdm->mutex);
}

@@ -300,7 +313,7 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
int stream = substream->stream;
struct snd_dmaengine_dai_dma_data *dma_data;
u32 threshold;
- int channels;
+ int channels, latency;
int link_mask = 0;

channels = params_channels(params);
@@ -344,14 +357,25 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,

dma_data->maxburst =
(MCPDM_DN_THRES_MAX - threshold) * channels;
+ latency = threshold;
} else {
/* If playback is not running assume a stereo stream to come */
if (!mcpdm->config[!stream].link_mask)
mcpdm->config[!stream].link_mask = (0x3 << 3);

dma_data->maxburst = threshold * channels;
+ latency = (MCPDM_DN_THRES_MAX - threshold);
}

+ /*
+ * The DMA must act to a DMA request within latency time (usec) to avoid
+ * under/overflow
+ */
+ mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
+
+ if (!mcpdm->latency[stream])
+ mcpdm->latency[stream] = 10;
+
/* Check if we need to restart McPDM with this stream */
if (mcpdm->config[stream].link_mask &&
mcpdm->config[stream].link_mask != link_mask)
@@ -366,6 +390,20 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
+ int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+ int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ int latency = mcpdm->latency[stream2];
+
+ /* Prevent omap hardware from hitting off between FIFO fills */
+ if (!latency || mcpdm->latency[stream1] < latency)
+ latency = mcpdm->latency[stream1];
+
+ if (pm_qos_request_active(pm_qos_req))
+ pm_qos_update_request(pm_qos_req, latency);
+ else if (latency)
+ pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);

if (!omap_mcpdm_active(mcpdm)) {
omap_mcpdm_start(mcpdm);
@@ -427,6 +465,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
free_irq(mcpdm->irq, (void *)mcpdm);
pm_runtime_disable(mcpdm->dev);

+ if (pm_qos_request_active(&mcpdm->pm_qos_req))
+ pm_qos_remove_request(&mcpdm->pm_qos_req);
+
return 0;
}

diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
index eb1b9da05dd4..4715527054e5 100644
--- a/sound/soc/qcom/common.c
+++ b/sound/soc/qcom/common.c
@@ -13,6 +13,7 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
struct device_node *cpu = NULL;
struct device *dev = card->dev;
struct snd_soc_dai_link *link;
+ struct of_phandle_args args;
int ret, num_links;

ret = snd_soc_of_parse_card_name(card, "model");
@@ -47,12 +48,14 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
goto err;
}

- link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
- if (!link->cpu_of_node) {
+ ret = of_parse_phandle_with_args(cpu, "sound-dai",
+ "#sound-dai-cells", 0, &args);
+ if (ret) {
dev_err(card->dev, "error getting cpu phandle\n");
- ret = -EINVAL;
goto err;
}
+ link->cpu_of_node = args.np;
+ link->id = args.args[0];

ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
if (ret) {
diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
index 60ff4a2d3577..8f6c8fc073a9 100644
--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
+++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
@@ -1112,204 +1112,204 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
}

static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
- SND_SOC_DAPM_AIF_OUT("HDMI_RX", "HDMI Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_2_TX", "Slimbus2 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_3_TX", "Slimbus3 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_6_TX", "Slimbus6 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
+ SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1",
+ SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
"Secondary MI2S Playback SD1",
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback",
+ SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture",
+ SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
0, 0, 0, 0),

- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_0", "Primary TDM0 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_1", "Primary TDM1 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_2", "Primary TDM2 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_3", "Primary TDM3 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_4", "Primary TDM4 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_5", "Primary TDM5 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_6", "Primary TDM6 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_7", "Primary TDM7 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_0", "Primary TDM0 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_1", "Primary TDM1 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_2", "Primary TDM2 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_3", "Primary TDM3 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_4", "Primary TDM4 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_5", "Primary TDM5 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_6", "Primary TDM6 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_7", "Primary TDM7 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
0, 0, 0, 0),

- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
0, 0, 0, 0),

- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
0, 0, 0, 0),

- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
0, 0, 0, 0),

- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_0", "Quinary TDM0 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_1", "Quinary TDM1 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_2", "Quinary TDM2 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_3", "Quinary TDM3 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_4", "Quinary TDM4 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_5", "Quinary TDM5 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_6", "Quinary TDM6 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_7", "Quinary TDM7 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_0", "Quinary TDM0 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_1", "Quinary TDM1 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_2", "Quinary TDM2 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_3", "Quinary TDM3 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_4", "Quinary TDM4 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_5", "Quinary TDM5 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_6", "Quinary TDM6 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_7", "Quinary TDM7 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
0, 0, 0, 0),
};

diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
index 000775b4bba8..829b5e987b2a 100644
--- a/sound/soc/qcom/qdsp6/q6afe.c
+++ b/sound/soc/qcom/qdsp6/q6afe.c
@@ -49,14 +49,14 @@
#define AFE_PORT_I2S_SD1 0x2
#define AFE_PORT_I2S_SD2 0x3
#define AFE_PORT_I2S_SD3 0x4
-#define AFE_PORT_I2S_SD0_MASK BIT(0x1)
-#define AFE_PORT_I2S_SD1_MASK BIT(0x2)
-#define AFE_PORT_I2S_SD2_MASK BIT(0x3)
-#define AFE_PORT_I2S_SD3_MASK BIT(0x4)
-#define AFE_PORT_I2S_SD0_1_MASK GENMASK(2, 1)
-#define AFE_PORT_I2S_SD2_3_MASK GENMASK(4, 3)
-#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(3, 1)
-#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(4, 1)
+#define AFE_PORT_I2S_SD0_MASK BIT(0x0)
+#define AFE_PORT_I2S_SD1_MASK BIT(0x1)
+#define AFE_PORT_I2S_SD2_MASK BIT(0x2)
+#define AFE_PORT_I2S_SD3_MASK BIT(0x3)
+#define AFE_PORT_I2S_SD0_1_MASK GENMASK(1, 0)
+#define AFE_PORT_I2S_SD2_3_MASK GENMASK(3, 2)
+#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(2, 0)
+#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
#define AFE_PORT_I2S_QUAD01 0x5
#define AFE_PORT_I2S_QUAD23 0x6
#define AFE_PORT_I2S_6CHS 0x7
diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
index f77538319221..7029e0b85f9e 100644
--- a/sound/soc/rockchip/rockchip_pcm.c
+++ b/sound/soc/rockchip/rockchip_pcm.c
@@ -32,6 +32,7 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {

static const struct snd_dmaengine_pcm_config rk_dmaengine_pcm_config = {
.pcm_hardware = &snd_rockchip_hardware,
+ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
.prealloc_buffer_size = 32 * 1024,
};

diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 3f880ec66459..a566dae3ec8a 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
if (rsnd_ssi_is_multi_slave(mod, io))
return 0;

- if (ssi->rate) {
+ if (ssi->usrcnt > 1) {
if (ssi->rate != rate) {
dev_err(dev, "SSI parent/child should use same rate\n");
return -EINVAL;
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index b8e72b52db30..4fb29f0e561e 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -10,11 +10,17 @@ struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
{
struct snd_soc_acpi_mach *mach;
+ struct snd_soc_acpi_mach *mach_alt;

for (mach = machines; mach->id[0]; mach++) {
if (acpi_dev_present(mach->id, NULL, -1)) {
- if (mach->machine_quirk)
- mach = mach->machine_quirk(mach);
+ if (mach->machine_quirk) {
+ mach_alt = mach->machine_quirk(mach);
+ if (!mach_alt)
+ continue; /* not full match, ignore */
+ mach = mach_alt;
+ }
+
return mach;
}
}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 473eefe8658e..62aa320c2070 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2126,6 +2126,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}

card->instantiated = 1;
+ dapm_mark_endpoints_dirty(card);
snd_soc_dapm_sync(&card->dapm);
mutex_unlock(&card->mutex);
mutex_unlock(&client_mutex);
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index fb37dd927e33..bf615fa16dc8 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -589,16 +589,10 @@ err_pm_disable:

static int sun8i_codec_remove(struct platform_device *pdev)
{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct sun8i_codec *scodec = snd_soc_card_get_drvdata(card);
-
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
sun8i_codec_runtime_suspend(&pdev->dev);

- clk_disable_unprepare(scodec->clk_module);
- clk_disable_unprepare(scodec->clk_bus);
-
return 0;
}

diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
|
|
index 08aa78007020..1c73b9ed44a6 100644
|
|
--- a/sound/usb/quirks-table.h
|
|
+++ b/sound/usb/quirks-table.h
|
|
@@ -3387,5 +3387,15 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
|
|
.ifnum = QUIRK_NO_INTERFACE
|
|
}
|
|
},
|
|
+/* Dell WD19 Dock */
|
|
+{
|
|
+ USB_DEVICE(0x0bda, 0x402e),
|
|
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
|
|
+ .vendor_name = "Dell",
|
|
+ .product_name = "WD19 Dock",
|
|
+ .profile_name = "Dell-WD15-Dock",
|
|
+ .ifnum = QUIRK_NO_INTERFACE
|
|
+ }
|
|
+},
|
|
|
|
#undef USB_DEVICE_VENDOR_SPEC
|
|
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
|
|
index b3a0709ea7ed..fcaf00621102 100644
|
|
--- a/tools/bpf/bpftool/common.c
|
|
+++ b/tools/bpf/bpftool/common.c
|
|
@@ -304,7 +304,7 @@ char *get_fdinfo(int fd, const char *key)
|
|
return NULL;
|
|
}
|
|
|
|
- while ((n = getline(&line, &line_n, fdi))) {
|
|
+ while ((n = getline(&line, &line_n, fdi)) > 0) {
|
|
char *value;
|
|
int len;
|
|
|
|
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
|
|
index dce960d22106..0de024a6cc2b 100644
|
|
--- a/tools/bpf/bpftool/prog.c
|
|
+++ b/tools/bpf/bpftool/prog.c
|
|
@@ -749,6 +749,7 @@ static int do_load(int argc, char **argv)
|
|
}
|
|
NEXT_ARG();
|
|
} else if (is_prefix(*argv, "map")) {
|
|
+ void *new_map_replace;
|
|
char *endptr, *name;
|
|
int fd;
|
|
|
|
@@ -782,12 +783,15 @@ static int do_load(int argc, char **argv)
|
|
if (fd < 0)
|
|
goto err_free_reuse_maps;
|
|
|
|
- map_replace = reallocarray(map_replace, old_map_fds + 1,
|
|
- sizeof(*map_replace));
|
|
- if (!map_replace) {
|
|
+ new_map_replace = reallocarray(map_replace,
|
|
+ old_map_fds + 1,
|
|
+ sizeof(*map_replace));
|
|
+ if (!new_map_replace) {
|
|
p_err("mem alloc failed");
|
|
goto err_free_reuse_maps;
|
|
}
|
|
+ map_replace = new_map_replace;
|
|
+
|
|
map_replace[old_map_fds].idx = idx;
|
|
map_replace[old_map_fds].name = name;
|
|
map_replace[old_map_fds].fd = fd;
|
|
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
|
|
index 7ec85d567598..b75d004f6482 100644
|
|
--- a/tools/objtool/elf.c
|
|
+++ b/tools/objtool/elf.c
|
|
@@ -31,6 +31,8 @@
|
|
#include "elf.h"
|
|
#include "warn.h"
|
|
|
|
+#define MAX_NAME_LEN 128
|
|
+
|
|
struct section *find_section_by_name(struct elf *elf, const char *name)
|
|
{
|
|
struct section *sec;
|
|
@@ -298,6 +300,8 @@ static int read_symbols(struct elf *elf)
|
|
/* Create parent/child links for any cold subfunctions */
|
|
list_for_each_entry(sec, &elf->sections, list) {
|
|
list_for_each_entry(sym, &sec->symbol_list, list) {
|
|
+ char pname[MAX_NAME_LEN + 1];
|
|
+ size_t pnamelen;
|
|
if (sym->type != STT_FUNC)
|
|
continue;
|
|
sym->pfunc = sym->cfunc = sym;
|
|
@@ -305,14 +309,21 @@ static int read_symbols(struct elf *elf)
|
|
if (!coldstr)
|
|
continue;
|
|
|
|
- coldstr[0] = '\0';
|
|
- pfunc = find_symbol_by_name(elf, sym->name);
|
|
- coldstr[0] = '.';
|
|
+ pnamelen = coldstr - sym->name;
|
|
+ if (pnamelen > MAX_NAME_LEN) {
|
|
+ WARN("%s(): parent function name exceeds maximum length of %d characters",
|
|
+ sym->name, MAX_NAME_LEN);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ strncpy(pname, sym->name, pnamelen);
|
|
+ pname[pnamelen] = '\0';
|
|
+ pfunc = find_symbol_by_name(elf, pname);
|
|
|
|
if (!pfunc) {
|
|
WARN("%s(): can't find parent function",
|
|
sym->name);
|
|
- goto err;
|
|
+ return -1;
|
|
}
|
|
|
|
sym->pfunc = pfunc;
|
|
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
|
|
index 37940665f736..efd0157b9d22 100644
|
|
--- a/tools/perf/tests/attr/base-record
|
|
+++ b/tools/perf/tests/attr/base-record
|
|
@@ -9,7 +9,7 @@ size=112
|
|
config=0
|
|
sample_period=*
|
|
sample_type=263
|
|
-read_format=0
|
|
+read_format=0|4
|
|
disabled=1
|
|
inherit=1
|
|
pinned=0
|
|
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 03a72310315f..e7dbdcc8d465 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1088,7 +1088,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 		attr->exclude_user = 1;
 	}
 
-	if (evsel->own_cpus)
+	if (evsel->own_cpus || evsel->unit)
 		evsel->attr.read_format |= PERF_FORMAT_ID;
 
 	/*
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index cf8bd123cf73..aed170bd4384 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -18,6 +18,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
+#include <asm/bug.h>
 
 struct namespaces *namespaces__new(struct namespaces_event *event)
 {
@@ -186,6 +187,7 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
 	char curpath[PATH_MAX];
 	int oldns = -1;
 	int newns = -1;
+	char *oldcwd = NULL;
 
 	if (nc == NULL)
 		return;
@@ -199,9 +201,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
 	if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX)
 		return;
 
+	oldcwd = get_current_dir_name();
+	if (!oldcwd)
+		return;
+
 	oldns = open(curpath, O_RDONLY);
 	if (oldns < 0)
-		return;
+		goto errout;
 
 	newns = open(nsi->mntns_path, O_RDONLY);
 	if (newns < 0)
@@ -210,11 +216,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
 	if (setns(newns, CLONE_NEWNS) < 0)
 		goto errout;
 
+	nc->oldcwd = oldcwd;
 	nc->oldns = oldns;
 	nc->newns = newns;
 	return;
 
 errout:
+	free(oldcwd);
 	if (oldns > -1)
 		close(oldns);
 	if (newns > -1)
@@ -223,11 +231,16 @@ errout:
 
 void nsinfo__mountns_exit(struct nscookie *nc)
 {
-	if (nc == NULL || nc->oldns == -1 || nc->newns == -1)
+	if (nc == NULL || nc->oldns == -1 || nc->newns == -1 || !nc->oldcwd)
 		return;
 
 	setns(nc->oldns, CLONE_NEWNS);
 
+	if (nc->oldcwd) {
+		WARN_ON_ONCE(chdir(nc->oldcwd));
+		zfree(&nc->oldcwd);
+	}
+
 	if (nc->oldns > -1) {
 		close(nc->oldns);
 		nc->oldns = -1;
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index cae1a9a39722..d5f46c09ea31 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -38,6 +38,7 @@ struct nsinfo {
 struct nscookie {
 	int oldns;
 	int newns;
+	char *oldcwd;
 };
 
 int nsinfo__init(struct nsinfo *nsi);
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index f1fe492c8e17..f0017c831e57 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -24,6 +24,7 @@ TARGETS += memory-hotplug
 TARGETS += mount
 TARGETS += mqueue
 TARGETS += net
+TARGETS += netfilter
 TARGETS += nsfs
 TARGETS += powerpc
 TARGETS += proc
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 67c412d19c09..2bde9ee04db7 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -12511,6 +12511,25 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.result = ACCEPT,
 	},
+	{
+		"calls: ctx read at start of subprog",
+		.insns = {
+		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+		BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
+		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+		BPF_EXIT_INSN(),
+		BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
new file mode 100644
index 000000000000..47ed6cef93fb
--- /dev/null
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for netfilter selftests
+
+TEST_PROGS := nft_trans_stress.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
new file mode 100644
index 000000000000..1017313e41a8
--- /dev/null
+++ b/tools/testing/selftests/netfilter/config
@@ -0,0 +1,2 @@
+CONFIG_NET_NS=y
+NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
new file mode 100755
index 000000000000..f1affd12c4b1
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_trans_stress.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+#
+# This test is for stress-testing the nf_tables config plane path vs.
+# packet path processing: Make sure we never release rules that are
+# still visible to other cpus.
+#
+# set -e
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+testns=testns1
+tables="foo bar baz quux"
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without nft tool"
+	exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without ip tool"
+	exit $ksft_skip
+fi
+
+tmp=$(mktemp)
+
+for table in $tables; do
+	echo add table inet "$table" >> "$tmp"
+	echo flush table inet "$table" >> "$tmp"
+
+	echo "add chain inet $table INPUT { type filter hook input priority 0; }" >> "$tmp"
+	echo "add chain inet $table OUTPUT { type filter hook output priority 0; }" >> "$tmp"
+	for c in $(seq 1 400); do
+		chain=$(printf "chain%03u" "$c")
+		echo "add chain inet $table $chain" >> "$tmp"
+	done
+
+	for c in $(seq 1 400); do
+		chain=$(printf "chain%03u" "$c")
+		for BASE in INPUT OUTPUT; do
+			echo "add rule inet $table $BASE counter jump $chain" >> "$tmp"
+		done
+		echo "add rule inet $table $chain counter return" >> "$tmp"
+	done
+done
+
+ip netns add "$testns"
+ip -netns "$testns" link set lo up
+
+lscpu | grep ^CPU\(s\): | ( read cpu cpunum ;
+cpunum=$((cpunum-1))
+for i in $(seq 0 $cpunum);do
+	mask=$(printf 0x%x $((1<<$i)))
+	ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
+	ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &
+done)
+
+sleep 1
+
+for i in $(seq 1 10) ; do ip netns exec "$testns" nft -f "$tmp" & done
+
+for table in $tables;do
+	randsleep=$((RANDOM%10))
+	sleep $randsleep
+	ip netns exec "$testns" nft delete table inet $table 2>/dev/null
+done
+
+randsleep=$((RANDOM%10))
+sleep $randsleep
+
+pkill -9 ping
+
+wait
+
+rm -f "$tmp"
+ip netns del "$testns"
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index 6f1f4a6e1ecb..85744425b08d 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -13,7 +13,7 @@
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
-/* Test readlink /proc/self/map_files/... with address 0. */
+/* Test readlink /proc/self/map_files/... with minimum address. */
 #include <errno.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -47,6 +47,11 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
 int main(void)
 {
 	const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
+#ifdef __arm__
+	unsigned long va = 2 * PAGE_SIZE;
+#else
+	unsigned long va = 0;
+#endif
 	void *p;
 	int fd;
 	unsigned long a, b;
@@ -55,7 +60,7 @@ int main(void)
 	if (fd == -1)
 		return 1;
 
-	p = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
+	p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
 	if (p == MAP_FAILED) {
 		if (errno == EPERM)
 			return 2;