diff --git a/config/kernel/linux-odroidxu4-current.config b/config/kernel/linux-odroidxu4-current.config index 275af67f4..7a52e063b 100644 --- a/config/kernel/linux-odroidxu4-current.config +++ b/config/kernel/linux-odroidxu4-current.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm 5.4.94 Kernel Configuration +# Linux/arm 5.4.97 Kernel Configuration # # diff --git a/patch/kernel/odroidxu4-current/patch-5.4.94-95.patch b/patch/kernel/odroidxu4-current/patch-5.4.94-95.patch new file mode 100644 index 000000000..f6a36b55c --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.94-95.patch @@ -0,0 +1,1747 @@ +diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt +index a18e996fa54b0..7064efd3b5ea3 100644 +--- a/Documentation/virt/kvm/api.txt ++++ b/Documentation/virt/kvm/api.txt +@@ -1132,6 +1132,9 @@ field userspace_addr, which must point at user addressable memory for + the entire memory slot size. Any object may back this memory, including + anonymous memory, ordinary files, and hugetlbfs. + ++On architectures that support a form of address tagging, userspace_addr must ++be an untagged address. ++ + It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr + be identical. This allows large pages in the guest to be backed by large + pages in the host. +diff --git a/Makefile b/Makefile +index ad1b8dc6e462a..aa3c2e834442e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 94 ++SUBLEVEL = 95 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi +index 1a9a9d98f2848..14d6fec50dee2 100644 +--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi +@@ -273,7 +273,7 @@ + + /* VDD_AUD_1P8: Audio codec */ + reg_aud_1p8v: ldo3 { +- regulator-name = "vdd1p8"; ++ regulator-name = "vdd1p8a"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; +diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi +index 6acc8591219a7..eea317b41020d 100644 +--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi +@@ -167,7 +167,7 @@ + i2c-gpio,delay-us = <2>; /* ~100 kHz */ + #address-cells = <1>; + #size-cells = <0>; +- status = "disabld"; ++ status = "disabled"; + }; + + i2c_cam: i2c-gpio-cam { +@@ -179,7 +179,7 @@ + i2c-gpio,delay-us = <2>; /* ~100 kHz */ + #address-cells = <1>; + #size-cells = <0>; +- status = "disabld"; ++ status = "disabled"; + }; + }; + +diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S +index 1eabf2d2834be..e06f946b75b96 100644 +--- a/arch/arm/mach-imx/suspend-imx6.S ++++ b/arch/arm/mach-imx/suspend-imx6.S +@@ -67,6 +67,7 @@ + #define MX6Q_CCM_CCR 0x0 + + .align 3 ++ .arm + + .macro sync_l2_cache + +diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi +index aef8f2b00778d..5401a646c8406 100644 +--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi ++++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi +@@ -4,11 +4,16 @@ + */ + usb { + compatible = "simple-bus"; +- dma-ranges; + #address-cells = <2>; + #size-cells = <2>; + ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>; + ++ /* ++ * Internally, USB bus to the interconnect can only address up ++ * to 
40-bit ++ */ ++ dma-ranges = <0 0 0 0 0x100 0x0>; ++ + usbphy0: usb-phy@0 { + compatible = "brcm,sr-usb-combo-phy"; + reg = <0x0 0x00000000 0x0 0x100>; +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +index 795d6ca4bbd1f..bd99fa68b7630 100644 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +@@ -103,7 +103,7 @@ + reboot { + compatible ="syscon-reboot"; + regmap = <&rst>; +- offset = <0xb0>; ++ offset = <0>; + mask = <0x02>; + }; + +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c +index 08e1e7544f823..e32e8bcf94553 100644 +--- a/arch/x86/kvm/vmx/nested.c ++++ b/arch/x86/kvm/vmx/nested.c +@@ -5579,11 +5579,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, + if (is_guest_mode(vcpu)) { + sync_vmcs02_to_vmcs12(vcpu, vmcs12); + sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); +- } else if (!vmx->nested.need_vmcs12_to_shadow_sync) { +- if (vmx->nested.hv_evmcs) +- copy_enlightened_to_vmcs12(vmx); +- else if (enable_shadow_vmcs) +- copy_shadow_to_vmcs12(vmx); ++ } else { ++ copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); ++ if (!vmx->nested.need_vmcs12_to_shadow_sync) { ++ if (vmx->nested.hv_evmcs) ++ copy_enlightened_to_vmcs12(vmx); ++ else if (enable_shadow_vmcs) ++ copy_shadow_to_vmcs12(vmx); ++ } + } + + BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); +diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c +index f8998a7bc7d56..181e352d38de4 100644 +--- a/arch/x86/kvm/vmx/pmu_intel.c ++++ b/arch/x86/kvm/vmx/pmu_intel.c +@@ -26,7 +26,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = { + [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, + [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, +- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, ++ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES }, + }; + + /* mapping between fixed pmc index and intel_arch_events array */ +@@ -296,7 +296,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) + + pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, + x86_pmu.num_counters_gp); ++ eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp); + pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; ++ eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len); + pmu->available_event_types = ~entry->ebx & + ((1ull << eax.split.mask_length) - 1); + +@@ -306,6 +308,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) + pmu->nr_arch_fixed_counters = + min_t(int, edx.split.num_counters_fixed, + x86_pmu.num_counters_fixed); ++ edx.split.bit_width_fixed = min_t(int, ++ edx.split.bit_width_fixed, x86_pmu.bit_width_fixed); + pmu->counter_bitmask[KVM_PMC_FIXED] = + ((u64)1 << edx.split.bit_width_fixed) - 1; + } +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 72990c3c6faf7..73095d7213993 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -102,6 +102,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS; + + static void update_cr8_intercept(struct kvm_vcpu *vcpu); + static void process_nmi(struct kvm_vcpu *vcpu); ++static void process_smi(struct kvm_vcpu *vcpu); + static void enter_smm(struct kvm_vcpu *vcpu); + static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); + static void store_regs(struct kvm_vcpu *vcpu); +@@ -3772,6 +3773,10 @@ static void 
kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, + { + process_nmi(vcpu); + ++ ++ if (kvm_check_request(KVM_REQ_SMI, vcpu)) ++ process_smi(vcpu); ++ + /* + * The API doesn't provide the instruction length for software + * exceptions, so don't report them. As long as the guest RIP +diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c +index 96869f1538b93..bfca116482b8b 100644 +--- a/drivers/acpi/device_sysfs.c ++++ b/drivers/acpi/device_sysfs.c +@@ -251,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev, + if (add_uevent_var(env, "MODALIAS=")) + return -ENOMEM; + +- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], +- sizeof(env->buf) - env->buflen); +- if (len < 0) +- return len; +- +- env->buflen += len; +- if (!adev->data.of_compatible) +- return 0; +- +- if (len > 0 && add_uevent_var(env, "MODALIAS=")) +- return -ENOMEM; +- +- len = create_of_modalias(adev, &env->buf[env->buflen - 1], +- sizeof(env->buf) - env->buflen); ++ if (adev->data.of_compatible) ++ len = create_of_modalias(adev, &env->buf[env->buflen - 1], ++ sizeof(env->buf) - env->buflen); ++ else ++ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], ++ sizeof(env->buf) - env->buflen); + if (len < 0) + return len; + +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c +index a3037fe54c3ab..f068bb5d650eb 100644 +--- a/drivers/block/nbd.c ++++ b/drivers/block/nbd.c +@@ -1014,6 +1014,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, + if (!sock) + return err; + ++ /* ++ * We need to make sure we don't get any errant requests while we're ++ * reallocating the ->socks array. ++ */ ++ blk_mq_freeze_queue(nbd->disk->queue); ++ + if (!netlink && !nbd->task_setup && + !test_bit(NBD_RT_BOUND, &config->runtime_flags)) + nbd->task_setup = current; +@@ -1052,10 +1058,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, + nsock->cookie = 0; + socks[config->num_connections++] = nsock; + atomic_inc(&config->live_connections); ++ blk_mq_unfreeze_queue(nbd->disk->queue); + + return 0; + + put_socket: ++ blk_mq_unfreeze_queue(nbd->disk->queue); + sockfd_put(sock); + return err; + } +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c +index ab5482202cfb3..def41e1bd7364 100644 +--- a/drivers/block/xen-blkfront.c ++++ b/drivers/block/xen-blkfront.c +@@ -936,7 +936,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info) + if (info->feature_discard) { + blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq); + blk_queue_max_discard_sectors(rq, get_capacity(gd)); +- rq->limits.discard_granularity = info->discard_granularity; ++ rq->limits.discard_granularity = info->discard_granularity ?: ++ info->physical_sector_size; + rq->limits.discard_alignment = info->discard_alignment; + if (info->feature_secdiscard) + blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq); +@@ -2169,19 +2170,12 @@ static void blkfront_closing(struct blkfront_info *info) + + static void blkfront_setup_discard(struct blkfront_info *info) + { +- int err; +- unsigned int discard_granularity; +- unsigned int discard_alignment; +- + info->feature_discard = 1; +- err = xenbus_gather(XBT_NIL, info->xbdev->otherend, +- "discard-granularity", "%u", &discard_granularity, +- "discard-alignment", "%u", &discard_alignment, +- NULL); +- if (!err) { +- info->discard_granularity = discard_granularity; +- info->discard_alignment = discard_alignment; +- } ++ info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend, ++ "discard-granularity", ++ 0); ++ 
info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend, ++ "discard-alignment", 0); + info->feature_secdiscard = + !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure", + 0); +diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig +index 0dbee32da4c6d..5d995fe64b5ca 100644 +--- a/drivers/firmware/imx/Kconfig ++++ b/drivers/firmware/imx/Kconfig +@@ -13,6 +13,7 @@ config IMX_DSP + config IMX_SCU + bool "IMX SCU Protocol driver" + depends on IMX_MBOX ++ select SOC_BUS + help + The System Controller Firmware (SCFW) is a low-level system function + which runs on a dedicated Cortex-M core to provide power, clock, and +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index 126a0eb6e0542..00335a1c02b0e 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -1894,7 +1894,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p) + { + const unsigned int pi = __platform_mask_index(info, p); + +- return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS; ++ return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1); + } + + static __always_inline bool +diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c +index 0be4668c780bf..8556804e96efd 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_svm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c +@@ -306,6 +306,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data, + struct drm_nouveau_svm_init *args = data; + int ret; + ++ /* We need to fail if svm is disabled */ ++ if (!cli->drm->svm) ++ return -ENOSYS; ++ + /* Allocate tracking for SVM-enabled VMM. */ + if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL))) + return -ENOMEM; +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c +index 89ac2f9ae6dd8..e7472f0da59d2 100644 +--- a/drivers/infiniband/hw/cxgb4/qp.c ++++ b/drivers/infiniband/hw/cxgb4/qp.c +@@ -2471,7 +2471,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; + init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; + init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; +- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; ++ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; + init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; + init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; + return 0; +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c +index 30ac0ba55864e..1b9795743276d 100644 +--- a/drivers/iommu/dmar.c ++++ b/drivers/iommu/dmar.c +@@ -1020,8 +1020,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + { + struct intel_iommu *iommu; + u32 ver, sts; +- int agaw = 0; +- int msagaw = 0; ++ int agaw = -1; ++ int msagaw = -1; + int err; + + if (!drhd->reg_base_addr) { +@@ -1046,17 +1046,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + } + + err = -EINVAL; +- agaw = iommu_calculate_agaw(iommu); +- if (agaw < 0) { +- pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", +- iommu->seq_id); +- goto err_unmap; ++ if (cap_sagaw(iommu->cap) == 0) { ++ pr_info("%s: No supported address widths. 
Not attempting DMA translation.\n", ++ iommu->name); ++ drhd->ignored = 1; + } +- msagaw = iommu_calculate_max_sagaw(iommu); +- if (msagaw < 0) { +- pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", +- iommu->seq_id); +- goto err_unmap; ++ ++ if (!drhd->ignored) { ++ agaw = iommu_calculate_agaw(iommu); ++ if (agaw < 0) { ++ pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", ++ iommu->seq_id); ++ drhd->ignored = 1; ++ } ++ } ++ if (!drhd->ignored) { ++ msagaw = iommu_calculate_max_sagaw(iommu); ++ if (msagaw < 0) { ++ pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", ++ iommu->seq_id); ++ drhd->ignored = 1; ++ agaw = -1; ++ } + } + iommu->agaw = agaw; + iommu->msagaw = msagaw; +@@ -1083,7 +1094,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + + raw_spin_lock_init(&iommu->register_lock); + +- if (intel_iommu_enabled) { ++ /* ++ * This is only for hotplug; at boot time intel_iommu_enabled won't ++ * be set yet. When intel_iommu_init() runs, it registers the units ++ * present at boot time, then sets intel_iommu_enabled. ++ */ ++ if (intel_iommu_enabled && !drhd->ignored) { + err = iommu_device_sysfs_add(&iommu->iommu, NULL, + intel_iommu_groups, + "%s", iommu->name); +@@ -1098,6 +1114,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + } + + drhd->iommu = iommu; ++ iommu->drhd = drhd; + + return 0; + +@@ -1112,7 +1129,7 @@ error: + + static void free_iommu(struct intel_iommu *iommu) + { +- if (intel_iommu_enabled) { ++ if (intel_iommu_enabled && !iommu->drhd->ignored) { + iommu_device_unregister(&iommu->iommu); + iommu_device_sysfs_remove(&iommu->iommu); + } +diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c +index 23963e5cb5d6a..0d59763e40de1 100644 +--- a/drivers/leds/led-triggers.c ++++ b/drivers/leds/led-triggers.c +@@ -318,14 +318,15 @@ void led_trigger_event(struct led_trigger *trig, + enum led_brightness brightness) + { + struct led_classdev *led_cdev; ++ unsigned long flags; + + if (!trig) + return; + +- read_lock(&trig->leddev_list_lock); ++ read_lock_irqsave(&trig->leddev_list_lock, flags); + list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) + led_set_brightness(led_cdev, brightness); +- read_unlock(&trig->leddev_list_lock); ++ read_unlock_irqrestore(&trig->leddev_list_lock, flags); + } + EXPORT_SYMBOL_GPL(led_trigger_event); + +@@ -336,11 +337,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig, + int invert) + { + struct led_classdev *led_cdev; ++ unsigned long flags; + + if (!trig) + return; + +- read_lock(&trig->leddev_list_lock); ++ read_lock_irqsave(&trig->leddev_list_lock, flags); + list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) { + if (oneshot) + led_blink_set_oneshot(led_cdev, delay_on, delay_off, +@@ -348,7 +350,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig, + else + led_blink_set(led_cdev, delay_on, delay_off); + } +- read_unlock(&trig->leddev_list_lock); ++ read_unlock_irqrestore(&trig->leddev_list_lock, flags); + } + + void led_trigger_blink(struct led_trigger *trig, +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c +index e84f9dccf448a..c4d7e06974d2c 100644 +--- a/drivers/media/rc/rc-main.c ++++ b/drivers/media/rc/rc-main.c +@@ -1892,6 +1892,8 @@ int rc_register_device(struct rc_dev *dev) + goto out_raw; + } + ++ dev->registered = true; ++ + rc = device_add(&dev->dev); + if (rc) + goto out_rx_free; +@@ -1901,8 +1903,6 @@ int rc_register_device(struct rc_dev *dev) + dev->device_name ?: "Unspecified device", path ?: 
"N/A"); + kfree(path); + +- dev->registered = true; +- + /* + * once the the input device is registered in rc_setup_rx_device, + * userspace can open the input device and rc_open() will be called +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 247aeacb3a440..2ae9feb99a07d 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -1134,7 +1134,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) + { + struct can_priv *priv = netdev_priv(dev); + struct can_ctrlmode cm = {.flags = priv->ctrlmode}; +- struct can_berr_counter bec; ++ struct can_berr_counter bec = { }; + enum can_state state = priv->state; + + if (priv->do_get_state) +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +index c952212900fcf..c20dc689698ed 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +@@ -3980,20 +3980,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) + goto error_param; + + vf = &pf->vf[vf_id]; +- vsi = pf->vsi[vf->lan_vsi_idx]; + + /* When the VF is resetting wait until it is done. + * It can take up to 200 milliseconds, + * but wait for up to 300 milliseconds to be safe. +- * If the VF is indeed in reset, the vsi pointer has +- * to show on the newly loaded vsi under pf->vsi[id]. ++ * Acquire the VSI pointer only after the VF has been ++ * properly initialized. + */ + for (i = 0; i < 15; i++) { +- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { +- if (i > 0) +- vsi = pf->vsi[vf->lan_vsi_idx]; ++ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) + break; +- } + msleep(20); + } + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { +@@ -4002,6 +3998,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) + ret = -EAGAIN; + goto error_param; + } ++ vsi = pf->vsi[vf->lan_vsi_idx]; + + if (is_multicast_ether_addr(mac)) { + dev_err(&pf->pdev->dev, +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c +index ac98f1d968921..0303eeb760505 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c +@@ -1670,12 +1670,18 @@ static int igc_get_link_ksettings(struct net_device *netdev, + cmd->base.phy_address = hw->phy.addr; + + /* advertising link modes */ +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); ++ if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); ++ if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); ++ if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); ++ if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); ++ if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); ++ if (hw->phy.autoneg_advertised & 
ADVERTISE_2500_FULL) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { +@@ -1786,6 +1792,12 @@ static int igc_set_link_ksettings(struct net_device *netdev, + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); ++ /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. ++ * We have to check this and convert it to ADVERTISE_2500_FULL ++ * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. ++ */ ++ if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) ++ advertising |= ADVERTISE_2500_FULL; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index ec117e4414250..6495c26d95969 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -57,6 +57,7 @@ + #include "lib/devcom.h" + #include "lib/geneve.h" + #include "diag/en_tc_tracepoint.h" ++#include + + struct mlx5_nic_flow_attr { + u32 action; +@@ -1837,8 +1838,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, + BIT(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); +- netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", +- dissector->used_keys); ++ netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n", ++ dissector->used_keys); + return -EOPNOTSUPP; + } + +@@ -3943,13 +3944,13 @@ errout: + return err; + } + +-static int apply_police_params(struct mlx5e_priv *priv, u32 rate, ++static int apply_police_params(struct mlx5e_priv *priv, u64 rate, + struct netlink_ext_ack *extack) + { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch *esw; ++ u32 rate_mbps = 0; + u16 vport_num; +- u32 rate_mbps; + int err; + + vport_num = rpriv->rep->vport; +@@ -3966,7 +3967,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, + * Moreover, if rate is non zero we choose to configure to a minimum of + * 1 mbit/sec. + */ +- rate_mbps = rate ? 
max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; ++ if (rate) { ++ rate = (rate * BITS_PER_BYTE) + 500000; ++ rate_mbps = max_t(u32, do_div(rate, 1000000), 1); ++ } ++ + err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); + if (err) + NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 2eceb72f0f647..4944c40436f08 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -1068,6 +1068,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa + destroy_ft: + root->cmds->destroy_flow_table(root, ft); + free_ft: ++ rhltable_destroy(&ft->fgs_hash); + kfree(ft); + unlock_root: + mutex_unlock(&root->chain_lock); +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c +index 606fee99221b8..0eb894b7c0bda 100644 +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -991,7 +991,8 @@ static void __team_compute_features(struct team *team) + unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | + IFF_XMIT_DST_RELEASE_PERM; + +- list_for_each_entry(port, &team->port_list, list) { ++ rcu_read_lock(); ++ list_for_each_entry_rcu(port, &team->port_list, list) { + vlan_features = netdev_increment_features(vlan_features, + port->dev->vlan_features, + TEAM_VLAN_FEATURES); +@@ -1005,6 +1006,7 @@ static void __team_compute_features(struct team *team) + if (port->dev->hard_header_len > max_hard_header_len) + max_hard_header_len = port->dev->hard_header_len; + } ++ rcu_read_unlock(); + + team->dev->vlan_features = vlan_features; + team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | +@@ -1020,9 +1022,7 @@ static void __team_compute_features(struct team *team) + + static void team_compute_features(struct team *team) + { +- mutex_lock(&team->lock); + __team_compute_features(team); +- mutex_unlock(&team->lock); + netdev_change_features(team->dev); + } + +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index b0d748a614a9e..72a3a5dc51319 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1347,6 +1347,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */ + {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ + {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ ++ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */ + {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ + {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */ + {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +index ef5a8ecabc60a..0581f082301e0 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +@@ -2183,7 +2183,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + + while (offs < dwords) { + /* limit the time we spin here under lock to 1/2s */ +- ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC); ++ unsigned long end = jiffies + HZ / 2; ++ bool resched = false; + + if (iwl_trans_grab_nic_access(trans, &flags)) { + iwl_write32(trans, HBUS_TARG_MEM_RADDR, +@@ -2194,14 +2195,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + HBUS_TARG_MEM_RDAT); + 
offs++; + +- /* calling ktime_get is expensive so +- * do it once in 128 reads +- */ +- if (offs % 128 == 0 && ktime_after(ktime_get(), +- timeout)) ++ if (time_after(jiffies, end)) { ++ resched = true; + break; ++ } + } + iwl_trans_release_nic_access(trans, &flags); ++ ++ if (resched) ++ cond_resched(); + } else { + return -EBUSY; + } +diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c +index f6a0454abe044..6f2172be7b66a 100644 +--- a/drivers/net/wireless/mediatek/mt7601u/dma.c ++++ b/drivers/net/wireless/mediatek/mt7601u/dma.c +@@ -152,8 +152,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) + + if (new_p) { + /* we have one extra ref from the allocator */ +- __free_pages(e->p, MT_RX_ORDER); +- ++ put_page(e->p); + e->p = new_p; + } + } +@@ -310,7 +309,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, + } + + e = &q->e[q->end]; +- e->skb = skb; + usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, + mt7601u_complete_tx, q); + ret = usb_submit_urb(e->urb, GFP_ATOMIC); +@@ -328,6 +326,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, + + q->end = (q->end + 1) % q->entries; + q->used++; ++ e->skb = skb; + + if (q->used >= q->entries) + ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c +index 3968f89f7855a..0ac0bd4c65c4c 100644 +--- a/drivers/nvme/host/multipath.c ++++ b/drivers/nvme/host/multipath.c +@@ -233,7 +233,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, + } + + for (ns = nvme_next_ns(head, old); +- ns != old; ++ ns && ns != old; + ns = nvme_next_ns(head, ns)) { + if (nvme_path_is_disabled(ns)) + continue; +diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c +index be2520cc010be..7dc72cb718b0e 100644 +--- a/drivers/s390/crypto/vfio_ap_drv.c ++++ b/drivers/s390/crypto/vfio_ap_drv.c +@@ -71,15 +71,11 @@ static int vfio_ap_queue_dev_probe(struct ap_device *apdev) + static void vfio_ap_queue_dev_remove(struct ap_device *apdev) + { + struct vfio_ap_queue *q; +- int apid, apqi; + + mutex_lock(&matrix_dev->lock); + q = dev_get_drvdata(&apdev->device); ++ vfio_ap_mdev_reset_queue(q, 1); + dev_set_drvdata(&apdev->device, NULL); +- apid = AP_QID_CARD(q->apqn); +- apqi = AP_QID_QUEUE(q->apqn); +- vfio_ap_mdev_reset_queue(apid, apqi, 1); +- vfio_ap_irq_disable(q); + kfree(q); + mutex_unlock(&matrix_dev->lock); + } +diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c +index 5c0f53c6dde75..790b0b2b36272 100644 +--- a/drivers/s390/crypto/vfio_ap_ops.c ++++ b/drivers/s390/crypto/vfio_ap_ops.c +@@ -25,6 +25,7 @@ + #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device" + + static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev); ++static struct vfio_ap_queue *vfio_ap_find_queue(int apqn); + + static int match_apqn(struct device *dev, const void *data) + { +@@ -49,20 +50,15 @@ static struct vfio_ap_queue *vfio_ap_get_queue( + int apqn) + { + struct vfio_ap_queue *q; +- struct device *dev; + + if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm)) + return NULL; + if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) + return NULL; + +- dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, +- &apqn, match_apqn); +- if (!dev) +- return NULL; +- q = dev_get_drvdata(dev); +- q->matrix_mdev = matrix_mdev; +- put_device(dev); ++ q = vfio_ap_find_queue(apqn); ++ if (q) 
++ q->matrix_mdev = matrix_mdev; + + return q; + } +@@ -119,13 +115,18 @@ static void vfio_ap_wait_for_irqclear(int apqn) + */ + static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) + { +- if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev) ++ if (!q) ++ return; ++ if (q->saved_isc != VFIO_AP_ISC_INVALID && ++ !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) { + kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc); +- if (q->saved_pfn && q->matrix_mdev) ++ q->saved_isc = VFIO_AP_ISC_INVALID; ++ } ++ if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) { + vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), + &q->saved_pfn, 1); +- q->saved_pfn = 0; +- q->saved_isc = VFIO_AP_ISC_INVALID; ++ q->saved_pfn = 0; ++ } + } + + /** +@@ -144,7 +145,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) + * Returns if ap_aqic function failed with invalid, deconfigured or + * checkstopped AP. + */ +-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) ++static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) + { + struct ap_qirq_ctrl aqic_gisa = {}; + struct ap_queue_status status; +@@ -1114,48 +1115,70 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, + return NOTIFY_OK; + } + +-static void vfio_ap_irq_disable_apqn(int apqn) ++static struct vfio_ap_queue *vfio_ap_find_queue(int apqn) + { + struct device *dev; +- struct vfio_ap_queue *q; ++ struct vfio_ap_queue *q = NULL; + + dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, + &apqn, match_apqn); + if (dev) { + q = dev_get_drvdata(dev); +- vfio_ap_irq_disable(q); + put_device(dev); + } ++ ++ return q; + } + +-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, ++int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, + unsigned int retry) + { + struct ap_queue_status status; ++ int ret; + int retry2 = 2; +- int apqn = AP_MKQID(apid, apqi); + +- do { +- status = ap_zapq(apqn); +- switch (status.response_code) { +- case AP_RESPONSE_NORMAL: +- while (!status.queue_empty && retry2--) { +- msleep(20); +- status = ap_tapq(apqn, NULL); +- } +- WARN_ON_ONCE(retry2 <= 0); +- return 0; +- case AP_RESPONSE_RESET_IN_PROGRESS: +- case AP_RESPONSE_BUSY: ++ if (!q) ++ return 0; ++ ++retry_zapq: ++ status = ap_zapq(q->apqn); ++ switch (status.response_code) { ++ case AP_RESPONSE_NORMAL: ++ ret = 0; ++ break; ++ case AP_RESPONSE_RESET_IN_PROGRESS: ++ if (retry--) { + msleep(20); +- break; +- default: +- /* things are really broken, give up */ +- return -EIO; ++ goto retry_zapq; + } +- } while (retry--); ++ ret = -EBUSY; ++ break; ++ case AP_RESPONSE_Q_NOT_AVAIL: ++ case AP_RESPONSE_DECONFIGURED: ++ case AP_RESPONSE_CHECKSTOPPED: ++ WARN_ON_ONCE(status.irq_enabled); ++ ret = -EBUSY; ++ goto free_resources; ++ default: ++ /* things are really broken, give up */ ++ WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n", ++ status.response_code); ++ return -EIO; ++ } ++ ++ /* wait for the reset to take effect */ ++ while (retry2--) { ++ if (status.queue_empty && !status.irq_enabled) ++ break; ++ msleep(20); ++ status = ap_tapq(q->apqn, NULL); ++ } ++ WARN_ON_ONCE(retry2 <= 0); + +- return -EBUSY; ++free_resources: ++ vfio_ap_free_aqic_resources(q); ++ ++ return ret; + } + + static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) +@@ -1163,13 +1186,15 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) + int ret; + int rc = 0; + unsigned long apid, apqi; ++ struct vfio_ap_queue *q; + struct ap_matrix_mdev *matrix_mdev = 
mdev_get_drvdata(mdev); + + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, + matrix_mdev->matrix.apm_max + 1) { + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, + matrix_mdev->matrix.aqm_max + 1) { +- ret = vfio_ap_mdev_reset_queue(apid, apqi, 1); ++ q = vfio_ap_find_queue(AP_MKQID(apid, apqi)); ++ ret = vfio_ap_mdev_reset_queue(q, 1); + /* + * Regardless whether a queue turns out to be busy, or + * is not operational, we need to continue resetting +@@ -1177,7 +1202,6 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) + */ + if (ret) + rc = ret; +- vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi)); + } + } + +diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h +index f46dde56b4644..28e9d99897682 100644 +--- a/drivers/s390/crypto/vfio_ap_private.h ++++ b/drivers/s390/crypto/vfio_ap_private.h +@@ -88,11 +88,6 @@ struct ap_matrix_mdev { + struct mdev_device *mdev; + }; + +-extern int vfio_ap_mdev_register(void); +-extern void vfio_ap_mdev_unregister(void); +-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, +- unsigned int retry); +- + struct vfio_ap_queue { + struct ap_matrix_mdev *matrix_mdev; + unsigned long saved_pfn; +@@ -100,5 +95,10 @@ struct vfio_ap_queue { + #define VFIO_AP_ISC_INVALID 0xff + unsigned char saved_isc; + }; +-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q); ++ ++int vfio_ap_mdev_register(void); ++void vfio_ap_mdev_unregister(void); ++int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, ++ unsigned int retry); ++ + #endif /* _VFIO_AP_PRIVATE_H_ */ +diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c +index 096a83cf0caf3..4b4174597150d 100644 +--- a/drivers/soc/atmel/soc.c ++++ b/drivers/soc/atmel/soc.c +@@ -264,8 +264,21 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs) + return soc_dev; + } + ++static const struct of_device_id at91_soc_allowed_list[] __initconst = { ++ { .compatible = "atmel,at91rm9200", }, ++ { .compatible = "atmel,at91sam9", }, ++ { .compatible = "atmel,sama5", }, ++ { .compatible = "atmel,samv7", }, ++ { } ++}; ++ + static int __init atmel_soc_device_init(void) + { ++ struct device_node *np = of_find_node_by_path("/"); ++ ++ if (!of_match_node(at91_soc_allowed_list, np)) ++ return 0; ++ + at91_soc_init(socs); + + return 0; +diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c +index aadedec3bfe7b..ea79482ebda46 100644 +--- a/drivers/tee/optee/call.c ++++ b/drivers/tee/optee/call.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -148,7 +149,8 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) + */ + optee_cq_wait_for_completion(&optee->call_queue, &w); + } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { +- might_sleep(); ++ if (need_resched()) ++ cond_resched(); + param.a0 = res.a0; + param.a1 = res.a1; + param.a2 = res.a2; +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c +index 14ccf13ab8fa1..786494bb7f20b 100644 +--- a/drivers/xen/xenbus/xenbus_probe.c ++++ b/drivers/xen/xenbus/xenbus_probe.c +@@ -714,6 +714,23 @@ static bool xs_hvm_defer_init_for_callback(void) + #endif + } + ++static int xenbus_probe_thread(void *unused) ++{ ++ DEFINE_WAIT(w); ++ ++ /* ++ * We actually just want to wait for *any* trigger of xb_waitq, ++ * and run xenbus_probe() the moment it occurs. 
++ */ ++ prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE); ++ schedule(); ++ finish_wait(&xb_waitq, &w); ++ ++ DPRINTK("probing"); ++ xenbus_probe(); ++ return 0; ++} ++ + static int __init xenbus_probe_initcall(void) + { + /* +@@ -725,6 +742,20 @@ static int __init xenbus_probe_initcall(void) + !xs_hvm_defer_init_for_callback())) + xenbus_probe(); + ++ /* ++ * For XS_LOCAL, spawn a thread which will wait for xenstored ++ * or a xenstore-stubdom to be started, then probe. It will be ++ * triggered when communication starts happening, by waiting ++ * on xb_waitq. ++ */ ++ if (xen_store_domain_type == XS_LOCAL) { ++ struct task_struct *probe_task; ++ ++ probe_task = kthread_run(xenbus_probe_thread, NULL, ++ "xenbus_probe"); ++ if (IS_ERR(probe_task)) ++ return PTR_ERR(probe_task); ++ } + return 0; + } + device_initcall(xenbus_probe_initcall); +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index 59e7a2ad440fc..a32f23981f60f 100644 +--- a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -640,7 +640,15 @@ static noinline void caching_thread(struct btrfs_work *work) + mutex_lock(&caching_ctl->mutex); + down_read(&fs_info->commit_root_sem); + +- if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) ++ /* ++ * If we are in the transaction that populated the free space tree we ++ * can't actually cache from the free space tree as our commit root and ++ * real root are the same, so we could change the contents of the blocks ++ * while caching. Instead do the slow caching in this case, and after ++ * the transaction has committed we will be safe. ++ */ ++ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && ++ !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) + ret = load_free_space_tree(caching_ctl); + else + ret = load_extent_tree_free(caching_ctl); +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 27128164fac97..cda5534d3d0e3 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -136,6 +136,9 @@ enum { + BTRFS_FS_STATE_DEV_REPLACING, + /* The btrfs_fs_info created for self-tests */ + BTRFS_FS_STATE_DUMMY_FS_INFO, ++ ++ /* Indicate that we can't trust the free space tree for caching yet */ ++ BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, + }; + + #define BTRFS_BACKREF_REV_MAX 256 +diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c +index 48a03f5240f59..dfabbbfc94ccb 100644 +--- a/fs/btrfs/free-space-tree.c ++++ b/fs/btrfs/free-space-tree.c +@@ -1149,6 +1149,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) + return PTR_ERR(trans); + + set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); ++ set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); + free_space_root = btrfs_create_tree(trans, + BTRFS_FREE_SPACE_TREE_OBJECTID); + if (IS_ERR(free_space_root)) { +@@ -1170,11 +1171,18 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) + btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE); + btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID); + clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); ++ ret = btrfs_commit_transaction(trans); + +- return btrfs_commit_transaction(trans); ++ /* ++ * Now that we've committed the transaction any reading of our commit ++ * root will be safe, so we can cache from the free space tree now. 
++ */ ++ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); ++ return ret; + + abort: + clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); ++ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); + btrfs_abort_transaction(trans, ret); + btrfs_end_transaction(trans); + return ret; +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index 4232f956bdac0..ca1d98f274d12 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -2388,6 +2388,7 @@ out_forget: + spin_unlock(&ino->i_lock); + lseg->pls_layout = lo; + NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); ++ pnfs_free_lseg_list(&free_me); + return ERR_PTR(-EAGAIN); + } + +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index 6b559d25a84ee..88ac8edf44e31 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -556,6 +556,8 @@ struct intel_iommu { + struct iommu_device iommu; /* IOMMU core code handle */ + int node; + u32 flags; /* Software defined flags */ ++ ++ struct dmar_drhd_unit *drhd; + }; + + /* PCI domain-device relationship */ +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 377179283c46c..4b38ba101b9b7 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -2030,7 +2030,7 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb); + void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced); + extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, + u32 reo_wnd); +-extern void tcp_rack_mark_lost(struct sock *sk); ++extern bool tcp_rack_mark_lost(struct sock *sk); + extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, + u64 xmit_time); + extern void tcp_rack_reo_timeout(struct sock *sk); +diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h +index 2622b5a3e6163..9a31ea2ad1cfc 100644 +--- a/include/uapi/linux/icmpv6.h ++++ b/include/uapi/linux/icmpv6.h +@@ -137,6 +137,7 @@ struct icmp6hdr { + #define ICMPV6_HDR_FIELD 0 + #define ICMPV6_UNK_NEXTHDR 1 + #define ICMPV6_UNK_OPTION 2 ++#define ICMPV6_HDR_INCOMP 3 + + /* + * constants for (set|get)sockopt +diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c +index 15d70a90b50dc..d65b0fc8fb48b 100644 +--- a/kernel/kexec_core.c ++++ b/kernel/kexec_core.c +@@ -1129,7 +1129,6 @@ int kernel_kexec(void) + + #ifdef CONFIG_KEXEC_JUMP + if (kexec_image->preserve_context) { +- lock_system_sleep(); + pm_prepare_console(); + error = freeze_processes(); + if (error) { +@@ -1192,7 +1191,6 @@ int kernel_kexec(void) + thaw_processes(); + Restore_console: + pm_restore_console(); +- unlock_system_sleep(); + } + #endif + +diff --git a/kernel/power/swap.c b/kernel/power/swap.c +index ca0fcb5ced714..0516c422206d8 100644 +--- a/kernel/power/swap.c ++++ b/kernel/power/swap.c +@@ -489,10 +489,10 @@ static int swap_writer_finish(struct swap_map_handle *handle, + unsigned int flags, int error) + { + if (!error) { +- flush_swap_writer(handle); + pr_info("S"); + error = mark_swapfiles(handle, flags); + pr_cont("|\n"); ++ flush_swap_writer(handle); + } + + if (error) +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 7411a43134629..26305aa88651f 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -2764,7 +2764,8 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag) + } else if (tcp_is_rack(sk)) { + u32 prior_retrans = tp->retrans_out; + +- tcp_rack_mark_lost(sk); ++ if (tcp_rack_mark_lost(sk)) ++ *ack_flag &= ~FLAG_SET_XMIT_TIMER; + if (prior_retrans > tp->retrans_out) + *ack_flag |= FLAG_LOST_RETRANS; + } +@@ 
-3713,9 +3714,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) + + if (tp->tlp_high_seq) + tcp_process_tlp_ack(sk, ack, flag); +- /* If needed, reset TLP/RTO timer; RACK may later override this. */ +- if (flag & FLAG_SET_XMIT_TIMER) +- tcp_set_xmit_timer(sk); + + if (tcp_ack_is_dubious(sk, flag)) { + if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) { +@@ -3728,6 +3726,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) + &rexmit); + } + ++ /* If needed, reset TLP/RTO timer when RACK doesn't set. */ ++ if (flag & FLAG_SET_XMIT_TIMER) ++ tcp_set_xmit_timer(sk); ++ + if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) + sk_dst_confirm(sk); + +diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c +index fdb715bdd2d11..8757bb6cb1d93 100644 +--- a/net/ipv4/tcp_recovery.c ++++ b/net/ipv4/tcp_recovery.c +@@ -110,13 +110,13 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout) + } + } + +-void tcp_rack_mark_lost(struct sock *sk) ++bool tcp_rack_mark_lost(struct sock *sk) + { + struct tcp_sock *tp = tcp_sk(sk); + u32 timeout; + + if (!tp->rack.advanced) +- return; ++ return false; + + /* Reset the advanced flag to avoid unnecessary queue scanning */ + tp->rack.advanced = 0; +@@ -126,6 +126,7 @@ void tcp_rack_mark_lost(struct sock *sk) + inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT, + timeout, inet_csk(sk)->icsk_rto); + } ++ return !!timeout; + } + + /* Record the most recently (re)sent time among the (s)acked packets +diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c +index 7d3a3894f785c..e9bb89131e02a 100644 +--- a/net/ipv6/icmp.c ++++ b/net/ipv6/icmp.c +@@ -158,7 +158,13 @@ static bool is_ineligible(const struct sk_buff *skb) + tp = skb_header_pointer(skb, + ptr+offsetof(struct icmp6hdr, icmp6_type), + sizeof(_type), &_type); +- if (!tp || !(*tp & ICMPV6_INFOMSG_MASK)) ++ ++ /* Based on RFC 8200, Section 4.5 Fragment Header, return ++ * false if this is a fragment packet with no icmp header info. ++ */ ++ if (!tp && frag_off != 0) ++ return false; ++ else if (!tp || !(*tp & ICMPV6_INFOMSG_MASK)) + return true; + } + return false; +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c +index 1f5d4d196dcce..c8cf1bbad74a2 100644 +--- a/net/ipv6/reassembly.c ++++ b/net/ipv6/reassembly.c +@@ -42,6 +42,8 @@ + #include + #include + #include ++#include ++#include + + #include + #include +@@ -322,7 +324,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb) + struct frag_queue *fq; + const struct ipv6hdr *hdr = ipv6_hdr(skb); + struct net *net = dev_net(skb_dst(skb)->dev); +- int iif; ++ __be16 frag_off; ++ int iif, offset; ++ u8 nexthdr; + + if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) + goto fail_hdr; +@@ -351,6 +355,33 @@ static int ipv6_frag_rcv(struct sk_buff *skb) + return 1; + } + ++ /* RFC 8200, Section 4.5 Fragment Header: ++ * If the first fragment does not include all headers through an ++ * Upper-Layer header, then that fragment should be discarded and ++ * an ICMP Parameter Problem, Code 3, message should be sent to ++ * the source of the fragment, with the Pointer field set to zero. 
++ */ ++ nexthdr = hdr->nexthdr; ++ offset = ipv6_skip_exthdr(skb, skb_transport_offset(skb), &nexthdr, &frag_off); ++ if (offset >= 0) { ++ /* Check some common protocols' header */ ++ if (nexthdr == IPPROTO_TCP) ++ offset += sizeof(struct tcphdr); ++ else if (nexthdr == IPPROTO_UDP) ++ offset += sizeof(struct udphdr); ++ else if (nexthdr == IPPROTO_ICMPV6) ++ offset += sizeof(struct icmp6hdr); ++ else ++ offset += 1; ++ ++ if (!(frag_off & htons(IP6_OFFSET)) && offset > skb->len) { ++ __IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev), ++ IPSTATS_MIB_INHDRERRORS); ++ icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0); ++ return -1; ++ } ++ } ++ + iif = skb->dev ? skb->dev->ifindex : 0; + fq = fq_find(net, fhdr->identification, hdr, iif); + if (fq) { +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h +index 05406e9c05b32..268f1d8f440ba 100644 +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -1061,6 +1061,7 @@ enum queue_stop_reason { + IEEE80211_QUEUE_STOP_REASON_FLUSH, + IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN, + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID, ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE, + + IEEE80211_QUEUE_STOP_REASONS, + }; +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index af8b09214786d..6089b09ec13b6 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -1537,6 +1537,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, + if (ret) + return ret; + ++ ieee80211_stop_vif_queues(local, sdata, ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); ++ synchronize_net(); ++ + ieee80211_do_stop(sdata, false); + + ieee80211_teardown_sdata(sdata); +@@ -1557,6 +1561,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, + err = ieee80211_do_open(&sdata->wdev, false); + WARN(err, "type change: do_open returned %d", err); + ++ ieee80211_wake_vif_queues(local, sdata, ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); + return ret; + } + +diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c +index 60236cc316d03..95415d2b81c93 100644 +--- a/net/netfilter/nft_dynset.c ++++ b/net/netfilter/nft_dynset.c +@@ -233,8 +233,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx, + nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR, + priv->expr->ops->size); + if (set->flags & NFT_SET_TIMEOUT) { +- if (timeout || set->timeout) ++ if (timeout || set->timeout) { ++ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT); + nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION); ++ } + } + + priv->timeout = timeout; +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c +index 4170acc2dc282..99b06a16b8086 100644 +--- a/net/nfc/netlink.c ++++ b/net/nfc/netlink.c +@@ -860,6 +860,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) + + if (!dev->polling) { + device_unlock(&dev->dev); ++ nfc_put_device(dev); + return -EINVAL; + } + +diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c +index b5c867fe32324..23d5e56306a4c 100644 +--- a/net/nfc/rawsock.c ++++ b/net/nfc/rawsock.c +@@ -105,7 +105,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, + if (addr->target_idx > dev->target_next_idx - 1 || + addr->target_idx < dev->target_next_idx - dev->n_targets) { + rc = -EINVAL; +- goto error; ++ goto put_dev; + } + + rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); +diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c +index 032ed76c0166d..55fb3744552de 100644 +--- a/net/rxrpc/call_accept.c ++++ 
b/net/rxrpc/call_accept.c +@@ -207,6 +207,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) + tail = b->peer_backlog_tail; + while (CIRC_CNT(head, tail, size) > 0) { + struct rxrpc_peer *peer = b->peer_backlog[tail]; ++ rxrpc_put_local(peer->local); + kfree(peer); + tail = (tail + 1) & (size - 1); + } +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c +index 69102fda9ebd4..76a80a41615be 100644 +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -896,8 +896,9 @@ out: + int call_commit_handler(struct net_device *dev) + { + #ifdef CONFIG_WIRELESS_EXT +- if ((netif_running(dev)) && +- (dev->wireless_handlers->standard[0] != NULL)) ++ if (netif_running(dev) && ++ dev->wireless_handlers && ++ dev->wireless_handlers->standard[0]) + /* Call the commit handler on the driver */ + return dev->wireless_handlers->standard[0](dev, NULL, + NULL, NULL); +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c +index 7a84745477919..e120df0a6da13 100644 +--- a/net/xfrm/xfrm_input.c ++++ b/net/xfrm/xfrm_input.c +@@ -656,7 +656,7 @@ resume: + /* only the first xfrm gets the encap type */ + encap_type = 0; + +- if (async && x->repl->recheck(x, skb, seq)) { ++ if (x->repl->recheck(x, skb, seq)) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); + goto drop_unlock; + } +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 2917711ff8ab6..32c8163427970 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -790,15 +790,22 @@ static int xfrm_policy_addr_delta(const xfrm_address_t *a, + const xfrm_address_t *b, + u8 prefixlen, u16 family) + { ++ u32 ma, mb, mask; + unsigned int pdw, pbi; + int delta = 0; + + switch (family) { + case AF_INET: +- if (sizeof(long) == 4 && prefixlen == 0) +- return ntohl(a->a4) - ntohl(b->a4); +- return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) - +- (ntohl(b->a4) & ((~0UL << (32 - prefixlen)))); ++ if (prefixlen == 0) ++ return 0; ++ mask = ~0U << (32 - prefixlen); ++ ma = ntohl(a->a4) & mask; ++ mb = ntohl(b->a4) & mask; ++ if (ma < mb) ++ delta = -1; ++ else if (ma > mb) ++ delta = 1; ++ break; + case AF_INET6: + pdw = prefixlen >> 5; + pbi = prefixlen & 0x1f; +@@ -809,10 +816,13 @@ static int xfrm_policy_addr_delta(const xfrm_address_t *a, + return delta; + } + if (pbi) { +- u32 mask = ~0u << (32 - pbi); +- +- delta = (ntohl(a->a6[pdw]) & mask) - +- (ntohl(b->a6[pdw]) & mask); ++ mask = ~0U << (32 - pbi); ++ ma = ntohl(a->a6[pdw]) & mask; ++ mb = ntohl(b->a6[pdw]) & mask; ++ if (ma < mb) ++ delta = -1; ++ else if (ma > mb) ++ delta = 1; + } + break; + default: +@@ -3065,8 +3075,8 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net, + xflo.flags = flags; + + /* To accelerate a bit... 
*/ +- if ((dst_orig->flags & DST_NOXFRM) || +- !net->xfrm.policy_count[XFRM_POLICY_OUT]) ++ if (!if_id && ((dst_orig->flags & DST_NOXFRM) || ++ !net->xfrm.policy_count[XFRM_POLICY_OUT])) + goto nopol; + + xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 8adbe45a54c11..f548bd48bf729 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -7907,6 +7907,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE), ++ SND_PCI_QUIRK(0x1043, 0x1982, "ASUS B1400CEPE", ALC256_FIXUP_ASUS_HPE), + SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), + SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), +diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c +index 834367dd54e1b..a5c1a2c4eae4e 100644 +--- a/sound/pci/hda/patch_via.c ++++ b/sound/pci/hda/patch_via.c +@@ -1043,7 +1043,7 @@ static const struct hda_fixup via_fixups[] = { + static const struct snd_pci_quirk vt2002p_fixups[] = { + SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75), + SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST), +- SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE), ++ SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE), + {} + }; + +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c +index aa5833001fde5..2cb719893324a 100644 +--- a/sound/soc/intel/skylake/skl-topology.c ++++ b/sound/soc/intel/skylake/skl-topology.c +@@ -3619,15 +3619,16 @@ static void skl_tplg_complete(struct snd_soc_component *component) + + list_for_each_entry(dobj, &component->dobj_list, list) { + struct snd_kcontrol *kcontrol = dobj->control.kcontrol; +- struct soc_enum *se = +- (struct soc_enum *)kcontrol->private_value; +- char **texts = dobj->control.dtexts; ++ struct soc_enum *se; ++ char **texts; + char chan_text[4]; + +- if (dobj->type != SND_SOC_DOBJ_ENUM || +- dobj->control.kcontrol->put != +- skl_tplg_multi_config_set_dmic) ++ if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol || ++ kcontrol->put != skl_tplg_multi_config_set_dmic) + continue; ++ ++ se = (struct soc_enum *)kcontrol->private_value; ++ texts = dobj->control.dtexts; + sprintf(chan_text, "c%d", mach->mach_params.dmic_num); + + for (i = 0; i < se->items; i++) { +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c +index 0100f123484e6..c367609433bfc 100644 +--- a/sound/soc/soc-topology.c ++++ b/sound/soc/soc-topology.c +@@ -982,7 +982,7 @@ static int soc_tplg_denum_create_values(struct soc_enum *se, + return -EINVAL; + + se->dobj.control.dvalues = kzalloc(le32_to_cpu(ec->items) * +- sizeof(u32), ++ sizeof(*se->dobj.control.dvalues), + GFP_KERNEL); + if (!se->dobj.control.dvalues) + return -ENOMEM; +diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh +index cf3d26c233e8e..7fcc42bc076fa 100755 +--- a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh ++++ b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh +@@ -197,7 +197,7 @@ multipath4_test() + t0_rp12=$(link_stats_tx_packets_get $rp12) + 
t0_rp13=$(link_stats_tx_packets_get $rp13) + +- ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \ ++ ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \ + -d 1msec -t udp "sp=1024,dp=0-32768" + + t1_rp12=$(link_stats_tx_packets_get $rp12) +diff --git a/tools/testing/selftests/net/forwarding/router_multipath.sh b/tools/testing/selftests/net/forwarding/router_multipath.sh +index 79a2099279621..464821c587a5e 100755 +--- a/tools/testing/selftests/net/forwarding/router_multipath.sh ++++ b/tools/testing/selftests/net/forwarding/router_multipath.sh +@@ -178,7 +178,7 @@ multipath4_test() + t0_rp12=$(link_stats_tx_packets_get $rp12) + t0_rp13=$(link_stats_tx_packets_get $rp13) + +- ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \ ++ ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \ + -d 1msec -t udp "sp=1024,dp=0-32768" + + t1_rp12=$(link_stats_tx_packets_get $rp12) +diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh +index 7a1bf94c5bd38..bdf450eaf60cf 100755 +--- a/tools/testing/selftests/net/xfrm_policy.sh ++++ b/tools/testing/selftests/net/xfrm_policy.sh +@@ -202,7 +202,7 @@ check_xfrm() { + # 1: iptables -m policy rule count != 0 + rval=$1 + ip=$2 +- lret=0 ++ local lret=0 + + ip netns exec ns1 ping -q -c 1 10.0.2.$ip > /dev/null + +@@ -287,6 +287,47 @@ check_hthresh_repeat() + return 0 + } + ++# insert non-overlapping policies in a random order and check that ++# all of them can be fetched using the traffic selectors. ++check_random_order() ++{ ++ local ns=$1 ++ local log=$2 ++ ++ for i in $(seq 100); do ++ ip -net $ns xfrm policy flush ++ for j in $(seq 0 16 255 | sort -R); do ++ ip -net $ns xfrm policy add dst $j.0.0.0/24 dir out priority 10 action allow ++ done ++ for j in $(seq 0 16 255); do ++ if ! ip -net $ns xfrm policy get dst $j.0.0.0/24 dir out > /dev/null; then ++ echo "FAIL: $log" 1>&2 ++ return 1 ++ fi ++ done ++ done ++ ++ for i in $(seq 100); do ++ ip -net $ns xfrm policy flush ++ for j in $(seq 0 16 255 | sort -R); do ++ local addr=$(printf "e000:0000:%02x00::/56" $j) ++ ip -net $ns xfrm policy add dst $addr dir out priority 10 action allow ++ done ++ for j in $(seq 0 16 255); do ++ local addr=$(printf "e000:0000:%02x00::/56" $j) ++ if ! ip -net $ns xfrm policy get dst $addr dir out > /dev/null; then ++ echo "FAIL: $log" 1>&2 ++ return 1 ++ fi ++ done ++ done ++ ++ ip -net $ns xfrm policy flush ++ ++ echo "PASS: $log" ++ return 0 ++} ++ + #check for needed privileges + if [ "$(id -u)" -ne 0 ];then + echo "SKIP: Need root privileges" +@@ -438,6 +479,8 @@ check_exceptions "exceptions and block policies after htresh change to normal" + + check_hthresh_repeat "policies with repeated htresh change" + ++check_random_order ns3 "policies inserted in random order" ++ + for i in 1 2 3 4;do ip netns del ns$i;done + + exit $ret +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 8f3b40ec02b77..f25b5043cbcae 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -1017,6 +1017,7 @@ int __kvm_set_memory_region(struct kvm *kvm, + /* We can read the guest memory with __xxx_user() later on. 
*/ + if ((id < KVM_USER_MEM_SLOTS) && + ((mem->userspace_addr & (PAGE_SIZE - 1)) || ++ (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || + !access_ok((void __user *)(unsigned long)mem->userspace_addr, + mem->memory_size))) + goto out; diff --git a/patch/kernel/odroidxu4-current/patch-5.4.95-96.patch b/patch/kernel/odroidxu4-current/patch-5.4.95-96.patch new file mode 100644 index 000000000..5e7c4fa59 --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.95-96.patch @@ -0,0 +1,1218 @@ +diff --git a/Makefile b/Makefile +index aa3c2e834442e..7a47a2594f957 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 95 ++SUBLEVEL = 96 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h +index 51d867cf146c1..6c295a231882a 100644 +--- a/arch/arm64/include/asm/memory.h ++++ b/arch/arm64/include/asm/memory.h +@@ -247,11 +247,11 @@ static inline const void *__tag_set(const void *addr, u8 tag) + + + /* +- * The linear kernel range starts at the bottom of the virtual address +- * space. Testing the top bit for the start of the region is a +- * sufficient check and avoids having to worry about the tag. ++ * Check whether an arbitrary address is within the linear map, which ++ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the ++ * kernel's TTBR1 address range. + */ +-#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1))) ++#define __is_lm_address(addr) (((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET)) + + #define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET) + #define __kimg_to_phys(addr) ((addr) - kimage_voffset) +@@ -332,7 +332,7 @@ static inline void *phys_to_virt(phys_addr_t x) + #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */ + + #define virt_addr_valid(addr) ({ \ +- __typeof__(addr) __addr = addr; \ ++ __typeof__(addr) __addr = __tag_reset(addr); \ + __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \ + }) + +diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c +index 67a9ba9eaa96b..cde44c13dda1b 100644 +--- a/arch/arm64/mm/physaddr.c ++++ b/arch/arm64/mm/physaddr.c +@@ -9,7 +9,7 @@ + + phys_addr_t __virt_to_phys(unsigned long x) + { +- WARN(!__is_lm_address(x), ++ WARN(!__is_lm_address(__tag_reset(x)), + "virt_to_phys used for non-linear address: %pK (%pS)\n", + (void *)x, + (void *)x); +diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h +index 86f20d520a079..b40d0295d8129 100644 +--- a/arch/x86/include/asm/msr.h ++++ b/arch/x86/include/asm/msr.h +@@ -88,7 +88,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} + * think of extending them - you will be slapped with a stinking trout or a frozen + * shark will reach you, wherever you are! You've been warned. 
+ */ +-static inline unsigned long long notrace __rdmsr(unsigned int msr) ++static __always_inline unsigned long long __rdmsr(unsigned int msr) + { + DECLARE_ARGS(val, low, high); + +@@ -100,7 +100,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr) + return EAX_EDX_VAL(val, low, high); + } + +-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high) ++static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high) + { + asm volatile("1: wrmsr\n" + "2:\n" +diff --git a/block/blk-core.c b/block/blk-core.c +index d2213220099d3..5808baa950c35 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -886,11 +886,14 @@ generic_make_request_checks(struct bio *bio) + } + + /* +- * For a REQ_NOWAIT based request, return -EOPNOTSUPP +- * if queue is not a request based queue. ++ * Non-mq queues do not honor REQ_NOWAIT, so complete a bio ++ * with BLK_STS_AGAIN status in order to catch -EAGAIN and ++ * to give a chance to the caller to repeat request gracefully. + */ +- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) +- goto not_supported; ++ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) { ++ status = BLK_STS_AGAIN; ++ goto end_io; ++ } + + if (should_fail_bio(bio)) + goto end_io; +diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c +index d831a61e0010e..383c7029d3cee 100644 +--- a/drivers/acpi/thermal.c ++++ b/drivers/acpi/thermal.c +@@ -174,6 +174,8 @@ struct acpi_thermal { + int tz_enabled; + int kelvin_offset; + struct work_struct thermal_check_work; ++ struct mutex thermal_check_lock; ++ refcount_t thermal_check_count; + }; + + /* -------------------------------------------------------------------------- +@@ -494,17 +496,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) + return 0; + } + +-static void acpi_thermal_check(void *data) +-{ +- struct acpi_thermal *tz = data; +- +- if (!tz->tz_enabled) +- return; +- +- thermal_zone_device_update(tz->thermal_zone, +- THERMAL_EVENT_UNSPECIFIED); +-} +- + /* sys I/F for generic thermal sysfs support */ + + static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) +@@ -538,6 +529,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal, + return 0; + } + ++static void acpi_thermal_check_fn(struct work_struct *work); ++ + static int thermal_set_mode(struct thermal_zone_device *thermal, + enum thermal_device_mode mode) + { +@@ -563,7 +556,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal, + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "%s kernel ACPI thermal control\n", + tz->tz_enabled ? 
"Enable" : "Disable")); +- acpi_thermal_check(tz); ++ acpi_thermal_check_fn(&tz->thermal_check_work); + } + return 0; + } +@@ -932,6 +925,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) + Driver Interface + -------------------------------------------------------------------------- */ + ++static void acpi_queue_thermal_check(struct acpi_thermal *tz) ++{ ++ if (!work_pending(&tz->thermal_check_work)) ++ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); ++} ++ + static void acpi_thermal_notify(struct acpi_device *device, u32 event) + { + struct acpi_thermal *tz = acpi_driver_data(device); +@@ -942,17 +941,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) + + switch (event) { + case ACPI_THERMAL_NOTIFY_TEMPERATURE: +- acpi_thermal_check(tz); ++ acpi_queue_thermal_check(tz); + break; + case ACPI_THERMAL_NOTIFY_THRESHOLDS: + acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS); +- acpi_thermal_check(tz); ++ acpi_queue_thermal_check(tz); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, 0); + break; + case ACPI_THERMAL_NOTIFY_DEVICES: + acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); +- acpi_thermal_check(tz); ++ acpi_queue_thermal_check(tz); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, 0); + break; +@@ -1052,7 +1051,27 @@ static void acpi_thermal_check_fn(struct work_struct *work) + { + struct acpi_thermal *tz = container_of(work, struct acpi_thermal, + thermal_check_work); +- acpi_thermal_check(tz); ++ ++ if (!tz->tz_enabled) ++ return; ++ /* ++ * In general, it is not sufficient to check the pending bit, because ++ * subsequent instances of this function may be queued after one of them ++ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just ++ * one of them is running, though, because it may have done the actual ++ * check some time ago, so allow at least one of them to block on the ++ * mutex while another one is running the update. 
++ */ ++ if (!refcount_dec_not_one(&tz->thermal_check_count)) ++ return; ++ ++ mutex_lock(&tz->thermal_check_lock); ++ ++ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED); ++ ++ refcount_inc(&tz->thermal_check_count); ++ ++ mutex_unlock(&tz->thermal_check_lock); + } + + static int acpi_thermal_add(struct acpi_device *device) +@@ -1084,6 +1103,8 @@ static int acpi_thermal_add(struct acpi_device *device) + if (result) + goto free_memory; + ++ refcount_set(&tz->thermal_check_count, 3); ++ mutex_init(&tz->thermal_check_lock); + INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn); + + pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), +@@ -1149,7 +1170,7 @@ static int acpi_thermal_resume(struct device *dev) + tz->state.active |= tz->trips.active[i].flags.enabled; + } + +- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); ++ acpi_queue_thermal_check(tz); + + return AE_OK; + } +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +index 959eb075d11ed..c18f39271b034 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +@@ -1914,6 +1914,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting + initial_link_setting; + uint32_t link_bw; + ++ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) ++ return false; ++ + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. could support the b/w requested by the timing +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +index bb7add5ea2273..a6d5beada6634 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +@@ -257,7 +257,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { + .num_banks = 8, + .num_chans = 4, + .vmm_page_size_bytes = 4096, +- .dram_clock_change_latency_us = 23.84, ++ .dram_clock_change_latency_us = 11.72, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3600, + .xfc_bus_transport_time_us = 4, +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c +index b16aea0e39992..6dd29bad1609f 100644 +--- a/drivers/net/dsa/bcm_sf2.c ++++ b/drivers/net/dsa/bcm_sf2.c +@@ -421,15 +421,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) + /* Find our integrated MDIO bus node */ + dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); + priv->master_mii_bus = of_mdio_find_bus(dn); +- if (!priv->master_mii_bus) ++ if (!priv->master_mii_bus) { ++ of_node_put(dn); + return -EPROBE_DEFER; ++ } + + get_device(&priv->master_mii_bus->dev); + priv->master_mii_dn = dn; + + priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); +- if (!priv->slave_mii_bus) ++ if (!priv->slave_mii_bus) { ++ of_node_put(dn); + return -ENOMEM; ++ } + + priv->slave_mii_bus->priv = priv; + priv->slave_mii_bus->name = "sf2 slave mii"; +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 9040340fad198..c3079f436f6d7 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -4752,6 +4752,12 @@ static void ibmvnic_tasklet(void *data) + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = ibmvnic_next_crq(adapter)) != NULL) { ++ /* This barrier makes sure ibmvnic_next_crq()'s ++ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is 
loaded ++ * before ibmvnic_handle_crq()'s ++ * switch(gen_crq->first) and switch(gen_crq->cmd). ++ */ ++ dma_rmb(); + ibmvnic_handle_crq(crq, adapter); + crq->generic.first = 0; + } +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index 7a964271959d8..c2cabd77884bf 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -1295,8 +1295,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) + } + + length = (io.nblocks + 1) << ns->lba_shift; +- meta_len = (io.nblocks + 1) * ns->ms; +- metadata = nvme_to_user_ptr(io.metadata); ++ ++ if ((io.control & NVME_RW_PRINFO_PRACT) && ++ ns->ms == sizeof(struct t10_pi_tuple)) { ++ /* ++ * Protection information is stripped/inserted by the ++ * controller. ++ */ ++ if (nvme_to_user_ptr(io.metadata)) ++ return -EINVAL; ++ meta_len = 0; ++ metadata = NULL; ++ } else { ++ meta_len = (io.nblocks + 1) * ns->ms; ++ metadata = nvme_to_user_ptr(io.metadata); ++ } + + if (ns->ext) { + length += meta_len; +diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c +index 5baf64dfb24de..1bebad36bf2e5 100644 +--- a/drivers/phy/motorola/phy-cpcap-usb.c ++++ b/drivers/phy/motorola/phy-cpcap-usb.c +@@ -625,35 +625,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev) + generic_phy = devm_phy_create(ddata->dev, NULL, &ops); + if (IS_ERR(generic_phy)) { + error = PTR_ERR(generic_phy); +- return PTR_ERR(generic_phy); ++ goto out_reg_disable; + } + + phy_set_drvdata(generic_phy, ddata); + + phy_provider = devm_of_phy_provider_register(ddata->dev, + of_phy_simple_xlate); +- if (IS_ERR(phy_provider)) +- return PTR_ERR(phy_provider); ++ if (IS_ERR(phy_provider)) { ++ error = PTR_ERR(phy_provider); ++ goto out_reg_disable; ++ } + + error = cpcap_usb_init_optional_pins(ddata); + if (error) +- return error; ++ goto out_reg_disable; + + cpcap_usb_init_optional_gpios(ddata); + + error = cpcap_usb_init_iio(ddata); + if (error) +- return error; ++ goto out_reg_disable; + + error = cpcap_usb_init_interrupts(pdev, ddata); + if (error) +- return error; ++ goto out_reg_disable; + + usb_add_phy_dev(&ddata->phy); + atomic_set(&ddata->active, 1); + schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1)); + + return 0; ++ ++out_reg_disable: ++ regulator_disable(ddata->vusb); ++ ++ return error; + } + + static int cpcap_usb_phy_remove(struct platform_device *pdev) +diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c +index 37035dca469cf..d4fc2cbf78703 100644 +--- a/drivers/platform/x86/intel-vbtn.c ++++ b/drivers/platform/x86/intel-vbtn.c +@@ -203,6 +203,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"), + }, + }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"), ++ }, ++ }, + {} /* Array terminator */ + }; + +diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c +index 1e072dbba30d6..7ed1189a7200c 100644 +--- a/drivers/platform/x86/touchscreen_dmi.c ++++ b/drivers/platform/x86/touchscreen_dmi.c +@@ -231,6 +231,16 @@ static const struct ts_dmi_data digma_citi_e200_data = { + .properties = digma_citi_e200_props, + }; + ++static const struct property_entry estar_beauty_hd_props[] = { ++ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), ++ { } ++}; ++ ++static const struct ts_dmi_data estar_beauty_hd_data = { ++ .acpi_name = "GDIX1001:00", ++ .properties = estar_beauty_hd_props, ++}; 
++
+ static const struct property_entry gp_electronic_t701_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+@@ -747,6 +757,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
++ {
++ /* Estar Beauty HD (MID 7316R) */
++ .driver_data = (void *)&estar_beauty_hd_data,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
++ },
++ },
+ {
+ /* GP-electronic T701 */
+ .driver_data = (void *)&gp_electronic_t701_data,
+diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
+index 522636e946282..c8bf8c7ada6a7 100644
+--- a/drivers/scsi/fnic/vnic_dev.c
++++ b/drivers/scsi/fnic/vnic_dev.c
+@@ -444,7 +444,8 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+ fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ pr_err("error in devcmd2 init");
+- return -ENODEV;
++ err = -ENODEV;
++ goto err_free_wq;
+ }
+
+ /*
+@@ -460,7 +461,7 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+ err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+ DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+ if (err)
+- goto err_free_wq;
++ goto err_disable_wq;
+
+ vdev->devcmd2->result =
+ (struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
+@@ -481,8 +482,9 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+
+ err_free_desc_ring:
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+-err_free_wq:
++err_disable_wq:
+ vnic_wq_disable(&vdev->devcmd2->wq);
++err_free_wq:
+ vnic_wq_free(&vdev->devcmd2->wq);
+ err_free_devcmd2:
+ kfree(vdev->devcmd2);
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 8a76284b59b08..523809a8a2323 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -2881,8 +2881,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+- if (sdev->type == TYPE_DISK)
++ if (sdev->type == TYPE_DISK) {
+ sdev->allow_restart = 1;
++ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
++ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+ }
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 52e8666598531..e5b18e5d46dac 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -1619,8 +1619,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ rc = fc_exch_done_locked(ep);
+ WARN_ON(fc_seq_exch(sp) != ep);
+ spin_unlock_bh(&ep->ex_lock);
+- if (!rc)
++ if (!rc) {
+ fc_exch_delete(ep);
++ } else {
++ FC_EXCH_DBG(ep, "ep is completed already, "
++ "hence skip calling the resp\n");
++ goto skip_resp;
++ }
+ }
+
+ /*
+@@ -1639,6 +1644,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
+
++skip_resp:
+ fc_exch_release(ep);
+ return;
+ rel:
+@@ -1895,10 +1901,16 @@ static void fc_exch_reset(struct fc_exch *ep)
+
+ fc_exch_hold(ep);
+
+- if (!rc)
++ if (!rc) {
+ fc_exch_delete(ep);
++ } else {
++ FC_EXCH_DBG(ep, "ep is completed already, "
++ "hence skip calling the resp\n");
++ goto skip_resp;
++ }
+
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
++skip_resp:
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ fc_exch_release(ep);
+ }
+diff --git
a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c +index d4d1104fac991..8cd0a87764dfd 100644 +--- a/drivers/scsi/scsi_transport_srp.c ++++ b/drivers/scsi/scsi_transport_srp.c +@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport) + res = mutex_lock_interruptible(&rport->mutex); + if (res) + goto out; +- scsi_target_block(&shost->shost_gendev); ++ if (rport->state != SRP_RPORT_FAIL_FAST) ++ /* ++ * sdev state must be SDEV_TRANSPORT_OFFLINE, transition ++ * to SDEV_BLOCK is illegal. Calling scsi_target_unblock() ++ * later is ok though, scsi_internal_device_unblock_nowait() ++ * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK. ++ */ ++ scsi_target_block(&shost->shost_gendev); + res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; + pr_debug("%s (state %d): transport.reconnect() returned %d\n", + dev_name(&shost->shost_gendev), rport->state, res); +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c +index 86e280edf8040..7f644a58db511 100644 +--- a/fs/btrfs/backref.c ++++ b/fs/btrfs/backref.c +@@ -347,33 +347,10 @@ static int add_prelim_ref(const struct btrfs_fs_info *fs_info, + return -ENOMEM; + + ref->root_id = root_id; +- if (key) { ++ if (key) + ref->key_for_search = *key; +- /* +- * We can often find data backrefs with an offset that is too +- * large (>= LLONG_MAX, maximum allowed file offset) due to +- * underflows when subtracting a file's offset with the data +- * offset of its corresponding extent data item. This can +- * happen for example in the clone ioctl. +- * So if we detect such case we set the search key's offset to +- * zero to make sure we will find the matching file extent item +- * at add_all_parents(), otherwise we will miss it because the +- * offset taken form the backref is much larger then the offset +- * of the file extent item. This can make us scan a very large +- * number of file extent items, but at least it will not make +- * us miss any. +- * This is an ugly workaround for a behaviour that should have +- * never existed, but it does and a fix for the clone ioctl +- * would touch a lot of places, cause backwards incompatibility +- * and would not fix the problem for extents cloned with older +- * kernels. 
+- */ +- if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY && +- ref->key_for_search.offset >= LLONG_MAX) +- ref->key_for_search.offset = 0; +- } else { ++ else + memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); +- } + + ref->inode_list = NULL; + ref->level = level; +@@ -409,10 +386,36 @@ static int add_indirect_ref(const struct btrfs_fs_info *fs_info, + wanted_disk_byte, count, sc, gfp_mask); + } + ++static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr) ++{ ++ struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; ++ struct rb_node *parent = NULL; ++ struct prelim_ref *ref = NULL; ++ struct prelim_ref target = {0}; ++ int result; ++ ++ target.parent = bytenr; ++ ++ while (*p) { ++ parent = *p; ++ ref = rb_entry(parent, struct prelim_ref, rbnode); ++ result = prelim_ref_compare(ref, &target); ++ ++ if (result < 0) ++ p = &(*p)->rb_left; ++ else if (result > 0) ++ p = &(*p)->rb_right; ++ else ++ return 1; ++ } ++ return 0; ++} ++ + static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, +- struct ulist *parents, struct prelim_ref *ref, ++ struct ulist *parents, ++ struct preftrees *preftrees, struct prelim_ref *ref, + int level, u64 time_seq, const u64 *extent_item_pos, +- u64 total_refs, bool ignore_offset) ++ bool ignore_offset) + { + int ret = 0; + int slot; +@@ -424,6 +427,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, + u64 disk_byte; + u64 wanted_disk_byte = ref->wanted_disk_byte; + u64 count = 0; ++ u64 data_offset; + + if (level != 0) { + eb = path->nodes[level]; +@@ -434,18 +438,26 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, + } + + /* +- * We normally enter this function with the path already pointing to +- * the first item to check. But sometimes, we may enter it with +- * slot==nritems. In that case, go to the next leaf before we continue. ++ * 1. We normally enter this function with the path already pointing to ++ * the first item to check. But sometimes, we may enter it with ++ * slot == nritems. ++ * 2. We are searching for normal backref but bytenr of this leaf ++ * matches shared data backref ++ * 3. The leaf owner is not equal to the root we are searching ++ * ++ * For these cases, go to the next leaf before we continue. 
+ */
+- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
++ eb = path->nodes[0];
++ if (path->slots[0] >= btrfs_header_nritems(eb) ||
++ is_shared_data_backref(preftrees, eb->start) ||
++ ref->root_id != btrfs_header_owner(eb)) {
+ if (time_seq == SEQ_LAST)
+ ret = btrfs_next_leaf(root, path);
+ else
+ ret = btrfs_next_old_leaf(root, path, time_seq);
+ }
+
+- while (!ret && count < total_refs) {
++ while (!ret && count < ref->count) {
+ eb = path->nodes[0];
+ slot = path->slots[0];
+
+@@ -455,13 +467,31 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ key.type != BTRFS_EXTENT_DATA_KEY)
+ break;
+
++ /*
++ * We are searching for normal backref but bytenr of this leaf
++ * matches shared data backref, OR
++ * the leaf owner is not equal to the root we are searching for
++ */
++ if (slot == 0 &&
++ (is_shared_data_backref(preftrees, eb->start) ||
++ ref->root_id != btrfs_header_owner(eb))) {
++ if (time_seq == SEQ_LAST)
++ ret = btrfs_next_leaf(root, path);
++ else
++ ret = btrfs_next_old_leaf(root, path, time_seq);
++ continue;
++ }
+ fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+ disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
++ data_offset = btrfs_file_extent_offset(eb, fi);
+
+ if (disk_byte == wanted_disk_byte) {
+ eie = NULL;
+ old = NULL;
+- count++;
++ if (ref->key_for_search.offset == key.offset - data_offset)
++ count++;
++ else
++ goto next;
+ if (extent_item_pos) {
+ ret = check_extent_in_eb(&key, eb, fi,
+ *extent_item_pos,
+@@ -502,9 +532,9 @@ next:
+ */
+ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u64 time_seq,
++ struct preftrees *preftrees,
+ struct prelim_ref *ref, struct ulist *parents,
+- const u64 *extent_item_pos, u64 total_refs,
+- bool ignore_offset)
++ const u64 *extent_item_pos, bool ignore_offset)
+ {
+ struct btrfs_root *root;
+ struct btrfs_key root_key;
+@@ -513,6 +543,7 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ int root_level;
+ int level = ref->level;
+ int index;
++ struct btrfs_key search_key = ref->key_for_search;
+
+ root_key.objectid = ref->root_id;
+ root_key.type = BTRFS_ROOT_ITEM_KEY;
+@@ -545,13 +576,33 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ goto out;
+ }
+
++ /*
++ * We can often find data backrefs with an offset that is too large
++ * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
++ * subtracting a file's offset with the data offset of its
++ * corresponding extent data item. This can happen for example in the
++ * clone ioctl.
++ *
++ * So if we detect such case we set the search key's offset to zero to
++ * make sure we will find the matching file extent item at
++ * add_all_parents(), otherwise we will miss it because the offset
++ * taken from the backref is much larger than the offset of the file
++ * extent item. This can make us scan a very large number of file
++ * extent items, but at least it will not make us miss any.
++ *
++ * This is an ugly workaround for a behaviour that should have never
++ * existed, but it does and a fix for the clone ioctl would touch a lot
++ * of places, cause backwards incompatibility and would not fix the
++ * problem for extents cloned with older kernels.
++ */ ++ if (search_key.type == BTRFS_EXTENT_DATA_KEY && ++ search_key.offset >= LLONG_MAX) ++ search_key.offset = 0; + path->lowest_level = level; + if (time_seq == SEQ_LAST) +- ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path, +- 0, 0); ++ ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); + else +- ret = btrfs_search_old_slot(root, &ref->key_for_search, path, +- time_seq); ++ ret = btrfs_search_old_slot(root, &search_key, path, time_seq); + + /* root node has been locked, we can release @subvol_srcu safely here */ + srcu_read_unlock(&fs_info->subvol_srcu, index); +@@ -574,8 +625,8 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, + eb = path->nodes[level]; + } + +- ret = add_all_parents(root, path, parents, ref, level, time_seq, +- extent_item_pos, total_refs, ignore_offset); ++ ret = add_all_parents(root, path, parents, preftrees, ref, level, ++ time_seq, extent_item_pos, ignore_offset); + out: + path->lowest_level = 0; + btrfs_release_path(path); +@@ -609,7 +660,7 @@ unode_aux_to_inode_list(struct ulist_node *node) + static int resolve_indirect_refs(struct btrfs_fs_info *fs_info, + struct btrfs_path *path, u64 time_seq, + struct preftrees *preftrees, +- const u64 *extent_item_pos, u64 total_refs, ++ const u64 *extent_item_pos, + struct share_check *sc, bool ignore_offset) + { + int err; +@@ -653,9 +704,9 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info, + ret = BACKREF_FOUND_SHARED; + goto out; + } +- err = resolve_indirect_ref(fs_info, path, time_seq, ref, +- parents, extent_item_pos, +- total_refs, ignore_offset); ++ err = resolve_indirect_ref(fs_info, path, time_seq, preftrees, ++ ref, parents, extent_item_pos, ++ ignore_offset); + /* + * we can only tolerate ENOENT,otherwise,we should catch error + * and return directly. +@@ -758,8 +809,7 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info, + */ + static int add_delayed_refs(const struct btrfs_fs_info *fs_info, + struct btrfs_delayed_ref_head *head, u64 seq, +- struct preftrees *preftrees, u64 *total_refs, +- struct share_check *sc) ++ struct preftrees *preftrees, struct share_check *sc) + { + struct btrfs_delayed_ref_node *node; + struct btrfs_delayed_extent_op *extent_op = head->extent_op; +@@ -793,7 +843,6 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, + default: + BUG(); + } +- *total_refs += count; + switch (node->type) { + case BTRFS_TREE_BLOCK_REF_KEY: { + /* NORMAL INDIRECT METADATA backref */ +@@ -876,7 +925,7 @@ out: + static int add_inline_refs(const struct btrfs_fs_info *fs_info, + struct btrfs_path *path, u64 bytenr, + int *info_level, struct preftrees *preftrees, +- u64 *total_refs, struct share_check *sc) ++ struct share_check *sc) + { + int ret = 0; + int slot; +@@ -900,7 +949,6 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info, + + ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); + flags = btrfs_extent_flags(leaf, ei); +- *total_refs += btrfs_extent_refs(leaf, ei); + btrfs_item_key_to_cpu(leaf, &found_key, slot); + + ptr = (unsigned long)(ei + 1); +@@ -1125,8 +1173,6 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, + struct prelim_ref *ref; + struct rb_node *node; + struct extent_inode_elem *eie = NULL; +- /* total of both direct AND indirect refs! 
*/ +- u64 total_refs = 0; + struct preftrees preftrees = { + .direct = PREFTREE_INIT, + .indirect = PREFTREE_INIT, +@@ -1195,7 +1241,7 @@ again: + } + spin_unlock(&delayed_refs->lock); + ret = add_delayed_refs(fs_info, head, time_seq, +- &preftrees, &total_refs, sc); ++ &preftrees, sc); + mutex_unlock(&head->mutex); + if (ret) + goto out; +@@ -1216,8 +1262,7 @@ again: + (key.type == BTRFS_EXTENT_ITEM_KEY || + key.type == BTRFS_METADATA_ITEM_KEY)) { + ret = add_inline_refs(fs_info, path, bytenr, +- &info_level, &preftrees, +- &total_refs, sc); ++ &info_level, &preftrees, sc); + if (ret) + goto out; + ret = add_keyed_refs(fs_info, path, bytenr, info_level, +@@ -1236,7 +1281,7 @@ again: + WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root)); + + ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees, +- extent_item_pos, total_refs, sc, ignore_offset); ++ extent_item_pos, sc, ignore_offset); + if (ret) + goto out; + +diff --git a/fs/udf/super.c b/fs/udf/super.c +index 4aba4878ed967..8bb001c7927f0 100644 +--- a/fs/udf/super.c ++++ b/fs/udf/super.c +@@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb) + struct buffer_head *bh = NULL; + int nsr = 0; + struct udf_sb_info *sbi; ++ loff_t session_offset; + + sbi = UDF_SB(sb); + if (sb->s_blocksize < sizeof(struct volStructDesc)) +@@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb) + else + sectorsize = sb->s_blocksize; + +- sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits); ++ session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits; ++ sector += session_offset; + + udf_debug("Starting at sector %u (%lu byte sectors)\n", + (unsigned int)(sector >> sb->s_blocksize_bits), +@@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb) + + if (nsr > 0) + return 1; +- else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) == +- VSD_FIRST_SECTOR_OFFSET) ++ else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET) + return -1; + else + return 0; +diff --git a/include/linux/kthread.h b/include/linux/kthread.h +index 0f9da966934e2..c7108ce5a051c 100644 +--- a/include/linux/kthread.h ++++ b/include/linux/kthread.h +@@ -31,6 +31,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), + unsigned int cpu, + const char *namefmt); + ++void kthread_set_per_cpu(struct task_struct *k, int cpu); ++bool kthread_is_per_cpu(struct task_struct *k); ++ + /** + * kthread_run - create and wake a thread. + * @threadfn: the function to run until signal_pending(current). +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 4b38ba101b9b7..37b51456784f8 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -619,6 +619,7 @@ static inline void tcp_clear_xmit_timers(struct sock *sk) + + unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); + unsigned int tcp_current_mss(struct sock *sk); ++u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when); + + /* Bound MSS / TSO packet size with the half of the window */ + static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) +diff --git a/kernel/kthread.c b/kernel/kthread.c +index e51f0006057df..1d4c98a19043f 100644 +--- a/kernel/kthread.c ++++ b/kernel/kthread.c +@@ -469,11 +469,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), + return p; + kthread_bind(p, cpu); + /* CPU hotplug need to bind once again when unparking the thread. 
*/ +- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags); + to_kthread(p)->cpu = cpu; + return p; + } + ++void kthread_set_per_cpu(struct task_struct *k, int cpu) ++{ ++ struct kthread *kthread = to_kthread(k); ++ if (!kthread) ++ return; ++ ++ WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY)); ++ ++ if (cpu < 0) { ++ clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags); ++ return; ++ } ++ ++ kthread->cpu = cpu; ++ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags); ++} ++ ++bool kthread_is_per_cpu(struct task_struct *k) ++{ ++ struct kthread *kthread = to_kthread(k); ++ if (!kthread) ++ return false; ++ ++ return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags); ++} ++ + /** + * kthread_unpark - unpark a thread created by kthread_create(). + * @k: thread created by kthread_create(). +diff --git a/kernel/smpboot.c b/kernel/smpboot.c +index 2efe1e206167c..f25208e8df836 100644 +--- a/kernel/smpboot.c ++++ b/kernel/smpboot.c +@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu) + kfree(td); + return PTR_ERR(tsk); + } ++ kthread_set_per_cpu(tsk, cpu); + /* + * Park the thread so that it could start right on the CPU + * when it is available. +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 28e52657e0930..29c36c0290623 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -1847,12 +1847,6 @@ static void worker_attach_to_pool(struct worker *worker, + { + mutex_lock(&wq_pool_attach_mutex); + +- /* +- * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any +- * online CPUs. It'll be re-applied when any of the CPUs come up. +- */ +- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); +- + /* + * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains + * stable across this function. See the comments above the flag +@@ -1861,6 +1855,9 @@ static void worker_attach_to_pool(struct worker *worker, + if (pool->flags & POOL_DISASSOCIATED) + worker->flags |= WORKER_UNBOUND; + ++ if (worker->rescue_wq) ++ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); ++ + list_add_tail(&worker->node, &pool->workers); + worker->pool = pool; + +diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c +index bfe7bdd4c3406..98c396769be94 100644 +--- a/net/core/gen_estimator.c ++++ b/net/core/gen_estimator.c +@@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t) + u64 rate, brate; + + est_fetch_counters(est, &b); +- brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log); +- brate -= (est->avbps >> est->ewma_log); ++ brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log); ++ brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log); + +- rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log); +- rate -= (est->avpps >> est->ewma_log); ++ rate = (u64)(b.packets - est->last_packets) << (10 - est->intvl_log); ++ rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log); + + write_seqcount_begin(&est->seq); + est->avbps += brate; +@@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, + if (parm->interval < -2 || parm->interval > 3) + return -EINVAL; + ++ if (parm->ewma_log == 0 || parm->ewma_log >= 31) ++ return -EINVAL; ++ + est = kzalloc(sizeof(*est), GFP_KERNEL); + if (!est) + return -ENOBUFS; +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 26305aa88651f..a1768ded2d545 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -3295,6 +3295,7 @@ static void tcp_ack_probe(struct sock *sk) + } else { + unsigned long when 
= tcp_probe0_when(sk, TCP_RTO_MAX); + ++ when = tcp_clamp_probe0_to_user_timeout(sk, when); + tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, + when, TCP_RTO_MAX, NULL); + } +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 5da6ffce390c2..d0774b4e934d6 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -3850,6 +3850,8 @@ void tcp_send_probe0(struct sock *sk) + */ + timeout = TCP_RESOURCE_PROBE_INTERVAL; + } ++ ++ timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout); + tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL); + } + +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c +index 7fcd116fbd378..fa2ae96ecdc40 100644 +--- a/net/ipv4/tcp_timer.c ++++ b/net/ipv4/tcp_timer.c +@@ -40,6 +40,24 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk) + return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining)); + } + ++u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when) ++{ ++ struct inet_connection_sock *icsk = inet_csk(sk); ++ u32 remaining; ++ s32 elapsed; ++ ++ if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp) ++ return when; ++ ++ elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp; ++ if (unlikely(elapsed < 0)) ++ elapsed = 0; ++ remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed; ++ remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN); ++ ++ return min_t(u32, remaining, when); ++} ++ + /** + * tcp_write_err() - close socket and save error info + * @sk: The socket the error has appeared on. +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 3ab85e1e38d82..1a15e7bae106a 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -4080,6 +4080,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta) + + rcu_read_lock(); + key = rcu_dereference(sta->ptk[sta->ptk_idx]); ++ if (!key) ++ key = rcu_dereference(sdata->default_unicast_key); + if (key) { + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: +diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c +index 3a1d428c13369..ea9ddea35a886 100644 +--- a/net/switchdev/switchdev.c ++++ b/net/switchdev/switchdev.c +@@ -461,10 +461,11 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev, + extack = switchdev_notifier_info_to_extack(&port_obj_info->info); + + if (check_cb(dev)) { +- /* This flag is only checked if the return value is success. */ +- port_obj_info->handled = true; +- return add_cb(dev, port_obj_info->obj, port_obj_info->trans, +- extack); ++ err = add_cb(dev, port_obj_info->obj, port_obj_info->trans, ++ extack); ++ if (err != -EOPNOTSUPP) ++ port_obj_info->handled = true; ++ return err; + } + + /* Switch ports might be stacked under e.g. a LAG. Ignore the +@@ -513,9 +514,10 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev, + int err = -EOPNOTSUPP; + + if (check_cb(dev)) { +- /* This flag is only checked if the return value is success. */ +- port_obj_info->handled = true; +- return del_cb(dev, port_obj_info->obj); ++ err = del_cb(dev, port_obj_info->obj); ++ if (err != -EOPNOTSUPP) ++ port_obj_info->handled = true; ++ return err; + } + + /* Switch ports might be stacked under e.g. a LAG. 
Ignore the +@@ -563,9 +565,10 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev, + int err = -EOPNOTSUPP; + + if (check_cb(dev)) { +- port_attr_info->handled = true; +- return set_cb(dev, port_attr_info->attr, +- port_attr_info->trans); ++ err = set_cb(dev, port_attr_info->attr, port_attr_info->trans); ++ if (err != -EOPNOTSUPP) ++ port_attr_info->handled = true; ++ return err; + } + + /* Switch ports might be stacked under e.g. a LAG. Ignore the +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 5f515a29668c8..b3667a5efdc1f 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2450,6 +2450,9 @@ static const struct pci_device_id azx_ids[] = { + /* CometLake-S */ + { PCI_DEVICE(0x8086, 0xa3f0), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, ++ /* CometLake-R */ ++ { PCI_DEVICE(0x8086, 0xf0c8), ++ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Icelake */ + { PCI_DEVICE(0x8086, 0x34c8), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, +diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c +index 9e8233c10d860..df38616c431a6 100644 +--- a/sound/soc/sof/intel/hda-codec.c ++++ b/sound/soc/sof/intel/hda-codec.c +@@ -68,8 +68,7 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev) + * has been recorded in STATESTS + */ + if (codec->jacktbl.used) +- schedule_delayed_work(&codec->jackpoll_work, +- codec->jackpoll_interval); ++ pm_request_resume(&codec->core.dev); + } + #else + void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {} +diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c +index edba4745f25a9..693d740107a8b 100644 +--- a/tools/objtool/elf.c ++++ b/tools/objtool/elf.c +@@ -214,8 +214,11 @@ static int read_symbols(struct elf *elf) + + symtab = find_section_by_name(elf, ".symtab"); + if (!symtab) { +- WARN("missing symbol table"); +- return -1; ++ /* ++ * A missing symbol table is actually possible if it's an empty ++ * .o file. This can happen for thunk_64.o. 
++ */ ++ return 0; + } + + symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize; +diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c +index 0453c50c949cb..0725239bbd85c 100644 +--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c ++++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c +@@ -380,7 +380,6 @@ int test_alignment_handler_integer(void) + LOAD_DFORM_TEST(ldu); + LOAD_XFORM_TEST(ldx); + LOAD_XFORM_TEST(ldux); +- LOAD_DFORM_TEST(lmw); + STORE_DFORM_TEST(stb); + STORE_XFORM_TEST(stbx); + STORE_DFORM_TEST(stbu); +@@ -399,7 +398,11 @@ int test_alignment_handler_integer(void) + STORE_XFORM_TEST(stdx); + STORE_DFORM_TEST(stdu); + STORE_XFORM_TEST(stdux); ++ ++#ifdef __BIG_ENDIAN__ ++ LOAD_DFORM_TEST(lmw); + STORE_DFORM_TEST(stmw); ++#endif + + return rc; + } diff --git a/patch/kernel/odroidxu4-current/patch-5.4.96-97.patch b/patch/kernel/odroidxu4-current/patch-5.4.96-97.patch new file mode 100644 index 000000000..eefa48f02 --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.96-97.patch @@ -0,0 +1,2250 @@ +diff --git a/Makefile b/Makefile +index 7a47a2594f957..032751f6be0c1 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 96 ++SUBLEVEL = 97 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +@@ -920,12 +920,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) + # change __FILE__ to the relative path from the srctree + KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) + +-# ensure -fcf-protection is disabled when using retpoline as it is +-# incompatible with -mindirect-branch=thunk-extern +-ifdef CONFIG_RETPOLINE +-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) +-endif +- + include scripts/Makefile.kasan + include scripts/Makefile.extrawarn + include scripts/Makefile.ubsan +diff --git a/arch/arm/boot/dts/sun7i-a20-bananapro.dts b/arch/arm/boot/dts/sun7i-a20-bananapro.dts +index 01ccff756996d..5740f9442705c 100644 +--- a/arch/arm/boot/dts/sun7i-a20-bananapro.dts ++++ b/arch/arm/boot/dts/sun7i-a20-bananapro.dts +@@ -110,7 +110,7 @@ + pinctrl-names = "default"; + pinctrl-0 = <&gmac_rgmii_pins>; + phy-handle = <&phy1>; +- phy-mode = "rgmii"; ++ phy-mode = "rgmii-id"; + phy-supply = <®_gmac_3v3>; + status = "okay"; + }; +diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c +index 8b81a17f675d9..e17ec92b90dd8 100644 +--- a/arch/arm/mach-footbridge/dc21285.c ++++ b/arch/arm/mach-footbridge/dc21285.c +@@ -66,15 +66,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, + if (addr) + switch (size) { + case 1: +- asm("ldrb %0, [%1, %2]" ++ asm volatile("ldrb %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + case 2: +- asm("ldrh %0, [%1, %2]" ++ asm volatile("ldrh %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + case 4: +- asm("ldr %0, [%1, %2]" ++ asm volatile("ldr %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + } +@@ -100,17 +100,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, + if (addr) + switch (size) { + case 1: +- asm("strb %0, [%1, %2]" ++ asm volatile("strb %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; + case 2: +- asm("strh %0, [%1, %2]" ++ asm volatile("strh %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; + case 4: +- asm("str 
%0, [%1, %2]" ++ asm volatile("str %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +index 354ef2f3eac67..9533c85fb0a30 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +@@ -2382,7 +2382,7 @@ + interrupts = ; + dr_mode = "host"; + snps,dis_u2_susphy_quirk; +- snps,quirk-frame-length-adjustment; ++ snps,quirk-frame-length-adjustment = <0x20>; + snps,parkmode-disable-ss-quirk; + }; + }; +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +index d4c1da3d4bde2..04d4b1b11a00a 100644 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +@@ -304,7 +304,7 @@ + + dcfg: dcfg@1ee0000 { + compatible = "fsl,ls1046a-dcfg", "syscon"; +- reg = <0x0 0x1ee0000 0x0 0x10000>; ++ reg = <0x0 0x1ee0000 0x0 0x1000>; + big-endian; + }; + +diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +index f539b3655f6b9..e638f216dbfb3 100644 +--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts ++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +@@ -243,6 +243,8 @@ + &i2c3 { + status = "okay"; + clock-frequency = <400000>; ++ /* Overwrite pinctrl-0 from sdm845.dtsi */ ++ pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>; + + tsel: hid@15 { + compatible = "hid-over-i2c"; +@@ -250,9 +252,6 @@ + hid-descr-addr = <0x1>; + + interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>; +- +- pinctrl-names = "default"; +- pinctrl-0 = <&i2c3_hid_active>; + }; + + tsc2: hid@2c { +@@ -261,11 +260,6 @@ + hid-descr-addr = <0x20>; + + interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>; +- +- pinctrl-names = "default"; +- pinctrl-0 = <&i2c3_hid_active>; +- +- status = "disabled"; + }; + }; + +diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi +index 9e09909a510a1..98b014a8f9165 100644 +--- a/arch/arm64/boot/dts/rockchip/px30.dtsi ++++ b/arch/arm64/boot/dts/rockchip/px30.dtsi +@@ -860,7 +860,7 @@ + vopl_mmu: iommu@ff470f00 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff470f00 0x0 0x100>; +- interrupts = ; ++ interrupts = ; + interrupt-names = "vopl_mmu"; + clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>; + clock-names = "aclk", "hclk"; +diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c +index 179b41ad63baf..18618af3835f9 100644 +--- a/arch/um/drivers/virtio_uml.c ++++ b/arch/um/drivers/virtio_uml.c +@@ -959,6 +959,7 @@ static void virtio_uml_release_dev(struct device *d) + } + + os_close_file(vu_dev->sock); ++ kfree(vu_dev); + } + + /* Platform device */ +@@ -977,7 +978,7 @@ static int virtio_uml_probe(struct platform_device *pdev) + if (!pdata) + return -EINVAL; + +- vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL); ++ vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL); + if (!vu_dev) + return -ENOMEM; + +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index 94df0868804bc..b5e3bfd4facea 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -131,6 +131,9 @@ else + + KBUILD_CFLAGS += -mno-red-zone + KBUILD_CFLAGS += -mcmodel=kernel ++ ++ # Intel CET isn't enabled in the kernel ++ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) + endif + + ifdef CONFIG_X86_X32 +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h +index 
6016559ed1713..5bef1575708dc 100644 +--- a/arch/x86/include/asm/apic.h ++++ b/arch/x86/include/asm/apic.h +@@ -197,16 +197,6 @@ static inline bool apic_needs_pit(void) { return true; } + #endif /* !CONFIG_X86_LOCAL_APIC */ + + #ifdef CONFIG_X86_X2APIC +-/* +- * Make previous memory operations globally visible before +- * sending the IPI through x2apic wrmsr. We need a serializing instruction or +- * mfence for this. +- */ +-static inline void x2apic_wrmsr_fence(void) +-{ +- asm volatile("mfence" : : : "memory"); +-} +- + static inline void native_apic_msr_write(u32 reg, u32 v) + { + if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h +index 7f828fe497978..4819d5e5a3353 100644 +--- a/arch/x86/include/asm/barrier.h ++++ b/arch/x86/include/asm/barrier.h +@@ -84,4 +84,22 @@ do { \ + + #include + ++/* ++ * Make previous memory operations globally visible before ++ * a WRMSR. ++ * ++ * MFENCE makes writes visible, but only affects load/store ++ * instructions. WRMSR is unfortunately not a load/store ++ * instruction and is unaffected by MFENCE. The LFENCE ensures ++ * that the WRMSR is not reordered. ++ * ++ * Most WRMSRs are full serializing instructions themselves and ++ * do not require this barrier. This is only required for the ++ * IA32_TSC_DEADLINE and X2APIC MSRs. ++ */ ++static inline void weak_wrmsr_fence(void) ++{ ++ asm volatile("mfence; lfence" : : : "memory"); ++} ++ + #endif /* _ASM_X86_BARRIER_H */ +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 06fa808d72032..3dca7b8642e9c 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -42,6 +42,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -472,6 +473,9 @@ static int lapic_next_deadline(unsigned long delta, + { + u64 tsc; + ++ /* This MSR is special and need a special fence: */ ++ weak_wrmsr_fence(); ++ + tsc = rdtsc(); + wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); + return 0; +diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c +index b0889c48a2ac5..7eec3c154fa24 100644 +--- a/arch/x86/kernel/apic/x2apic_cluster.c ++++ b/arch/x86/kernel/apic/x2apic_cluster.c +@@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector) + { + u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu); + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL); + } + +@@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) + unsigned long flags; + u32 dest; + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + local_irq_save(flags); + + tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask); +diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c +index e14eae6d6ea71..032a00e5d9fa6 100644 +--- a/arch/x86/kernel/apic/x2apic_phys.c ++++ b/arch/x86/kernel/apic/x2apic_phys.c +@@ -43,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector) + { + u32 dest = per_cpu(x86_cpu_to_apicid, cpu); + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL); + } + +@@ -54,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) + unsigned long this_cpu; + unsigned long 
flags; + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + + local_irq_save(flags); + +@@ -125,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which) + { + unsigned long cfg = __prepare_ICR(which, vector, 0); + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + native_x2apic_icr_write(cfg, 0); + } + +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 39265b55929d2..60c8dcb907a50 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -2890,6 +2890,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) + ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); + *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data : + (u32)msr_data; ++ if (efer & EFER_LMA) ++ ctxt->mode = X86EMUL_MODE_PROT64; + + return X86EMUL_CONTINUE; + } +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 2b506904be024..4906e480b5bb6 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -889,6 +889,11 @@ static int has_svm(void) + return 0; + } + ++ if (sev_active()) { ++ pr_info("KVM is unsupported when running as an SEV guest\n"); ++ return 0; ++ } ++ + return 1; + } + +diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c +index 9268c12458c84..dfa01bcdc3694 100644 +--- a/arch/x86/mm/mem_encrypt.c ++++ b/arch/x86/mm/mem_encrypt.c +@@ -375,6 +375,7 @@ bool force_dma_unencrypted(struct device *dev) + + return false; + } ++EXPORT_SYMBOL_GPL(sev_active); + + /* Architecture __weak replacement functions */ + void __init mem_encrypt_free_decrypted_mem(void) +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index d2dd387c95d86..de06ee7d2ad46 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -1434,8 +1434,6 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) + + drm_connector_update_edid_property(connector, + aconnector->edid); +- drm_add_edid_modes(connector, aconnector->edid); +- + if (aconnector->dc_link->aux_mode) + drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, + aconnector->edid); +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 8c73377ac82ca..3d004ca76b6ed 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -215,9 +215,17 @@ static const struct xpad_device { + { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, +- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02b3, 
"Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, +@@ -296,6 +304,9 @@ static const struct xpad_device { + { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, ++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, ++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, +@@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = { + XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ + XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ ++ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */ ++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ + XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ ++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ ++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ + { } + }; + +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index eca931da76c3a..b7dbcbac3a1a5 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), + }, ++ }, ++ { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), + DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 984c7a6ea4fe8..953d86ca6d2b2 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -3285,6 +3285,12 @@ static int __init init_dmars(void) + + if (!ecap_pass_through(iommu->ecap)) + hw_pass_through = 0; ++ ++ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) { ++ pr_info("Disable batched IOTLB flush due to virtualization"); ++ intel_iommu_strict = 1; ++ } ++ + #ifdef CONFIG_INTEL_IOMMU_SVM + if (pasid_supported(iommu)) + intel_svm_init(iommu); +diff --git a/drivers/md/md.c b/drivers/md/md.c +index ec5dfb7ae4e16..cc38530804c90 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -538,8 +538,10 @@ static void md_submit_flush_data(struct work_struct *ws) + * could wait for this and below md_handle_request could wait for those + * bios because of suspend check + */ ++ spin_lock_irq(&mddev->lock); + mddev->last_flush = mddev->start_flush; + mddev->flush_bio = NULL; ++ spin_unlock_irq(&mddev->lock); + wake_up(&mddev->sb_wait); + + if (bio->bi_iter.bi_size == 0) { +diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c +index 3efaa9534a777..9a5aaac29099b 100644 +--- a/drivers/mmc/core/sdio_cis.c ++++ b/drivers/mmc/core/sdio_cis.c +@@ -20,6 +20,8 @@ + #include "sdio_cis.h" + #include "sdio_ops.h" + 
++#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */ ++ + static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, + const unsigned char *buf, unsigned size) + { +@@ -266,6 +268,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) + + do { + unsigned char tpl_code, tpl_link; ++ unsigned long timeout = jiffies + ++ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS); + + ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code); + if (ret) +@@ -318,6 +322,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) + prev = &this->next; + + if (ret == -ENOENT) { ++ if (time_after(jiffies, timeout)) ++ break; + /* warn about unknown tuples */ + pr_warn_ratelimited("%s: queuing unknown" + " CIS tuple 0x%02x (%u bytes)\n", +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c +index 469b155df4885..1af09fd3fed1c 100644 +--- a/drivers/net/dsa/mv88e6xxx/chip.c ++++ b/drivers/net/dsa/mv88e6xxx/chip.c +@@ -1517,7 +1517,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, + if (!entry.portvec) + entry.state = 0; + } else { +- entry.portvec |= BIT(port); ++ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC) ++ entry.portvec = BIT(port); ++ else ++ entry.portvec |= BIT(port); ++ + entry.state = state; + } + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +index c20dc689698ed..5acd599d6b9af 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +@@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) + + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; +- +- /* Always report link is down if the VF queues aren't enabled */ +- if (!vf->queues_enabled) { +- pfe.event_data.link_event.link_status = false; +- pfe.event_data.link_event.link_speed = 0; +- } else if (vf->link_forced) { ++ if (vf->link_forced) { + pfe.event_data.link_event.link_status = vf->link_up; + pfe.event_data.link_event.link_speed = + (vf->link_up ? 
VIRTCHNL_LINK_SPEED_40GB : 0); +@@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) + pfe.event_data.link_event.link_speed = + i40e_virtchnl_link_speed(ls->link_speed); + } +- + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), NULL); + } +@@ -2393,8 +2387,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) + } + } + +- vf->queues_enabled = true; +- + error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, +@@ -2416,9 +2408,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) + struct i40e_pf *pf = vf->pf; + i40e_status aq_ret = 0; + +- /* Immediately mark queues as disabled */ +- vf->queues_enabled = false; +- + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +index 7164b9bb294ff..f65cc0c165502 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +@@ -99,7 +99,6 @@ struct i40e_vf { + unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ + bool link_forced; + bool link_up; /* only valid if VF link is forced */ +- bool queues_enabled; /* true if the VF queues are enabled */ + bool spoofchk; + u16 num_mac; + u16 num_vlan; +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c +index 0303eeb760505..0365bf2b480e3 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c +@@ -1709,7 +1709,8 @@ static int igc_get_link_ksettings(struct net_device *netdev, + Asym_Pause); + } + +- status = rd32(IGC_STATUS); ++ status = pm_runtime_suspended(&adapter->pdev->dev) ? ++ 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { +diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c +index c25f555aaf822..ed5d09c11c389 100644 +--- a/drivers/net/ethernet/intel/igc/igc_i225.c ++++ b/drivers/net/ethernet/intel/igc/igc_i225.c +@@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) + { + struct igc_nvm_info *nvm = &hw->nvm; ++ s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; +- s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
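/*
 * Editorial aside - not part of the upstream patch. The igc_i225.c hunks
 * around here seed ret_val with a failure code up front, and the igc_mac.c
 * hunk further below fixes an exit label that returned a hard-coded 0
 * instead of the computed error. A minimal standalone illustration of that
 * pattern, with hypothetical names, in plain C:
 */
#include <stdio.h>

static int hw_write(int fail)
{
	return fail ? -5 : 0;	/* -5 stands in for an -IGC_ERR_NVM-style code */
}

static int configure(int fail)
{
	int ret_val = hw_write(fail);	/* seed from the helper, not from 0 */

	if (ret_val)
		goto out;
	/* further device programming would happen here */
out:
	return ret_val;	/* the bug pattern was a hard-coded "return 0;" here */
}

int main(void)
{
	printf("good path: %d, failing path: %d\n", configure(0), configure(1));
	return 0;
}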
+@@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); +- ret_val = -IGC_ERR_NVM; + goto out; + } + +diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c +index 5eeb4c8caf4ae..08adf103e90b4 100644 +--- a/drivers/net/ethernet/intel/igc/igc_mac.c ++++ b/drivers/net/ethernet/intel/igc/igc_mac.c +@@ -647,7 +647,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw) + } + + out: +- return 0; ++ return ret_val; + } + + /** +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +index a30eb90ba3d28..dd590086fe6a5 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) + /* Clear entry invalidation bit */ + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + +- /* Write tcam index - indirect access */ +- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); +- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) +- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); +- + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); + ++ /* Write tcam index - indirect access */ ++ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); ++ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) ++ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); ++ + return 0; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 4944c40436f08..11e12761b0a6e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -1697,6 +1697,7 @@ search_again_locked: + if (!fte_tmp) + continue; + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp); ++ /* No error check needed here, because insert_fte() is not called */ + up_write_ref_node(&fte_tmp->node, false); + tree_put_node(&fte_tmp->node, false); + kmem_cache_free(steering->ftes_cache, fte); +@@ -1745,6 +1746,8 @@ skip_search: + up_write_ref_node(&g->node, false); + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); + up_write_ref_node(&fte->node, false); ++ if (IS_ERR(rule)) ++ tree_put_node(&fte->node, false); + return rule; + } + rule = ERR_PTR(-ENOENT); +@@ -1844,6 +1847,8 @@ search_again_locked: + up_write_ref_node(&g->node, false); + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); + up_write_ref_node(&fte->node, false); ++ if (IS_ERR(rule)) ++ tree_put_node(&fte->node, false); + tree_put_node(&g->node, false); + return rule; + +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c +index 366ca1b5da5cc..1e8244ec5b332 100644 +--- a/drivers/net/ethernet/realtek/r8169_main.c ++++ b/drivers/net/ethernet/realtek/r8169_main.c +@@ -6419,10 +6419,10 @@ static int rtl8169_close(struct net_device *dev) + + cancel_work_sync(&tp->wk.work); + +- phy_disconnect(tp->phydev); +- + free_irq(pci_irq_vector(pdev, 0), tp); + ++ phy_disconnect(tp->phydev); ++ + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, 
tp->TxDescArray, +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +index c54fe6650018e..7272d8522a9e9 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +@@ -134,7 +134,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm) + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + +- /* Do not configure default queue, it is configured via context info */ ++ /* ++ * The default queue is configured via context info, so if we ++ * have a single queue, there's nothing to do here. ++ */ ++ if (mvm->trans->num_rx_queues == 1) ++ return 0; ++ ++ /* skip the default queue */ + num_queues = mvm->trans->num_rx_queues - 1; + + size = struct_size(cmd, data, num_queues); +diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c +index 196aa44c4936a..e0f411021c59d 100644 +--- a/drivers/nvdimm/dimm_devs.c ++++ b/drivers/nvdimm/dimm_devs.c +@@ -344,16 +344,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, + } + static DEVICE_ATTR_RO(state); + +-static ssize_t available_slots_show(struct device *dev, +- struct device_attribute *attr, char *buf) ++static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) + { +- struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); ++ struct device *dev; + ssize_t rc; + u32 nfree; + + if (!ndd) + return -ENXIO; + ++ dev = ndd->dev; + nvdimm_bus_lock(dev); + nfree = nd_label_nfree(ndd); + if (nfree - 1 > nfree) { +@@ -365,6 +365,18 @@ static ssize_t available_slots_show(struct device *dev, + nvdimm_bus_unlock(dev); + return rc; + } ++ ++static ssize_t available_slots_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ ssize_t rc; ++ ++ nd_device_lock(dev); ++ rc = __available_slots_show(dev_get_drvdata(dev), buf); ++ nd_device_unlock(dev); ++ ++ return rc; ++} + static DEVICE_ATTR_RO(available_slots); + + __weak ssize_t security_show(struct device *dev, +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index ef93bd3ed339c..434d3f21f0e13 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -3161,6 +3161,8 @@ static const struct pci_device_id nvme_id_table[] = { + { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, ++ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ ++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), + .driver_data = NVME_QUIRK_SINGLE_VECTOR }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c +index e31823f19a0fa..9242224156f5b 100644 +--- a/drivers/nvme/target/tcp.c ++++ b/drivers/nvme/target/tcp.c +@@ -292,7 +292,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) + length = cmd->pdu_len; + cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); + offset = cmd->rbytes_done; +- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE); ++ cmd->sg_idx = offset / PAGE_SIZE; + sg_offset = offset % PAGE_SIZE; + sg = &cmd->req.sg[cmd->sg_idx]; + +@@ -305,6 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) + length -= iov_len; + sg = sg_next(sg); + iov++; ++ sg_offset = 0; + } + + iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c +index 134dc2005ce97..c9f6e97582885 100644 +--- a/drivers/usb/class/usblp.c ++++ 
b/drivers/usb/class/usblp.c +@@ -1329,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) + if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) + return -EINVAL; + +- alts = usblp->protocol[protocol].alt_setting; +- if (alts < 0) +- return -EINVAL; +- r = usb_set_interface(usblp->dev, usblp->ifnum, alts); +- if (r < 0) { +- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", +- alts, usblp->ifnum); +- return r; ++ /* Don't unnecessarily set the interface if there's a single alt. */ ++ if (usblp->intf->num_altsetting > 1) { ++ alts = usblp->protocol[protocol].alt_setting; ++ if (alts < 0) ++ return -EINVAL; ++ r = usb_set_interface(usblp->dev, usblp->ifnum, alts); ++ if (r < 0) { ++ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", ++ alts, usblp->ifnum); ++ return r; ++ } + } + + usblp->bidir = (usblp->protocol[protocol].epread != NULL); +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c +index 70ac47a341ac2..e3f1f20c49221 100644 +--- a/drivers/usb/dwc2/gadget.c ++++ b/drivers/usb/dwc2/gadget.c +@@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, + static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, + u32 windex) + { +- struct dwc2_hsotg_ep *ep; + int dir = (windex & USB_DIR_IN) ? 1 : 0; + int idx = windex & 0x7F; + +@@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, + if (idx > hsotg->num_of_eps) + return NULL; + +- ep = index_to_ep(hsotg, idx, dir); +- +- if (idx && ep->dir_in != dir) +- return NULL; +- +- return ep; ++ return index_to_ep(hsotg, idx, dir); + } + + /** +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 440dbf55ddf70..90ec65d31059f 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -1718,7 +1718,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) + if (PMSG_IS_AUTO(msg)) + break; + +- ret = dwc3_core_init(dwc); ++ ret = dwc3_core_init_for_resume(dwc); + if (ret) + return ret; + +diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c +index 30313b233680d..99c7fc0d1d597 100644 +--- a/drivers/usb/gadget/legacy/ether.c ++++ b/drivers/usb/gadget/legacy/ether.c +@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto fail1; ++ } + usb_otg_descriptor_init(gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c +index 45c54d56ecbd5..b45e5bf089979 100644 +--- a/drivers/usb/host/xhci-mtk-sch.c ++++ b/drivers/usb/host/xhci-mtk-sch.c +@@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev, + + sch_ep->sch_tt = tt; + sch_ep->ep = ep; ++ INIT_LIST_HEAD(&sch_ep->endpoint); ++ INIT_LIST_HEAD(&sch_ep->tt_endpoint); + + return sch_ep; + } +@@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, + sch_ep->bw_budget_table[j]; + } + } ++ sch_ep->allocated = used; + } + + static int check_sch_tt(struct usb_device *udev, +@@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev, + return 0; + } + ++static void destroy_sch_ep(struct usb_device *udev, ++ struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) ++{ ++ /* only release ep bw check passed by check_sch_bw() */ 
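/*
 * Editorial note - not part of the upstream patch. The ->allocated flag
 * tested below is set in update_bus_bw() when check_sch_bw() successfully
 * charges an endpoint to the bus budget, so an endpoint that never passed
 * the bandwidth check can be destroyed here without unbalancing the
 * accounting.
 */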
++ if (sch_ep->allocated) ++ update_bus_bw(sch_bw, sch_ep, 0); ++ ++ list_del(&sch_ep->endpoint); ++ ++ if (sch_ep->sch_tt) { ++ list_del(&sch_ep->tt_endpoint); ++ drop_tt(udev); ++ } ++ kfree(sch_ep); ++} ++ + static bool need_bw_sch(struct usb_host_endpoint *ep, + enum usb_device_speed speed, int has_tt) + { +@@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk) + + mtk->sch_array = sch_array; + ++ INIT_LIST_HEAD(&mtk->bw_ep_chk_list); ++ + return 0; + } + EXPORT_SYMBOL_GPL(xhci_mtk_sch_init); +@@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct xhci_ep_ctx *ep_ctx; + struct xhci_slot_ctx *slot_ctx; + struct xhci_virt_device *virt_dev; +- struct mu3h_sch_bw_info *sch_bw; + struct mu3h_sch_ep_info *sch_ep; +- struct mu3h_sch_bw_info *sch_array; + unsigned int ep_index; +- int bw_index; +- int ret = 0; + + xhci = hcd_to_xhci(hcd); + virt_dev = xhci->devs[udev->slot_id]; + ep_index = xhci_get_endpoint_index(&ep->desc); + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); +- sch_array = mtk->sch_array; + + xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n", + __func__, usb_endpoint_type(&ep->desc), udev->speed, +@@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + return 0; + } + +- bw_index = get_bw_index(xhci, udev, ep); +- sch_bw = &sch_array[bw_index]; +- + sch_ep = create_sch_ep(udev, ep, ep_ctx); + if (IS_ERR_OR_NULL(sch_ep)) + return -ENOMEM; + + setup_sch_info(udev, ep_ctx, sch_ep); + +- ret = check_sch_bw(udev, sch_bw, sch_ep); +- if (ret) { +- xhci_err(xhci, "Not enough bandwidth!\n"); +- if (is_fs_or_ls(udev->speed)) +- drop_tt(udev); +- +- kfree(sch_ep); +- return -ENOSPC; +- } +- +- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); +- +- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) +- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode)); +- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) +- | EP_BREPEAT(sch_ep->repeat)); +- +- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", +- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, +- sch_ep->offset, sch_ep->repeat); ++ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list); + + return 0; + } +@@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct xhci_virt_device *virt_dev; + struct mu3h_sch_bw_info *sch_array; + struct mu3h_sch_bw_info *sch_bw; +- struct mu3h_sch_ep_info *sch_ep; ++ struct mu3h_sch_ep_info *sch_ep, *tmp; + int bw_index; + + xhci = hcd_to_xhci(hcd); +@@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + bw_index = get_bw_index(xhci, udev, ep); + sch_bw = &sch_array[bw_index]; + +- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) { ++ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) { + if (sch_ep->ep == ep) { +- update_bus_bw(sch_bw, sch_ep, 0); +- list_del(&sch_ep->endpoint); +- if (is_fs_or_ls(udev->speed)) { +- list_del(&sch_ep->tt_endpoint); +- drop_tt(udev); +- } +- kfree(sch_ep); ++ destroy_sch_ep(udev, sch_bw, sch_ep); + break; + } + } + } + EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk); ++ ++int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++{ ++ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; ++ struct 
mu3h_sch_bw_info *sch_bw; ++ struct mu3h_sch_ep_info *sch_ep, *tmp; ++ int bw_index, ret; ++ ++ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); ++ ++ list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) { ++ bw_index = get_bw_index(xhci, udev, sch_ep->ep); ++ sch_bw = &mtk->sch_array[bw_index]; ++ ++ ret = check_sch_bw(udev, sch_bw, sch_ep); ++ if (ret) { ++ xhci_err(xhci, "Not enough bandwidth!\n"); ++ return -ENOSPC; ++ } ++ } ++ ++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { ++ struct xhci_ep_ctx *ep_ctx; ++ struct usb_host_endpoint *ep = sch_ep->ep; ++ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc); ++ ++ bw_index = get_bw_index(xhci, udev, ep); ++ sch_bw = &mtk->sch_array[bw_index]; ++ ++ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); ++ ++ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); ++ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) ++ | EP_BCSCOUNT(sch_ep->cs_count) ++ | EP_BBM(sch_ep->burst_mode)); ++ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) ++ | EP_BREPEAT(sch_ep->repeat)); ++ ++ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", ++ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, ++ sch_ep->offset, sch_ep->repeat); ++ } ++ ++ return xhci_check_bandwidth(hcd, udev); ++} ++EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth); ++ ++void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++{ ++ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct mu3h_sch_bw_info *sch_bw; ++ struct mu3h_sch_ep_info *sch_ep, *tmp; ++ int bw_index; ++ ++ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); ++ ++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { ++ bw_index = get_bw_index(xhci, udev, sch_ep->ep); ++ sch_bw = &mtk->sch_array[bw_index]; ++ destroy_sch_ep(udev, sch_bw, sch_ep); ++ } ++ ++ xhci_reset_bandwidth(hcd, udev); ++} ++EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth); +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c +index 85f1ff0399a9c..09b67219fd146 100644 +--- a/drivers/usb/host/xhci-mtk.c ++++ b/drivers/usb/host/xhci-mtk.c +@@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable) + static int xhci_mtk_setup(struct usb_hcd *hcd); + static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { + .reset = xhci_mtk_setup, ++ .check_bandwidth = xhci_mtk_check_bandwidth, ++ .reset_bandwidth = xhci_mtk_reset_bandwidth, + }; + + static struct hc_driver __read_mostly xhci_mtk_hc_driver; +diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h +index 5ac458b7d2e0e..734c5513aa1bf 100644 +--- a/drivers/usb/host/xhci-mtk.h ++++ b/drivers/usb/host/xhci-mtk.h +@@ -59,6 +59,7 @@ struct mu3h_sch_bw_info { + * @ep_type: endpoint type + * @maxpkt: max packet size of endpoint + * @ep: address of usb_host_endpoint struct ++ * @allocated: the bandwidth is aready allocated from bus_bw + * @offset: which uframe of the interval that transfer should be + * scheduled first time within the interval + * @repeat: the time gap between two uframes that transfers are +@@ -86,6 +87,7 @@ struct mu3h_sch_ep_info { + u32 ep_type; + u32 maxpkt; + void *ep; ++ bool allocated; + /* + * mtk xHCI scheduling information put into reserved DWs + * in ep context +@@ -131,6 +133,7 @@ struct xhci_hcd_mtk { + struct device *dev; + struct usb_hcd *hcd; + struct mu3h_sch_bw_info *sch_array; ++ struct list_head 
bw_ep_chk_list; + struct mu3c_ippc_regs __iomem *ippc_regs; + bool has_ippc; + int num_u2_ports; +@@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep); + void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep); ++int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); ++void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); + + #else + static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, +@@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, + { + } + ++static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, ++ struct usb_device *udev) ++{ ++ return 0; ++} ++ ++static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, ++ struct usb_device *udev) ++{ ++} + #endif + + #endif /* _XHCI_MTK_H_ */ +diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c +index 60651a50770f9..f27d5c2c42f31 100644 +--- a/drivers/usb/host/xhci-mvebu.c ++++ b/drivers/usb/host/xhci-mvebu.c +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) + return 0; + } + ++int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) ++{ ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct device *dev = hcd->self.controller; ++ struct phy *phy; ++ int ret; ++ ++ /* Old bindings miss the PHY handle */ ++ phy = of_phy_get(dev->of_node, "usb3-phy"); ++ if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER) ++ return -EPROBE_DEFER; ++ else if (IS_ERR(phy)) ++ goto phy_out; ++ ++ ret = phy_init(phy); ++ if (ret) ++ goto phy_put; ++ ++ ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS); ++ if (ret) ++ goto phy_exit; ++ ++ ret = phy_power_on(phy); ++ if (ret == -EOPNOTSUPP) { ++ /* Skip initializatin of XHCI PHY when it is unsupported by firmware */ ++ dev_warn(dev, "PHY unsupported by firmware\n"); ++ xhci->quirks |= XHCI_SKIP_PHY_INIT; ++ } ++ if (ret) ++ goto phy_exit; ++ ++ phy_power_off(phy); ++phy_exit: ++ phy_exit(phy); ++phy_put: ++ phy_put(phy); ++phy_out: ++ ++ return 0; ++} ++ + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) + { + struct xhci_hcd *xhci = hcd_to_xhci(hcd); +diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h +index ca0a3a5721dd7..74b4d21a498a0 100644 +--- a/drivers/usb/host/xhci-mvebu.h ++++ b/drivers/usb/host/xhci-mvebu.h +@@ -12,6 +12,7 @@ struct usb_hcd; + + #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) + int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); ++int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd); + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); + #else + static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) +@@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) + return 0; + } + ++static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) ++{ ++ return 0; ++} ++ + static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) + { + return 0; +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index 52c625c023410..84cfa85442852 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd) + priv->plat_start(hcd); + } + ++static int xhci_priv_plat_setup(struct usb_hcd *hcd) ++{ ++ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); ++ ++ if (!priv->plat_setup) ++ return 0; ++ ++ 
return priv->plat_setup(hcd); ++} ++ + static int xhci_priv_init_quirk(struct usb_hcd *hcd) + { + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); +@@ -101,6 +111,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { + }; + + static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { ++ .plat_setup = xhci_mvebu_a3700_plat_setup, + .init_quirk = xhci_mvebu_a3700_init_quirk, + }; + +@@ -163,6 +174,8 @@ static int xhci_plat_probe(struct platform_device *pdev) + struct usb_hcd *hcd; + int ret; + int irq; ++ struct xhci_plat_priv *priv = NULL; ++ + + if (usb_disabled()) + return -ENODEV; +@@ -257,8 +270,7 @@ static int xhci_plat_probe(struct platform_device *pdev) + + priv_match = of_device_get_match_data(&pdev->dev); + if (priv_match) { +- struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); +- ++ priv = hcd_to_xhci_priv(hcd); + /* Just copy data for now */ + if (priv_match) + *priv = *priv_match; +@@ -307,6 +319,16 @@ static int xhci_plat_probe(struct platform_device *pdev) + + hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); + xhci->shared_hcd->tpl_support = hcd->tpl_support; ++ ++ if (priv) { ++ ret = xhci_priv_plat_setup(hcd); ++ if (ret) ++ goto disable_usb_phy; ++ } ++ ++ if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))) ++ hcd->skip_phy_initialization = 1; ++ + ret = usb_add_hcd(hcd, irq, IRQF_SHARED); + if (ret) + goto disable_usb_phy; +diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h +index 5681723fc9cd7..b7749151bdfb8 100644 +--- a/drivers/usb/host/xhci-plat.h ++++ b/drivers/usb/host/xhci-plat.h +@@ -13,6 +13,7 @@ + struct xhci_plat_priv { + const char *firmware_name; + unsigned long long quirks; ++ int (*plat_setup)(struct usb_hcd *); + void (*plat_start)(struct usb_hcd *); + int (*init_quirk)(struct usb_hcd *); + int (*resume_quirk)(struct usb_hcd *); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 52e156c018042..900ea91fb3c6b 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -695,11 +695,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, + dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, + DMA_FROM_DEVICE); + /* for in tranfers we need to copy the data from bounce to sg */ +- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, +- seg->bounce_len, seg->bounce_offs); +- if (len != seg->bounce_len) +- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", +- len, seg->bounce_len); ++ if (urb->num_sgs) { ++ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, ++ seg->bounce_len, seg->bounce_offs); ++ if (len != seg->bounce_len) ++ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", ++ len, seg->bounce_len); ++ } else { ++ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, ++ seg->bounce_len); ++ } + seg->bounce_len = 0; + seg->bounce_offs = 0; + } +@@ -3263,12 +3268,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, + + /* create a max max_pkt sized bounce buffer pointed to by last trb */ + if (usb_urb_dir_out(urb)) { +- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, +- seg->bounce_buf, new_buff_len, enqd_len); +- if (len != new_buff_len) +- xhci_warn(xhci, +- "WARN Wrong bounce buffer write length: %zu != %d\n", +- len, new_buff_len); ++ if (urb->num_sgs) { ++ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, ++ seg->bounce_buf, new_buff_len, enqd_len); ++ if (len != new_buff_len) 
++ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", ++ len, new_buff_len); ++ } else { ++ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); ++ } ++ + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, + max_pkt, DMA_TO_DEVICE); + } else { +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 70aa3055c41e7..91330517444e7 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -2861,7 +2861,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, + * else should be touching the xhci->devs[slot_id] structure, so we + * don't need to take the xhci->lock for manipulating that. + */ +-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) + { + int i; + int ret = 0; +@@ -2959,7 +2959,7 @@ command_cleanup: + return ret; + } + +-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) + { + struct xhci_hcd *xhci; + struct xhci_virt_device *virt_dev; +@@ -5380,6 +5380,10 @@ void xhci_init_driver(struct hc_driver *drv, + drv->reset = over->reset; + if (over->start) + drv->start = over->start; ++ if (over->check_bandwidth) ++ drv->check_bandwidth = over->check_bandwidth; ++ if (over->reset_bandwidth) ++ drv->reset_bandwidth = over->reset_bandwidth; + } + } + EXPORT_SYMBOL_GPL(xhci_init_driver); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index b483317bcb17b..1ad1d6e9e9979 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1873,6 +1873,7 @@ struct xhci_hcd { + #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) + #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) + #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) ++#define XHCI_SKIP_PHY_INIT BIT_ULL(37) + #define XHCI_DISABLE_SPARSE BIT_ULL(38) + + unsigned int num_active_eps; +@@ -1911,6 +1912,8 @@ struct xhci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *hcd); + int (*start)(struct usb_hcd *hcd); ++ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); ++ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + }; + + #define XHCI_CFC_DELAY 10 +@@ -2063,6 +2066,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); + void xhci_shutdown(struct usb_hcd *hcd); + void xhci_init_driver(struct hc_driver *drv, + const struct xhci_driver_overrides *over); ++int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); ++void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); + int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); + int xhci_ext_cap_init(struct xhci_hcd *xhci); + +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c +index 05cdad13933b1..cfc16943979d5 100644 +--- a/drivers/usb/renesas_usbhs/fifo.c ++++ b/drivers/usb/renesas_usbhs/fifo.c +@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) + } + + usbhs_pipe_clear_without_sequence(pipe, 0, 0); ++ usbhs_pipe_running(pipe, 0); + + __usbhsf_pkt_del(pkt); + } +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index a90801ef00554..361a2e3ccad8d 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ + { 
USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ + { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ ++ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ + { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ +@@ -201,6 +202,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ + { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ + { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ ++ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index fd41b07b5aaf1..f49eae18500cc 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb); + #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 + #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 + #define CINTERION_PRODUCT_CLS8 0x00b0 ++#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 ++#define CINTERION_PRODUCT_MV31_RMNET 0x00b7 + + /* Olivetti products */ + #define OLIVETTI_VENDOR_ID 0x0b3c +@@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), ++ .driver_info = RSVD(3)}, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), ++ .driver_info = RSVD(0)}, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), + .driver_info = RSVD(4) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), +diff --git a/fs/afs/main.c b/fs/afs/main.c +index c9c45d7078bd1..5cd26af2464c9 100644 +--- a/fs/afs/main.c ++++ b/fs/afs/main.c +@@ -186,7 +186,7 @@ static int __init afs_init(void) + goto error_cache; + #endif + +- ret = register_pernet_subsys(&afs_net_ops); ++ ret = register_pernet_device(&afs_net_ops); + if (ret < 0) + goto error_net; + +@@ -206,7 +206,7 @@ static int __init afs_init(void) + error_proc: + afs_fs_exit(); + error_fs: +- unregister_pernet_subsys(&afs_net_ops); ++ unregister_pernet_device(&afs_net_ops); + error_net: + #ifdef CONFIG_AFS_FSCACHE + fscache_unregister_netfs(&afs_cache_netfs); +@@ -237,7 +237,7 @@ static void __exit afs_exit(void) + + proc_remove(afs_proc_symlink); + afs_fs_exit(); +- unregister_pernet_subsys(&afs_net_ops); ++ unregister_pernet_device(&afs_net_ops); + #ifdef CONFIG_AFS_FSCACHE + fscache_unregister_netfs(&afs_cache_netfs); + #endif +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index 5a35850ccb1ab..9ae9a514676c3 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -738,6 +738,7 @@ static int + cifs_d_revalidate(struct dentry *direntry, unsigned int flags) + { + struct inode *inode; ++ int rc; + + if (flags & LOOKUP_RCU) + return -ECHILD; +@@ -747,8 +748,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int 
flags) + if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) + CIFS_I(inode)->time = 0; /* force reval */ + +- if (cifs_revalidate_dentry(direntry)) +- return 0; ++ rc = cifs_revalidate_dentry(direntry); ++ if (rc) { ++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc); ++ switch (rc) { ++ case -ENOENT: ++ case -ESTALE: ++ /* ++ * Those errors mean the dentry is invalid ++ * (file was deleted or recreated) ++ */ ++ return 0; ++ default: ++ /* ++ * Otherwise some unexpected error happened ++ * report it as-is to VFS layer ++ */ ++ return rc; ++ } ++ } + else { + /* + * If the inode wasn't known to be a dfs entry when +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index 2482978f09486..739556e385be8 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -227,7 +227,7 @@ struct smb2_negotiate_req { + __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */ + __le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */ + __le16 Reserved2; +- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */ ++ __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */ + } __packed; + + /* Dialects */ +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c +index 4ffbf8f965814..eab7940bfebef 100644 +--- a/fs/cifs/transport.c ++++ b/fs/cifs/transport.c +@@ -659,10 +659,22 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num, + spin_lock(&server->req_lock); + if (*credits < num) { + /* +- * Return immediately if not too many requests in flight since +- * we will likely be stuck on waiting for credits. ++ * If the server is tight on resources or just gives us less ++ * credits for other reasons (e.g. requests are coming out of ++ * order and the server delays granting more credits until it ++ * processes a missing mid) and we exhausted most available ++ * credits there may be situations when we try to send ++ * a compound request but we don't have enough credits. At this ++ * point the client needs to decide if it should wait for ++ * additional credits or fail the request. If at least one ++ * request is in flight there is a high probability that the ++ * server will return enough credits to satisfy this compound ++ * request. ++ * ++ * Return immediately if no requests in flight since we will be ++ * stuck on waiting for credits. 
+ */ +- if (server->in_flight < num - *credits) { ++ if (server->in_flight == 0) { + spin_unlock(&server->req_lock); + return -ENOTSUPP; + } +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 5fff7cb3582f0..cf3af2140c3d8 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -675,9 +675,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, + + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + ++ set_page_huge_active(page); + /* + * unlock_page because locked by add_to_page_cache() +- * page_put due to reference from alloc_huge_page() ++ * put_page() due to reference from alloc_huge_page() + */ + unlock_page(page); + put_page(page); +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c +index 29abdb1d3b5c6..6509ec3cb3730 100644 +--- a/fs/overlayfs/dir.c ++++ b/fs/overlayfs/dir.c +@@ -940,8 +940,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect) + + buflen -= thislen; + memcpy(&buf[buflen], name, thislen); +- tmp = dget_dlock(d->d_parent); + spin_unlock(&d->d_lock); ++ tmp = dget_parent(d); + + dput(d); + d = tmp; +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h +index 8a03f392f3680..0e080ba5efbcc 100644 +--- a/include/linux/hugetlb.h ++++ b/include/linux/hugetlb.h +@@ -590,6 +590,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, + } + #endif + ++void set_page_huge_active(struct page *page); ++ + #else /* CONFIG_HUGETLB_PAGE */ + struct hstate {}; + +diff --git a/include/linux/msi.h b/include/linux/msi.h +index 8ad679e9d9c04..d695e2eb2092d 100644 +--- a/include/linux/msi.h ++++ b/include/linux/msi.h +@@ -139,6 +139,12 @@ struct msi_desc { + list_for_each_entry((desc), dev_to_msi_list((dev)), list) + #define for_each_msi_entry_safe(desc, tmp, dev) \ + list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) ++#define for_each_msi_vector(desc, __irq, dev) \ ++ for_each_msi_entry((desc), (dev)) \ ++ if ((desc)->irq) \ ++ for (__irq = (desc)->irq; \ ++ __irq < ((desc)->irq + (desc)->nvec_used); \ ++ __irq++) + + #ifdef CONFIG_IRQ_MSI_IOMMU + static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index 3d03756e10699..b2ceec7b280d4 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -1158,7 +1158,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, + old = *pold; + *pold = new; + if (old != NULL) +- qdisc_tree_flush_backlog(old); ++ qdisc_purge_queue(old); + sch_tree_unlock(sch); + + return old; +diff --git a/init/init_task.c b/init/init_task.c +index df7041be96fca..5d8359c44564a 100644 +--- a/init/init_task.c ++++ b/init/init_task.c +@@ -171,7 +171,8 @@ struct task_struct init_task + .lockdep_recursion = 0, + #endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER +- .ret_stack = NULL, ++ .ret_stack = NULL, ++ .tracing_graph_pause = ATOMIC_INIT(0), + #endif + #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION) + .trace_recursion = 0, +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c +index 5a8b4dfdb1419..c2f0aa818b7af 100644 +--- a/kernel/bpf/cgroup.c ++++ b/kernel/bpf/cgroup.c +@@ -1109,6 +1109,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + goto out; + } + ++ if (ctx.optlen < 0) { ++ ret = -EFAULT; ++ goto out; ++ } ++ + if (copy_from_user(ctx.optval, optval, + min(ctx.optlen, max_optlen)) != 0) { + ret = -EFAULT; +@@ -1126,7 +1131,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock 
*sk, int level, + goto out; + } + +- if (ctx.optlen > max_optlen) { ++ if (ctx.optlen > max_optlen || ctx.optlen < 0) { + ret = -EFAULT; + goto out; + } +diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c +index eb95f6106a1ee..5d3da0db092ff 100644 +--- a/kernel/irq/msi.c ++++ b/kernel/irq/msi.c +@@ -437,22 +437,22 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + + can_reserve = msi_check_reservation_mode(domain, info, dev); + +- for_each_msi_entry(desc, dev) { +- virq = desc->irq; +- if (desc->nvec_used == 1) +- dev_dbg(dev, "irq %d for MSI\n", virq); +- else ++ /* ++ * This flag is set by the PCI layer as we need to activate ++ * the MSI entries before the PCI layer enables MSI in the ++ * card. Otherwise the card latches a random msi message. ++ */ ++ if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) ++ goto skip_activate; ++ ++ for_each_msi_vector(desc, i, dev) { ++ if (desc->irq == i) { ++ virq = desc->irq; + dev_dbg(dev, "irq [%d-%d] for MSI\n", + virq, virq + desc->nvec_used - 1); +- /* +- * This flag is set by the PCI layer as we need to activate +- * the MSI entries before the PCI layer enables MSI in the +- * card. Otherwise the card latches a random msi message. +- */ +- if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) +- continue; ++ } + +- irq_data = irq_domain_get_irq_data(domain, desc->irq); ++ irq_data = irq_domain_get_irq_data(domain, i); + if (!can_reserve) { + irqd_clr_can_reserve(irq_data); + if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) +@@ -463,28 +463,24 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + goto cleanup; + } + ++skip_activate: + /* + * If these interrupts use reservation mode, clear the activated bit + * so request_irq() will assign the final vector. + */ + if (can_reserve) { +- for_each_msi_entry(desc, dev) { +- irq_data = irq_domain_get_irq_data(domain, desc->irq); ++ for_each_msi_vector(desc, i, dev) { ++ irq_data = irq_domain_get_irq_data(domain, i); + irqd_clr_activated(irq_data); + } + } + return 0; + + cleanup: +- for_each_msi_entry(desc, dev) { +- struct irq_data *irqd; +- +- if (desc->irq == virq) +- break; +- +- irqd = irq_domain_get_irq_data(domain, desc->irq); +- if (irqd_is_activated(irqd)) +- irq_domain_deactivate_irq(irqd); ++ for_each_msi_vector(desc, i, dev) { ++ irq_data = irq_domain_get_irq_data(domain, i); ++ if (irqd_is_activated(irq_data)) ++ irq_domain_deactivate_irq(irq_data); + } + msi_domain_free_irqs(domain, dev); + return ret; +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 283c8b01ce789..26ae92c12fc22 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -1972,6 +1972,10 @@ int register_kretprobe(struct kretprobe *rp) + if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset)) + return -EINVAL; + ++ /* If only rp->kp.addr is specified, check reregistering kprobes */ ++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) ++ return -EINVAL; ++ + if (kretprobe_blacklist_size) { + addr = kprobe_addr(&rp->kp); + if (IS_ERR(addr)) +diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c +index 7950a0356042a..888cd00174fe3 100644 +--- a/kernel/trace/fgraph.c ++++ b/kernel/trace/fgraph.c +@@ -367,7 +367,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) + } + + if (t->ret_stack == NULL) { +- atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->curr_ret_stack = -1; + t->curr_ret_depth = -1; +@@ -462,7 +461,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); + static void + 
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) + { +- atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->ftrace_timestamp = 0; + /* make curr_ret_stack visible before we add the ret_stack */ +diff --git a/mm/compaction.c b/mm/compaction.c +index 92470625f0b1e..88c3f6bad1aba 100644 +--- a/mm/compaction.c ++++ b/mm/compaction.c +@@ -1276,7 +1276,7 @@ fast_isolate_freepages(struct compact_control *cc) + { + unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1); + unsigned int nr_scanned = 0; +- unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0; ++ unsigned long low_pfn, min_pfn, highest = 0; + unsigned long nr_isolated = 0; + unsigned long distance; + struct page *page = NULL; +@@ -1321,6 +1321,7 @@ fast_isolate_freepages(struct compact_control *cc) + struct page *freepage; + unsigned long flags; + unsigned int order_scanned = 0; ++ unsigned long high_pfn = 0; + + if (!area->nr_free) + continue; +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 11aa763a31440..7bbf419bb86d6 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -2306,7 +2306,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + { + spinlock_t *ptl; + struct mmu_notifier_range range; +- bool was_locked = false; ++ bool do_unlock_page = false; + pmd_t _pmd; + + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, +@@ -2322,7 +2322,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + VM_BUG_ON(freeze && !page); + if (page) { + VM_WARN_ON_ONCE(!PageLocked(page)); +- was_locked = true; + if (page != pmd_page(*pmd)) + goto out; + } +@@ -2331,19 +2330,29 @@ repeat: + if (pmd_trans_huge(*pmd)) { + if (!page) { + page = pmd_page(*pmd); +- if (unlikely(!trylock_page(page))) { +- get_page(page); +- _pmd = *pmd; +- spin_unlock(ptl); +- lock_page(page); +- spin_lock(ptl); +- if (unlikely(!pmd_same(*pmd, _pmd))) { +- unlock_page(page); ++ /* ++ * An anonymous page must be locked, to ensure that a ++ * concurrent reuse_swap_page() sees stable mapcount; ++ * but reuse_swap_page() is not used on shmem or file, ++ * and page lock must not be taken when zap_pmd_range() ++ * calls __split_huge_pmd() while i_mmap_lock is held. ++ */ ++ if (PageAnon(page)) { ++ if (unlikely(!trylock_page(page))) { ++ get_page(page); ++ _pmd = *pmd; ++ spin_unlock(ptl); ++ lock_page(page); ++ spin_lock(ptl); ++ if (unlikely(!pmd_same(*pmd, _pmd))) { ++ unlock_page(page); ++ put_page(page); ++ page = NULL; ++ goto repeat; ++ } + put_page(page); +- page = NULL; +- goto repeat; + } +- put_page(page); ++ do_unlock_page = true; + } + } + if (PageMlocked(page)) +@@ -2353,7 +2362,7 @@ repeat: + __split_huge_pmd_locked(vma, pmd, range.start, freeze); + out: + spin_unlock(ptl); +- if (!was_locked && page) ++ if (do_unlock_page) + unlock_page(page); + /* + * No need to double call mmu_notifier->invalidate_range() callback. 
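/*
 * Editorial aside - not part of the upstream patch. The __split_huge_pmd()
 * hunk above (now applied only to anonymous pages) is an instance of the
 * classic "trylock under a spinlock, else drop, sleep-lock, retake,
 * revalidate" dance. A standalone userspace analogue of that shape
 * (hypothetical data, POSIX threads; build with -lpthread):
 */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t ptl;
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static int generation;	/* stands in for the pmd contents being revalidated */

static void locked_path(void)
{
	int seen;

retry:
	pthread_spin_lock(&ptl);
	seen = generation;
	if (pthread_mutex_trylock(&page_lock)) {
		/* may not sleep while spinning: drop, sleep-lock, retake */
		pthread_spin_unlock(&ptl);
		pthread_mutex_lock(&page_lock);
		pthread_spin_lock(&ptl);
		if (generation != seen) {
			/* the state moved underneath us: start over */
			pthread_mutex_unlock(&page_lock);
			pthread_spin_unlock(&ptl);
			goto retry;
		}
	}
	/* both locks held and the snapshot is still valid */
	pthread_mutex_unlock(&page_lock);
	pthread_spin_unlock(&ptl);
}

int main(void)
{
	pthread_spin_init(&ptl, PTHREAD_PROCESS_PRIVATE);
	locked_path();
	puts("done");
	return 0;
}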
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 3bc33fa838177..d5b03b9262d4f 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -71,6 +71,21 @@ DEFINE_SPINLOCK(hugetlb_lock); + static int num_fault_mutexes; + struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; + ++static inline bool PageHugeFreed(struct page *head) ++{ ++ return page_private(head + 4) == -1UL; ++} ++ ++static inline void SetPageHugeFreed(struct page *head) ++{ ++ set_page_private(head + 4, -1UL); ++} ++ ++static inline void ClearPageHugeFreed(struct page *head) ++{ ++ set_page_private(head + 4, 0); ++} ++ + /* Forward declaration */ + static int hugetlb_acct_memory(struct hstate *h, long delta); + +@@ -869,6 +884,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) + list_move(&page->lru, &h->hugepage_freelists[nid]); + h->free_huge_pages++; + h->free_huge_pages_node[nid]++; ++ SetPageHugeFreed(page); + } + + static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) +@@ -886,6 +902,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) + return NULL; + list_move(&page->lru, &h->hugepage_activelist); + set_page_refcounted(page); ++ ClearPageHugeFreed(page); + h->free_huge_pages--; + h->free_huge_pages_node[nid]--; + return page; +@@ -1217,12 +1234,11 @@ struct hstate *size_to_hstate(unsigned long size) + */ + bool page_huge_active(struct page *page) + { +- VM_BUG_ON_PAGE(!PageHuge(page), page); +- return PageHead(page) && PagePrivate(&page[1]); ++ return PageHeadHuge(page) && PagePrivate(&page[1]); + } + + /* never called for tail page */ +-static void set_page_huge_active(struct page *page) ++void set_page_huge_active(struct page *page) + { + VM_BUG_ON_PAGE(!PageHeadHuge(page), page); + SetPagePrivate(&page[1]); +@@ -1375,6 +1391,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) + set_hugetlb_cgroup(page, NULL); + h->nr_huge_pages++; + h->nr_huge_pages_node[nid]++; ++ ClearPageHugeFreed(page); + spin_unlock(&hugetlb_lock); + } + +@@ -1602,6 +1619,7 @@ int dissolve_free_huge_page(struct page *page) + { + int rc = -EBUSY; + ++retry: + /* Not to disrupt normal path by vainly holding hugetlb_lock */ + if (!PageHuge(page)) + return 0; +@@ -1618,6 +1636,26 @@ int dissolve_free_huge_page(struct page *page) + int nid = page_to_nid(head); + if (h->free_huge_pages - h->resv_huge_pages == 0) + goto out; ++ ++ /* ++ * We should make sure that the page is already on the free list ++ * when it is dissolved. ++ */ ++ if (unlikely(!PageHugeFreed(head))) { ++ spin_unlock(&hugetlb_lock); ++ cond_resched(); ++ ++ /* ++ * Theoretically, we should return -EBUSY when we ++ * encounter this race. In fact, we have a chance ++ * to successfully dissolve the page if we do a ++ * retry. Because the race window is quite small. ++ * If we seize this opportunity, it is an optimization ++ * for increasing the success rate of dissolving page. ++ */ ++ goto retry; ++ } ++ + /* + * Move PageHWPoison flag from head page to the raw error page, + * which makes any subpages rather than the error page reusable. 
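/*
 * Editorial aside - not part of the upstream patch. In the hunk above,
 * dissolve_free_huge_page() retries (with cond_resched()) when it observes
 * a hugepage that is not yet marked freed, instead of failing with -EBUSY
 * in a tiny race window. A standalone sketch of that retry shape
 * (hypothetical flag, C11 atomics):
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int page_on_freelist = 1;	/* the racing enqueue side sets this */

static int dissolve(void)
{
retry:
	if (!atomic_load(&page_on_freelist)) {
		sched_yield();	/* userspace stand-in for cond_resched() */
		goto retry;	/* the window is small, so a retry usually wins */
	}
	/* page verified to be on the free list: safe to dissolve */
	return 0;
}

int main(void)
{
	printf("rc=%d\n", dissolve());
	return 0;
}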
+@@ -5136,9 +5174,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list) + { + bool ret = true; + +- VM_BUG_ON_PAGE(!PageHead(page), page); + spin_lock(&hugetlb_lock); +- if (!page_huge_active(page) || !get_page_unless_zero(page)) { ++ if (!PageHeadHuge(page) || !page_huge_active(page) || ++ !get_page_unless_zero(page)) { + ret = false; + goto unlock; + } +diff --git a/mm/memblock.c b/mm/memblock.c +index c4b16cae2bc9b..11f6ae37d6699 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -257,14 +257,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, + * + * Find @size free area aligned to @align in the specified range and node. + * +- * When allocation direction is bottom-up, the @start should be greater +- * than the end of the kernel image. Otherwise, it will be trimmed. The +- * reason is that we want the bottom-up allocation just near the kernel +- * image so it is highly likely that the allocated memory and the kernel +- * will reside in the same node. +- * +- * If bottom-up allocation failed, will try to allocate memory top-down. +- * + * Return: + * Found address on success, 0 on failure. + */ +@@ -273,8 +265,6 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + phys_addr_t end, int nid, + enum memblock_flags flags) + { +- phys_addr_t kernel_end, ret; +- + /* pump up @end */ + if (end == MEMBLOCK_ALLOC_ACCESSIBLE || + end == MEMBLOCK_ALLOC_KASAN) +@@ -283,40 +273,13 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + /* avoid allocating the first page */ + start = max_t(phys_addr_t, start, PAGE_SIZE); + end = max(start, end); +- kernel_end = __pa_symbol(_end); +- +- /* +- * try bottom-up allocation only when bottom-up mode +- * is set and @end is above the kernel image. +- */ +- if (memblock_bottom_up() && end > kernel_end) { +- phys_addr_t bottom_up_start; +- +- /* make sure we will allocate above the kernel */ +- bottom_up_start = max(start, kernel_end); + +- /* ok, try bottom-up allocation first */ +- ret = __memblock_find_range_bottom_up(bottom_up_start, end, +- size, align, nid, flags); +- if (ret) +- return ret; +- +- /* +- * we always limit bottom-up allocation above the kernel, +- * but top-down allocation doesn't have the limit, so +- * retrying top-down allocation may succeed when bottom-up +- * allocation failed. +- * +- * bottom-up allocation is expected to be fail very rarely, +- * so we use WARN_ONCE() here to see the stack trace if +- * fail happens. 
+- */ +- WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE), +- "memblock: bottom-up allocation failed, memory hotremove may be affected\n"); +- } +- +- return __memblock_find_range_top_down(start, end, size, align, nid, +- flags); ++ if (memblock_bottom_up()) ++ return __memblock_find_range_bottom_up(start, end, size, align, ++ nid, flags); ++ else ++ return __memblock_find_range_top_down(start, end, size, align, ++ nid, flags); + } + + /** +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 6c270fce200f4..7080d708b7d08 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -1244,13 +1244,14 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, + old = neigh->nud_state; + err = -EPERM; + +- if (!(flags & NEIGH_UPDATE_F_ADMIN) && +- (old & (NUD_NOARP | NUD_PERMANENT))) +- goto out; + if (neigh->dead) { + NL_SET_ERR_MSG(extack, "Neighbor entry is now dead"); ++ new = old; + goto out; + } ++ if (!(flags & NEIGH_UPDATE_F_ADMIN) && ++ (old & (NUD_NOARP | NUD_PERMANENT))) ++ goto out; + + ext_learn_change = neigh_update_ext_learned(neigh, flags, ¬ify); + +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c +index ca525cf681a4e..f64d1743b86d6 100644 +--- a/net/ipv4/ip_tunnel.c ++++ b/net/ipv4/ip_tunnel.c +@@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) + } + + dev->needed_headroom = t_hlen + hlen; +- mtu -= (dev->hard_header_len + t_hlen); ++ mtu -= t_hlen; + + if (mtu < IPV4_MIN_MTU) + mtu = IPV4_MIN_MTU; +@@ -347,7 +347,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net, + nt = netdev_priv(dev); + t_hlen = nt->hlen + sizeof(struct iphdr); + dev->min_mtu = ETH_MIN_MTU; +- dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; ++ dev->max_mtu = IP_MAX_MTU - t_hlen; + ip_tunnel_add(itn, nt); + return nt; + +@@ -494,11 +494,10 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, + int mtu; + + tunnel_hlen = md ? tunnel_hlen : tunnel->hlen; +- pkt_size = skb->len - tunnel_hlen - dev->hard_header_len; ++ pkt_size = skb->len - tunnel_hlen; + + if (df) +- mtu = dst_mtu(&rt->dst) - dev->hard_header_len +- - sizeof(struct iphdr) - tunnel_hlen; ++ mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen); + else + mtu = skb_valid_dst(skb) ? 
dst_mtu(skb_dst(skb)) : dev->mtu; + +@@ -964,7 +963,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict) + { + struct ip_tunnel *tunnel = netdev_priv(dev); + int t_hlen = tunnel->hlen + sizeof(struct iphdr); +- int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; ++ int max_mtu = IP_MAX_MTU - t_hlen; + + if (new_mtu < ETH_MIN_MTU) + return -EINVAL; +@@ -1141,10 +1140,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], + + mtu = ip_tunnel_bind_dev(dev); + if (tb[IFLA_MTU]) { +- unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen; ++ unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr)); + +- mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, +- (unsigned int)(max - sizeof(struct iphdr))); ++ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max); + } + + err = dev_set_mtu(dev, mtu); +diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c +index 7a4d0715d1c32..a966d29c772d9 100644 +--- a/net/lapb/lapb_out.c ++++ b/net/lapb/lapb_out.c +@@ -82,7 +82,8 @@ void lapb_kick(struct lapb_cb *lapb) + skb = skb_dequeue(&lapb->write_queue); + + do { +- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { ++ skbn = skb_copy(skb, GFP_ATOMIC); ++ if (!skbn) { + skb_queue_head(&lapb->write_queue, skb); + break; + } +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c +index c9a8a2433e8ac..48322e45e7ddb 100644 +--- a/net/mac80211/driver-ops.c ++++ b/net/mac80211/driver-ops.c +@@ -125,8 +125,11 @@ int drv_sta_state(struct ieee80211_local *local, + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + ret = drv_sta_add(local, sdata, &sta->sta); +- if (ret == 0) ++ if (ret == 0) { + sta->uploaded = true; ++ if (rcu_access_pointer(sta->sta.rates)) ++ drv_sta_rate_tbl_update(local, sdata, &sta->sta); ++ } + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + drv_sta_remove(local, sdata, &sta->sta); +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c +index b051f125d3af2..9841db84bce0a 100644 +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -934,7 +934,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw, + if (old) + kfree_rcu(old, rcu_head); + +- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); ++ if (sta->uploaded) ++ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); + + ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta)); + +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c +index 2921fc2767134..9bacec6653bac 100644 +--- a/net/rxrpc/af_rxrpc.c ++++ b/net/rxrpc/af_rxrpc.c +@@ -976,7 +976,7 @@ static int __init af_rxrpc_init(void) + goto error_security; + } + +- ret = register_pernet_subsys(&rxrpc_net_ops); ++ ret = register_pernet_device(&rxrpc_net_ops); + if (ret) + goto error_pernet; + +@@ -1021,7 +1021,7 @@ error_key_type: + error_sock: + proto_unregister(&rxrpc_proto); + error_proto: +- unregister_pernet_subsys(&rxrpc_net_ops); ++ unregister_pernet_device(&rxrpc_net_ops); + error_pernet: + rxrpc_exit_security(); + error_security: +@@ -1043,7 +1043,7 @@ static void __exit af_rxrpc_exit(void) + unregister_key_type(&key_type_rxrpc); + sock_unregister(PF_RXRPC); + proto_unregister(&rxrpc_proto); +- unregister_pernet_subsys(&rxrpc_net_ops); ++ unregister_pernet_device(&rxrpc_net_ops); + ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0); + ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0); +
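The ip_tunnel.c hunks above stop subtracting dev->hard_header_len when sizing tunnel MTUs: the overhead a plain IPv4 tunnel adds is just its own encapsulation, t_hlen = tunnel header + outer IP header, so the extra subtraction needlessly shrank the usable MTU. A worked example of the corrected bound follows; the 4-byte GRE header is an assumed example and the constants mirror, rather than come from, the kernel headers.

#include <stdio.h>

#define IP_MAX_MTU	0xFFFFU	/* largest IPv4 datagram, 65535 */
#define OUTER_IPHDR	20U	/* sizeof(struct iphdr) without options */

int main(void)
{
	unsigned int gre_hlen = 4;	/* hypothetical: GRE, no options */
	unsigned int t_hlen = gre_hlen + OUTER_IPHDR;	/* 24 */

	/* After the fix, only encapsulation overhead is subtracted. */
	printf("max_mtu = %u\n", IP_MAX_MTU - t_hlen);	/* 65511 */
	return 0;
}

The same t_hlen-only accounting now appears in ip_tunnel_bind_dev(), tnl_update_pmtu(), __ip_tunnel_change_mtu() and ip_tunnel_newlink(), keeping all four call sites consistent.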
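The lapb_out.c hunk above swaps skb_clone() for skb_copy() in lapb_kick(). A clone shares the payload buffer with the original, and LAPB writes its transmit header into that buffer, so sending through a clone also rewrote the copy still queued on write_queue for retransmission; a full copy gives the transmit path a private buffer. A small userspace model of the difference, with hypothetical names (struct buf, buf_clone, buf_copy; malloc error handling omitted for brevity):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	char	*data;	/* shared by a clone, private after a copy */
	size_t	len;
};

static struct buf *buf_clone(const struct buf *b)	/* like skb_clone() */
{
	struct buf *n = malloc(sizeof(*n));

	n->data = b->data;	/* payload stays shared */
	n->len = b->len;
	return n;
}

static struct buf *buf_copy(const struct buf *b)	/* like skb_copy() */
{
	struct buf *n = malloc(sizeof(*n));

	n->data = malloc(b->len);
	memcpy(n->data, b->data, b->len);	/* private duplicate */
	n->len = b->len;
	return n;
}

int main(void)
{
	char payload[] = "data";
	struct buf queued = { payload, sizeof(payload) };

	struct buf *c = buf_clone(&queued);
	c->data[0] = 'X';	/* also rewrites the queued original! */

	struct buf *k = buf_copy(&queued);
	k->data[0] = 'Y';	/* the queued original is untouched */

	printf("queued=%s clone=%s copy=%s\n", queued.data, c->data, k->data);
	free(k->data);
	free(k);
	free(c);
	return 0;
}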