diff --git a/config/bootscripts/boot-marvell.cmd b/config/bootscripts/boot-marvell.cmd index 1666ed37a..094783528 100644 --- a/config/bootscripts/boot-marvell.cmd +++ b/config/bootscripts/boot-marvell.cmd @@ -18,7 +18,10 @@ setenv eth3addr 00:50:43:0d:19:18 # setenv bootargs "selinux=0 cgroup_disable=memory scandelay root=/dev/mmcblk0p1 rw rootfstype=ext4 console=ttyS0,115200 loglevel=${verbosity} rootwait" ext2load mmc 0:1 ${fdtaddr} boot/dtb/armada-388-clearfog.dtb +ext2load mmc 0:1 ${ramdiskaddr} boot/uInitrd ext2load mmc 0:1 ${loadaddr} boot/zImage bootz ${loadaddr} - ${fdtaddr} +#ramdisk currently broken +#bootz ${loadaddr} ${ramdiskaddr} ${fdtaddr} # Recompile with: # mkimage -C none -A arm -T script -d /boot/boot.cmd /boot/boot.scr \ No newline at end of file diff --git a/config/kernel/linux-marvell-dev.config b/config/kernel/linux-marvell-dev.config index b2877382e..fa390dfa4 100644 --- a/config/kernel/linux-marvell-dev.config +++ b/config/kernel/linux-marvell-dev.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm 4.4.16 Kernel Configuration +# Linux/arm 4.4.20 Kernel Configuration # CONFIG_ARM=y CONFIG_ARM_HAS_SG_CHAIN=y @@ -37,7 +37,7 @@ CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_XZ=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_HAVE_KERNEL_LZ4=y -# CONFIG_KERNEL_GZIP is not set +CONFIG_KERNEL_GZIP=y # CONFIG_KERNEL_LZMA is not set # CONFIG_KERNEL_XZ is not set # CONFIG_KERNEL_LZO is not set @@ -52,6 +52,8 @@ CONFIG_CROSS_MEMORY_ATTACH=y CONFIG_FHANDLE=y CONFIG_USELIB=y # CONFIG_AUDIT is not set +CONFIG_HAVE_ARCH_AUDITSYSCALL=y + # # IRQ subsystem # @@ -891,7 +893,6 @@ CONFIG_NETFILTER_XT_SET=m # # Xtables targets # -CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m @@ -8017,7 +8018,6 @@ CONFIG_CRC32_SLICEBY8=y CONFIG_CRC7=m CONFIG_LIBCRC32C=m CONFIG_CRC8=m -CONFIG_AUDIT_GENERIC=y # CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set # CONFIG_RANDOM32_SELFTEST is not set CONFIG_842_COMPRESS=m diff --git a/patch/kernel/marvell-dev/patch-4.4.16-17.patch b/patch/kernel/marvell-dev/patch-4.4.16-17.patch new file mode 100644 index 000000000..c0fb1a6e9 --- /dev/null +++ b/patch/kernel/marvell-dev/patch-4.4.16-17.patch @@ -0,0 +1,2074 @@ +diff --git a/Makefile b/Makefile +index da7621cadc8e..76d34f763a41 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 16 ++SUBLEVEL = 17 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/Makefile b/arch/arc/Makefile +index aeb19021099e..209d8451e23d 100644 +--- a/arch/arc/Makefile ++++ b/arch/arc/Makefile +@@ -48,8 +48,6 @@ endif + + endif + +-cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables +- + # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok + ifeq ($(atleast_gcc48),y) + cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2 +diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c +index 001de4ce711e..11b50959f20e 100644 +--- 
a/arch/arc/kernel/stacktrace.c ++++ b/arch/arc/kernel/stacktrace.c +@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, + * prelogue is setup (callee regs saved and then fp set and not other + * way around + */ +- pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n"); ++ pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n"); + return 0; + + #endif +diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h +index 7a6bed5c08bc..baad72e4c100 100644 +--- a/arch/x86/include/asm/pvclock.h ++++ b/arch/x86/include/asm/pvclock.h +@@ -76,6 +76,8 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, + u8 ret_flags; + + version = src->version; ++ /* Make the latest version visible */ ++ smp_rmb(); + + offset = pvclock_get_nsec_offset(src); + ret = src->system_time + offset; +diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c +index 7abb2b88572e..1e7de3cefc9c 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c ++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c +@@ -1110,6 +1110,13 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) + void *at; + u64 pebs_status; + ++ /* ++ * fmt0 does not have a status bitfield (does not use ++ * perf_record_nhm format) ++ */ ++ if (x86_pmu.intel_cap.pebs_format < 1) ++ return base; ++ + if (base == NULL) + return NULL; + +@@ -1195,7 +1202,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) + if (!event->attr.precise_ip) + return; + +- n = (top - at) / x86_pmu.pebs_record_size; ++ n = top - at; + if (n <= 0) + return; + +diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c +index db9a675e751b..9fdf1d330727 100644 +--- a/arch/x86/kernel/early-quirks.c ++++ b/arch/x86/kernel/early-quirks.c +@@ -11,7 +11,11 @@ + + #include + #include ++#include ++#include + #include ++#include ++#include + #include + #include + #include +@@ -21,6 +25,9 @@ + #include + #include + #include 
++#include ++ ++#define dev_err(msg) pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg) + + static void __init fix_hypertransport_config(int num, int slot, int func) + { +@@ -76,6 +83,13 @@ static void __init nvidia_bugs(int num, int slot, int func) + #ifdef CONFIG_ACPI + #ifdef CONFIG_X86_IO_APIC + /* ++ * Only applies to Nvidia root ports (bus 0) and not to ++ * Nvidia graphics cards with PCI ports on secondary buses. ++ */ ++ if (num) ++ return; ++ ++ /* + * All timer overrides on Nvidia are + * wrong unless HPET is enabled. + * Unfortunately that's not true on many Asus boards. +@@ -589,6 +603,61 @@ static void __init force_disable_hpet(int num, int slot, int func) + #endif + } + ++#define BCM4331_MMIO_SIZE 16384 ++#define BCM4331_PM_CAP 0x40 ++#define bcma_aread32(reg) ioread32(mmio + 1 * BCMA_CORE_SIZE + reg) ++#define bcma_awrite32(reg, val) iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg) ++ ++static void __init apple_airport_reset(int bus, int slot, int func) ++{ ++ void __iomem *mmio; ++ u16 pmcsr; ++ u64 addr; ++ int i; ++ ++ if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) ++ return; ++ ++ /* Card may have been put into PCI_D3hot by grub quirk */ ++ pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL); ++ ++ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) { ++ pmcsr &= ~PCI_PM_CTRL_STATE_MASK; ++ write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr); ++ mdelay(10); ++ ++ pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL); ++ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) { ++ dev_err("Cannot power up Apple AirPort card\n"); ++ return; ++ } ++ } ++ ++ addr = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0); ++ addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32; ++ addr &= PCI_BASE_ADDRESS_MEM_MASK; ++ ++ mmio = early_ioremap(addr, BCM4331_MMIO_SIZE); ++ if (!mmio) { ++ dev_err("Cannot iomap Apple AirPort card\n"); ++ return; ++ } ++ ++ pr_info("Resetting Apple 
AirPort card (left enabled by EFI)\n"); ++ ++ for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++) ++ udelay(10); ++ ++ bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET); ++ bcma_aread32(BCMA_RESET_CTL); ++ udelay(1); ++ ++ bcma_awrite32(BCMA_RESET_CTL, 0); ++ bcma_aread32(BCMA_RESET_CTL); ++ udelay(10); ++ ++ early_iounmap(mmio, BCM4331_MMIO_SIZE); ++} + + #define QFLAG_APPLY_ONCE 0x1 + #define QFLAG_APPLIED 0x2 +@@ -602,12 +671,6 @@ struct chipset { + void (*f)(int num, int slot, int func); + }; + +-/* +- * Only works for devices on the root bus. If you add any devices +- * not on bus 0 readd another loop level in early_quirks(). But +- * be careful because at least the Nvidia quirk here relies on +- * only matching on bus 0. +- */ + static struct chipset early_qrk[] __initdata = { + { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, + PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs }, +@@ -637,9 +700,13 @@ static struct chipset early_qrk[] __initdata = { + */ + { PCI_VENDOR_ID_INTEL, 0x0f00, + PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, ++ { PCI_VENDOR_ID_BROADCOM, 0x4331, ++ PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset}, + {} + }; + ++static void __init early_pci_scan_bus(int bus); ++ + /** + * check_dev_quirk - apply early quirks to a given PCI device + * @num: bus number +@@ -648,7 +715,7 @@ static struct chipset early_qrk[] __initdata = { + * + * Check the vendor & device ID against the early quirks table. + * +- * If the device is single function, let early_quirks() know so we don't ++ * If the device is single function, let early_pci_scan_bus() know so we don't + * poke at this device again. 
+ */ + static int __init check_dev_quirk(int num, int slot, int func) +@@ -657,6 +724,7 @@ static int __init check_dev_quirk(int num, int slot, int func) + u16 vendor; + u16 device; + u8 type; ++ u8 sec; + int i; + + class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE); +@@ -684,25 +752,36 @@ static int __init check_dev_quirk(int num, int slot, int func) + + type = read_pci_config_byte(num, slot, func, + PCI_HEADER_TYPE); ++ ++ if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) { ++ sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS); ++ if (sec > num) ++ early_pci_scan_bus(sec); ++ } ++ + if (!(type & 0x80)) + return -1; + + return 0; + } + +-void __init early_quirks(void) ++static void __init early_pci_scan_bus(int bus) + { + int slot, func; + +- if (!early_pci_allowed()) +- return; +- + /* Poor man's PCI discovery */ +- /* Only scan the root bus */ + for (slot = 0; slot < 32; slot++) + for (func = 0; func < 8; func++) { + /* Only probe function 0 on single fn devices */ +- if (check_dev_quirk(0, slot, func)) ++ if (check_dev_quirk(bus, slot, func)) + break; + } + } ++ ++void __init early_quirks(void) ++{ ++ if (!early_pci_allowed()) ++ return; ++ ++ early_pci_scan_bus(0); ++} +diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c +index 2f355d229a58..bf0ce75735b0 100644 +--- a/arch/x86/kernel/pvclock.c ++++ b/arch/x86/kernel/pvclock.c +@@ -66,6 +66,8 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src) + + do { + version = __pvclock_read_cycles(src, &ret, &flags); ++ /* Make sure that the version double-check is last. */ ++ smp_rmb(); + } while ((src->version & 1) || version != src->version); + + return flags & valid_flags; +@@ -80,6 +82,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) + + do { + version = __pvclock_read_cycles(src, &ret, &flags); ++ /* Make sure that the version double-check is last. 
*/ ++ smp_rmb(); + } while ((src->version & 1) || version != src->version); + + if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) { +diff --git a/block/ioprio.c b/block/ioprio.c +index cc7800e9eb44..01b8116298a1 100644 +--- a/block/ioprio.c ++++ b/block/ioprio.c +@@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p) + if (ret) + goto out; + ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); ++ task_lock(p); + if (p->io_context) + ret = p->io_context->ioprio; ++ task_unlock(p); + out: + return ret; + } +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index b79cb10e289e..bd370c98f77d 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -4138,6 +4138,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + */ + { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, + ++ /* ++ * Device times out with higher max sects. ++ * https://bugzilla.kernel.org/show_bug.cgi?id=121671 ++ */ ++ { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, ++ + /* Devices we expect to fail diagnostics */ + + /* Devices where NCQ should be avoided */ +diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h +index 38f156745d53..71df8f2afc6c 100644 +--- a/drivers/bcma/bcma_private.h ++++ b/drivers/bcma/bcma_private.h +@@ -8,8 +8,6 @@ + #include + #include + +-#define BCMA_CORE_SIZE 0x1000 +- + #define bcma_err(bus, fmt, ...) \ + pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) + #define bcma_warn(bus, fmt, ...) 
\ +diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c +index 2685644826a0..33c20c6b45af 100644 +--- a/drivers/clk/rockchip/clk-mmc-phase.c ++++ b/drivers/clk/rockchip/clk-mmc-phase.c +@@ -153,6 +153,7 @@ struct clk *rockchip_clk_register_mmc(const char *name, + return NULL; + + init.name = name; ++ init.flags = 0; + init.num_parents = num_parents; + init.parent_names = parent_names; + init.ops = &rockchip_mmc_clk_ops; +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c +index 02f9aa4ebe05..e44a1bfb0250 100644 +--- a/drivers/dma/at_xdmac.c ++++ b/drivers/dma/at_xdmac.c +@@ -242,7 +242,7 @@ struct at_xdmac_lld { + u32 mbr_dus; /* Destination Microblock Stride Register */ + }; + +- ++/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */ + struct at_xdmac_desc { + struct at_xdmac_lld lld; + enum dma_transfer_direction direction; +@@ -253,7 +253,7 @@ struct at_xdmac_desc { + unsigned int xfer_size; + struct list_head descs_list; + struct list_head xfer_node; +-}; ++} __aligned(sizeof(u64)); + + static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb) + { +@@ -1388,6 +1388,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + u32 cur_nda, check_nda, cur_ubc, mask, value; + u8 dwidth = 0; + unsigned long flags; ++ bool initd; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE) +@@ -1412,7 +1413,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + residue = desc->xfer_size; + /* + * Flush FIFO: only relevant when the transfer is source peripheral +- * synchronized. ++ * synchronized. Flush is needed before reading CUBC because data in ++ * the FIFO are not reported by CUBC. Reporting a residue of the ++ * transfer length while we have data in FIFO can cause issue. 
++ * Usecase: atmel USART has a timeout which means I have received ++ * characters but there is no more character received for a while. On ++ * timeout, it requests the residue. If the data are in the DMA FIFO, ++ * we will return a residue of the transfer length. It means no data ++ * received. If an application is waiting for these data, it will hang ++ * since we won't have another USART timeout without receiving new ++ * data. + */ + mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; + value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; +@@ -1423,34 +1433,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + } + + /* +- * When processing the residue, we need to read two registers but we +- * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where +- * we stand in the descriptor list and AT_XDMAC_CUBC is used +- * to know how many data are remaining for the current descriptor. +- * Since the dma channel is not paused to not loose data, between the +- * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of +- * descriptor. +- * For that reason, after reading AT_XDMAC_CUBC, we check if we are +- * still using the same descriptor by reading a second time +- * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to +- * read again AT_XDMAC_CUBC. ++ * The easiest way to compute the residue should be to pause the DMA ++ * but doing this can lead to miss some data as some devices don't ++ * have FIFO. ++ * We need to read several registers because: ++ * - DMA is running therefore a descriptor change is possible while ++ * reading these registers ++ * - When the block transfer is done, the value of the CUBC register ++ * is set to its initial value until the fetch of the next descriptor. ++ * This value will corrupt the residue calculation so we have to skip ++ * it. 
++ * ++ * INITD -------- ------------ ++ * |____________________| ++ * _______________________ _______________ ++ * NDA @desc2 \/ @desc3 ++ * _______________________/\_______________ ++ * __________ ___________ _______________ ++ * CUBC 0 \/ MAX desc1 \/ MAX desc2 ++ * __________/\___________/\_______________ ++ * ++ * Since descriptors are aligned on 64 bits, we can assume that ++ * the update of NDA and CUBC is atomic. + * Memory barriers are used to ensure the read order of the registers. +- * A max number of retries is set because unlikely it can never ends if +- * we are transferring a lot of data with small buffers. ++ * A max number of retries is set because unlikely it could never ends. + */ +- cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; +- rmb(); +- cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); + for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { +- rmb(); + check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; +- +- if (likely(cur_nda == check_nda)) +- break; +- +- cur_nda = check_nda; ++ rmb(); ++ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); + rmb(); + cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); ++ rmb(); ++ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; ++ rmb(); ++ ++ if ((check_nda == cur_nda) && initd) ++ break; + } + + if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) { +@@ -1459,6 +1478,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + } + + /* ++ * Flush FIFO: only relevant when the transfer is source peripheral ++ * synchronized. Another flush is needed here because CUBC is updated ++ * when the controller sends the data write command. It can lead to ++ * report data that are not written in the memory or the device. The ++ * FIFO flush ensures that data are really written. 
++ */ ++ if ((desc->lld.mbr_cfg & mask) == value) { ++ at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); ++ while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS)) ++ cpu_relax(); ++ } ++ ++ /* + * Remove size of all microblocks already transferred and the current + * one. Then add the remaining size to transfer of the current + * microblock. +diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c +index 165d3001c301..c6ec5c62b7a9 100644 +--- a/drivers/hwtracing/intel_th/core.c ++++ b/drivers/hwtracing/intel_th/core.c +@@ -419,6 +419,38 @@ static struct intel_th_subdevice { + }, + }; + ++#ifdef CONFIG_MODULES ++static void __intel_th_request_hub_module(struct work_struct *work) ++{ ++ struct intel_th *th = container_of(work, struct intel_th, ++ request_module_work); ++ ++ request_module("intel_th_%s", th->hub->name); ++} ++ ++static int intel_th_request_hub_module(struct intel_th *th) ++{ ++ INIT_WORK(&th->request_module_work, __intel_th_request_hub_module); ++ schedule_work(&th->request_module_work); ++ ++ return 0; ++} ++ ++static void intel_th_request_hub_module_flush(struct intel_th *th) ++{ ++ flush_work(&th->request_module_work); ++} ++#else ++static inline int intel_th_request_hub_module(struct intel_th *th) ++{ ++ return -EINVAL; ++} ++ ++static inline void intel_th_request_hub_module_flush(struct intel_th *th) ++{ ++} ++#endif /* CONFIG_MODULES */ ++ + static int intel_th_populate(struct intel_th *th, struct resource *devres, + unsigned int ndevres, int irq) + { +@@ -488,7 +520,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres, + /* need switch driver to be loaded to enumerate the rest */ + if (subdev->type == INTEL_TH_SWITCH && !req) { + th->hub = thdev; +- err = request_module("intel_th_%s", subdev->name); ++ err = intel_th_request_hub_module(th); + if (!err) + req++; + } +@@ -603,6 +635,7 @@ void intel_th_free(struct intel_th *th) + { + int i; + ++ 
intel_th_request_hub_module_flush(th); + for (i = 0; i < TH_SUBDEVICE_MAX; i++) + if (th->thdev[i] != th->hub) + intel_th_device_remove(th->thdev[i]); +diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h +index 57fd72b20fae..d03a6cd1c65d 100644 +--- a/drivers/hwtracing/intel_th/intel_th.h ++++ b/drivers/hwtracing/intel_th/intel_th.h +@@ -197,6 +197,9 @@ struct intel_th { + + int id; + int major; ++#ifdef CONFIG_MODULES ++ struct work_struct request_module_work; ++#endif /* CONFIG_MODULES */ + #ifdef CONFIG_INTEL_TH_DEBUG + struct dentry *dbg; + #endif +diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c +index 641e87936064..d57a2f75dccf 100644 +--- a/drivers/hwtracing/intel_th/pci.c ++++ b/drivers/hwtracing/intel_th/pci.c +@@ -67,6 +67,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126), + .driver_data = (kernel_ulong_t)0, + }, ++ { ++ /* Kaby Lake PCH-H */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), ++ .driver_data = (kernel_ulong_t)0, ++ }, + { 0 }, + }; + +diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c +index 5fbd5bd0878f..49fc2c7e560a 100644 +--- a/drivers/i2c/muxes/i2c-mux-reg.c ++++ b/drivers/i2c/muxes/i2c-mux-reg.c +@@ -150,7 +150,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux, + mux->data.idle_in_use = true; + + /* map address from "reg" if exists */ +- if (of_address_to_resource(np, 0, &res)) { ++ if (of_address_to_resource(np, 0, &res) == 0) { + mux->data.reg_size = resource_size(&res); + mux->data.reg = devm_ioremap_resource(&pdev->dev, &res); + if (IS_ERR(mux->data.reg)) +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index fd4100d56d8c..2b2f9d66c2c7 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -1200,22 +1200,15 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id + int ep_irq_in_idx; + int 
i, error; + ++ if (intf->cur_altsetting->desc.bNumEndpoints != 2) ++ return -ENODEV; ++ + for (i = 0; xpad_device[i].idVendor; i++) { + if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) && + (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct)) + break; + } + +- if (xpad_device[i].xtype == XTYPE_XBOXONE && +- intf->cur_altsetting->desc.bInterfaceNumber != 0) { +- /* +- * The Xbox One controller lists three interfaces all with the +- * same interface class, subclass and protocol. Differentiate by +- * interface number. +- */ +- return -ENODEV; +- } +- + xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL); + if (!xpad) + return -ENOMEM; +@@ -1246,6 +1239,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id + if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) { + if (intf->cur_altsetting->desc.bInterfaceProtocol == 129) + xpad->xtype = XTYPE_XBOX360W; ++ else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208) ++ xpad->xtype = XTYPE_XBOXONE; + else + xpad->xtype = XTYPE_XBOX360; + } else { +@@ -1260,6 +1255,17 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id + xpad->mapping |= MAP_STICKS_TO_NULL; + } + ++ if (xpad->xtype == XTYPE_XBOXONE && ++ intf->cur_altsetting->desc.bInterfaceNumber != 0) { ++ /* ++ * The Xbox One controller lists three interfaces all with the ++ * same interface class, subclass and protocol. Differentiate by ++ * interface number. 
++ */ ++ error = -ENODEV; ++ goto err_free_in_urb; ++ } ++ + error = xpad_init_output(intf, xpad); + if (error) + goto err_free_in_urb; +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c +index 78f93cf68840..be5b399da5d3 100644 +--- a/drivers/input/mouse/elantech.c ++++ b/drivers/input/mouse/elantech.c +@@ -1568,13 +1568,7 @@ static int elantech_set_properties(struct elantech_data *etd) + case 5: + etd->hw_version = 3; + break; +- case 6: +- case 7: +- case 8: +- case 9: +- case 10: +- case 13: +- case 14: ++ case 6 ... 14: + etd->hw_version = 4; + break; + default: +diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c +index a3f0f5a47490..0f586780ceb4 100644 +--- a/drivers/input/mouse/vmmouse.c ++++ b/drivers/input/mouse/vmmouse.c +@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties) + return -ENXIO; + } + +- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) { +- psmouse_dbg(psmouse, "VMMouse port in use.\n"); +- return -EBUSY; +- } +- + /* Check if the device is present */ + response = ~VMMOUSE_PROTO_MAGIC; + VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2); +- if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) { +- release_region(VMMOUSE_PROTO_PORT, 4); ++ if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) + return -ENXIO; +- } + + if (set_properties) { + psmouse->vendor = VMMOUSE_VENDOR; +@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties) + psmouse->model = version; + } + +- release_region(VMMOUSE_PROTO_PORT, 4); +- + return 0; + } + +@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse) + psmouse_reset(psmouse); + input_unregister_device(priv->abs_dev); + kfree(priv); +- release_region(VMMOUSE_PROTO_PORT, 4); + } + + /** +@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse) + struct input_dev *rel_dev = psmouse->dev, *abs_dev; + int error; + +- if 
(!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) { +- psmouse_dbg(psmouse, "VMMouse port in use.\n"); +- return -EBUSY; +- } +- + psmouse_reset(psmouse); + error = vmmouse_enable(psmouse); + if (error) +- goto release_region; ++ return error; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + abs_dev = input_allocate_device(); +@@ -502,8 +487,5 @@ init_fail: + kfree(priv); + psmouse->private = NULL; + +-release_region: +- release_region(VMMOUSE_PROTO_PORT, 4); +- + return error; + } +diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c +index 7295c198aa08..6fe55d598fac 100644 +--- a/drivers/input/touchscreen/tsc2004.c ++++ b/drivers/input/touchscreen/tsc2004.c +@@ -22,6 +22,11 @@ + #include + #include "tsc200x-core.h" + ++static const struct input_id tsc2004_input_id = { ++ .bustype = BUS_I2C, ++ .product = 2004, ++}; ++ + static int tsc2004_cmd(struct device *dev, u8 cmd) + { + u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd; +@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) + + { +- return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C, ++ return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id, + devm_regmap_init_i2c(i2c, &tsc200x_regmap_config), + tsc2004_cmd); + } +diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c +index b9f593dfd2ef..f2c5f0e47f77 100644 +--- a/drivers/input/touchscreen/tsc2005.c ++++ b/drivers/input/touchscreen/tsc2005.c +@@ -24,6 +24,11 @@ + #include + #include "tsc200x-core.h" + ++static const struct input_id tsc2005_input_id = { ++ .bustype = BUS_SPI, ++ .product = 2005, ++}; ++ + static int tsc2005_cmd(struct device *dev, u8 cmd) + { + u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd; +@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi) + if (error) + return error; + +- return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI, ++ return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id, + 
devm_regmap_init_spi(spi, &tsc200x_regmap_config), + tsc2005_cmd); + } +diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c +index 15240c1ee850..dfa7f1c4f545 100644 +--- a/drivers/input/touchscreen/tsc200x-core.c ++++ b/drivers/input/touchscreen/tsc200x-core.c +@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input) + mutex_unlock(&ts->mutex); + } + +-int tsc200x_probe(struct device *dev, int irq, __u16 bustype, ++int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id, + struct regmap *regmap, + int (*tsc200x_cmd)(struct device *dev, u8 cmd)) + { +@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype, + snprintf(ts->phys, sizeof(ts->phys), + "%s/input-ts", dev_name(dev)); + +- input_dev->name = "TSC200X touchscreen"; ++ if (tsc_id->product == 2004) { ++ input_dev->name = "TSC200X touchscreen"; ++ } else { ++ input_dev->name = devm_kasprintf(dev, GFP_KERNEL, ++ "TSC%04d touchscreen", ++ tsc_id->product); ++ if (!input_dev->name) ++ return -ENOMEM; ++ } ++ + input_dev->phys = ts->phys; +- input_dev->id.bustype = bustype; ++ input_dev->id = *tsc_id; + input_dev->dev.parent = dev; + input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY); + input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); +diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h +index 7a482d102614..49a63a3c6840 100644 +--- a/drivers/input/touchscreen/tsc200x-core.h ++++ b/drivers/input/touchscreen/tsc200x-core.h +@@ -70,7 +70,7 @@ + extern const struct regmap_config tsc200x_regmap_config; + extern const struct dev_pm_ops tsc200x_pm_ops; + +-int tsc200x_probe(struct device *dev, int irq, __u16 bustype, ++int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id, + struct regmap *regmap, + int (*tsc200x_cmd)(struct device *dev, u8 cmd)); + int tsc200x_remove(struct device *dev); +diff --git a/drivers/input/touchscreen/wacom_w8001.c 
b/drivers/input/touchscreen/wacom_w8001.c +index 2792ca397dd0..3ed0ce1e4dcb 100644 +--- a/drivers/input/touchscreen/wacom_w8001.c ++++ b/drivers/input/touchscreen/wacom_w8001.c +@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar "); + MODULE_DESCRIPTION(DRIVER_DESC); + MODULE_LICENSE("GPL"); + +-#define W8001_MAX_LENGTH 11 ++#define W8001_MAX_LENGTH 13 + #define W8001_LEAD_MASK 0x80 + #define W8001_LEAD_BYTE 0x80 + #define W8001_TAB_MASK 0x40 +diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c +index 565a59310747..34b35ebd60ac 100644 +--- a/drivers/media/usb/airspy/airspy.c ++++ b/drivers/media/usb/airspy/airspy.c +@@ -1073,7 +1073,7 @@ static int airspy_probe(struct usb_interface *intf, + if (ret) { + dev_err(s->dev, "Failed to register as video device (%d)\n", + ret); +- goto err_unregister_v4l2_dev; ++ goto err_free_controls; + } + dev_info(s->dev, "Registered as %s\n", + video_device_node_name(&s->vdev)); +@@ -1082,7 +1082,6 @@ static int airspy_probe(struct usb_interface *intf, + + err_free_controls: + v4l2_ctrl_handler_free(&s->hdl); +-err_unregister_v4l2_dev: + v4l2_device_unregister(&s->v4l2_dev); + err_free_mem: + kfree(s); +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index c641c202fe7e..64950035613b 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -1767,8 +1767,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, + + packed_cmd_hdr = packed->cmd_hdr; + memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); +- packed_cmd_hdr[0] = (packed->nr_entries << 16) | +- (PACKED_CMD_WR << 8) | PACKED_CMD_VER; ++ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) | ++ (PACKED_CMD_WR << 8) | PACKED_CMD_VER); + hdr_blocks = mmc_large_sector(card) ? 
8 : 1; + + /* +@@ -1782,14 +1782,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, + ((brq->data.blocks * brq->data.blksz) >= + card->ext_csd.data_tag_unit_size); + /* Argument of CMD23 */ +- packed_cmd_hdr[(i * 2)] = ++ packed_cmd_hdr[(i * 2)] = cpu_to_le32( + (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | + (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | +- blk_rq_sectors(prq); ++ blk_rq_sectors(prq)); + /* Argument of CMD18 or CMD25 */ +- packed_cmd_hdr[((i * 2)) + 1] = ++ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32( + mmc_card_blockaddr(card) ? +- blk_rq_pos(prq) : blk_rq_pos(prq) << 9; ++ blk_rq_pos(prq) : blk_rq_pos(prq) << 9); + packed->blocks += blk_rq_sectors(prq); + i++; + } +diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c +index 8b3275d7792a..8f5e93cb7975 100644 +--- a/drivers/net/can/at91_can.c ++++ b/drivers/net/can/at91_can.c +@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota) + + /* upper group completed, look again in lower */ + if (priv->rx_next > get_mb_rx_low_last(priv) && +- quota > 0 && mb > get_mb_rx_last(priv)) { ++ mb > get_mb_rx_last(priv)) { + priv->rx_next = get_mb_rx_first(priv); +- goto again; ++ if (quota > 0) ++ goto again; + } + + return received; +diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c +index f91b094288da..e3dccd3200d5 100644 +--- a/drivers/net/can/c_can/c_can.c ++++ b/drivers/net/can/c_can/c_can.c +@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface, + + priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); + +- for (i = 0; i < frame->can_dlc; i += 2) { +- priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2, +- frame->data[i] | (frame->data[i + 1] << 8)); ++ if (priv->type == BOSCH_D_CAN) { ++ u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface); ++ ++ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) { ++ data = (u32)frame->data[i]; ++ data |= (u32)frame->data[i + 1] << 8; ++ data 
|= (u32)frame->data[i + 2] << 16; ++ data |= (u32)frame->data[i + 3] << 24; ++ priv->write_reg32(priv, dreg, data); ++ } ++ } else { ++ for (i = 0; i < frame->can_dlc; i += 2) { ++ priv->write_reg(priv, ++ C_CAN_IFACE(DATA1_REG, iface) + i / 2, ++ frame->data[i] | ++ (frame->data[i + 1] << 8)); ++ } + } + } + +@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) + } else { + int i, dreg = C_CAN_IFACE(DATA1_REG, iface); + +- for (i = 0; i < frame->can_dlc; i += 2, dreg ++) { +- data = priv->read_reg(priv, dreg); +- frame->data[i] = data; +- frame->data[i + 1] = data >> 8; ++ if (priv->type == BOSCH_D_CAN) { ++ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) { ++ data = priv->read_reg32(priv, dreg); ++ frame->data[i] = data; ++ frame->data[i + 1] = data >> 8; ++ frame->data[i + 2] = data >> 16; ++ frame->data[i + 3] = data >> 24; ++ } ++ } else { ++ for (i = 0; i < frame->can_dlc; i += 2, dreg++) { ++ data = priv->read_reg(priv, dreg); ++ frame->data[i] = data; ++ frame->data[i + 1] = data >> 8; ++ } + } + } + +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 910c12e2638e..ad535a854e5c 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[]) + * - control mode with CAN_CTRLMODE_FD set + */ + ++ if (!data) ++ return 0; ++ + if (data[IFLA_CAN_CTRLMODE]) { + struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); + +@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev, + return -EOPNOTSUPP; + } + ++static void can_dellink(struct net_device *dev, struct list_head *head) ++{ ++ return; ++} ++ + static struct rtnl_link_ops can_link_ops __read_mostly = { + .kind = "can", + .maxtype = IFLA_CAN_MAX, +@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = { + .validate = can_validate, + .newlink = can_newlink, + .changelink = can_changelink, ++ 
.dellink = can_dellink, + .get_size = can_get_size, + .fill_info = can_fill_info, + .get_xstats_size = can_get_xstats_size, +diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c +index 1029aa7889b5..398ec45aadef 100644 +--- a/drivers/pinctrl/freescale/pinctrl-imx.c ++++ b/drivers/pinctrl/freescale/pinctrl-imx.c +@@ -207,9 +207,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, + pin_reg = &info->pin_regs[pin_id]; + + if (pin_reg->mux_reg == -1) { +- dev_err(ipctl->dev, "Pin(%s) does not support mux function\n", ++ dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n", + info->pins[pin_id].name); +- return -EINVAL; ++ continue; + } + + if (info->flags & SHARE_MUX_CONF_REG) { +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c +index 23b6b8c29a99..73d8d47ea465 100644 +--- a/drivers/pinctrl/pinctrl-single.c ++++ b/drivers/pinctrl/pinctrl-single.c +@@ -1576,6 +1576,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc, + else + mask &= ~soc_mask; + pcs->write(mask, pcswi->reg); ++ ++ /* flush posted write */ ++ mask = pcs->read(pcswi->reg); + raw_spin_unlock(&pcs->lock); + } + +diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c +index d45cd254ed1c..2b331d5b9e79 100644 +--- a/drivers/platform/chrome/cros_ec_dev.c ++++ b/drivers/platform/chrome/cros_ec_dev.c +@@ -147,13 +147,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg) + goto exit; + } + ++ if (u_cmd.outsize != s_cmd->outsize || ++ u_cmd.insize != s_cmd->insize) { ++ ret = -EINVAL; ++ goto exit; ++ } ++ + s_cmd->command += ec->cmd_offset; + ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd); + /* Only copy data to userland if data was received. 
*/ + if (ret < 0) + goto exit; + +- if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize)) ++ if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize)) + ret = -EFAULT; + exit: + kfree(s_cmd); +diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c +index 456987c88baa..b13cd074c52a 100644 +--- a/drivers/power/power_supply_core.c ++++ b/drivers/power/power_supply_core.c +@@ -565,11 +565,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd, + + WARN_ON(tzd == NULL); + psy = tzd->devdata; +- ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val); ++ ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val); ++ if (ret) ++ return ret; + + /* Convert tenths of degree Celsius to milli degree Celsius. */ +- if (!ret) +- *temp = val.intval * 100; ++ *temp = val.intval * 100; + + return ret; + } +@@ -612,10 +613,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd, + int ret; + + psy = tcd->devdata; +- ret = psy->desc->get_property(psy, +- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val); +- if (!ret) +- *state = val.intval; ++ ret = power_supply_get_property(psy, ++ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val); ++ if (ret) ++ return ret; ++ ++ *state = val.intval; + + return ret; + } +@@ -628,10 +631,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd, + int ret; + + psy = tcd->devdata; +- ret = psy->desc->get_property(psy, +- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val); +- if (!ret) +- *state = val.intval; ++ ret = power_supply_get_property(psy, ++ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val); ++ if (ret) ++ return ret; ++ ++ *state = val.intval; + + return ret; + } +diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c +index 38a8bbe74810..83797d89c30f 100644 +--- a/drivers/pps/clients/pps_parport.c ++++ b/drivers/pps/clients/pps_parport.c +@@ -195,7 +195,7 @@ static void parport_detach(struct parport 
*port) + struct pps_client_pp *device; + + /* FIXME: oooh, this is ugly! */ +- if (strcmp(pardev->name, KBUILD_MODNAME)) ++ if (!pardev || strcmp(pardev->name, KBUILD_MODNAME)) + /* not our port */ + return; + +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c +index 8f1b091e1732..12b2cb7769f9 100644 +--- a/drivers/s390/net/qeth_l2_main.c ++++ b/drivers/s390/net/qeth_l2_main.c +@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) + qeth_l2_set_offline(cgdev); + + if (card->dev) { ++ netif_napi_del(&card->napi); + unregister_netdev(card->dev); + card->dev = NULL; + } +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c +index 543960e96b42..50cec6b13d27 100644 +--- a/drivers/s390/net/qeth_l3_main.c ++++ b/drivers/s390/net/qeth_l3_main.c +@@ -3246,6 +3246,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) + qeth_l3_set_offline(cgdev); + + if (card->dev) { ++ netif_napi_del(&card->napi); + unregister_netdev(card->dev); + card->dev = NULL; + } +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c +index 43ac62623bf2..7a58128a0000 100644 +--- a/drivers/scsi/ipr.c ++++ b/drivers/scsi/ipr.c +@@ -10095,6 +10095,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev, + ioa_cfg->intr_flag = IPR_USE_MSI; + else { + ioa_cfg->intr_flag = IPR_USE_LSI; ++ ioa_cfg->clear_isr = 1; + ioa_cfg->nvectors = 1; + dev_info(&pdev->dev, "Cannot enable MSI.\n"); + } +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c +index 93cbefa75b26..11cdb172cfaf 100644 +--- a/drivers/scsi/scsi_devinfo.c ++++ b/drivers/scsi/scsi_devinfo.c +@@ -426,7 +426,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, + * here, and we don't know what device it is + * trying to work with, leave it as-is. 
+ */ +- vmax = 8; /* max length of vendor */ ++ vmax = sizeof(devinfo->vendor); + vskip = vendor; + while (vmax > 0 && *vskip == ' ') { + vmax--; +@@ -436,7 +436,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, + while (vmax > 0 && vskip[vmax - 1] == ' ') + --vmax; + +- mmax = 16; /* max length of model */ ++ mmax = sizeof(devinfo->model); + mskip = model; + while (mmax > 0 && *mskip == ' ') { + mmax--; +@@ -452,10 +452,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, + * Behave like the older version of get_device_flags. + */ + if (memcmp(devinfo->vendor, vskip, vmax) || +- devinfo->vendor[vmax]) ++ (vmax < sizeof(devinfo->vendor) && ++ devinfo->vendor[vmax])) + continue; + if (memcmp(devinfo->model, mskip, mmax) || +- devinfo->model[mmax]) ++ (mmax < sizeof(devinfo->model) && ++ devinfo->model[mmax])) + continue; + return devinfo; + } else { +diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c +index fbb0a4d74e91..39d7c7c70112 100644 +--- a/drivers/spi/spi-sun4i.c ++++ b/drivers/spi/spi-sun4i.c +@@ -170,13 +170,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master, + { + struct sun4i_spi *sspi = spi_master_get_devdata(master); + unsigned int mclk_rate, div, timeout; ++ unsigned int start, end, tx_time; + unsigned int tx_len = 0; + int ret = 0; + u32 reg; + + /* We don't support transfer larger than the FIFO */ + if (tfr->len > SUN4I_FIFO_DEPTH) +- return -EINVAL; ++ return -EMSGSIZE; ++ ++ if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH) ++ return -EMSGSIZE; + + reinit_completion(&sspi->done); + sspi->tx_buf = tfr->tx_buf; +@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master, + sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len)); + sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len)); + +- /* Fill the TX FIFO */ +- sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH); ++ /* ++ * Fill the TX FIFO ++ * Filling the FIFO fully 
causes timeout for some reason ++ * at least on spi2 on A10s ++ */ ++ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1); + + /* Enable the interrupts */ + sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC); +@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master, + reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); + sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH); + ++ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U); ++ start = jiffies; + timeout = wait_for_completion_timeout(&sspi->done, +- msecs_to_jiffies(1000)); ++ msecs_to_jiffies(tx_time)); ++ end = jiffies; + if (!timeout) { ++ dev_warn(&master->dev, ++ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", ++ dev_name(&spi->dev), tfr->len, tfr->speed_hz, ++ jiffies_to_msecs(end - start), tx_time); + ret = -ETIMEDOUT; + goto out; + } +diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c +index ac48f59705a8..e77add01b0e9 100644 +--- a/drivers/spi/spi-sun6i.c ++++ b/drivers/spi/spi-sun6i.c +@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, + { + struct sun6i_spi *sspi = spi_master_get_devdata(master); + unsigned int mclk_rate, div, timeout; ++ unsigned int start, end, tx_time; + unsigned int tx_len = 0; + int ret = 0; + u32 reg; +@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master, + reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG); + sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH); + ++ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U); ++ start = jiffies; + timeout = wait_for_completion_timeout(&sspi->done, +- msecs_to_jiffies(1000)); ++ msecs_to_jiffies(tx_time)); ++ end = jiffies; + if (!timeout) { ++ dev_warn(&master->dev, ++ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", ++ dev_name(&spi->dev), tfr->len, tfr->speed_hz, ++ jiffies_to_msecs(end - start), tx_time); + ret = -ETIMEDOUT; + goto out; + } +diff --git a/drivers/usb/host/ohci-q.c 
b/drivers/usb/host/ohci-q.c +index d029bbe9eb36..641fed609911 100644 +--- a/drivers/usb/host/ohci-q.c ++++ b/drivers/usb/host/ohci-q.c +@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed) + { + int branch; + +- ed->state = ED_OPER; + ed->ed_prev = NULL; + ed->ed_next = NULL; + ed->hwNextED = 0; +@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed) + /* the HC may not see the schedule updates yet, but if it does + * then they'll be properly ordered. + */ ++ ++ ed->state = ED_OPER; + return 0; + } + +diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c +index 9c234209d8b5..47a4177b16d2 100644 +--- a/drivers/xen/xen-pciback/conf_space.c ++++ b/drivers/xen/xen-pciback/conf_space.c +@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size, + field_start = OFFSET(cfg_entry); + field_end = OFFSET(cfg_entry) + field->size; + +- if ((req_start >= field_start && req_start < field_end) +- || (req_end > field_start && req_end <= field_end)) { ++ if (req_end > field_start && field_end > req_start) { + err = conf_space_read(dev, cfg_entry, field_start, + &tmp_val); + if (err) +@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) + field_start = OFFSET(cfg_entry); + field_end = OFFSET(cfg_entry) + field->size; + +- if ((req_start >= field_start && req_start < field_end) +- || (req_end > field_start && req_end <= field_end)) { ++ if (req_end > field_start && field_end > req_start) { + tmp_val = 0; + + err = xen_pcibk_config_read(dev, field_start, +diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c +index 9433e46518c8..531e76474983 100644 +--- a/drivers/xen/xenbus/xenbus_dev_frontend.c ++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c +@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type, + rc = -ENOMEM; + goto out; + } ++ } else { ++ 
list_for_each_entry(trans, &u->transactions, list) ++ if (trans->handle.id == u->u.msg.tx_id) ++ break; ++ if (&trans->list == &u->transactions) ++ return -ESRCH; + } + + reply = xenbus_dev_request_and_reply(&u->u.msg); + if (IS_ERR(reply)) { +- kfree(trans); ++ if (msg_type == XS_TRANSACTION_START) ++ kfree(trans); + rc = PTR_ERR(reply); + goto out; + } +@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type, + list_add(&trans->list, &u->transactions); + } + } else if (u->u.msg.type == XS_TRANSACTION_END) { +- list_for_each_entry(trans, &u->transactions, list) +- if (trans->handle.id == u->u.msg.tx_id) +- break; +- BUG_ON(&trans->list == &u->transactions); + list_del(&trans->list); +- + kfree(trans); + } + +diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c +index ba804f3d8278..ce65591b4168 100644 +--- a/drivers/xen/xenbus/xenbus_xs.c ++++ b/drivers/xen/xenbus/xenbus_xs.c +@@ -250,9 +250,6 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) + + mutex_unlock(&xs_state.request_mutex); + +- if (IS_ERR(ret)) +- return ret; +- + if ((msg->type == XS_TRANSACTION_END) || + ((req_msg.type == XS_TRANSACTION_START) && + (msg->type == XS_ERROR))) +diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c +index 7bf835f85bc8..12ceaf52dae6 100644 +--- a/fs/9p/vfs_file.c ++++ b/fs/9p/vfs_file.c +@@ -74,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file) + v9fs_proto_dotu(v9ses)); + fid = file->private_data; + if (!fid) { +- fid = v9fs_fid_clone(file->f_path.dentry); ++ fid = v9fs_fid_clone(file_dentry(file)); + if (IS_ERR(fid)) + return PTR_ERR(fid); + +@@ -100,7 +100,7 @@ int v9fs_file_open(struct inode *inode, struct file *file) + * because we want write after unlink usecase + * to work. 
+ */ +- fid = v9fs_writeback_fid(file->f_path.dentry); ++ fid = v9fs_writeback_fid(file_dentry(file)); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + mutex_unlock(&v9inode->v_mutex); +@@ -516,7 +516,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma) + * because we want write after unlink usecase + * to work. + */ +- fid = v9fs_writeback_fid(filp->f_path.dentry); ++ fid = v9fs_writeback_fid(file_dentry(filp)); + if (IS_ERR(fid)) { + retval = PTR_ERR(fid); + mutex_unlock(&v9inode->v_mutex); +diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c +index feef8a9c4de7..11309683d65f 100644 +--- a/fs/ecryptfs/file.c ++++ b/fs/ecryptfs/file.c +@@ -170,6 +170,19 @@ out: + return rc; + } + ++static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ struct file *lower_file = ecryptfs_file_to_lower(file); ++ /* ++ * Don't allow mmap on top of file systems that don't support it ++ * natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs ++ * allows recursive mounting, this will need to be extended. ++ */ ++ if (!lower_file->f_op->mmap) ++ return -ENODEV; ++ return generic_file_mmap(file, vma); ++} ++ + /** + * ecryptfs_open + * @inode: inode speciying file to open +@@ -364,7 +377,7 @@ const struct file_operations ecryptfs_main_fops = { + #ifdef CONFIG_COMPAT + .compat_ioctl = ecryptfs_compat_ioctl, + #endif +- .mmap = generic_file_mmap, ++ .mmap = ecryptfs_mmap, + .open = ecryptfs_open, + .flush = ecryptfs_flush, + .release = ecryptfs_release, +diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c +index e818f5ac7a26..866bb18efefe 100644 +--- a/fs/ecryptfs/kthread.c ++++ b/fs/ecryptfs/kthread.c +@@ -25,7 +25,6 @@ + #include + #include + #include +-#include + #include "ecryptfs_kernel.h" + + struct ecryptfs_open_req { +@@ -148,7 +147,7 @@ int ecryptfs_privileged_open(struct file **lower_file, + flags |= IS_RDONLY(d_inode(lower_dentry)) ? 
O_RDONLY : O_RDWR; + (*lower_file) = dentry_open(&req.path, flags, cred); + if (!IS_ERR(*lower_file)) +- goto have_file; ++ goto out; + if ((flags & O_ACCMODE) == O_RDONLY) { + rc = PTR_ERR((*lower_file)); + goto out; +@@ -166,16 +165,8 @@ int ecryptfs_privileged_open(struct file **lower_file, + mutex_unlock(&ecryptfs_kthread_ctl.mux); + wake_up(&ecryptfs_kthread_ctl.wait); + wait_for_completion(&req.done); +- if (IS_ERR(*lower_file)) { ++ if (IS_ERR(*lower_file)) + rc = PTR_ERR(*lower_file); +- goto out; +- } +-have_file: +- if ((*lower_file)->f_op->mmap == NULL) { +- fput(*lower_file); +- *lower_file = NULL; +- rc = -EMEDIUMTYPE; +- } + out: + return rc; + } +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 3578b25fccfd..62880586ed85 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -469,6 +469,10 @@ static int __ext4_ext_check(const char *function, unsigned int line, + error_msg = "invalid extent entries"; + goto corrupted; + } ++ if (unlikely(depth > 32)) { ++ error_msg = "too large eh_depth"; ++ goto corrupted; ++ } + /* Verify checksum on non-root extent tree nodes */ + if (ext_depth(inode) != depth && + !ext4_extent_block_csum_verify(inode, eh)) { +diff --git a/fs/inode.c b/fs/inode.c +index 1be5f9003eb3..b0edef500590 100644 +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -1733,8 +1733,8 @@ static int __remove_privs(struct dentry *dentry, int kill) + */ + int file_remove_privs(struct file *file) + { +- struct dentry *dentry = file->f_path.dentry; +- struct inode *inode = d_inode(dentry); ++ struct dentry *dentry = file_dentry(file); ++ struct inode *inode = file_inode(file); + int kill; + int error = 0; + +@@ -1742,7 +1742,7 @@ int file_remove_privs(struct file *file) + if (IS_NOSEC(inode)) + return 0; + +- kill = file_needs_remove_privs(file); ++ kill = dentry_needs_remove_privs(dentry); + if (kill < 0) + return kill; + if (kill) +diff --git a/fs/locks.c b/fs/locks.c +index 6333263b7bc8..8eddae23e10b 100644 +--- a/fs/locks.c ++++ b/fs/locks.c 
+@@ -1602,7 +1602,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr + { + struct file_lock *fl, *my_fl = NULL, *lease; + struct dentry *dentry = filp->f_path.dentry; +- struct inode *inode = dentry->d_inode; ++ struct inode *inode = file_inode(filp); + struct file_lock_context *ctx; + bool is_deleg = (*flp)->fl_flags & FL_DELEG; + int error; +diff --git a/fs/namespace.c b/fs/namespace.c +index 33064fcbfff9..5be02a0635be 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -1562,6 +1562,7 @@ void __detach_mounts(struct dentry *dentry) + goto out_unlock; + + lock_mount_hash(); ++ event++; + while (!hlist_empty(&mp->m_list)) { + mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); + if (mnt->mnt.mnt_flags & MNT_UMOUNT) { +diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c +index 69bd801afb53..37e49cb2ac4c 100644 +--- a/fs/nilfs2/the_nilfs.c ++++ b/fs/nilfs2/the_nilfs.c +@@ -443,7 +443,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp) + if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC) + return 0; + bytes = le16_to_cpu(sbp->s_bytes); +- if (bytes > BLOCK_SIZE) ++ if (bytes < sumoff + 4 || bytes > BLOCK_SIZE) + return 0; + crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp, + sumoff); +diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c +index 0597820f5d9d..4f729ffff75d 100644 +--- a/fs/overlayfs/inode.c ++++ b/fs/overlayfs/inode.c +@@ -63,6 +63,9 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr) + if (!err) { + upperdentry = ovl_dentry_upper(dentry); + ++ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) ++ attr->ia_valid &= ~ATTR_MODE; ++ + mutex_lock(&upperdentry->d_inode->i_mutex); + err = notify_change(upperdentry, attr, NULL); + if (!err) +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h +index c4bd0e2c173c..ef2e8c97e183 100644 +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ 
-531,15 +531,19 @@ + + #define INIT_TEXT \ + *(.init.text) \ ++ *(.text.startup) \ + MEM_DISCARD(init.text) + + #define EXIT_DATA \ + *(.exit.data) \ ++ *(.fini_array) \ ++ *(.dtors) \ + MEM_DISCARD(exit.data) \ + MEM_DISCARD(exit.rodata) + + #define EXIT_TEXT \ + *(.exit.text) \ ++ *(.text.exit) \ + MEM_DISCARD(exit.text) + + #define EXIT_CALL \ +diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h +index 3feb1b2d75d8..14cd6f77e284 100644 +--- a/include/linux/bcma/bcma.h ++++ b/include/linux/bcma/bcma.h +@@ -156,6 +156,7 @@ struct bcma_host_ops { + #define BCMA_CORE_DEFAULT 0xFFF + + #define BCMA_MAX_NR_CORES 16 ++#define BCMA_CORE_SIZE 0x1000 + + /* Chip IDs of PCIe devices */ + #define BCMA_CHIP_ID_BCM4313 0x4313 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c +index 1c9d701f7a72..a3424f28aaf4 100644 +--- a/kernel/cgroup.c ++++ b/kernel/cgroup.c +@@ -4793,6 +4793,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css, + memset(css, 0, sizeof(*css)); + css->cgroup = cgrp; + css->ss = ss; ++ css->id = -1; + INIT_LIST_HEAD(&css->sibling); + INIT_LIST_HEAD(&css->children); + css->serial_nr = css_serial_nr_next++; +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 6c0cdb5a73f8..67d1e1597d9c 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4951,14 +4951,16 @@ void show_state_filter(unsigned long state_filter) + /* + * reset the NMI-timeout, listing all files on a slow + * console might take a lot of time: ++ * Also, reset softlockup watchdogs on all CPUs, because ++ * another CPU might be blocked waiting for us to process ++ * an IPI. 
+ */ + touch_nmi_watchdog(); ++ touch_all_softlockup_watchdogs(); + if (!state_filter || (p->state & state_filter)) + sched_show_task(p); + } + +- touch_all_softlockup_watchdogs(); +- + #ifdef CONFIG_SCHED_DEBUG + sysrq_sched_debug_show(); + #endif +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 51c615279b23..b8b516c37bf1 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -687,8 +687,6 @@ void init_entity_runnable_average(struct sched_entity *se) + /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */ + } + +-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq); +-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq); + #else + void init_entity_runnable_average(struct sched_entity *se) + { +@@ -4594,19 +4592,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg) + return wl; + + for_each_sched_entity(se) { +- long w, W; ++ struct cfs_rq *cfs_rq = se->my_q; ++ long W, w = cfs_rq_load_avg(cfs_rq); + +- tg = se->my_q->tg; ++ tg = cfs_rq->tg; + + /* + * W = @wg + \Sum rw_j + */ +- W = wg + calc_tg_weight(tg, se->my_q); ++ W = wg + atomic_long_read(&tg->load_avg); ++ ++ /* Ensure \Sum rw_j >= rw_i */ ++ W -= cfs_rq->tg_load_avg_contrib; ++ W += w; + + /* + * w = rw_i + @wl + */ +- w = cfs_rq_load_avg(se->my_q) + wl; ++ w += wl; + + /* + * wl = S * s'_i; see (2) +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c +index f5e86d282d52..80016b329d94 100644 +--- a/kernel/time/posix-cpu-timers.c ++++ b/kernel/time/posix-cpu-timers.c +@@ -808,6 +808,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) + timer->it.cpu.expires = 0; + sample_to_timespec(timer->it_clock, timer->it.cpu.expires, + &itp->it_value); ++ return; + } else { + cpu_timer_sample_group(timer->it_clock, p, &now); + unlock_task_sighand(p, &flags); +diff --git a/mm/compaction.c b/mm/compaction.c +index 7881e072dc33..dba02dec7195 100644 +--- 
a/mm/compaction.c ++++ b/mm/compaction.c +@@ -475,25 +475,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, + + /* Found a free page, break it into order-0 pages */ + isolated = split_free_page(page); ++ if (!isolated) ++ break; ++ + total_isolated += isolated; ++ cc->nr_freepages += isolated; + for (i = 0; i < isolated; i++) { + list_add(&page->lru, freelist); + page++; + } +- +- /* If a page was split, advance to the end of it */ +- if (isolated) { +- cc->nr_freepages += isolated; +- if (!strict && +- cc->nr_migratepages <= cc->nr_freepages) { +- blockpfn += isolated; +- break; +- } +- +- blockpfn += isolated - 1; +- cursor += isolated - 1; +- continue; ++ if (!strict && cc->nr_migratepages <= cc->nr_freepages) { ++ blockpfn += isolated; ++ break; + } ++ /* Advance to the end of split page */ ++ blockpfn += isolated - 1; ++ cursor += isolated - 1; ++ continue; + + isolate_fail: + if (strict) +@@ -503,6 +501,9 @@ isolate_fail: + + } + ++ if (locked) ++ spin_unlock_irqrestore(&cc->zone->lock, flags); ++ + /* + * There is a tiny chance that we have read bogus compound_order(), + * so be careful to not go outside of the pageblock. +@@ -524,9 +525,6 @@ isolate_fail: + if (strict && blockpfn < end_pfn) + total_isolated = 0; + +- if (locked) +- spin_unlock_irqrestore(&cc->zone->lock, flags); +- + /* Update the pageblock-skip if the whole pageblock was scanned */ + if (blockpfn == end_pfn) + update_pageblock_skip(cc, valid_page, total_isolated, false); +@@ -966,7 +964,6 @@ static void isolate_freepages(struct compact_control *cc) + block_end_pfn = block_start_pfn, + block_start_pfn -= pageblock_nr_pages, + isolate_start_pfn = block_start_pfn) { +- + /* + * This can iterate a massively long zone without finding any + * suitable migration targets, so periodically check if we need +@@ -990,32 +987,30 @@ static void isolate_freepages(struct compact_control *cc) + continue; + + /* Found a block suitable for isolating free pages from. 
*/ +- isolate_freepages_block(cc, &isolate_start_pfn, +- block_end_pfn, freelist, false); ++ isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, ++ freelist, false); + + /* +- * If we isolated enough freepages, or aborted due to async +- * compaction being contended, terminate the loop. +- * Remember where the free scanner should restart next time, +- * which is where isolate_freepages_block() left off. +- * But if it scanned the whole pageblock, isolate_start_pfn +- * now points at block_end_pfn, which is the start of the next +- * pageblock. +- * In that case we will however want to restart at the start +- * of the previous pageblock. ++ * If we isolated enough freepages, or aborted due to lock ++ * contention, terminate. + */ + if ((cc->nr_freepages >= cc->nr_migratepages) + || cc->contended) { +- if (isolate_start_pfn >= block_end_pfn) ++ if (isolate_start_pfn >= block_end_pfn) { ++ /* ++ * Restart at previous pageblock if more ++ * freepages can be isolated next time. ++ */ + isolate_start_pfn = + block_start_pfn - pageblock_nr_pages; ++ } + break; +- } else { ++ } else if (isolate_start_pfn < block_end_pfn) { + /* +- * isolate_freepages_block() should not terminate +- * prematurely unless contended, or isolated enough ++ * If isolation failed early, do not continue ++ * needlessly. 
+ */ +- VM_BUG_ON(isolate_start_pfn < block_end_pfn); ++ break; + } + } + +diff --git a/mm/internal.h b/mm/internal.h +index 38e24b89e4c4..6979b2bd3227 100644 +--- a/mm/internal.h ++++ b/mm/internal.h +@@ -22,7 +22,8 @@ + */ + #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ + __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ +- __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) ++ __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\ ++ __GFP_ATOMIC) + + /* The GFP flags allowed during early boot */ + #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS)) +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 99c1738684ec..2bcdfbf8c36d 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -275,7 +275,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat) + /* Returns true if the struct page for the pfn is uninitialised */ + static inline bool __meminit early_page_uninitialised(unsigned long pfn) + { +- if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn) ++ int nid = early_pfn_to_nid(pfn); ++ ++ if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) + return true; + + return false; +@@ -1057,7 +1059,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn) + spin_lock(&early_pfn_lock); + nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); + if (nid < 0) +- nid = 0; ++ nid = first_online_node; + spin_unlock(&early_pfn_lock); + + return nid; +diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c +index 7d8f581d9f1f..ddc3573894b0 100644 +--- a/net/ceph/osdmap.c ++++ b/net/ceph/osdmap.c +@@ -1192,6 +1192,115 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end) + } + + /* ++ * Encoding order is (new_up_client, new_state, new_weight). Need to ++ * apply in the (new_weight, new_state, new_up_client) order, because ++ * an incremental map may look like e.g. ++ * ++ * new_up_client: { osd=6, addr=... 
} # set osd_state and addr ++ * new_state: { osd=6, xorstate=EXISTS } # clear osd_state ++ */ ++static int decode_new_up_state_weight(void **p, void *end, ++ struct ceph_osdmap *map) ++{ ++ void *new_up_client; ++ void *new_state; ++ void *new_weight_end; ++ u32 len; ++ ++ new_up_client = *p; ++ ceph_decode_32_safe(p, end, len, e_inval); ++ len *= sizeof(u32) + sizeof(struct ceph_entity_addr); ++ ceph_decode_need(p, end, len, e_inval); ++ *p += len; ++ ++ new_state = *p; ++ ceph_decode_32_safe(p, end, len, e_inval); ++ len *= sizeof(u32) + sizeof(u8); ++ ceph_decode_need(p, end, len, e_inval); ++ *p += len; ++ ++ /* new_weight */ ++ ceph_decode_32_safe(p, end, len, e_inval); ++ while (len--) { ++ s32 osd; ++ u32 w; ++ ++ ceph_decode_need(p, end, 2*sizeof(u32), e_inval); ++ osd = ceph_decode_32(p); ++ w = ceph_decode_32(p); ++ BUG_ON(osd >= map->max_osd); ++ pr_info("osd%d weight 0x%x %s\n", osd, w, ++ w == CEPH_OSD_IN ? "(in)" : ++ (w == CEPH_OSD_OUT ? "(out)" : "")); ++ map->osd_weight[osd] = w; ++ ++ /* ++ * If we are marking in, set the EXISTS, and clear the ++ * AUTOOUT and NEW bits. 
++ */ ++ if (w) { ++ map->osd_state[osd] |= CEPH_OSD_EXISTS; ++ map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT | ++ CEPH_OSD_NEW); ++ } ++ } ++ new_weight_end = *p; ++ ++ /* new_state (up/down) */ ++ *p = new_state; ++ len = ceph_decode_32(p); ++ while (len--) { ++ s32 osd; ++ u8 xorstate; ++ int ret; ++ ++ osd = ceph_decode_32(p); ++ xorstate = ceph_decode_8(p); ++ if (xorstate == 0) ++ xorstate = CEPH_OSD_UP; ++ BUG_ON(osd >= map->max_osd); ++ if ((map->osd_state[osd] & CEPH_OSD_UP) && ++ (xorstate & CEPH_OSD_UP)) ++ pr_info("osd%d down\n", osd); ++ if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && ++ (xorstate & CEPH_OSD_EXISTS)) { ++ pr_info("osd%d does not exist\n", osd); ++ map->osd_weight[osd] = CEPH_OSD_IN; ++ ret = set_primary_affinity(map, osd, ++ CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); ++ if (ret) ++ return ret; ++ memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr)); ++ map->osd_state[osd] = 0; ++ } else { ++ map->osd_state[osd] ^= xorstate; ++ } ++ } ++ ++ /* new_up_client */ ++ *p = new_up_client; ++ len = ceph_decode_32(p); ++ while (len--) { ++ s32 osd; ++ struct ceph_entity_addr addr; ++ ++ osd = ceph_decode_32(p); ++ ceph_decode_copy(p, &addr, sizeof(addr)); ++ ceph_decode_addr(&addr); ++ BUG_ON(osd >= map->max_osd); ++ pr_info("osd%d up\n", osd); ++ map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP; ++ map->osd_addr[osd] = addr; ++ } ++ ++ *p = new_weight_end; ++ return 0; ++ ++e_inval: ++ return -EINVAL; ++} ++ ++/* + * decode and apply an incremental map update. 
+ */ + struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, +@@ -1290,49 +1399,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, + __remove_pg_pool(&map->pg_pools, pi); + } + +- /* new_up */ +- ceph_decode_32_safe(p, end, len, e_inval); +- while (len--) { +- u32 osd; +- struct ceph_entity_addr addr; +- ceph_decode_32_safe(p, end, osd, e_inval); +- ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval); +- ceph_decode_addr(&addr); +- pr_info("osd%d up\n", osd); +- BUG_ON(osd >= map->max_osd); +- map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS; +- map->osd_addr[osd] = addr; +- } +- +- /* new_state */ +- ceph_decode_32_safe(p, end, len, e_inval); +- while (len--) { +- u32 osd; +- u8 xorstate; +- ceph_decode_32_safe(p, end, osd, e_inval); +- xorstate = **(u8 **)p; +- (*p)++; /* clean flag */ +- if (xorstate == 0) +- xorstate = CEPH_OSD_UP; +- if (xorstate & CEPH_OSD_UP) +- pr_info("osd%d down\n", osd); +- if (osd < map->max_osd) +- map->osd_state[osd] ^= xorstate; +- } +- +- /* new_weight */ +- ceph_decode_32_safe(p, end, len, e_inval); +- while (len--) { +- u32 osd, off; +- ceph_decode_need(p, end, sizeof(u32)*2, e_inval); +- osd = ceph_decode_32(p); +- off = ceph_decode_32(p); +- pr_info("osd%d weight 0x%x %s\n", osd, off, +- off == CEPH_OSD_IN ? "(in)" : +- (off == CEPH_OSD_OUT ? 
"(out)" : "")); +- if (osd < map->max_osd) +- map->osd_weight[osd] = off; +- } ++ /* new_up_client, new_state, new_weight */ ++ err = decode_new_up_state_weight(p, end, map); ++ if (err) ++ goto bad; + + /* new_pg_temp */ + err = decode_new_pg_temp(p, end, map); +diff --git a/net/rds/tcp.c b/net/rds/tcp.c +index 9d6ddbacd875..18e50a8fc05f 100644 +--- a/net/rds/tcp.c ++++ b/net/rds/tcp.c +@@ -421,7 +421,7 @@ static int rds_tcp_init(void) + + ret = rds_tcp_recv_init(); + if (ret) +- goto out_slab; ++ goto out_pernet; + + ret = rds_trans_register(&rds_tcp_transport); + if (ret) +@@ -433,8 +433,9 @@ static int rds_tcp_init(void) + + out_recv: + rds_tcp_recv_exit(); +-out_slab: ++out_pernet: + unregister_pernet_subsys(&rds_tcp_net_ops); ++out_slab: + kmem_cache_destroy(rds_tcp_conn_slab); + out: + return ret; +diff --git a/sound/core/timer.c b/sound/core/timer.c +index 7c6155f5865b..637d034bb084 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -1247,6 +1247,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri, + tu->tstamp = *tstamp; + if ((tu->filter & (1 << event)) == 0 || !tu->tread) + return; ++ memset(&r1, 0, sizeof(r1)); + r1.event = event; + r1.tstamp = *tstamp; + r1.val = resolution; +@@ -1281,6 +1282,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri, + } + if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) && + tu->last_resolution != resolution) { ++ memset(&r1, 0, sizeof(r1)); + r1.event = SNDRV_TIMER_EVENT_RESOLUTION; + r1.tstamp = tstamp; + r1.val = resolution; +@@ -1746,6 +1748,7 @@ static int snd_timer_user_params(struct file *file, + if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) { + if (tu->tread) { + struct snd_timer_tread tread; ++ memset(&tread, 0, sizeof(tread)); + tread.event = SNDRV_TIMER_EVENT_EARLY; + tread.tstamp.tv_sec = 0; + tread.tstamp.tv_nsec = 0; diff --git a/patch/kernel/marvell-dev/patch-4.4.17-18.patch b/patch/kernel/marvell-dev/patch-4.4.17-18.patch new file 
mode 100644 index 000000000..4306189dc --- /dev/null +++ b/patch/kernel/marvell-dev/patch-4.4.17-18.patch @@ -0,0 +1,2072 @@ +diff --git a/Documentation/x86/pat.txt b/Documentation/x86/pat.txt +index 54944c71b819..2a4ee6302122 100644 +--- a/Documentation/x86/pat.txt ++++ b/Documentation/x86/pat.txt +@@ -196,3 +196,35 @@ Another, more verbose way of getting PAT related debug messages is with + "debugpat" boot parameter. With this parameter, various debug messages are + printed to dmesg log. + ++PAT Initialization ++------------------ ++ ++The following table describes how PAT is initialized under various ++configurations. The PAT MSR must be updated by Linux in order to support WC ++and WT attributes. Otherwise, the PAT MSR has the value programmed in it ++by the firmware. Note, Xen enables WC attribute in the PAT MSR for guests. ++ ++ MTRR PAT Call Sequence PAT State PAT MSR ++ ========================================================= ++ E E MTRR -> PAT init Enabled OS ++ E D MTRR -> PAT init Disabled - ++ D E MTRR -> PAT disable Disabled BIOS ++ D D MTRR -> PAT disable Disabled - ++ - np/E PAT -> PAT disable Disabled BIOS ++ - np/D PAT -> PAT disable Disabled - ++ E !P/E MTRR -> PAT init Disabled BIOS ++ D !P/E MTRR -> PAT disable Disabled BIOS ++ !M !P/E MTRR stub -> PAT disable Disabled BIOS ++ ++ Legend ++ ------------------------------------------------ ++ E Feature enabled in CPU ++ D Feature disabled/unsupported in CPU ++ np "nopat" boot option specified ++ !P CONFIG_X86_PAT option unset ++ !M CONFIG_MTRR option unset ++ Enabled PAT state set to enabled ++ Disabled PAT state set to disabled ++ OS PAT initializes PAT MSR with OS setting ++ BIOS PAT keeps PAT MSR with BIOS setting ++ +diff --git a/Makefile b/Makefile +index 76d34f763a41..eaedea88a8a7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 17 ++SUBLEVEL = 18 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/kernel/sys_oabi-compat.c 
b/arch/arm/kernel/sys_oabi-compat.c +index 087acb569b63..5f221acd21ae 100644 +--- a/arch/arm/kernel/sys_oabi-compat.c ++++ b/arch/arm/kernel/sys_oabi-compat.c +@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, + mm_segment_t fs; + long ret, err, i; + +- if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event))) ++ if (maxevents <= 0 || ++ maxevents > (INT_MAX/sizeof(*kbuf)) || ++ maxevents > (INT_MAX/sizeof(*events))) + return -EINVAL; ++ if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents)) ++ return -EFAULT; + kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; +@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid, + + if (nsops < 1 || nsops > SEMOPM) + return -EINVAL; ++ if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops)) ++ return -EFAULT; + sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); + if (!sops) + return -ENOMEM; +diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S +index 5a69eb48d0a8..ee93d5fe61d7 100644 +--- a/arch/mips/kernel/scall64-n32.S ++++ b/arch/mips/kernel/scall64-n32.S +@@ -344,7 +344,7 @@ EXPORT(sysn32_call_table) + PTR sys_ni_syscall /* available, was setaltroot */ + PTR sys_add_key + PTR sys_request_key +- PTR sys_keyctl /* 6245 */ ++ PTR compat_sys_keyctl /* 6245 */ + PTR sys_set_thread_area + PTR sys_inotify_init + PTR sys_inotify_add_watch +diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S +index e4b6d7c97822..b77052ec6fb2 100644 +--- a/arch/mips/kernel/scall64-o32.S ++++ b/arch/mips/kernel/scall64-o32.S +@@ -500,7 +500,7 @@ EXPORT(sys32_call_table) + PTR sys_ni_syscall /* available, was setaltroot */ + PTR sys_add_key /* 4280 */ + PTR sys_request_key +- PTR sys_keyctl ++ PTR compat_sys_keyctl + PTR sys_set_thread_area + PTR sys_inotify_init + PTR sys_inotify_add_watch /* 4285 */ +diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c +index b1f0a90f933b..42570d8fb265 100644 +--- 
a/arch/s390/kernel/ipl.c ++++ b/arch/s390/kernel/ipl.c +@@ -2070,13 +2070,6 @@ void s390_reset_system(void (*fn_pre)(void), + S390_lowcore.program_new_psw.addr = + PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; + +- /* +- * Clear subchannel ID and number to signal new kernel that no CCW or +- * SCSI IPL has been done (for kexec and kdump) +- */ +- S390_lowcore.subchannel_id = 0; +- S390_lowcore.subchannel_nr = 0; +- + /* Store status at absolute zero */ + store_status(); + +diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl +index f17705e1332c..e62f4401e792 100644 +--- a/arch/x86/entry/syscalls/syscall_32.tbl ++++ b/arch/x86/entry/syscalls/syscall_32.tbl +@@ -294,7 +294,7 @@ + # 285 sys_setaltroot + 286 i386 add_key sys_add_key + 287 i386 request_key sys_request_key +-288 i386 keyctl sys_keyctl ++288 i386 keyctl sys_keyctl compat_sys_keyctl + 289 i386 ioprio_set sys_ioprio_set + 290 i386 ioprio_get sys_ioprio_get + 291 i386 inotify_init sys_inotify_init +diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h +index b94f6f64e23d..dbff1456d215 100644 +--- a/arch/x86/include/asm/mtrr.h ++++ b/arch/x86/include/asm/mtrr.h +@@ -24,6 +24,7 @@ + #define _ASM_X86_MTRR_H + + #include ++#include + + + /* +@@ -83,9 +84,12 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn) + static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) + { + } ++static inline void mtrr_bp_init(void) ++{ ++ pat_disable("MTRRs disabled, skipping PAT initialization too."); ++} + + #define mtrr_ap_init() do {} while (0) +-#define mtrr_bp_init() do {} while (0) + #define set_mtrr_aps_delayed_init() do {} while (0) + #define mtrr_aps_init() do {} while (0) + #define mtrr_bp_restore() do {} while (0) +diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h +index ca6c228d5e62..0b1ff4c1c14e 100644 +--- a/arch/x86/include/asm/pat.h ++++ b/arch/x86/include/asm/pat.h +@@ -5,8 +5,8 @@ + #include + + 
bool pat_enabled(void); ++void pat_disable(const char *reason); + extern void pat_init(void); +-void pat_init_cache_modes(u64); + + extern int reserve_memtype(u64 start, u64 end, + enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm); +diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c +index 3b533cf37c74..b5624fafa44a 100644 +--- a/arch/x86/kernel/cpu/mtrr/generic.c ++++ b/arch/x86/kernel/cpu/mtrr/generic.c +@@ -444,11 +444,24 @@ static void __init print_mtrr_state(void) + pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20); + } + ++/* PAT setup for BP. We need to go through sync steps here */ ++void __init mtrr_bp_pat_init(void) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ prepare_set(); ++ ++ pat_init(); ++ ++ post_set(); ++ local_irq_restore(flags); ++} ++ + /* Grab all of the MTRR state for this CPU into *state */ + bool __init get_mtrr_state(void) + { + struct mtrr_var_range *vrs; +- unsigned long flags; + unsigned lo, dummy; + unsigned int i; + +@@ -481,15 +494,6 @@ bool __init get_mtrr_state(void) + + mtrr_state_set = 1; + +- /* PAT setup for BP. 
We need to go through sync steps here */ +- local_irq_save(flags); +- prepare_set(); +- +- pat_init(); +- +- post_set(); +- local_irq_restore(flags); +- + return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED); + } + +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c +index f891b4750f04..fa77ac8291f0 100644 +--- a/arch/x86/kernel/cpu/mtrr/main.c ++++ b/arch/x86/kernel/cpu/mtrr/main.c +@@ -752,6 +752,9 @@ void __init mtrr_bp_init(void) + /* BIOS may override */ + __mtrr_enabled = get_mtrr_state(); + ++ if (mtrr_enabled()) ++ mtrr_bp_pat_init(); ++ + if (mtrr_cleanup(phys_addr)) { + changed_by_mtrr_cleanup = 1; + mtrr_if->set_all(); +@@ -759,8 +762,16 @@ void __init mtrr_bp_init(void) + } + } + +- if (!mtrr_enabled()) ++ if (!mtrr_enabled()) { + pr_info("MTRR: Disabled\n"); ++ ++ /* ++ * PAT initialization relies on MTRR's rendezvous handler. ++ * Skip PAT init until the handler can initialize both ++ * features independently. ++ */ ++ pat_disable("MTRRs disabled, skipping PAT initialization too."); ++ } + } + + void mtrr_ap_init(void) +diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h +index 951884dcc433..6c7ced07d16d 100644 +--- a/arch/x86/kernel/cpu/mtrr/mtrr.h ++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h +@@ -52,6 +52,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt); + void fill_mtrr_var_range(unsigned int index, + u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); + bool get_mtrr_state(void); ++void mtrr_bp_pat_init(void); + + extern void set_mtrr_ops(const struct mtrr_ops *ops); + +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 844b06d67df4..307f60ecfc6d 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -94,18 +94,6 @@ static unsigned long mmap_base(unsigned long rnd) + } + + /* +- * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 +- * does, but not when emulating X86_32 +- */ +-static unsigned long mmap_legacy_base(unsigned long rnd) +-{ +- 
if (mmap_is_ia32()) +- return TASK_UNMAPPED_BASE; +- else +- return TASK_UNMAPPED_BASE + rnd; +-} +- +-/* + * This function, called very early during the creation of a new + * process VM image, sets up which VM layout function to use: + */ +@@ -116,7 +104,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + if (current->flags & PF_RANDOMIZE) + random_factor = arch_mmap_rnd(); + +- mm->mmap_legacy_base = mmap_legacy_base(random_factor); ++ mm->mmap_legacy_base = TASK_UNMAPPED_BASE + random_factor; + + if (mmap_is_legacy()) { + mm->mmap_base = mm->mmap_legacy_base; +diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c +index 188e3e07eeeb..6ad687d104ca 100644 +--- a/arch/x86/mm/pat.c ++++ b/arch/x86/mm/pat.c +@@ -39,11 +39,22 @@ + static bool boot_cpu_done; + + static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT); ++static void init_cache_modes(void); + +-static inline void pat_disable(const char *reason) ++void pat_disable(const char *reason) + { ++ if (!__pat_enabled) ++ return; ++ ++ if (boot_cpu_done) { ++ WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n"); ++ return; ++ } ++ + __pat_enabled = 0; + pr_info("x86/PAT: %s\n", reason); ++ ++ init_cache_modes(); + } + + static int __init nopat(char *str) +@@ -180,7 +191,7 @@ static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg) + * configuration. + * Using lower indices is preferred, so we start with highest index. 
+ */ +-void pat_init_cache_modes(u64 pat) ++static void __init_cache_modes(u64 pat) + { + enum page_cache_mode cache; + char pat_msg[33]; +@@ -201,14 +212,11 @@ static void pat_bsp_init(u64 pat) + { + u64 tmp_pat; + +- if (!cpu_has_pat) { ++ if (!boot_cpu_has(X86_FEATURE_PAT)) { + pat_disable("PAT not supported by CPU."); + return; + } + +- if (!pat_enabled()) +- goto done; +- + rdmsrl(MSR_IA32_CR_PAT, tmp_pat); + if (!tmp_pat) { + pat_disable("PAT MSR is 0, disabled."); +@@ -217,16 +225,12 @@ static void pat_bsp_init(u64 pat) + + wrmsrl(MSR_IA32_CR_PAT, pat); + +-done: +- pat_init_cache_modes(pat); ++ __init_cache_modes(pat); + } + + static void pat_ap_init(u64 pat) + { +- if (!pat_enabled()) +- return; +- +- if (!cpu_has_pat) { ++ if (!boot_cpu_has(X86_FEATURE_PAT)) { + /* + * If this happens we are on a secondary CPU, but switched to + * PAT on the boot CPU. We have no way to undo PAT. +@@ -237,18 +241,32 @@ static void pat_ap_init(u64 pat) + wrmsrl(MSR_IA32_CR_PAT, pat); + } + +-void pat_init(void) ++static void init_cache_modes(void) + { +- u64 pat; +- struct cpuinfo_x86 *c = &boot_cpu_data; ++ u64 pat = 0; ++ static int init_cm_done; + +- if (!pat_enabled()) { ++ if (init_cm_done) ++ return; ++ ++ if (boot_cpu_has(X86_FEATURE_PAT)) { ++ /* ++ * CPU supports PAT. Set PAT table to be consistent with ++ * PAT MSR. This case supports "nopat" boot option, and ++ * virtual machine environments which support PAT without ++ * MTRRs. In specific, Xen has unique setup to PAT MSR. ++ * ++ * If PAT MSR returns 0, it is considered invalid and emulates ++ * as No PAT. ++ */ ++ rdmsrl(MSR_IA32_CR_PAT, pat); ++ } ++ ++ if (!pat) { + /* + * No PAT. Emulate the PAT table that corresponds to the two +- * cache bits, PWT (Write Through) and PCD (Cache Disable). This +- * setup is the same as the BIOS default setup when the system +- * has PAT but the "nopat" boot option has been specified. This +- * emulated PAT table is used when MSR_IA32_CR_PAT returns 0. 
++ * cache bits, PWT (Write Through) and PCD (Cache Disable). ++ * This setup is also the same as the BIOS default setup. + * + * PTE encoding: + * +@@ -265,10 +283,36 @@ void pat_init(void) + */ + pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) | + PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC); ++ } ++ ++ __init_cache_modes(pat); ++ ++ init_cm_done = 1; ++} ++ ++/** ++ * pat_init - Initialize PAT MSR and PAT table ++ * ++ * This function initializes PAT MSR and PAT table with an OS-defined value ++ * to enable additional cache attributes, WC and WT. ++ * ++ * This function must be called on all CPUs using the specific sequence of ++ * operations defined in Intel SDM. mtrr_rendezvous_handler() provides this ++ * procedure for PAT. ++ */ ++void pat_init(void) ++{ ++ u64 pat; ++ struct cpuinfo_x86 *c = &boot_cpu_data; ++ ++ if (!pat_enabled()) { ++ init_cache_modes(); ++ return; ++ } + +- } else if ((c->x86_vendor == X86_VENDOR_INTEL) && +- (((c->x86 == 0x6) && (c->x86_model <= 0xd)) || +- ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) { ++ if ((c->x86_vendor == X86_VENDOR_INTEL) && ++ (((c->x86 == 0x6) && (c->x86_model <= 0xd)) || ++ ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) { + /* + * PAT support with the lower four entries. Intel Pentium 2, + * 3, M, and 4 are affected by PAT errata, which makes the +@@ -733,25 +777,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, + if (file->f_flags & O_DSYNC) + pcm = _PAGE_CACHE_MODE_UC_MINUS; + +-#ifdef CONFIG_X86_32 +- /* +- * On the PPro and successors, the MTRRs are used to set +- * memory types for physical addresses outside main memory, +- * so blindly setting UC or PWT on those pages is wrong. +- * For Pentiums and earlier, the surround logic should disable +- * caching for the high addresses through the KEN pin, but +- * we maintain the tradition of paranoia in this code. 
+- */ +- if (!pat_enabled() && +- !(boot_cpu_has(X86_FEATURE_MTRR) || +- boot_cpu_has(X86_FEATURE_K6_MTRR) || +- boot_cpu_has(X86_FEATURE_CYRIX_ARR) || +- boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) && +- (pfn << PAGE_SHIFT) >= __pa(high_memory)) { +- pcm = _PAGE_CACHE_MODE_UC; +- } +-#endif +- + *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | + cachemode2protval(pcm)); + return 1; +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index beab8c706ac9..ffa41591bff9 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -74,7 +74,6 @@ + #include + #include + #include +-#include + #include + + #ifdef CONFIG_ACPI +@@ -1519,7 +1518,6 @@ asmlinkage __visible void __init xen_start_kernel(void) + { + struct physdev_set_iopl set_iopl; + unsigned long initrd_start = 0; +- u64 pat; + int rc; + + if (!xen_start_info) +@@ -1627,13 +1625,6 @@ asmlinkage __visible void __init xen_start_kernel(void) + xen_start_info->nr_pages); + xen_reserve_special_pages(); + +- /* +- * Modify the cache mode translation tables to match Xen's PAT +- * configuration. 
+- */ +- rdmsrl(MSR_IA32_CR_PAT, pat); +- pat_init_cache_modes(pat); +- + /* keep using Xen gdt for now; no urgent need to change it */ + + #ifdef CONFIG_X86_32 +diff --git a/block/genhd.c b/block/genhd.c +index e5cafa51567c..d2a1d43bf9fa 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -831,6 +831,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v) + if (iter) { + class_dev_iter_exit(iter); + kfree(iter); ++ seqf->private = NULL; + } + } + +diff --git a/crypto/gcm.c b/crypto/gcm.c +index bec329b3de8d..d9ea5f9c0574 100644 +--- a/crypto/gcm.c ++++ b/crypto/gcm.c +@@ -639,7 +639,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, + + ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, + CRYPTO_ALG_TYPE_HASH, +- CRYPTO_ALG_TYPE_AHASH_MASK); ++ CRYPTO_ALG_TYPE_AHASH_MASK | ++ crypto_requires_sync(algt->type, ++ algt->mask)); + if (IS_ERR(ghash_alg)) + return PTR_ERR(ghash_alg); + +diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c +index ea5815c5e128..bc769c448d4a 100644 +--- a/crypto/scatterwalk.c ++++ b/crypto/scatterwalk.c +@@ -72,7 +72,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, + + void scatterwalk_done(struct scatter_walk *walk, int out, int more) + { +- if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more) ++ if (!more || walk->offset >= walk->sg->offset + walk->sg->length || ++ !(walk->offset & (PAGE_SIZE - 1))) + scatterwalk_pagedone(walk, out, more); + } + EXPORT_SYMBOL_GPL(scatterwalk_done); +diff --git a/drivers/char/random.c b/drivers/char/random.c +index d0da5d852d41..0227b0465b40 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -722,15 +722,18 @@ retry: + } + } + +-static void credit_entropy_bits_safe(struct entropy_store *r, int nbits) ++static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) + { + const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1)); + ++ if (nbits < 0) ++ return -EINVAL; ++ + /* Cap the value to avoid overflows 
*/ + nbits = min(nbits, nbits_max); +- nbits = max(nbits, -nbits_max); + + credit_entropy_bits(r, nbits); ++ return 0; + } + + /********************************************************************* +@@ -1542,8 +1545,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) + return -EPERM; + if (get_user(ent_count, p)) + return -EFAULT; +- credit_entropy_bits_safe(&input_pool, ent_count); +- return 0; ++ return credit_entropy_bits_safe(&input_pool, ent_count); + case RNDADDENTROPY: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; +@@ -1557,8 +1559,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) + size); + if (retval < 0) + return retval; +- credit_entropy_bits_safe(&input_pool, ent_count); +- return 0; ++ return credit_entropy_bits_safe(&input_pool, ent_count); + case RNDZAPENTCNT: + case RNDCLEARPOOL: + /* +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index 62284e45d531..eb434881ddbc 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -1789,16 +1789,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate, + uint32_t mem_value) + { +- int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; ++ /* ++ * We treat the cursor plane as always-on for the purposes of watermark ++ * calculation. Until we have two-stage watermark programming merged, ++ * this is necessary to avoid flickering. ++ */ ++ int cpp = 4; ++ int width = pstate->visible ? pstate->base.crtc_w : 64; + +- if (!cstate->base.active || !pstate->visible) ++ if (!cstate->base.active) + return 0; + + return ilk_wm_method2(ilk_pipe_pixel_rate(cstate), + cstate->base.adjusted_mode.crtc_htotal, +- drm_rect_width(&pstate->dst), +- bpp, +- mem_value); ++ width, cpp, mem_value); + } + + /* Only for WM_LP. 
*/ +diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c +index 774cd2210566..21febbb0d84e 100644 +--- a/drivers/hid/hid-sony.c ++++ b/drivers/hid/hid-sony.c +@@ -1418,8 +1418,10 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev) + } + + ret = hid_hw_output_report(hdev, buf, 1); +- if (ret < 0) +- hid_err(hdev, "can't set operational mode: step 3\n"); ++ if (ret < 0) { ++ hid_info(hdev, "can't set operational mode: step 3, ignoring\n"); ++ ret = 0; ++ } + + out: + kfree(buf); +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c +index 27fa0cb09538..85f39cc3e276 100644 +--- a/drivers/i2c/busses/i2c-i801.c ++++ b/drivers/i2c/busses/i2c-i801.c +@@ -244,6 +244,13 @@ struct i801_priv { + struct platform_device *mux_pdev; + #endif + struct platform_device *tco_pdev; ++ ++ /* ++ * If set to true the host controller registers are reserved for ++ * ACPI AML use. Protected by acpi_lock. ++ */ ++ bool acpi_reserved; ++ struct mutex acpi_lock; + }; + + #define FEATURE_SMBUS_PEC (1 << 0) +@@ -714,9 +721,15 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, + { + int hwpec; + int block = 0; +- int ret, xact = 0; ++ int ret = 0, xact = 0; + struct i801_priv *priv = i2c_get_adapdata(adap); + ++ mutex_lock(&priv->acpi_lock); ++ if (priv->acpi_reserved) { ++ mutex_unlock(&priv->acpi_lock); ++ return -EBUSY; ++ } ++ + hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) + && size != I2C_SMBUS_QUICK + && size != I2C_SMBUS_I2C_BLOCK_DATA; +@@ -773,7 +786,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, + default: + dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n", + size); +- return -EOPNOTSUPP; ++ ret = -EOPNOTSUPP; ++ goto out; + } + + if (hwpec) /* enable/disable hardware PEC */ +@@ -796,11 +810,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, + ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv)); + + if (block) +- return ret; ++ goto out; + if (ret) +- return ret; 
++ goto out; + if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK)) +- return 0; ++ goto out; + + switch (xact & 0x7f) { + case I801_BYTE: /* Result put in SMBHSTDAT0 */ +@@ -812,7 +826,10 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, + (inb_p(SMBHSTDAT1(priv)) << 8); + break; + } +- return 0; ++ ++out: ++ mutex_unlock(&priv->acpi_lock); ++ return ret; + } + + +@@ -1249,6 +1266,72 @@ static void i801_add_tco(struct i801_priv *priv) + priv->tco_pdev = pdev; + } + ++#ifdef CONFIG_ACPI ++static acpi_status ++i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, ++ u64 *value, void *handler_context, void *region_context) ++{ ++ struct i801_priv *priv = handler_context; ++ struct pci_dev *pdev = priv->pci_dev; ++ acpi_status status; ++ ++ /* ++ * Once BIOS AML code touches the OpRegion we warn and inhibit any ++ * further access from the driver itself. This device is now owned ++ * by the system firmware. ++ */ ++ mutex_lock(&priv->acpi_lock); ++ ++ if (!priv->acpi_reserved) { ++ priv->acpi_reserved = true; ++ ++ dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); ++ dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n"); ++ } ++ ++ if ((function & ACPI_IO_MASK) == ACPI_READ) ++ status = acpi_os_read_port(address, (u32 *)value, bits); ++ else ++ status = acpi_os_write_port(address, (u32)*value, bits); ++ ++ mutex_unlock(&priv->acpi_lock); ++ ++ return status; ++} ++ ++static int i801_acpi_probe(struct i801_priv *priv) ++{ ++ struct acpi_device *adev; ++ acpi_status status; ++ ++ adev = ACPI_COMPANION(&priv->pci_dev->dev); ++ if (adev) { ++ status = acpi_install_address_space_handler(adev->handle, ++ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler, ++ NULL, priv); ++ if (ACPI_SUCCESS(status)) ++ return 0; ++ } ++ ++ return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]); ++} ++ ++static void i801_acpi_remove(struct i801_priv *priv) ++{ ++ struct acpi_device *adev; ++ ++ adev = 
ACPI_COMPANION(&priv->pci_dev->dev); ++ if (!adev) ++ return; ++ ++ acpi_remove_address_space_handler(adev->handle, ++ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler); ++} ++#else ++static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; } ++static inline void i801_acpi_remove(struct i801_priv *priv) { } ++#endif ++ + static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) + { + unsigned char temp; +@@ -1266,6 +1349,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) + priv->adapter.dev.parent = &dev->dev; + ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); + priv->adapter.retries = 3; ++ mutex_init(&priv->acpi_lock); + + priv->pci_dev = dev; + switch (dev->device) { +@@ -1328,10 +1412,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) + return -ENODEV; + } + +- err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); +- if (err) { ++ if (i801_acpi_probe(priv)) + return -ENODEV; +- } + + err = pcim_iomap_regions(dev, 1 << SMBBAR, + dev_driver_string(&dev->dev)); +@@ -1340,6 +1422,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) + "Failed to request SMBus region 0x%lx-0x%Lx\n", + priv->smba, + (unsigned long long)pci_resource_end(dev, SMBBAR)); ++ i801_acpi_remove(priv); + return err; + } + +@@ -1404,6 +1487,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) + err = i2c_add_adapter(&priv->adapter); + if (err) { + dev_err(&dev->dev, "Failed to add SMBus adapter\n"); ++ i801_acpi_remove(priv); + return err; + } + +@@ -1422,6 +1506,7 @@ static void i801_remove(struct pci_dev *dev) + + i801_del_mux(priv); + i2c_del_adapter(&priv->adapter); ++ i801_acpi_remove(priv); + pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); + + platform_device_unregister(priv->tco_pdev); +diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c +index db760e84119f..b8df0f5e8c25 100644 
+--- a/drivers/net/bonding/bond_netlink.c ++++ b/drivers/net/bonding/bond_netlink.c +@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev, + if (err < 0) + return err; + +- return register_netdevice(bond_dev); ++ err = register_netdevice(bond_dev); ++ ++ netif_carrier_off(bond_dev); ++ ++ return err; + } + + static size_t bond_get_size(const struct net_device *bond_dev) +diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c +index 28f7610b03fe..c32f5d32f811 100644 +--- a/drivers/net/ethernet/broadcom/bgmac.c ++++ b/drivers/net/ethernet/broadcom/bgmac.c +@@ -219,7 +219,7 @@ err_dma: + dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb), + DMA_TO_DEVICE); + +- while (i > 0) { ++ while (i-- > 0) { + int index = (ring->end + i) % BGMAC_TX_RING_SLOTS; + struct bgmac_slot_info *slot = &ring->slots[index]; + u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1); +diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c +index 3dd548ab8df1..40365cb1abe6 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c +@@ -794,13 +794,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, + * in a bitmap and increasing the chain consumer only + * for the first successive completed entries. 
+ */ +- bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE); ++ __set_bit(pos, p_spq->p_comp_bitmap); + + while (test_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap)) { +- bitmap_clear(p_spq->p_comp_bitmap, +- p_spq->comp_bitmap_idx, +- SPQ_RING_SIZE); ++ __clear_bit(p_spq->comp_bitmap_idx, ++ p_spq->p_comp_bitmap); + p_spq->comp_bitmap_idx++; + qed_chain_return_produced(&p_spq->chain); + } +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c +index a790d5f90b83..e0e94b855bbe 100644 +--- a/drivers/net/usb/cdc_ncm.c ++++ b/drivers/net/usb/cdc_ncm.c +@@ -952,8 +952,6 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); + + static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) + { +- int ret; +- + /* MBIM backwards compatible function? */ + if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM) + return -ENODEV; +@@ -962,16 +960,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) + * Additionally, generic NCM devices are assumed to accept arbitrarily + * placed NDP. + */ +- ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0); +- +- /* +- * We should get an event when network connection is "connected" or +- * "disconnected". Set network connection in "disconnected" state +- * (carrier is OFF) during attach, so the IP network stack does not +- * start IPv6 negotiation and more. 
+- */ +- usbnet_link_change(dev, 0, 0); +- return ret; ++ return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0); + } + + static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max) +@@ -1554,7 +1543,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) + + static const struct driver_info cdc_ncm_info = { + .description = "CDC NCM", +- .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET, ++ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET ++ | FLAG_LINK_INTR, + .bind = cdc_ncm_bind, + .unbind = cdc_ncm_unbind, + .manage_power = usbnet_manage_power, +@@ -1567,7 +1557,7 @@ static const struct driver_info cdc_ncm_info = { + static const struct driver_info wwan_info = { + .description = "Mobile Broadband Network Device", + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET +- | FLAG_WWAN, ++ | FLAG_LINK_INTR | FLAG_WWAN, + .bind = cdc_ncm_bind, + .unbind = cdc_ncm_unbind, + .manage_power = usbnet_manage_power, +@@ -1580,7 +1570,7 @@ static const struct driver_info wwan_info = { + static const struct driver_info wwan_noarp_info = { + .description = "Mobile Broadband Network Device (NO ARP)", + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET +- | FLAG_WWAN | FLAG_NOARP, ++ | FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP, + .bind = cdc_ncm_bind, + .unbind = cdc_ncm_unbind, + .manage_power = usbnet_manage_power, +diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c +index 943c1cb9566c..d28e3ab9479c 100644 +--- a/drivers/pnp/quirks.c ++++ b/drivers/pnp/quirks.c +@@ -342,7 +342,9 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev) + /* Device IDs of parts that have 32KB MCH space */ + static const unsigned int mch_quirk_devices[] = { + 0x0154, /* Ivy Bridge */ ++ 0x0a04, /* Haswell-ULT */ + 0x0c00, /* Haswell */ ++ 0x1604, /* Broadwell */ + }; + + static struct pci_dev *get_intel_host(void) +diff --git a/drivers/scsi/scsi_sysfs.c 
b/drivers/scsi/scsi_sysfs.c +index f7ae898833dd..7232d43e2207 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -1058,11 +1058,12 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) + } + + error = scsi_dh_add_device(sdev); +- if (error) { ++ if (error) ++ /* ++ * device_handler is optional, so any error can be ignored ++ */ + sdev_printk(KERN_INFO, sdev, + "failed to add device handler: %d\n", error); +- return error; +- } + + device_enable_async_suspend(&sdev->sdev_dev); + error = device_add(&sdev->sdev_dev); +diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c +index 13c3cd11ab92..05d30f433b19 100644 +--- a/drivers/staging/rdma/ipath/ipath_file_ops.c ++++ b/drivers/staging/rdma/ipath/ipath_file_ops.c +@@ -45,6 +45,8 @@ + #include + #include + ++#include ++ + #include "ipath_kernel.h" + #include "ipath_common.h" + #include "ipath_user_sdma.h" +@@ -2243,6 +2245,9 @@ static ssize_t ipath_write(struct file *fp, const char __user *data, + ssize_t ret = 0; + void *dest; + ++ if (WARN_ON_ONCE(!ib_safe_file_access(fp))) ++ return -EACCES; ++ + if (count < sizeof(cmd.type)) { + ret = -EINVAL; + goto bail; +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 7865228f664f..807d80145686 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -679,14 +679,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) + /* this is called once with whichever end is closed last */ + static void pty_unix98_shutdown(struct tty_struct *tty) + { +- struct inode *ptmx_inode; ++ struct pts_fs_info *fsi; + + if (tty->driver->subtype == PTY_TYPE_MASTER) +- ptmx_inode = tty->driver_data; ++ fsi = tty->driver_data; + else +- ptmx_inode = tty->link->driver_data; +- devpts_kill_index(ptmx_inode, tty->index); +- devpts_del_ref(ptmx_inode); ++ fsi = tty->link->driver_data; ++ devpts_kill_index(fsi, tty->index); ++ devpts_put_ref(fsi); + } + + static const struct tty_operations 
ptm_unix98_ops = { +@@ -738,6 +738,7 @@ static const struct tty_operations pty_unix98_ops = { + + static int ptmx_open(struct inode *inode, struct file *filp) + { ++ struct pts_fs_info *fsi; + struct tty_struct *tty; + struct inode *slave_inode; + int retval; +@@ -752,47 +753,41 @@ static int ptmx_open(struct inode *inode, struct file *filp) + if (retval) + return retval; + ++ fsi = devpts_get_ref(inode, filp); ++ retval = -ENODEV; ++ if (!fsi) ++ goto out_free_file; ++ + /* find a device that is not in use. */ + mutex_lock(&devpts_mutex); +- index = devpts_new_index(inode); +- if (index < 0) { +- retval = index; +- mutex_unlock(&devpts_mutex); +- goto err_file; +- } +- ++ index = devpts_new_index(fsi); + mutex_unlock(&devpts_mutex); + +- mutex_lock(&tty_mutex); +- tty = tty_init_dev(ptm_driver, index); ++ retval = index; ++ if (index < 0) ++ goto out_put_ref; + +- if (IS_ERR(tty)) { +- retval = PTR_ERR(tty); +- goto out; +- } + ++ mutex_lock(&tty_mutex); ++ tty = tty_init_dev(ptm_driver, index); + /* The tty returned here is locked so we can safely + drop the mutex */ + mutex_unlock(&tty_mutex); + +- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ +- tty->driver_data = inode; ++ retval = PTR_ERR(tty); ++ if (IS_ERR(tty)) ++ goto out; + + /* +- * In the case where all references to ptmx inode are dropped and we +- * still have /dev/tty opened pointing to the master/slave pair (ptmx +- * is closed/released before /dev/tty), we must make sure that the inode +- * is still valid when we call the final pty_unix98_shutdown, thus we +- * hold an additional reference to the ptmx inode. For the same /dev/tty +- * last close case, we also need to make sure the super_block isn't +- * destroyed (devpts instance unmounted), before /dev/tty is closed and +- * on its release devpts_kill_index is called. 
++ * From here on out, the tty is "live", and the index and ++ * fsi will be killed/put by the tty_release() + */ +- devpts_add_ref(inode); ++ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ ++ tty->driver_data = fsi; + + tty_add_file(tty, filp); + +- slave_inode = devpts_pty_new(inode, ++ slave_inode = devpts_pty_new(fsi, + MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index, + tty->link); + if (IS_ERR(slave_inode)) { +@@ -811,12 +806,14 @@ static int ptmx_open(struct inode *inode, struct file *filp) + return 0; + err_release: + tty_unlock(tty); ++ // This will also put-ref the fsi + tty_release(inode, filp); + return retval; + out: +- mutex_unlock(&tty_mutex); +- devpts_kill_index(inode, index); +-err_file: ++ devpts_kill_index(fsi, index); ++out_put_ref: ++ devpts_put_ref(fsi); ++out_free_file: + tty_free_file(filp); + return retval; + } +diff --git a/fs/dcache.c b/fs/dcache.c +index 108d7d810be3..71b6056ad35d 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -578,7 +578,6 @@ static struct dentry *dentry_kill(struct dentry *dentry) + + failed: + spin_unlock(&dentry->d_lock); +- cpu_relax(); + return dentry; /* try again with same dentry */ + } + +@@ -752,6 +751,8 @@ void dput(struct dentry *dentry) + return; + + repeat: ++ might_sleep(); ++ + rcu_read_lock(); + if (likely(fast_dput(dentry))) { + rcu_read_unlock(); +@@ -783,8 +784,10 @@ repeat: + + kill_it: + dentry = dentry_kill(dentry); +- if (dentry) ++ if (dentry) { ++ cond_resched(); + goto repeat; ++ } + } + EXPORT_SYMBOL(dput); + +diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c +index 706de324f2a6..c82edb049117 100644 +--- a/fs/devpts/inode.c ++++ b/fs/devpts/inode.c +@@ -128,6 +128,7 @@ static const match_table_t tokens = { + struct pts_fs_info { + struct ida allocated_ptys; + struct pts_mount_opts mount_opts; ++ struct super_block *sb; + struct dentry *ptmx_dentry; + }; + +@@ -358,7 +359,7 @@ static const struct super_operations devpts_sops = { + .show_options = devpts_show_options, + }; + +-static 
void *new_pts_fs_info(void) ++static void *new_pts_fs_info(struct super_block *sb) + { + struct pts_fs_info *fsi; + +@@ -369,6 +370,7 @@ static void *new_pts_fs_info(void) + ida_init(&fsi->allocated_ptys); + fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; + fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; ++ fsi->sb = sb; + + return fsi; + } +@@ -384,7 +386,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent) + s->s_op = &devpts_sops; + s->s_time_gran = 1; + +- s->s_fs_info = new_pts_fs_info(); ++ s->s_fs_info = new_pts_fs_info(s); + if (!s->s_fs_info) + goto fail; + +@@ -524,17 +526,14 @@ static struct file_system_type devpts_fs_type = { + * to the System V naming convention + */ + +-int devpts_new_index(struct inode *ptmx_inode) ++int devpts_new_index(struct pts_fs_info *fsi) + { +- struct super_block *sb = pts_sb_from_inode(ptmx_inode); +- struct pts_fs_info *fsi; + int index; + int ida_ret; + +- if (!sb) ++ if (!fsi) + return -ENODEV; + +- fsi = DEVPTS_SB(sb); + retry: + if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) + return -ENOMEM; +@@ -564,11 +563,8 @@ retry: + return index; + } + +-void devpts_kill_index(struct inode *ptmx_inode, int idx) ++void devpts_kill_index(struct pts_fs_info *fsi, int idx) + { +- struct super_block *sb = pts_sb_from_inode(ptmx_inode); +- struct pts_fs_info *fsi = DEVPTS_SB(sb); +- + mutex_lock(&allocated_ptys_lock); + ida_remove(&fsi->allocated_ptys, idx); + pty_count--; +@@ -578,21 +574,25 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx) + /* + * pty code needs to hold extra references in case of last /dev/tty close + */ +- +-void devpts_add_ref(struct inode *ptmx_inode) ++struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file) + { +- struct super_block *sb = pts_sb_from_inode(ptmx_inode); ++ struct super_block *sb; ++ struct pts_fs_info *fsi; ++ ++ sb = pts_sb_from_inode(ptmx_inode); ++ if (!sb) ++ return NULL; ++ fsi = DEVPTS_SB(sb); ++ if (!fsi) ++ return NULL; + + 
atomic_inc(&sb->s_active); +- ihold(ptmx_inode); ++ return fsi; + } + +-void devpts_del_ref(struct inode *ptmx_inode) ++void devpts_put_ref(struct pts_fs_info *fsi) + { +- struct super_block *sb = pts_sb_from_inode(ptmx_inode); +- +- iput(ptmx_inode); +- deactivate_super(sb); ++ deactivate_super(fsi->sb); + } + + /** +@@ -604,22 +604,21 @@ void devpts_del_ref(struct inode *ptmx_inode) + * + * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill. + */ +-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, ++struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index, + void *priv) + { + struct dentry *dentry; +- struct super_block *sb = pts_sb_from_inode(ptmx_inode); ++ struct super_block *sb; + struct inode *inode; + struct dentry *root; +- struct pts_fs_info *fsi; + struct pts_mount_opts *opts; + char s[12]; + +- if (!sb) ++ if (!fsi) + return ERR_PTR(-ENODEV); + ++ sb = fsi->sb; + root = sb->s_root; +- fsi = DEVPTS_SB(sb); + opts = &fsi->mount_opts; + + inode = new_inode(sb); +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c +index fe1f50fe764f..f97110461c19 100644 +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -208,6 +208,9 @@ static int ext4_init_block_bitmap(struct super_block *sb, + memset(bh->b_data, 0, sb->s_blocksize); + + bit_max = ext4_num_base_meta_clusters(sb, block_group); ++ if ((bit_max >> 3) >= bh->b_size) ++ return -EFSCORRUPTED; ++ + for (bit = 0; bit < bit_max; bit++) + ext4_set_bit(bit, bh->b_data); + +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 62880586ed85..8eac7d586997 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -376,9 +376,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) + ext4_fsblk_t block = ext4_ext_pblock(ext); + int len = ext4_ext_get_actual_len(ext); + ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); +- ext4_lblk_t last = lblock + len - 1; + +- if (len == 0 || lblock > last) ++ /* ++ * We allow 
neither: ++ * - zero length ++ * - overflow/wrap-around ++ */ ++ if (lblock + len <= lblock) + return 0; + return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); + } +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index e31d762eedce..9a5ad0f0d3ed 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -205,9 +205,9 @@ void ext4_evict_inode(struct inode *inode) + * Note that directories do not have this problem because they + * don't use page cache. + */ +- if (ext4_should_journal_data(inode) && +- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && +- inode->i_ino != EXT4_JOURNAL_INO) { ++ if (inode->i_ino != EXT4_JOURNAL_INO && ++ ext4_should_journal_data(inode) && ++ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) { + journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; + tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; + +@@ -2589,13 +2589,36 @@ retry: + done = true; + } + } +- ext4_journal_stop(handle); ++ /* ++ * Caution: If the handle is synchronous, ++ * ext4_journal_stop() can wait for transaction commit ++ * to finish which may depend on writeback of pages to ++ * complete or on page lock to be released. In that ++ * case, we have to wait until after after we have ++ * submitted all the IO, released page locks we hold, ++ * and dropped io_end reference (for extent conversion ++ * to be able to complete) before stopping the handle. ++ */ ++ if (!ext4_handle_valid(handle) || handle->h_sync == 0) { ++ ext4_journal_stop(handle); ++ handle = NULL; ++ } + /* Submit prepared bio */ + ext4_io_submit(&mpd.io_submit); + /* Unlock pages we didn't use */ + mpage_release_unused_pages(&mpd, give_up_on_write); +- /* Drop our io_end reference we got from init */ +- ext4_put_io_end(mpd.io_submit.io_end); ++ /* ++ * Drop our io_end reference we got from init. 
We have ++ * to be careful and use deferred io_end finishing if ++ * we are still holding the transaction as we can ++ * release the last reference to io_end which may end ++ * up doing unwritten extent conversion. ++ */ ++ if (handle) { ++ ext4_put_io_end_defer(mpd.io_submit.io_end); ++ ext4_journal_stop(handle); ++ } else ++ ext4_put_io_end(mpd.io_submit.io_end); + + if (ret == -ENOSPC && sbi->s_journal) { + /* +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index cf734170daa9..c4dcac8a018d 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -2932,7 +2932,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, + ext4_error(sb, "Allocating blocks %llu-%llu which overlap " + "fs metadata", block, block+len); + /* File system mounted not to panic on error +- * Fix the bitmap and repeat the block allocation ++ * Fix the bitmap and return EFSCORRUPTED + * We leak some of the blocks here. + */ + ext4_lock_group(sb, ac->ac_b_ex.fe_group); +@@ -2941,7 +2941,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, + ext4_unlock_group(sb, ac->ac_b_ex.fe_group); + err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); + if (!err) +- err = -EAGAIN; ++ err = -EFSCORRUPTED; + goto out_err; + } + +@@ -4506,18 +4506,7 @@ repeat: + } + if (likely(ac->ac_status == AC_STATUS_FOUND)) { + *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); +- if (*errp == -EAGAIN) { +- /* +- * drop the reference that we took +- * in ext4_mb_use_best_found +- */ +- ext4_mb_release_context(ac); +- ac->ac_b_ex.fe_group = 0; +- ac->ac_b_ex.fe_start = 0; +- ac->ac_b_ex.fe_len = 0; +- ac->ac_status = AC_STATUS_CONTINUE; +- goto repeat; +- } else if (*errp) { ++ if (*errp) { + ext4_discard_allocated_blocks(ac); + goto errout; + } else { +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 852c26806af2..c542ebcf7a92 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2240,6 +2240,16 @@ static void ext4_orphan_cleanup(struct super_block *sb, + 
while (es->s_last_orphan) { + struct inode *inode; + ++ /* ++ * We may have encountered an error during cleanup; if ++ * so, skip the rest. ++ */ ++ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { ++ jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); ++ es->s_last_orphan = 0; ++ break; ++ } ++ + inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); + if (IS_ERR(inode)) { + es->s_last_orphan = 0; +@@ -3372,6 +3382,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + goto failed_mount; + } + ++ if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { ++ ext4_msg(sb, KERN_ERR, ++ "Number of reserved GDT blocks insanely large: %d", ++ le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks)); ++ goto failed_mount; ++ } ++ + if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { + if (blocksize != PAGE_SIZE) { + ext4_msg(sb, KERN_ERR, +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index c2e340d6ec6e..d58d4c0af0ce 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -417,6 +417,15 @@ static int fuse_flush(struct file *file, fl_owner_t id) + fuse_sync_writes(inode); + mutex_unlock(&inode->i_mutex); + ++ if (test_bit(AS_ENOSPC, &file->f_mapping->flags) && ++ test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags)) ++ err = -ENOSPC; ++ if (test_bit(AS_EIO, &file->f_mapping->flags) && ++ test_and_clear_bit(AS_EIO, &file->f_mapping->flags)) ++ err = -EIO; ++ if (err) ++ return err; ++ + req = fuse_get_req_nofail_nopages(fc, file); + memset(&inarg, 0, sizeof(inarg)); + inarg.fh = ff->fh; +@@ -462,6 +471,21 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end, + goto out; + + fuse_sync_writes(inode); ++ ++ /* ++ * Due to implementation of fuse writeback ++ * filemap_write_and_wait_range() does not catch errors. 
++ * We have to do this directly after fuse_sync_writes() ++ */ ++ if (test_bit(AS_ENOSPC, &file->f_mapping->flags) && ++ test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags)) ++ err = -ENOSPC; ++ if (test_bit(AS_EIO, &file->f_mapping->flags) && ++ test_and_clear_bit(AS_EIO, &file->f_mapping->flags)) ++ err = -EIO; ++ if (err) ++ goto out; ++ + err = sync_inode_metadata(inode, 1); + if (err) + goto out; +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c +index 2913db2a5b99..0d5e8e59b390 100644 +--- a/fs/fuse/inode.c ++++ b/fs/fuse/inode.c +@@ -926,7 +926,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) + arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | + FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | + FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | +- FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | ++ FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | + FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | + FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT; + req->in.h.opcode = FUSE_INIT; +diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h +index e0ee0b3000b2..358a4db72a27 100644 +--- a/include/linux/devpts_fs.h ++++ b/include/linux/devpts_fs.h +@@ -15,38 +15,24 @@ + + #include + ++struct pts_fs_info; ++ + #ifdef CONFIG_UNIX98_PTYS + +-int devpts_new_index(struct inode *ptmx_inode); +-void devpts_kill_index(struct inode *ptmx_inode, int idx); +-void devpts_add_ref(struct inode *ptmx_inode); +-void devpts_del_ref(struct inode *ptmx_inode); ++/* Look up a pts fs info and get a ref to it */ ++struct pts_fs_info *devpts_get_ref(struct inode *, struct file *); ++void devpts_put_ref(struct pts_fs_info *); ++ ++int devpts_new_index(struct pts_fs_info *); ++void devpts_kill_index(struct pts_fs_info *, int); ++ + /* mknod in devpts */ +-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, +- void *priv); ++struct inode 
*devpts_pty_new(struct pts_fs_info *, dev_t, int, void *); + /* get private structure */ + void *devpts_get_priv(struct inode *pts_inode); + /* unlink */ + void devpts_pty_kill(struct inode *inode); + +-#else +- +-/* Dummy stubs in the no-pty case */ +-static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; } +-static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { } +-static inline void devpts_add_ref(struct inode *ptmx_inode) { } +-static inline void devpts_del_ref(struct inode *ptmx_inode) { } +-static inline struct inode *devpts_pty_new(struct inode *ptmx_inode, +- dev_t device, int index, void *priv) +-{ +- return ERR_PTR(-EINVAL); +-} +-static inline void *devpts_get_priv(struct inode *pts_inode) +-{ +- return NULL; +-} +-static inline void devpts_pty_kill(struct inode *inode) { } +- + #endif + + +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h +index cd0e2413c358..435fd8426b8a 100644 +--- a/include/linux/memcontrol.h ++++ b/include/linux/memcontrol.h +@@ -174,6 +174,11 @@ struct mem_cgroup_thresholds { + struct mem_cgroup_threshold_ary *spare; + }; + ++struct mem_cgroup_id { ++ int id; ++ atomic_t ref; ++}; ++ + /* + * The memory controller data structure. The memory controller controls both + * page cache and RSS per cgroup. We would eventually like to provide +@@ -183,6 +188,9 @@ struct mem_cgroup_thresholds { + struct mem_cgroup { + struct cgroup_subsys_state css; + ++ /* Private memcg ID. Used to ID objects that outlive the cgroup */ ++ struct mem_cgroup_id id; ++ + /* Accounted resources */ + struct page_counter memory; + struct page_counter memsw; +diff --git a/ipc/msg.c b/ipc/msg.c +index 1471db9a7e61..c6521c205cb4 100644 +--- a/ipc/msg.c ++++ b/ipc/msg.c +@@ -680,7 +680,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, + rcu_read_lock(); + ipc_lock_object(&msq->q_perm); + +- ipc_rcu_putref(msq, ipc_rcu_free); ++ ipc_rcu_putref(msq, msg_rcu_free); + /* raced with RMID? 
*/ + if (!ipc_valid_object(&msq->q_perm)) { + err = -EIDRM; +diff --git a/ipc/sem.c b/ipc/sem.c +index b471e5a3863d..20d07008ad5e 100644 +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -442,7 +442,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns + static inline void sem_lock_and_putref(struct sem_array *sma) + { + sem_lock(sma, NULL, -1); +- ipc_rcu_putref(sma, ipc_rcu_free); ++ ipc_rcu_putref(sma, sem_rcu_free); + } + + static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) +@@ -1385,7 +1385,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, + rcu_read_unlock(); + sem_io = ipc_alloc(sizeof(ushort)*nsems); + if (sem_io == NULL) { +- ipc_rcu_putref(sma, ipc_rcu_free); ++ ipc_rcu_putref(sma, sem_rcu_free); + return -ENOMEM; + } + +@@ -1419,20 +1419,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, + if (nsems > SEMMSL_FAST) { + sem_io = ipc_alloc(sizeof(ushort)*nsems); + if (sem_io == NULL) { +- ipc_rcu_putref(sma, ipc_rcu_free); ++ ipc_rcu_putref(sma, sem_rcu_free); + return -ENOMEM; + } + } + + if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) { +- ipc_rcu_putref(sma, ipc_rcu_free); ++ ipc_rcu_putref(sma, sem_rcu_free); + err = -EFAULT; + goto out_free; + } + + for (i = 0; i < nsems; i++) { + if (sem_io[i] > SEMVMX) { +- ipc_rcu_putref(sma, ipc_rcu_free); ++ ipc_rcu_putref(sma, sem_rcu_free); + err = -ERANGE; + goto out_free; + } +@@ -1722,7 +1722,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) + /* step 2: allocate new undo structure */ + new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); + if (!new) { +- ipc_rcu_putref(sma, ipc_rcu_free); ++ ipc_rcu_putref(sma, sem_rcu_free); + return ERR_PTR(-ENOMEM); + } + +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 67648e6b2ac8..6b90d184e9c0 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -272,21 +272,7 @@ static inline bool 
mem_cgroup_is_root(struct mem_cgroup *memcg) + + static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) + { +- return memcg->css.id; +-} +- +-/* +- * A helper function to get mem_cgroup from ID. must be called under +- * rcu_read_lock(). The caller is responsible for calling +- * css_tryget_online() if the mem_cgroup is used for charging. (dropping +- * refcnt from swap can be called against removed memcg.) +- */ +-static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) +-{ +- struct cgroup_subsys_state *css; +- +- css = css_from_id(id, &memory_cgrp_subsys); +- return mem_cgroup_from_css(css); ++ return memcg->id.id; + } + + /* Writing them here to avoid exposing memcg's inner layout */ +@@ -4124,6 +4110,88 @@ static struct cftype mem_cgroup_legacy_files[] = { + { }, /* terminate */ + }; + ++/* ++ * Private memory cgroup IDR ++ * ++ * Swap-out records and page cache shadow entries need to store memcg ++ * references in constrained space, so we maintain an ID space that is ++ * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of ++ * memory-controlled cgroups to 64k. ++ * ++ * However, there usually are many references to the oflline CSS after ++ * the cgroup has been destroyed, such as page cache or reclaimable ++ * slab objects, that don't need to hang on to the ID. We want to keep ++ * those dead CSS from occupying IDs, or we might quickly exhaust the ++ * relatively small ID space and prevent the creation of new cgroups ++ * even when there are much fewer than 64k cgroups - possibly none. ++ * ++ * Maintain a private 16-bit ID space for memcg, and allow the ID to ++ * be freed and recycled when it's no longer needed, which is usually ++ * when the CSS is offlined. ++ * ++ * The only exception to that are records of swapped out tmpfs/shmem ++ * pages that need to be attributed to live ancestors on swapin. But ++ * those references are manageable from userspace. 
++ */ ++ ++static DEFINE_IDR(mem_cgroup_idr); ++ ++static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) ++{ ++ atomic_add(n, &memcg->id.ref); ++} ++ ++static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) ++{ ++ while (!atomic_inc_not_zero(&memcg->id.ref)) { ++ /* ++ * The root cgroup cannot be destroyed, so it's refcount must ++ * always be >= 1. ++ */ ++ if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { ++ VM_BUG_ON(1); ++ break; ++ } ++ memcg = parent_mem_cgroup(memcg); ++ if (!memcg) ++ memcg = root_mem_cgroup; ++ } ++ return memcg; ++} ++ ++static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) ++{ ++ if (atomic_sub_and_test(n, &memcg->id.ref)) { ++ idr_remove(&mem_cgroup_idr, memcg->id.id); ++ memcg->id.id = 0; ++ ++ /* Memcg ID pins CSS */ ++ css_put(&memcg->css); ++ } ++} ++ ++static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) ++{ ++ mem_cgroup_id_get_many(memcg, 1); ++} ++ ++static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) ++{ ++ mem_cgroup_id_put_many(memcg, 1); ++} ++ ++/** ++ * mem_cgroup_from_id - look up a memcg from a memcg id ++ * @id: the memcg id to look up ++ * ++ * Caller must hold rcu_read_lock(). 
++ */ ++struct mem_cgroup *mem_cgroup_from_id(unsigned short id) ++{ ++ WARN_ON_ONCE(!rcu_read_lock_held()); ++ return idr_find(&mem_cgroup_idr, id); ++} ++ + static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) + { + struct mem_cgroup_per_node *pn; +@@ -4178,6 +4246,12 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + if (memcg_wb_domain_init(memcg, GFP_KERNEL)) + goto out_free_stat; + ++ memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, ++ 1, MEM_CGROUP_ID_MAX, ++ GFP_KERNEL); ++ if (memcg->id.id < 0) ++ goto out_free_stat; ++ + return memcg; + + out_free_stat: +@@ -4263,9 +4337,11 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + #ifdef CONFIG_CGROUP_WRITEBACK + INIT_LIST_HEAD(&memcg->cgwb_list); + #endif ++ idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); + return &memcg->css; + + free_out: ++ idr_remove(&mem_cgroup_idr, memcg->id.id); + __mem_cgroup_free(memcg); + return ERR_PTR(error); + } +@@ -4277,8 +4353,9 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) + struct mem_cgroup *parent = mem_cgroup_from_css(css->parent); + int ret; + +- if (css->id > MEM_CGROUP_ID_MAX) +- return -ENOSPC; ++ /* Online state pins memcg ID, memcg ID pins CSS */ ++ mem_cgroup_id_get(mem_cgroup_from_css(css)); ++ css_get(css); + + if (!parent) + return 0; +@@ -4352,6 +4429,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) + memcg_deactivate_kmem(memcg); + + wb_memcg_offline(memcg); ++ ++ mem_cgroup_id_put(memcg); + } + + static void mem_cgroup_css_released(struct cgroup_subsys_state *css) +@@ -4785,6 +4864,8 @@ static void __mem_cgroup_clear_mc(void) + if (!mem_cgroup_is_root(mc.from)) + page_counter_uncharge(&mc.from->memsw, mc.moved_swap); + ++ mem_cgroup_id_put_many(mc.from, mc.moved_swap); ++ + /* + * we charged both to->memory and to->memsw, so we + * should uncharge to->memory. 
+@@ -4792,9 +4873,9 @@ static void __mem_cgroup_clear_mc(void) + if (!mem_cgroup_is_root(mc.to)) + page_counter_uncharge(&mc.to->memory, mc.moved_swap); + +- css_put_many(&mc.from->css, mc.moved_swap); ++ mem_cgroup_id_get_many(mc.to, mc.moved_swap); ++ css_put_many(&mc.to->css, mc.moved_swap); + +- /* we've already done css_get(mc.to) */ + mc.moved_swap = 0; + } + memcg_oom_recover(from); +@@ -5670,7 +5751,7 @@ subsys_initcall(mem_cgroup_init); + */ + void mem_cgroup_swapout(struct page *page, swp_entry_t entry) + { +- struct mem_cgroup *memcg; ++ struct mem_cgroup *memcg, *swap_memcg; + unsigned short oldid; + + VM_BUG_ON_PAGE(PageLRU(page), page); +@@ -5685,15 +5766,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) + if (!memcg) + return; + +- oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); ++ /* ++ * In case the memcg owning these pages has been offlined and doesn't ++ * have an ID allocated to it anymore, charge the closest online ++ * ancestor for the swap instead and transfer the memory+swap charge. ++ */ ++ swap_memcg = mem_cgroup_id_get_online(memcg); ++ oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg)); + VM_BUG_ON_PAGE(oldid, page); +- mem_cgroup_swap_statistics(memcg, true); ++ mem_cgroup_swap_statistics(swap_memcg, true); + + page->mem_cgroup = NULL; + + if (!mem_cgroup_is_root(memcg)) + page_counter_uncharge(&memcg->memory, 1); + ++ if (memcg != swap_memcg) { ++ if (!mem_cgroup_is_root(swap_memcg)) ++ page_counter_charge(&swap_memcg->memsw, 1); ++ page_counter_uncharge(&memcg->memsw, 1); ++ } ++ + /* + * Interrupts should be disabled here because the caller holds the + * mapping->tree_lock lock which is taken with interrupts-off. 
It is +@@ -5703,6 +5796,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) + VM_BUG_ON(!irqs_disabled()); + mem_cgroup_charge_statistics(memcg, page, -1); + memcg_check_events(memcg, page); ++ ++ if (!mem_cgroup_is_root(memcg)) ++ css_put(&memcg->css); + } + + /** +@@ -5726,7 +5822,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry) + if (!mem_cgroup_is_root(memcg)) + page_counter_uncharge(&memcg->memsw, 1); + mem_cgroup_swap_statistics(memcg, false); +- css_put(&memcg->css); ++ mem_cgroup_id_put(memcg); + } + rcu_read_unlock(); + } +diff --git a/mm/slab_common.c b/mm/slab_common.c +index 3c6a86b4ec25..bec2fce9fafc 100644 +--- a/mm/slab_common.c ++++ b/mm/slab_common.c +@@ -521,8 +521,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, + goto out_unlock; + + cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf)); +- cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name, +- css->id, memcg_name_buf); ++ cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name, ++ css->serial_nr, memcg_name_buf); + if (!cache_name) + goto out_unlock; + +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index 2b68418c7198..ffe95d954007 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -479,6 +479,9 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, + if (!rtnh_ok(rtnh, remaining)) + return -EINVAL; + ++ if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) ++ return -EINVAL; ++ + nexthop_nh->nh_flags = + (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; + nexthop_nh->nh_oif = rtnh->rtnh_ifindex; +@@ -1003,6 +1006,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg) + if (fib_props[cfg->fc_type].scope > cfg->fc_scope) + goto err_inval; + ++ if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) ++ goto err_inval; ++ + #ifdef CONFIG_IP_ROUTE_MULTIPATH + if (cfg->fc_mp) { + nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len); +diff --git a/net/ipv4/tcp_input.c 
b/net/ipv4/tcp_input.c +index d4c51158470f..12b98e257c5f 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -89,7 +89,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1; + EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); + + /* rfc5961 challenge ack rate limiting */ +-int sysctl_tcp_challenge_ack_limit = 100; ++int sysctl_tcp_challenge_ack_limit = 1000; + + int sysctl_tcp_stdurg __read_mostly; + int sysctl_tcp_rfc1337 __read_mostly; +@@ -3390,6 +3390,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 + return flag; + } + ++static bool __tcp_oow_rate_limited(struct net *net, int mib_idx, ++ u32 *last_oow_ack_time) ++{ ++ if (*last_oow_ack_time) { ++ s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); ++ ++ if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { ++ NET_INC_STATS_BH(net, mib_idx); ++ return true; /* rate-limited: don't send yet! */ ++ } ++ } ++ ++ *last_oow_ack_time = tcp_time_stamp; ++ ++ return false; /* not rate-limited: go ahead, send dupack now! */ ++} ++ + /* Return true if we're currently rate-limiting out-of-window ACKs and + * thus shouldn't send a dupack right now. We rate-limit dupacks in + * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS +@@ -3403,21 +3420,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, + /* Data packets without SYNs are not likely part of an ACK loop. */ + if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && + !tcp_hdr(skb)->syn) +- goto not_rate_limited; +- +- if (*last_oow_ack_time) { +- s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); +- +- if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { +- NET_INC_STATS_BH(net, mib_idx); +- return true; /* rate-limited: don't send yet! */ +- } +- } +- +- *last_oow_ack_time = tcp_time_stamp; ++ return false; + +-not_rate_limited: +- return false; /* not rate-limited: go ahead, send dupack now! 
*/ ++ return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time); + } + + /* RFC 5961 7 [ACK Throttling] */ +@@ -3427,21 +3432,26 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) + static u32 challenge_timestamp; + static unsigned int challenge_count; + struct tcp_sock *tp = tcp_sk(sk); +- u32 now; ++ u32 count, now; + + /* First check our per-socket dupack rate limit. */ +- if (tcp_oow_rate_limited(sock_net(sk), skb, +- LINUX_MIB_TCPACKSKIPPEDCHALLENGE, +- &tp->last_oow_ack_time)) ++ if (__tcp_oow_rate_limited(sock_net(sk), ++ LINUX_MIB_TCPACKSKIPPEDCHALLENGE, ++ &tp->last_oow_ack_time)) + return; + +- /* Then check the check host-wide RFC 5961 rate limit. */ ++ /* Then check host-wide RFC 5961 rate limit. */ + now = jiffies / HZ; + if (now != challenge_timestamp) { ++ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1; ++ + challenge_timestamp = now; +- challenge_count = 0; ++ WRITE_ONCE(challenge_count, half + ++ prandom_u32_max(sysctl_tcp_challenge_ack_limit)); + } +- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { ++ count = READ_ONCE(challenge_count); ++ if (count > 0) { ++ WRITE_ONCE(challenge_count, count - 1); + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); + tcp_send_ack(sk); + } +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 7c9883ab56e5..660c967ba84a 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -239,7 +239,8 @@ void tcp_select_initial_window(int __space, __u32 mss, + /* Set window scaling on max possible window + * See RFC1323 for an explanation of the limit to 14 + */ +- space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); ++ space = max_t(u32, space, sysctl_tcp_rmem[2]); ++ space = max_t(u32, space, sysctl_rmem_max); + space = min_t(u32, space, *window_clamp); + while (space > 65535 && (*rcv_wscale) < 14) { + space >>= 1; +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c +index 923abd6b3064..8d2f7c9b491d 100644 +--- a/net/irda/af_irda.c 
++++ b/net/irda/af_irda.c +@@ -1024,8 +1024,11 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, + } + + /* Check if we have opened a local TSAP */ +- if (!self->tsap) +- irda_open_tsap(self, LSAP_ANY, addr->sir_name); ++ if (!self->tsap) { ++ err = irda_open_tsap(self, LSAP_ANY, addr->sir_name); ++ if (err) ++ goto out; ++ } + + /* Move to connecting socket, start sending Connect Requests */ + sock->state = SS_CONNECTING; +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c +index ad4fa49ad1db..9068369f8a1b 100644 +--- a/security/apparmor/apparmorfs.c ++++ b/security/apparmor/apparmorfs.c +@@ -331,6 +331,7 @@ static int aa_fs_seq_hash_show(struct seq_file *seq, void *v) + seq_printf(seq, "%.2x", profile->hash[i]); + seq_puts(seq, "\n"); + } ++ aa_put_profile(profile); + + return 0; + } diff --git a/patch/kernel/marvell-dev/patch-4.4.18-19.patch b/patch/kernel/marvell-dev/patch-4.4.18-19.patch new file mode 100644 index 000000000..552c978a9 --- /dev/null +++ b/patch/kernel/marvell-dev/patch-4.4.18-19.patch @@ -0,0 +1,5823 @@ +diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt +index a78bf1ffa68c..39b7f612c418 100644 +--- a/Documentation/module-signing.txt ++++ b/Documentation/module-signing.txt +@@ -271,3 +271,9 @@ Since the private key is used to sign modules, viruses and malware could use + the private key to sign modules and compromise the operating system. The + private key must be either destroyed or moved to a secure location and not kept + in the root node of the kernel source tree. ++ ++If you use the same private key to sign modules for multiple kernel ++configurations, you must ensure that the module version information is ++sufficient to prevent loading a module into a different kernel. Either ++set CONFIG_MODVERSIONS=y or ensure that each configuration has a different ++kernel release string by changing EXTRAVERSION or CONFIG_LOCALVERSION. 
+diff --git a/Makefile b/Makefile +index eaedea88a8a7..695c64ec160c 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 18 ++SUBLEVEL = 19 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h +index 57af2f05ae84..3cab04255ae0 100644 +--- a/arch/arc/include/asm/pgtable.h ++++ b/arch/arc/include/asm/pgtable.h +@@ -110,7 +110,7 @@ + #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE) + + /* Set of bits not changed in pte_modify */ +-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) ++#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL) + + /* More Abbrevaited helpers */ + #define PAGE_U_NONE __pgprot(___DEF) +diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts +index 97570cb7f2fc..1d23527d4ecf 100644 +--- a/arch/arm/boot/dts/sun4i-a10-a1000.dts ++++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts +@@ -84,6 +84,7 @@ + regulator-name = "emac-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; ++ startup-delay-us = <20000>; + enable-active-high; + gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>; + }; +diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts +index 2b17c5199151..6de83a6187d0 100644 +--- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts ++++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts +@@ -66,6 +66,7 @@ + regulator-name = "emac-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; ++ startup-delay-us = <20000>; + enable-active-high; + gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>; + }; +diff --git a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts +index 7afc7a64eef1..e28f080b1fd5 100644 +--- a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts ++++ b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts +@@ -80,6 +80,7 @@ + regulator-name = "emac-3v3"; + 
regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; ++ startup-delay-us = <20000>; + enable-active-high; + gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */ + }; +diff --git a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts +index 9fea918f949e..39731a78f087 100644 +--- a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts ++++ b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts +@@ -79,6 +79,7 @@ + regulator-name = "emac-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; ++ startup-delay-us = <20000>; + enable-active-high; + gpio = <&pio 0 2 GPIO_ACTIVE_HIGH>; + }; +diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi +index cc093a482aa4..8fe39e1b680e 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi +@@ -517,7 +517,7 @@ + #address-cells = <0>; + + reg = <0x0 0xffb71000 0x0 0x1000>, +- <0x0 0xffb72000 0x0 0x1000>, ++ <0x0 0xffb72000 0x0 0x2000>, + <0x0 0xffb74000 0x0 0x2000>, + <0x0 0xffb76000 0x0 0x2000>; + interrupts = + #include + #include ++#include + #include + #include + +@@ -93,7 +94,13 @@ + disable_step_tsk x19, x20 // exceptions when scheduling. + .else + add x21, sp, #S_FRAME_SIZE +- .endif ++ get_thread_info tsk ++ /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ ++ ldr x20, [tsk, #TI_ADDR_LIMIT] ++ str x20, [sp, #S_ORIG_ADDR_LIMIT] ++ mov x20, #TASK_SIZE_64 ++ str x20, [tsk, #TI_ADDR_LIMIT] ++ .endif /* \el == 0 */ + mrs x22, elr_el1 + mrs x23, spsr_el1 + stp lr, x21, [sp, #S_LR] +@@ -117,6 +124,12 @@ + .endm + + .macro kernel_exit, el ++ .if \el != 0 ++ /* Restore the task's original addr_limit. 
*/ ++ ldr x20, [sp, #S_ORIG_ADDR_LIMIT] ++ str x20, [tsk, #TI_ADDR_LIMIT] ++ .endif ++ + ldp x21, x22, [sp, #S_PC] // load ELR, SPSR + .if \el == 0 + ct_user_enter +diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c +index b1adc51b2c2e..f3c3d8fee5ba 100644 +--- a/arch/arm64/kernel/smp.c ++++ b/arch/arm64/kernel/smp.c +@@ -188,7 +188,6 @@ asmlinkage void secondary_start_kernel(void) + set_cpu_online(cpu, true); + complete(&cpu_running); + +- local_dbg_enable(); + local_irq_enable(); + local_async_enable(); + +@@ -334,8 +333,8 @@ void __init smp_cpus_done(unsigned int max_cpus) + + void __init smp_prepare_boot_cpu(void) + { +- cpuinfo_store_boot_cpu(); + set_my_cpu_offset(per_cpu_offset(smp_processor_id())); ++ cpuinfo_store_boot_cpu(); + } + + static u64 __init of_get_cpu_mpidr(struct device_node *dn) +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c +index 116ad654dd59..653735a8c58a 100644 +--- a/arch/arm64/mm/mmu.c ++++ b/arch/arm64/mm/mmu.c +@@ -652,9 +652,9 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) + /* + * Check whether the physical FDT address is set and meets the minimum + * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be +- * at least 8 bytes so that we can always access the size field of the +- * FDT header after mapping the first chunk, double check here if that +- * is indeed the case. ++ * at least 8 bytes so that we can always access the magic and size ++ * fields of the FDT header after mapping the first chunk, double check ++ * here if that is indeed the case. 
+ */ + BUILD_BUG_ON(MIN_FDT_ALIGN < 8); + if (!dt_phys || dt_phys % MIN_FDT_ALIGN) +@@ -682,7 +682,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) + create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, + SWAPPER_BLOCK_SIZE, prot); + +- if (fdt_check_header(dt_virt) != 0) ++ if (fdt_magic(dt_virt) != FDT_MAGIC) + return NULL; + + size = fdt_totalsize(dt_virt); +diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S +index b8f04b3f2786..1f6bb29ca53b 100644 +--- a/arch/arm64/mm/proc.S ++++ b/arch/arm64/mm/proc.S +@@ -156,6 +156,8 @@ ENTRY(__cpu_setup) + msr cpacr_el1, x0 // Enable FP/ASIMD + mov x0, #1 << 12 // Reset mdscr_el1 and disable + msr mdscr_el1, x0 // access to the DCC from EL0 ++ isb // Unmask debug exceptions now, ++ enable_dbg // since this is per-cpu + reset_pmuserenr_el0 x0 // Disable PMU access from EL0 + /* + * Memory region attributes for LPAE: +diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h +index 0154e2807ebb..2369ad394876 100644 +--- a/arch/metag/include/asm/cmpxchg_lnkget.h ++++ b/arch/metag/include/asm/cmpxchg_lnkget.h +@@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, + " DCACHE [%2], %0\n" + #endif + "2:\n" +- : "=&d" (temp), "=&da" (retval) ++ : "=&d" (temp), "=&d" (retval) + : "da" (m), "bd" (old), "da" (new) + : "cc" + ); +diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c +index 1f910563fdf6..d76275da54cb 100644 +--- a/arch/mips/kernel/csrc-r4k.c ++++ b/arch/mips/kernel/csrc-r4k.c +@@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = { + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }; + +-static u64 notrace r4k_read_sched_clock(void) ++static u64 __maybe_unused notrace r4k_read_sched_clock(void) + { + return read_c0_count(); + } +@@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void) + + clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); + ++#ifndef CONFIG_CPU_FREQ + 
sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); ++#endif + + return 0; + } +diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c +index dc10c77b7500..d6476d11212e 100644 +--- a/arch/mips/kvm/emulate.c ++++ b/arch/mips/kvm/emulate.c +@@ -1629,8 +1629,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, + + preempt_disable(); + if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { +- if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) +- kvm_mips_handle_kseg0_tlb_fault(va, vcpu); ++ if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 && ++ kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) { ++ kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n", ++ __func__, va, vcpu, read_c0_entryhi()); ++ er = EMULATE_FAIL; ++ preempt_enable(); ++ goto done; ++ } + } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || + KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { + int index; +@@ -1665,14 +1671,19 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, + run, vcpu); + preempt_enable(); + goto dont_update_pc; +- } else { +- /* +- * We fault an entry from the guest tlb to the +- * shadow host TLB +- */ +- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, +- NULL, +- NULL); ++ } ++ /* ++ * We fault an entry from the guest tlb to the ++ * shadow host TLB ++ */ ++ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, ++ NULL, NULL)) { ++ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", ++ __func__, va, index, vcpu, ++ read_c0_entryhi()); ++ er = EMULATE_FAIL; ++ preempt_enable(); ++ goto done; + } + } + } else { +@@ -2633,8 +2644,13 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, + * OK we have a Guest TLB entry, now inject it into the + * shadow host TLB + */ +- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, +- NULL); ++ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, ++ NULL, NULL)) { ++ kvm_err("%s: handling mapped seg tlb fault for %lx, index: 
%u, vcpu: %p, ASID: %#lx\n", ++ __func__, va, index, vcpu, ++ read_c0_entryhi()); ++ er = EMULATE_FAIL; ++ } + } + } + +diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c +index aed0ac2a4972..7a7ed9ca01bb 100644 +--- a/arch/mips/kvm/tlb.c ++++ b/arch/mips/kvm/tlb.c +@@ -276,7 +276,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, + } + + gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); +- if (gfn >= kvm->arch.guest_pmap_npages) { ++ if ((gfn | 1) >= kvm->arch.guest_pmap_npages) { + kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, + gfn, badvaddr); + kvm_mips_dump_host_tlbs(); +@@ -361,25 +361,39 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, + unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; + struct kvm *kvm = vcpu->kvm; + pfn_t pfn0, pfn1; +- +- if ((tlb->tlb_hi & VPN2_MASK) == 0) { +- pfn0 = 0; +- pfn1 = 0; +- } else { +- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) +- >> PAGE_SHIFT) < 0) +- return -1; +- +- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) +- >> PAGE_SHIFT) < 0) +- return -1; +- +- pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) +- >> PAGE_SHIFT]; +- pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) +- >> PAGE_SHIFT]; ++ gfn_t gfn0, gfn1; ++ long tlb_lo[2]; ++ ++ tlb_lo[0] = tlb->tlb_lo0; ++ tlb_lo[1] = tlb->tlb_lo1; ++ ++ /* ++ * The commpage address must not be mapped to anything else if the guest ++ * TLB contains entries nearby, or commpage accesses will break. 
++ */ ++ if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) & ++ VPN2_MASK & (PAGE_MASK << 1))) ++ tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0; ++ ++ gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT; ++ gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT; ++ if (gfn0 >= kvm->arch.guest_pmap_npages || ++ gfn1 >= kvm->arch.guest_pmap_npages) { ++ kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n", ++ __func__, gfn0, gfn1, tlb->tlb_hi); ++ kvm_mips_dump_guest_tlbs(vcpu); ++ return -1; + } + ++ if (kvm_mips_map_page(kvm, gfn0) < 0) ++ return -1; ++ ++ if (kvm_mips_map_page(kvm, gfn1) < 0) ++ return -1; ++ ++ pfn0 = kvm->arch.guest_pmap[gfn0]; ++ pfn1 = kvm->arch.guest_pmap[gfn1]; ++ + if (hpa0) + *hpa0 = pfn0 << PAGE_SHIFT; + +@@ -391,9 +405,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, + kvm_mips_get_kernel_asid(vcpu) : + kvm_mips_get_user_asid(vcpu)); + entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | +- (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); ++ (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V); + entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | +- (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); ++ (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V); + + kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, + tlb->tlb_lo0, tlb->tlb_lo1); +@@ -794,10 +808,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) + local_irq_restore(flags); + return KVM_INVALID_INST; + } +- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, +- &vcpu->arch. 
+- guest_tlb[index], +- NULL, NULL); ++ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, ++ &vcpu->arch.guest_tlb[index], ++ NULL, NULL)) { ++ kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n", ++ __func__, opc, index, vcpu, ++ read_c0_entryhi()); ++ kvm_mips_dump_guest_tlbs(vcpu); ++ local_irq_restore(flags); ++ return KVM_INVALID_INST; ++ } + inst = *(opc); + } + local_irq_restore(flags); +diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c +index a2631a52ca99..444802e78554 100644 +--- a/arch/mips/loongson64/loongson-3/hpet.c ++++ b/arch/mips/loongson64/loongson-3/hpet.c +@@ -13,8 +13,8 @@ + #define SMBUS_PCI_REG64 0x64 + #define SMBUS_PCI_REGB4 0xb4 + +-#define HPET_MIN_CYCLES 64 +-#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) ++#define HPET_MIN_CYCLES 16 ++#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12) + + static DEFINE_SPINLOCK(hpet_lock); + DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device); +@@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt) + static int hpet_next_event(unsigned long delta, + struct clock_event_device *evt) + { +- unsigned int cnt; +- int res; ++ u32 cnt; ++ s32 res; + + cnt = hpet_read(HPET_COUNTER); +- cnt += delta; ++ cnt += (u32) delta; + hpet_write(HPET_T0_CMP, cnt); + +- res = (int)(cnt - hpet_read(HPET_COUNTER)); ++ res = (s32)(cnt - hpet_read(HPET_COUNTER)); + + return res < HPET_MIN_CYCLES ? 
-ETIME : 0; + } +@@ -230,7 +230,7 @@ void __init setup_hpet_timer(void) + + cd = &per_cpu(hpet_clockevent_device, cpu); + cd->name = "hpet"; +- cd->rating = 320; ++ cd->rating = 100; + cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + cd->set_state_shutdown = hpet_set_state_shutdown; + cd->set_state_periodic = hpet_set_state_periodic; +diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c +index b4a837893562..5abe51cad899 100644 +--- a/arch/mips/mm/uasm-mips.c ++++ b/arch/mips/mm/uasm-mips.c +@@ -65,7 +65,7 @@ static struct insn insn_table[] = { + #ifndef CONFIG_CPU_MIPSR6 + { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + #else +- { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, ++ { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, + #endif + { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +index 463af88c95a2..974f73df00bb 100644 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +@@ -655,112 +655,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) + + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM + BEGIN_FTR_SECTION +- b skip_tm +-END_FTR_SECTION_IFCLR(CPU_FTR_TM) +- +- /* Turn on TM/FP/VSX/VMX so we can restore them. */ +- mfmsr r5 +- li r6, MSR_TM >> 32 +- sldi r6, r6, 32 +- or r5, r5, r6 +- ori r5, r5, MSR_FP +- oris r5, r5, (MSR_VEC | MSR_VSX)@h +- mtmsrd r5 +- +- /* +- * The user may change these outside of a transaction, so they must +- * always be context switched. +- */ +- ld r5, VCPU_TFHAR(r4) +- ld r6, VCPU_TFIAR(r4) +- ld r7, VCPU_TEXASR(r4) +- mtspr SPRN_TFHAR, r5 +- mtspr SPRN_TFIAR, r6 +- mtspr SPRN_TEXASR, r7 +- +- ld r5, VCPU_MSR(r4) +- rldicl. 
r5, r5, 64 - MSR_TS_S_LG, 62 +- beq skip_tm /* TM not active in guest */ +- +- /* Make sure the failure summary is set, otherwise we'll program check +- * when we trechkpt. It's possible that this might have been not set +- * on a kvmppc_set_one_reg() call but we shouldn't let this crash the +- * host. +- */ +- oris r7, r7, (TEXASR_FS)@h +- mtspr SPRN_TEXASR, r7 +- +- /* +- * We need to load up the checkpointed state for the guest. +- * We need to do this early as it will blow away any GPRs, VSRs and +- * some SPRs. +- */ +- +- mr r31, r4 +- addi r3, r31, VCPU_FPRS_TM +- bl load_fp_state +- addi r3, r31, VCPU_VRS_TM +- bl load_vr_state +- mr r4, r31 +- lwz r7, VCPU_VRSAVE_TM(r4) +- mtspr SPRN_VRSAVE, r7 +- +- ld r5, VCPU_LR_TM(r4) +- lwz r6, VCPU_CR_TM(r4) +- ld r7, VCPU_CTR_TM(r4) +- ld r8, VCPU_AMR_TM(r4) +- ld r9, VCPU_TAR_TM(r4) +- mtlr r5 +- mtcr r6 +- mtctr r7 +- mtspr SPRN_AMR, r8 +- mtspr SPRN_TAR, r9 +- +- /* +- * Load up PPR and DSCR values but don't put them in the actual SPRs +- * till the last moment to avoid running with userspace PPR and DSCR for +- * too long. +- */ +- ld r29, VCPU_DSCR_TM(r4) +- ld r30, VCPU_PPR_TM(r4) +- +- std r2, PACATMSCRATCH(r13) /* Save TOC */ +- +- /* Clear the MSR RI since r1, r13 are all going to be foobar. */ +- li r5, 0 +- mtmsrd r5, 1 +- +- /* Load GPRs r0-r28 */ +- reg = 0 +- .rept 29 +- ld reg, VCPU_GPRS_TM(reg)(r31) +- reg = reg + 1 +- .endr +- +- mtspr SPRN_DSCR, r29 +- mtspr SPRN_PPR, r30 +- +- /* Load final GPRs */ +- ld 29, VCPU_GPRS_TM(29)(r31) +- ld 30, VCPU_GPRS_TM(30)(r31) +- ld 31, VCPU_GPRS_TM(31)(r31) +- +- /* TM checkpointed state is now setup. All GPRs are now volatile. */ +- TRECHKPT +- +- /* Now let's get back the state we need. */ +- HMT_MEDIUM +- GET_PACA(r13) +- ld r29, HSTATE_DSCR(r13) +- mtspr SPRN_DSCR, r29 +- ld r4, HSTATE_KVM_VCPU(r13) +- ld r1, HSTATE_HOST_R1(r13) +- ld r2, PACATMSCRATCH(r13) +- +- /* Set the MSR RI since we have our registers back. 
*/ +- li r5, MSR_RI +- mtmsrd r5, 1 +-skip_tm: ++ bl kvmppc_restore_tm ++END_FTR_SECTION_IFSET(CPU_FTR_TM) + #endif + + /* Load guest PMU registers */ +@@ -841,12 +737,6 @@ BEGIN_FTR_SECTION + /* Skip next section on POWER7 */ + b 8f + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) +- /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ +- mfmsr r8 +- li r0, 1 +- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG +- mtmsrd r8 +- + /* Load up POWER8-specific registers */ + ld r5, VCPU_IAMR(r4) + lwz r6, VCPU_PSPB(r4) +@@ -1436,106 +1326,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) + + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM + BEGIN_FTR_SECTION +- b 2f +-END_FTR_SECTION_IFCLR(CPU_FTR_TM) +- /* Turn on TM. */ +- mfmsr r8 +- li r0, 1 +- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG +- mtmsrd r8 +- +- ld r5, VCPU_MSR(r9) +- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 +- beq 1f /* TM not active in guest. */ +- +- li r3, TM_CAUSE_KVM_RESCHED +- +- /* Clear the MSR RI since r1, r13 are all going to be foobar. */ +- li r5, 0 +- mtmsrd r5, 1 +- +- /* All GPRs are volatile at this point. */ +- TRECLAIM(R3) +- +- /* Temporarily store r13 and r9 so we have some regs to play with */ +- SET_SCRATCH0(r13) +- GET_PACA(r13) +- std r9, PACATMSCRATCH(r13) +- ld r9, HSTATE_KVM_VCPU(r13) +- +- /* Get a few more GPRs free. */ +- std r29, VCPU_GPRS_TM(29)(r9) +- std r30, VCPU_GPRS_TM(30)(r9) +- std r31, VCPU_GPRS_TM(31)(r9) +- +- /* Save away PPR and DSCR soon so don't run with user values. */ +- mfspr r31, SPRN_PPR +- HMT_MEDIUM +- mfspr r30, SPRN_DSCR +- ld r29, HSTATE_DSCR(r13) +- mtspr SPRN_DSCR, r29 +- +- /* Save all but r9, r13 & r29-r31 */ +- reg = 0 +- .rept 29 +- .if (reg != 9) && (reg != 13) +- std reg, VCPU_GPRS_TM(reg)(r9) +- .endif +- reg = reg + 1 +- .endr +- /* ... now save r13 */ +- GET_SCRATCH0(r4) +- std r4, VCPU_GPRS_TM(13)(r9) +- /* ... and save r9 */ +- ld r4, PACATMSCRATCH(r13) +- std r4, VCPU_GPRS_TM(9)(r9) +- +- /* Reload stack pointer and TOC. 
*/ +- ld r1, HSTATE_HOST_R1(r13) +- ld r2, PACATOC(r13) +- +- /* Set MSR RI now we have r1 and r13 back. */ +- li r5, MSR_RI +- mtmsrd r5, 1 +- +- /* Save away checkpinted SPRs. */ +- std r31, VCPU_PPR_TM(r9) +- std r30, VCPU_DSCR_TM(r9) +- mflr r5 +- mfcr r6 +- mfctr r7 +- mfspr r8, SPRN_AMR +- mfspr r10, SPRN_TAR +- std r5, VCPU_LR_TM(r9) +- stw r6, VCPU_CR_TM(r9) +- std r7, VCPU_CTR_TM(r9) +- std r8, VCPU_AMR_TM(r9) +- std r10, VCPU_TAR_TM(r9) +- +- /* Restore r12 as trap number. */ +- lwz r12, VCPU_TRAP(r9) +- +- /* Save FP/VSX. */ +- addi r3, r9, VCPU_FPRS_TM +- bl store_fp_state +- addi r3, r9, VCPU_VRS_TM +- bl store_vr_state +- mfspr r6, SPRN_VRSAVE +- stw r6, VCPU_VRSAVE_TM(r9) +-1: +- /* +- * We need to save these SPRs after the treclaim so that the software +- * error code is recorded correctly in the TEXASR. Also the user may +- * change these outside of a transaction, so they must always be +- * context switched. +- */ +- mfspr r5, SPRN_TFHAR +- mfspr r6, SPRN_TFIAR +- mfspr r7, SPRN_TEXASR +- std r5, VCPU_TFHAR(r9) +- std r6, VCPU_TFIAR(r9) +- std r7, VCPU_TEXASR(r9) +-2: ++ bl kvmppc_save_tm ++END_FTR_SECTION_IFSET(CPU_FTR_TM) + #endif + + /* Increment yield count if they have a VPA */ +@@ -2245,6 +2037,13 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ + /* save FP state */ + bl kvmppc_save_fp + ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM ++BEGIN_FTR_SECTION ++ ld r9, HSTATE_KVM_VCPU(r13) ++ bl kvmppc_save_tm ++END_FTR_SECTION_IFSET(CPU_FTR_TM) ++#endif ++ + /* + * Set DEC to the smaller of DEC and HDEC, so that we wake + * no later than the end of our timeslice (HDEC interrupts +@@ -2321,6 +2120,12 @@ kvm_end_cede: + bl kvmhv_accumulate_time + #endif + ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM ++BEGIN_FTR_SECTION ++ bl kvmppc_restore_tm ++END_FTR_SECTION_IFSET(CPU_FTR_TM) ++#endif ++ + /* load up FP state */ + bl kvmppc_load_fp + +@@ -2629,6 +2434,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) + mr r4,r31 + blr + ++#ifdef 
CONFIG_PPC_TRANSACTIONAL_MEM ++/* ++ * Save transactional state and TM-related registers. ++ * Called with r9 pointing to the vcpu struct. ++ * This can modify all checkpointed registers, but ++ * restores r1, r2 and r9 (vcpu pointer) before exit. ++ */ ++kvmppc_save_tm: ++ mflr r0 ++ std r0, PPC_LR_STKOFF(r1) ++ ++ /* Turn on TM. */ ++ mfmsr r8 ++ li r0, 1 ++ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG ++ mtmsrd r8 ++ ++ ld r5, VCPU_MSR(r9) ++ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 ++ beq 1f /* TM not active in guest. */ ++ ++ std r1, HSTATE_HOST_R1(r13) ++ li r3, TM_CAUSE_KVM_RESCHED ++ ++ /* Clear the MSR RI since r1, r13 are all going to be foobar. */ ++ li r5, 0 ++ mtmsrd r5, 1 ++ ++ /* All GPRs are volatile at this point. */ ++ TRECLAIM(R3) ++ ++ /* Temporarily store r13 and r9 so we have some regs to play with */ ++ SET_SCRATCH0(r13) ++ GET_PACA(r13) ++ std r9, PACATMSCRATCH(r13) ++ ld r9, HSTATE_KVM_VCPU(r13) ++ ++ /* Get a few more GPRs free. */ ++ std r29, VCPU_GPRS_TM(29)(r9) ++ std r30, VCPU_GPRS_TM(30)(r9) ++ std r31, VCPU_GPRS_TM(31)(r9) ++ ++ /* Save away PPR and DSCR soon so don't run with user values. */ ++ mfspr r31, SPRN_PPR ++ HMT_MEDIUM ++ mfspr r30, SPRN_DSCR ++ ld r29, HSTATE_DSCR(r13) ++ mtspr SPRN_DSCR, r29 ++ ++ /* Save all but r9, r13 & r29-r31 */ ++ reg = 0 ++ .rept 29 ++ .if (reg != 9) && (reg != 13) ++ std reg, VCPU_GPRS_TM(reg)(r9) ++ .endif ++ reg = reg + 1 ++ .endr ++ /* ... now save r13 */ ++ GET_SCRATCH0(r4) ++ std r4, VCPU_GPRS_TM(13)(r9) ++ /* ... and save r9 */ ++ ld r4, PACATMSCRATCH(r13) ++ std r4, VCPU_GPRS_TM(9)(r9) ++ ++ /* Reload stack pointer and TOC. */ ++ ld r1, HSTATE_HOST_R1(r13) ++ ld r2, PACATOC(r13) ++ ++ /* Set MSR RI now we have r1 and r13 back. */ ++ li r5, MSR_RI ++ mtmsrd r5, 1 ++ ++ /* Save away checkpinted SPRs. 
*/ ++ std r31, VCPU_PPR_TM(r9) ++ std r30, VCPU_DSCR_TM(r9) ++ mflr r5 ++ mfcr r6 ++ mfctr r7 ++ mfspr r8, SPRN_AMR ++ mfspr r10, SPRN_TAR ++ std r5, VCPU_LR_TM(r9) ++ stw r6, VCPU_CR_TM(r9) ++ std r7, VCPU_CTR_TM(r9) ++ std r8, VCPU_AMR_TM(r9) ++ std r10, VCPU_TAR_TM(r9) ++ ++ /* Restore r12 as trap number. */ ++ lwz r12, VCPU_TRAP(r9) ++ ++ /* Save FP/VSX. */ ++ addi r3, r9, VCPU_FPRS_TM ++ bl store_fp_state ++ addi r3, r9, VCPU_VRS_TM ++ bl store_vr_state ++ mfspr r6, SPRN_VRSAVE ++ stw r6, VCPU_VRSAVE_TM(r9) ++1: ++ /* ++ * We need to save these SPRs after the treclaim so that the software ++ * error code is recorded correctly in the TEXASR. Also the user may ++ * change these outside of a transaction, so they must always be ++ * context switched. ++ */ ++ mfspr r5, SPRN_TFHAR ++ mfspr r6, SPRN_TFIAR ++ mfspr r7, SPRN_TEXASR ++ std r5, VCPU_TFHAR(r9) ++ std r6, VCPU_TFIAR(r9) ++ std r7, VCPU_TEXASR(r9) ++ ++ ld r0, PPC_LR_STKOFF(r1) ++ mtlr r0 ++ blr ++ ++/* ++ * Restore transactional state and TM-related registers. ++ * Called with r4 pointing to the vcpu struct. ++ * This potentially modifies all checkpointed registers. ++ * It restores r1, r2, r4 from the PACA. ++ */ ++kvmppc_restore_tm: ++ mflr r0 ++ std r0, PPC_LR_STKOFF(r1) ++ ++ /* Turn on TM/FP/VSX/VMX so we can restore them. */ ++ mfmsr r5 ++ li r6, MSR_TM >> 32 ++ sldi r6, r6, 32 ++ or r5, r5, r6 ++ ori r5, r5, MSR_FP ++ oris r5, r5, (MSR_VEC | MSR_VSX)@h ++ mtmsrd r5 ++ ++ /* ++ * The user may change these outside of a transaction, so they must ++ * always be context switched. ++ */ ++ ld r5, VCPU_TFHAR(r4) ++ ld r6, VCPU_TFIAR(r4) ++ ld r7, VCPU_TEXASR(r4) ++ mtspr SPRN_TFHAR, r5 ++ mtspr SPRN_TFIAR, r6 ++ mtspr SPRN_TEXASR, r7 ++ ++ ld r5, VCPU_MSR(r4) ++ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 ++ beqlr /* TM not active in guest */ ++ std r1, HSTATE_HOST_R1(r13) ++ ++ /* Make sure the failure summary is set, otherwise we'll program check ++ * when we trechkpt. 
It's possible that this might have been not set ++ * on a kvmppc_set_one_reg() call but we shouldn't let this crash the ++ * host. ++ */ ++ oris r7, r7, (TEXASR_FS)@h ++ mtspr SPRN_TEXASR, r7 ++ ++ /* ++ * We need to load up the checkpointed state for the guest. ++ * We need to do this early as it will blow away any GPRs, VSRs and ++ * some SPRs. ++ */ ++ ++ mr r31, r4 ++ addi r3, r31, VCPU_FPRS_TM ++ bl load_fp_state ++ addi r3, r31, VCPU_VRS_TM ++ bl load_vr_state ++ mr r4, r31 ++ lwz r7, VCPU_VRSAVE_TM(r4) ++ mtspr SPRN_VRSAVE, r7 ++ ++ ld r5, VCPU_LR_TM(r4) ++ lwz r6, VCPU_CR_TM(r4) ++ ld r7, VCPU_CTR_TM(r4) ++ ld r8, VCPU_AMR_TM(r4) ++ ld r9, VCPU_TAR_TM(r4) ++ mtlr r5 ++ mtcr r6 ++ mtctr r7 ++ mtspr SPRN_AMR, r8 ++ mtspr SPRN_TAR, r9 ++ ++ /* ++ * Load up PPR and DSCR values but don't put them in the actual SPRs ++ * till the last moment to avoid running with userspace PPR and DSCR for ++ * too long. ++ */ ++ ld r29, VCPU_DSCR_TM(r4) ++ ld r30, VCPU_PPR_TM(r4) ++ ++ std r2, PACATMSCRATCH(r13) /* Save TOC */ ++ ++ /* Clear the MSR RI since r1, r13 are all going to be foobar. */ ++ li r5, 0 ++ mtmsrd r5, 1 ++ ++ /* Load GPRs r0-r28 */ ++ reg = 0 ++ .rept 29 ++ ld reg, VCPU_GPRS_TM(reg)(r31) ++ reg = reg + 1 ++ .endr ++ ++ mtspr SPRN_DSCR, r29 ++ mtspr SPRN_PPR, r30 ++ ++ /* Load final GPRs */ ++ ld 29, VCPU_GPRS_TM(29)(r31) ++ ld 30, VCPU_GPRS_TM(30)(r31) ++ ld 31, VCPU_GPRS_TM(31)(r31) ++ ++ /* TM checkpointed state is now setup. All GPRs are now volatile. */ ++ TRECHKPT ++ ++ /* Now let's get back the state we need. */ ++ HMT_MEDIUM ++ GET_PACA(r13) ++ ld r29, HSTATE_DSCR(r13) ++ mtspr SPRN_DSCR, r29 ++ ld r4, HSTATE_KVM_VCPU(r13) ++ ld r1, HSTATE_HOST_R1(r13) ++ ld r2, PACATMSCRATCH(r13) ++ ++ /* Set the MSR RI since we have our registers back. 
*/ ++ li r5, MSR_RI ++ mtmsrd r5, 1 ++ ++ ld r0, PPC_LR_STKOFF(r1) ++ mtlr r0 ++ blr ++#endif ++ + /* + * We come here if we get any exception or interrupt while we are + * executing host real mode code while in guest MMU context. +diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c +index c146f3c262c3..0149ac59c273 100644 +--- a/arch/x86/kvm/mtrr.c ++++ b/arch/x86/kvm/mtrr.c +@@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct mtrr_iter *iter) + + iter->fixed = false; + iter->start_max = iter->start; ++ iter->range = NULL; + iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node); + + __mtrr_lookup_var_next(iter); +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 41e7943004fe..4589b6feeb7b 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -8124,6 +8124,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) + if ((vectoring_info & VECTORING_INFO_VALID_MASK) && + (exit_reason != EXIT_REASON_EXCEPTION_NMI && + exit_reason != EXIT_REASON_EPT_VIOLATION && ++ exit_reason != EXIT_REASON_PML_FULL && + exit_reason != EXIT_REASON_TASK_SWITCH)) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; +@@ -8736,6 +8737,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) + put_cpu(); + } + ++/* ++ * Ensure that the current vmcs of the logical processor is the ++ * vmcs01 of the vcpu before calling free_nested(). 
++ */ ++static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu) ++{ ++ struct vcpu_vmx *vmx = to_vmx(vcpu); ++ int r; ++ ++ r = vcpu_load(vcpu); ++ BUG_ON(r); ++ vmx_load_vmcs01(vcpu); ++ free_nested(vmx); ++ vcpu_put(vcpu); ++} ++ + static void vmx_free_vcpu(struct kvm_vcpu *vcpu) + { + struct vcpu_vmx *vmx = to_vmx(vcpu); +@@ -8744,8 +8761,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu) + vmx_destroy_pml_buffer(vmx); + free_vpid(vmx->vpid); + leave_guest_mode(vcpu); +- vmx_load_vmcs01(vcpu); +- free_nested(vmx); ++ vmx_free_vcpu_nested(vcpu); + free_loaded_vmcs(vmx->loaded_vmcs); + kfree(vmx->guest_msrs); + kvm_vcpu_uninit(vcpu); +diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c +index 8b93e634af84..ae97f24a4371 100644 +--- a/arch/x86/pci/intel_mid_pci.c ++++ b/arch/x86/pci/intel_mid_pci.c +@@ -37,6 +37,7 @@ + + /* Quirks for the listed devices */ + #define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190 ++#define PCI_DEVICE_ID_INTEL_MRFL_HSU 0x1191 + + /* Fixed BAR fields */ + #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */ +@@ -225,13 +226,20 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) + /* Special treatment for IRQ0 */ + if (dev->irq == 0) { + /* ++ * Skip HS UART common registers device since it has ++ * IRQ0 assigned and not used by the kernel. ++ */ ++ if (dev->device == PCI_DEVICE_ID_INTEL_MRFL_HSU) ++ return -EBUSY; ++ /* + * TNG has IRQ0 assigned to eMMC controller. But there + * are also other devices with bogus PCI configuration + * that have IRQ0 assigned. This check ensures that +- * eMMC gets it. ++ * eMMC gets it. The rest of devices still could be ++ * enabled without interrupt line being allocated. 
+ */ + if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC) +- return -EBUSY; ++ return 0; + } + break; + default: +diff --git a/block/bio.c b/block/bio.c +index d4d144363250..46e2cc1d4016 100644 +--- a/block/bio.c ++++ b/block/bio.c +@@ -584,6 +584,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) + bio->bi_rw = bio_src->bi_rw; + bio->bi_iter = bio_src->bi_iter; + bio->bi_io_vec = bio_src->bi_io_vec; ++ ++ bio_clone_blkcg_association(bio, bio_src); + } + EXPORT_SYMBOL(__bio_clone_fast); + +@@ -689,6 +691,8 @@ integrity_clone: + } + } + ++ bio_clone_blkcg_association(bio, bio_src); ++ + return bio; + } + EXPORT_SYMBOL(bio_clone_bioset); +@@ -2014,6 +2018,17 @@ void bio_disassociate_task(struct bio *bio) + } + } + ++/** ++ * bio_clone_blkcg_association - clone blkcg association from src to dst bio ++ * @dst: destination bio ++ * @src: source bio ++ */ ++void bio_clone_blkcg_association(struct bio *dst, struct bio *src) ++{ ++ if (src->bi_css) ++ WARN_ON(bio_associate_blkcg(dst, src->bi_css)); ++} ++ + #endif /* CONFIG_BLK_CGROUP */ + + static void __init biovec_init_slabs(void) +diff --git a/block/genhd.c b/block/genhd.c +index d2a1d43bf9fa..a5bed6bc869d 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -612,7 +612,7 @@ void add_disk(struct gendisk *disk) + + /* Register BDI before referencing it from bdev */ + bdi = &disk->queue->backing_dev_info; +- bdi_register_dev(bdi, disk_devt(disk)); ++ bdi_register_owner(bdi, disk_to_dev(disk)); + + blk_register_region(disk_devt(disk), disk->minors, NULL, + exact_match, exact_lock, disk); +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index b420fb46669d..43f20328f830 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -101,6 +101,7 @@ enum ec_command { + #define ACPI_EC_UDELAY_POLL 550 /* Wait 1ms for EC transaction polling */ + #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query + * when trying to clear the EC */ ++#define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel 
queries */ + + enum { + EC_FLAGS_QUERY_PENDING, /* Query is pending */ +@@ -121,6 +122,10 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; + module_param(ec_delay, uint, 0644); + MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes"); + ++static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES; ++module_param(ec_max_queries, uint, 0644); ++MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations"); ++ + static bool ec_busy_polling __read_mostly; + module_param(ec_busy_polling, bool, 0644); + MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction"); +@@ -174,6 +179,7 @@ static void acpi_ec_event_processor(struct work_struct *work); + + struct acpi_ec *boot_ec, *first_ec; + EXPORT_SYMBOL(first_ec); ++static struct workqueue_struct *ec_query_wq; + + static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ + static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ +@@ -1097,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) + * work queue execution. 
+ */ + ec_dbg_evt("Query(0x%02x) scheduled", value); +- if (!schedule_work(&q->work)) { ++ if (!queue_work(ec_query_wq, &q->work)) { + ec_dbg_evt("Query(0x%02x) overlapped", value); + result = -EBUSY; + } +@@ -1657,15 +1663,41 @@ static struct acpi_driver acpi_ec_driver = { + }, + }; + ++static inline int acpi_ec_query_init(void) ++{ ++ if (!ec_query_wq) { ++ ec_query_wq = alloc_workqueue("kec_query", 0, ++ ec_max_queries); ++ if (!ec_query_wq) ++ return -ENODEV; ++ } ++ return 0; ++} ++ ++static inline void acpi_ec_query_exit(void) ++{ ++ if (ec_query_wq) { ++ destroy_workqueue(ec_query_wq); ++ ec_query_wq = NULL; ++ } ++} ++ + int __init acpi_ec_init(void) + { +- int result = 0; ++ int result; + ++ /* register workqueue for _Qxx evaluations */ ++ result = acpi_ec_query_init(); ++ if (result) ++ goto err_exit; + /* Now register the driver for the EC */ + result = acpi_bus_register_driver(&acpi_ec_driver); +- if (result < 0) +- return -ENODEV; ++ if (result) ++ goto err_exit; + ++err_exit: ++ if (result) ++ acpi_ec_query_exit(); + return result; + } + +@@ -1675,5 +1707,6 @@ static void __exit acpi_ec_exit(void) + { + + acpi_bus_unregister_driver(&acpi_ec_driver); ++ acpi_ec_query_exit(); + } + #endif /* 0 */ +diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c +index 4a414a5a3165..b9065506a847 100644 +--- a/drivers/bluetooth/hci_intel.c ++++ b/drivers/bluetooth/hci_intel.c +@@ -1234,8 +1234,7 @@ static int intel_probe(struct platform_device *pdev) + + idev->pdev = pdev; + +- idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset", +- GPIOD_OUT_LOW); ++ idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(idev->reset)) { + dev_err(&pdev->dev, "Unable to retrieve gpio\n"); + return PTR_ERR(idev->reset); +@@ -1247,8 +1246,7 @@ static int intel_probe(struct platform_device *pdev) + + dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n"); + +- host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake", +- 
GPIOD_IN); ++ host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN); + if (IS_ERR(host_wake)) { + dev_err(&pdev->dev, "Unable to retrieve IRQ\n"); + goto no_irq; +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 0227b0465b40..491a4dce13fe 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -948,6 +948,7 @@ void add_interrupt_randomness(int irq, int irq_flags) + /* award one bit for the contents of the fast pool */ + credit_entropy_bits(r, credit + 1); + } ++EXPORT_SYMBOL_GPL(add_interrupt_randomness); + + #ifdef CONFIG_BLOCK + void add_disk_randomness(struct gendisk *disk) +@@ -1460,12 +1461,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) + static ssize_t + urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) + { ++ static int maxwarn = 10; + int ret; + +- if (unlikely(nonblocking_pool.initialized == 0)) +- printk_once(KERN_NOTICE "random: %s urandom read " +- "with %d bits of entropy available\n", +- current->comm, nonblocking_pool.entropy_total); ++ if (unlikely(nonblocking_pool.initialized == 0) && ++ maxwarn > 0) { ++ maxwarn--; ++ printk(KERN_NOTICE "random: %s: uninitialized urandom read " ++ "(%zd bytes read, %d bits of entropy available)\n", ++ current->comm, nbytes, nonblocking_pool.entropy_total); ++ } + + nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); + ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); +@@ -1847,12 +1852,18 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, + { + struct entropy_store *poolp = &input_pool; + +- /* Suspend writing if we're above the trickle threshold. +- * We'll be woken up again once below random_write_wakeup_thresh, +- * or when the calling thread is about to terminate. 
+- */ +- wait_event_interruptible(random_write_wait, kthread_should_stop() || ++ if (unlikely(nonblocking_pool.initialized == 0)) ++ poolp = &nonblocking_pool; ++ else { ++ /* Suspend writing if we're above the trickle ++ * threshold. We'll be woken up again once below ++ * random_write_wakeup_thresh, or when the calling ++ * thread is about to terminate. ++ */ ++ wait_event_interruptible(random_write_wait, ++ kthread_should_stop() || + ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); ++ } + mix_pool_bytes(poolp, buffer, count); + credit_entropy_bits(poolp, entropy); + } +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index f53b02a6bc05..6e80e4298274 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -662,7 +662,7 @@ static int core_get_max_pstate(void) + if (err) + goto skip_tar; + +- tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl; ++ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3); + err = rdmsrl_safe(tdp_msr, &tdp_ratio); + if (err) + goto skip_tar; +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c +index 58aed67b7eba..3c8f19f5ac81 100644 +--- a/drivers/edac/edac_mc_sysfs.c ++++ b/drivers/edac/edac_mc_sysfs.c +@@ -313,7 +313,6 @@ static struct device_type csrow_attr_type = { + * possible dynamic channel DIMM Label attribute files + * + */ +- + DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 0); + DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR, +@@ -326,6 +325,10 @@ DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 4); + DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 5); ++DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 6); ++DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 7); + + /* Total 
possible dynamic DIMM Label attribute file table */ + static struct attribute *dynamic_csrow_dimm_attr[] = { +@@ -335,6 +338,8 @@ static struct attribute *dynamic_csrow_dimm_attr[] = { + &dev_attr_legacy_ch3_dimm_label.attr.attr, + &dev_attr_legacy_ch4_dimm_label.attr.attr, + &dev_attr_legacy_ch5_dimm_label.attr.attr, ++ &dev_attr_legacy_ch6_dimm_label.attr.attr, ++ &dev_attr_legacy_ch7_dimm_label.attr.attr, + NULL + }; + +@@ -351,6 +356,10 @@ DEVICE_CHANNEL(ch4_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 4); + DEVICE_CHANNEL(ch5_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 5); ++DEVICE_CHANNEL(ch6_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 6); ++DEVICE_CHANNEL(ch7_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 7); + + /* Total possible dynamic ce_count attribute file table */ + static struct attribute *dynamic_csrow_ce_count_attr[] = { +@@ -360,6 +369,8 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = { + &dev_attr_legacy_ch3_ce_count.attr.attr, + &dev_attr_legacy_ch4_ce_count.attr.attr, + &dev_attr_legacy_ch5_ce_count.attr.attr, ++ &dev_attr_legacy_ch6_ce_count.attr.attr, ++ &dev_attr_legacy_ch7_ce_count.attr.attr, + NULL + }; + +@@ -371,9 +382,16 @@ static umode_t csrow_dev_is_visible(struct kobject *kobj, + + if (idx >= csrow->nr_channels) + return 0; ++ ++ if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) { ++ WARN_ONCE(1, "idx: %d\n", idx); ++ return 0; ++ } ++ + /* Only expose populated DIMMs */ + if (!csrow->channels[idx]->dimm->nr_pages) + return 0; ++ + return attr->mode; + } + +diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c +index 70097472b02c..c50e930d97d3 100644 +--- a/drivers/gpio/gpio-intel-mid.c ++++ b/drivers/gpio/gpio-intel-mid.c +@@ -17,7 +17,6 @@ + * Moorestown platform Langwell chip. + * Medfield platform Penwell chip. + * Clovertrail platform Cloverview chip. +- * Merrifield platform Tangier chip. 
+ */ + + #include +@@ -64,10 +63,6 @@ enum GPIO_REG { + /* intel_mid gpio driver data */ + struct intel_mid_gpio_ddata { + u16 ngpio; /* number of gpio pins */ +- u32 gplr_offset; /* offset of first GPLR register from base */ +- u32 flis_base; /* base address of FLIS registers */ +- u32 flis_len; /* length of FLIS registers */ +- u32 (*get_flis_offset)(int gpio); + u32 chip_irq_type; /* chip interrupt type */ + }; + +@@ -257,15 +252,6 @@ static const struct intel_mid_gpio_ddata gpio_cloverview_core = { + .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE, + }; + +-static const struct intel_mid_gpio_ddata gpio_tangier = { +- .ngpio = 192, +- .gplr_offset = 4, +- .flis_base = 0xff0c0000, +- .flis_len = 0x8000, +- .get_flis_offset = NULL, +- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE, +-}; +- + static const struct pci_device_id intel_gpio_ids[] = { + { + /* Lincroft */ +@@ -292,11 +278,6 @@ static const struct pci_device_id intel_gpio_ids[] = { + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7), + .driver_data = (kernel_ulong_t)&gpio_cloverview_core, + }, +- { +- /* Tangier */ +- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199), +- .driver_data = (kernel_ulong_t)&gpio_tangier, +- }, + { 0 } + }; + MODULE_DEVICE_TABLE(pci, intel_gpio_ids); +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c +index 2d4892cc70fb..c844d7eccb6c 100644 +--- a/drivers/gpio/gpio-pca953x.c ++++ b/drivers/gpio/gpio-pca953x.c +@@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids); + #define MAX_BANK 5 + #define BANK_SZ 8 + +-#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ) ++#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ) + + struct pca953x_chip { + unsigned gpio_start; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +index 9416e0f5c1db..0aaa457a1710 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +@@ -566,28 +566,19 @@ int 
amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) + le16_to_cpu(firmware_info->info.usReferenceClock); + ppll->reference_div = 0; + +- if (crev < 2) +- ppll->pll_out_min = +- le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); +- else +- ppll->pll_out_min = +- le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); ++ ppll->pll_out_min = ++ le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); + ppll->pll_out_max = + le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); + +- if (crev >= 4) { +- ppll->lcd_pll_out_min = +- le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; +- if (ppll->lcd_pll_out_min == 0) +- ppll->lcd_pll_out_min = ppll->pll_out_min; +- ppll->lcd_pll_out_max = +- le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; +- if (ppll->lcd_pll_out_max == 0) +- ppll->lcd_pll_out_max = ppll->pll_out_max; +- } else { ++ ppll->lcd_pll_out_min = ++ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; ++ if (ppll->lcd_pll_out_min == 0) + ppll->lcd_pll_out_min = ppll->pll_out_min; ++ ppll->lcd_pll_out_max = ++ le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; ++ if (ppll->lcd_pll_out_max == 0) + ppll->lcd_pll_out_max = ppll->pll_out_max; +- } + + if (ppll->pll_out_min == 0) + ppll->pll_out_min = 64800; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +index 5a8fbadbd27b..29adbbe225c4 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include "amdgpu_acpi.h" + +@@ -256,6 +257,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state) + if (!info) + return -EIO; + kfree(info); ++ ++ /* 200ms delay is required after off */ ++ if (state == 0) ++ msleep(200); + } + return 0; + } +diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +index 7ef2c13921b4..930083336968 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +@@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev, + DRM_MODE_SCALE_NONE); + /* no HPD on analog connectors */ + amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE; +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; + break; +@@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev, + } + + if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) { +- if (i2c_bus->valid) +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; ++ if (i2c_bus->valid) { ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT | ++ DRM_CONNECTOR_POLL_DISCONNECT; ++ } + } else + connector->polled = DRM_CONNECTOR_POLL_HPD; + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index c961fe093e12..16302f7d59f6 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -1793,7 +1793,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) + } + + drm_kms_helper_poll_enable(dev); ++ ++ /* ++ * Most of the connector probing functions try to acquire runtime pm ++ * refs to ensure that the GPU is powered on when connector polling is ++ * performed. Since we're calling this from a runtime PM callback, ++ * trying to acquire rpm refs will cause us to deadlock. ++ * ++ * Since we're guaranteed to be holding the rpm lock, it's safe to ++ * temporarily disable the rpm helpers so this doesn't deadlock us. 
++ */ ++#ifdef CONFIG_PM ++ dev->dev->power.disable_depth++; ++#endif + drm_helper_hpd_irq_event(dev); ++#ifdef CONFIG_PM ++ dev->dev->power.disable_depth--; ++#endif + + if (fbcon) { + amdgpu_fbdev_set_suspend(adev, 0); +diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +index 1cd6de575305..542517d4e584 100644 +--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c ++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + if (dig->backlight_level == 0) + amdgpu_atombios_encoder_setup_dig_transmitter(encoder, + ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +index ea87033bfaf6..df17fababbd6 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +@@ -167,6 +167,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) + break; + case CHIP_KAVERI: + case CHIP_KABINI: ++ case CHIP_MULLINS: + return 0; + default: BUG(); + } +diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c +index 6743ff7dccfa..7f4a6c550319 100644 +--- a/drivers/gpu/drm/drm_cache.c ++++ b/drivers/gpu/drm/drm_cache.c +@@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length) + mb(); + for (; addr < end; addr += size) + clflushopt(addr); ++ clflushopt(end - 1); /* force serialisation */ + mb(); + return; + } +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index d5d2c03fd136..8c9ac021608f 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -73,6 +73,8 @@ + #define EDID_QUIRK_FORCE_8BPC (1 << 8) + /* Force 12bpc */ + #define EDID_QUIRK_FORCE_12BPC (1 << 
9) ++/* Force 6bpc */ ++#define EDID_QUIRK_FORCE_6BPC (1 << 10) + + struct detailed_mode_closure { + struct drm_connector *connector; +@@ -99,6 +101,9 @@ static struct edid_quirk { + /* Unknown Acer */ + { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, + ++ /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ ++ { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, ++ + /* Belinea 10 15 55 */ + { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, + { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, +@@ -3820,6 +3825,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) + + drm_add_display_info(edid, &connector->display_info, connector); + ++ if (quirks & EDID_QUIRK_FORCE_6BPC) ++ connector->display_info.bpc = 6; ++ + if (quirks & EDID_QUIRK_FORCE_8BPC) + connector->display_info.bpc = 8; + +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index c41bc42b6fa7..3292495ee10f 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -11952,21 +11952,11 @@ connected_sink_compute_bpp(struct intel_connector *connector, + pipe_config->pipe_bpp = connector->base.display_info.bpc*3; + } + +- /* Clamp bpp to default limit on screens without EDID 1.4 */ +- if (connector->base.display_info.bpc == 0) { +- int type = connector->base.connector_type; +- int clamp_bpp = 24; +- +- /* Fall back to 18 bpp when DP sink capability is unknown. 
*/ +- if (type == DRM_MODE_CONNECTOR_DisplayPort || +- type == DRM_MODE_CONNECTOR_eDP) +- clamp_bpp = 18; +- +- if (bpp > clamp_bpp) { +- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", +- bpp, clamp_bpp); +- pipe_config->pipe_bpp = clamp_bpp; +- } ++ /* Clamp bpp to 8 on screens without EDID 1.4 */ ++ if (connector->base.display_info.bpc == 0 && bpp > 24) { ++ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", ++ bpp); ++ pipe_config->pipe_bpp = 24; + } + } + +diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c +index 6dc13c02c28e..e362a30776fa 100644 +--- a/drivers/gpu/drm/i915/intel_opregion.c ++++ b/drivers/gpu/drm/i915/intel_opregion.c +@@ -682,7 +682,7 @@ static void intel_didl_outputs(struct drm_device *dev) + } + + if (!acpi_video_bus) { +- DRM_ERROR("No ACPI video bus found\n"); ++ DRM_DEBUG_KMS("No ACPI video bus found\n"); + return; + } + +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index eb434881ddbc..1e851e037c29 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -4526,7 +4526,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) + else + gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + dev_priv->rps.last_adj = 0; +- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); ++ I915_WRITE(GEN6_PMINTRMSK, ++ gen6_sanitize_rps_pm_mask(dev_priv, ~0)); + } + mutex_unlock(&dev_priv->rps.hw_lock); + +diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c +index 1d3ee5179ab8..d236fc7c425b 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c +@@ -308,7 +308,16 @@ static int nouveau_drm_probe(struct pci_dev *pdev, + bool boot = false; + int ret; + +- /* remove conflicting drivers (vesafb, efifb etc) */ ++ /* We need to check that the chipset is supported before booting ++ * fbdev off the hardware, as there's no way to put it back. 
++ */ ++ ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device); ++ if (ret) ++ return ret; ++ ++ nvkm_device_del(&device); ++ ++ /* Remove conflicting drivers (vesafb, efifb etc). */ + aper = alloc_apertures(3); + if (!aper) + return -ENOMEM; +diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c +index 8f715feadf56..f90568327468 100644 +--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c +@@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + ((image->dx + image->width) & 0xffff)); + OUT_RING(chan, bg); + OUT_RING(chan, fg); +- OUT_RING(chan, (image->height << 16) | image->width); ++ OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8)); + OUT_RING(chan, (image->height << 16) | image->width); + OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); + +- dsize = ALIGN(image->width * image->height, 32) >> 5; ++ dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; + while (dsize) { + int iter_len = dsize > 128 ? 128 : dsize; + +diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c +index a4e259a00430..c8e096533f60 100644 +--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c +@@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + OUT_RING(chan, 0); + OUT_RING(chan, image->dy); + +- dwords = ALIGN(image->width * image->height, 32) >> 5; ++ dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; + while (dwords) { + int push = dwords > 2047 ? 
2047 : dwords; + +diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c +index f28315e865a5..22d32578dafd 100644 +--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c +@@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + OUT_RING (chan, 0); + OUT_RING (chan, image->dy); + +- dwords = ALIGN(image->width * image->height, 32) >> 5; ++ dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; + while (dwords) { + int push = dwords > 2047 ? 2047 : dwords; + +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c +index 69de8c6259fe..f1e15a4d4f64 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c +@@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, + nvkm_wo32(chan->inst, i, 0x00040004); + for (i = 0x1f18; i <= 0x3088 ; i += 16) { + nvkm_wo32(chan->inst, i + 0, 0x10700ff9); +- nvkm_wo32(chan->inst, i + 1, 0x0436086c); +- nvkm_wo32(chan->inst, i + 2, 0x000c001b); ++ nvkm_wo32(chan->inst, i + 4, 0x0436086c); ++ nvkm_wo32(chan->inst, i + 8, 0x000c001b); + } + for (i = 0x30b8; i < 0x30c8; i += 4) + nvkm_wo32(chan->inst, i, 0x0000ffff); +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c +index 2207dac23981..300f5ed5de0b 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c +@@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, + nvkm_wo32(chan->inst, i, 0x00040004); + for (i = 0x15ac; i <= 0x271c ; i += 16) { + nvkm_wo32(chan->inst, i + 0, 0x10700ff9); +- nvkm_wo32(chan->inst, i + 1, 0x0436086c); +- nvkm_wo32(chan->inst, i + 2, 0x000c001b); ++ nvkm_wo32(chan->inst, i + 4, 0x0436086c); ++ nvkm_wo32(chan->inst, i + 8, 0x000c001b); + } + for (i = 0x274c; i < 0x275c; i 
+= 4) + nvkm_wo32(chan->inst, i, 0x0000ffff); +diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c +index 0b04b9282f56..d4ac8c837314 100644 +--- a/drivers/gpu/drm/radeon/atombios_encoders.c ++++ b/drivers/gpu/drm/radeon/atombios_encoders.c +@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level) + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: ++ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + if (dig->backlight_level == 0) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); + else { +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index de9a2ffcf5f7..0c5b3eeff82d 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) + le16_to_cpu(firmware_info->info.usReferenceClock); + p1pll->reference_div = 0; + +- if (crev < 2) ++ if ((frev < 2) && (crev < 2)) + p1pll->pll_out_min = + le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); + else +@@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) + p1pll->pll_out_max = + le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); + +- if (crev >= 4) { ++ if (((frev < 2) && (crev >= 4)) || (frev >= 2)) { + p1pll->lcd_pll_out_min = + le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; + if (p1pll->lcd_pll_out_min == 0) +diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c +index c4b4f298a283..69ce95571136 100644 +--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c ++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include "radeon_acpi.h" + +@@ -255,6 +256,10 @@ static int 
radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state) + if (!info) + return -EIO; + kfree(info); ++ ++ /* 200ms delay is required after off */ ++ if (state == 0) ++ msleep(200); + } + return 0; + } +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index 9cfc1c3e1965..30f00748ed37 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -2058,7 +2058,6 @@ radeon_add_atom_connector(struct drm_device *dev, + RADEON_OUTPUT_CSC_BYPASS); + /* no HPD on analog connectors */ + radeon_connector->hpd.hpd = RADEON_HPD_NONE; +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; + break; +@@ -2308,8 +2307,10 @@ radeon_add_atom_connector(struct drm_device *dev, + } + + if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { +- if (i2c_bus->valid) +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; ++ if (i2c_bus->valid) { ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT | ++ DRM_CONNECTOR_POLL_DISCONNECT; ++ } + } else + connector->polled = DRM_CONNECTOR_POLL_HPD; + +@@ -2385,7 +2386,6 @@ radeon_add_legacy_connector(struct drm_device *dev, + 1); + /* no HPD on analog connectors */ + radeon_connector->hpd.hpd = RADEON_HPD_NONE; +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; + break; +@@ -2470,10 +2470,13 @@ radeon_add_legacy_connector(struct drm_device *dev, + } + + if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { +- if (i2c_bus->valid) +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; ++ if (i2c_bus->valid) { ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT | ++ DRM_CONNECTOR_POLL_DISCONNECT; ++ } + } else + connector->polled = DRM_CONNECTOR_POLL_HPD; ++ + connector->display_info.subpixel_order = subpixel_order; + drm_connector_register(connector); + } +diff --git a/drivers/hid/uhid.c 
b/drivers/hid/uhid.c +index e094c572b86e..1a2032c2c1fb 100644 +--- a/drivers/hid/uhid.c ++++ b/drivers/hid/uhid.c +@@ -51,10 +51,26 @@ struct uhid_device { + u32 report_id; + u32 report_type; + struct uhid_event report_buf; ++ struct work_struct worker; + }; + + static struct miscdevice uhid_misc; + ++static void uhid_device_add_worker(struct work_struct *work) ++{ ++ struct uhid_device *uhid = container_of(work, struct uhid_device, worker); ++ int ret; ++ ++ ret = hid_add_device(uhid->hid); ++ if (ret) { ++ hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); ++ ++ hid_destroy_device(uhid->hid); ++ uhid->hid = NULL; ++ uhid->running = false; ++ } ++} ++ + static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev) + { + __u8 newhead; +@@ -498,18 +514,14 @@ static int uhid_dev_create2(struct uhid_device *uhid, + uhid->hid = hid; + uhid->running = true; + +- ret = hid_add_device(hid); +- if (ret) { +- hid_err(hid, "Cannot register HID device\n"); +- goto err_hid; +- } ++ /* Adding of a HID device is done through a worker, to allow HID drivers ++ * which use feature requests during .probe to work, without they would ++ * be blocked on devlock, which is held by uhid_char_write. 
++ */ ++ schedule_work(&uhid->worker); + + return 0; + +-err_hid: +- hid_destroy_device(hid); +- uhid->hid = NULL; +- uhid->running = false; + err_free: + kfree(uhid->rd_data); + uhid->rd_data = NULL; +@@ -550,6 +562,8 @@ static int uhid_dev_destroy(struct uhid_device *uhid) + uhid->running = false; + wake_up_interruptible(&uhid->report_wait); + ++ cancel_work_sync(&uhid->worker); ++ + hid_destroy_device(uhid->hid); + kfree(uhid->rd_data); + +@@ -612,6 +626,7 @@ static int uhid_char_open(struct inode *inode, struct file *file) + init_waitqueue_head(&uhid->waitq); + init_waitqueue_head(&uhid->report_wait); + uhid->running = false; ++ INIT_WORK(&uhid->worker, uhid_device_add_worker); + + file->private_data = uhid; + nonseekable_open(inode, file); +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index f19b6f7a467a..9b5440f6b3b4 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -41,6 +41,7 @@ + #include + #include + #include ++#include + #include "hyperv_vmbus.h" + + static struct acpi_device *hv_acpi_dev; +@@ -826,6 +827,8 @@ static void vmbus_isr(void) + else + tasklet_schedule(&msg_dpc); + } ++ ++ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); + } + + +diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c +index 8eff62738877..e253598d764c 100644 +--- a/drivers/i2c/busses/i2c-efm32.c ++++ b/drivers/i2c/busses/i2c-efm32.c +@@ -433,7 +433,7 @@ static int efm32_i2c_probe(struct platform_device *pdev) + ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request irq (%d)\n", ret); +- return ret; ++ goto err_disable_clk; + } + + ret = i2c_add_adapter(&ddata->adapter); +diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c +index 5fb089e91353..fb43a242847b 100644 +--- a/drivers/infiniband/core/iwpm_util.c ++++ b/drivers/infiniband/core/iwpm_util.c +@@ -634,6 +634,7 @@ static int send_nlmsg_done(struct 
sk_buff *skb, u8 nl_client, int iwpm_pid) + if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client, + RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) { + pr_warn("%s Unable to put NLMSG_DONE\n", __func__); ++ dev_kfree_skb(skb); + return -ENOMEM; + } + nlh->nlmsg_type = NLMSG_DONE; +diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c +index a95a32ba596e..d3b7ecd106f7 100644 +--- a/drivers/infiniband/core/sa_query.c ++++ b/drivers/infiniband/core/sa_query.c +@@ -534,7 +534,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) + data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS, + RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST); + if (!data) { +- kfree_skb(skb); ++ nlmsg_free(skb); + return -EMSGSIZE; + } + +diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c +index 870e56b6b25f..05179f47bbde 100644 +--- a/drivers/infiniband/hw/mlx4/mad.c ++++ b/drivers/infiniband/hw/mlx4/mad.c +@@ -526,7 +526,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, + tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); + spin_unlock(&tun_qp->tx_lock); + if (ret) +- goto out; ++ goto end; + + tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); + if (tun_qp->tx_ring[tun_tx_ix].ah) +@@ -595,9 +595,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, + wr.wr.send_flags = IB_SEND_SIGNALED; + + ret = ib_post_send(src_qp, &wr.wr, &bad_wr); +-out: +- if (ret) +- ib_destroy_ah(ah); ++ if (!ret) ++ return 0; ++ out: ++ spin_lock(&tun_qp->tx_lock); ++ tun_qp->tx_ix_tail++; ++ spin_unlock(&tun_qp->tx_lock); ++ tun_qp->tx_ring[tun_tx_ix].ah = NULL; ++end: ++ ib_destroy_ah(ah); + return ret; + } + +@@ -1278,9 +1284,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, + + + ret = ib_post_send(send_qp, &wr.wr, &bad_wr); ++ if (!ret) ++ return 0; ++ ++ spin_lock(&sqp->tx_lock); ++ sqp->tx_ix_tail++; ++ spin_unlock(&sqp->tx_lock); ++ 
sqp->tx_ring[wire_tx_ix].ah = NULL; + out: +- if (ret) +- ib_destroy_ah(ah); ++ ib_destroy_ah(ah); + return ret; + } + +diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c +index 13eaaf45288f..ea1e2ddaddf5 100644 +--- a/drivers/infiniband/hw/mlx4/qp.c ++++ b/drivers/infiniband/hw/mlx4/qp.c +@@ -357,7 +357,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) + sizeof (struct mlx4_wqe_raddr_seg); + case MLX4_IB_QPT_RC: + return sizeof (struct mlx4_wqe_ctrl_seg) + +- sizeof (struct mlx4_wqe_atomic_seg) + ++ sizeof (struct mlx4_wqe_masked_atomic_seg) + + sizeof (struct mlx4_wqe_raddr_seg); + case MLX4_IB_QPT_SMI: + case MLX4_IB_QPT_GSI: +@@ -1162,8 +1162,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, + { + err = create_qp_common(to_mdev(pd->device), pd, init_attr, + udata, 0, &qp, gfp); +- if (err) ++ if (err) { ++ kfree(qp); + return ERR_PTR(err); ++ } + + qp->ibqp.qp_num = qp->mqp.qpn; + qp->xrcdn = xrcdn; +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c +index 92ddae101ecc..8184267c7901 100644 +--- a/drivers/infiniband/hw/mlx5/cq.c ++++ b/drivers/infiniband/hw/mlx5/cq.c +@@ -763,7 +763,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, + if (attr->flags) + return ERR_PTR(-EINVAL); + +- if (entries < 0) ++ if (entries < 0 || ++ (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))) + return ERR_PTR(-EINVAL); + + entries = roundup_pow_of_two(entries + 1); +@@ -1094,11 +1095,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) + return -ENOSYS; + } + +- if (entries < 1) ++ if (entries < 1 || ++ entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) { ++ mlx5_ib_warn(dev, "wrong entries number %d, max %d\n", ++ entries, ++ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)); + return -EINVAL; ++ } + + entries = roundup_pow_of_two(entries + 1); +- if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) ++ if (entries > (1 << 
MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) + return -EINVAL; + + if (entries == ibcq->cqe + 1) +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c +index fd17443aeacd..bfc940ff9c8a 100644 +--- a/drivers/infiniband/hw/mlx5/main.c ++++ b/drivers/infiniband/hw/mlx5/main.c +@@ -962,14 +962,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, + break; + + case MLX5_DEV_EVENT_PORT_DOWN: ++ case MLX5_DEV_EVENT_PORT_INITIALIZED: + ibev.event = IB_EVENT_PORT_ERR; + port = (u8)param; + break; + +- case MLX5_DEV_EVENT_PORT_INITIALIZED: +- /* not used by ULPs */ +- return; +- + case MLX5_DEV_EVENT_LID_CHANGE: + ibev.event = IB_EVENT_LID_CHANGE; + port = (u8)param; +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c +index 307bdbca8938..cfcfbb6b84d7 100644 +--- a/drivers/infiniband/hw/mlx5/qp.c ++++ b/drivers/infiniband/hw/mlx5/qp.c +@@ -226,6 +226,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, + qp->rq.max_gs = 0; + qp->rq.wqe_cnt = 0; + qp->rq.wqe_shift = 0; ++ cap->max_recv_wr = 0; ++ cap->max_recv_sge = 0; + } else { + if (ucmd) { + qp->rq.wqe_cnt = ucmd->rq_wqe_count; +@@ -2525,10 +2527,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr) + return MLX5_FENCE_MODE_SMALL_AND_FENCE; + else + return fence; +- +- } else { +- return 0; ++ } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { ++ return MLX5_FENCE_MODE_FENCE; + } ++ ++ return 0; + } + + static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, +@@ -3092,17 +3095,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr + qp_attr->cap.max_recv_sge = qp->rq.max_gs; + + if (!ibqp->uobject) { +- qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; ++ qp_attr->cap.max_send_wr = qp->sq.max_post; + qp_attr->cap.max_send_sge = qp->sq.max_gs; ++ qp_init_attr->qp_context = ibqp->qp_context; + } else { + qp_attr->cap.max_send_wr = 0; + qp_attr->cap.max_send_sge = 0; + } + +- /* We don't 
support inline sends for kernel QPs (yet), and we +- * don't know what userspace's value should be. +- */ +- qp_attr->cap.max_inline_data = 0; ++ qp_init_attr->qp_type = ibqp->qp_type; ++ qp_init_attr->recv_cq = ibqp->recv_cq; ++ qp_init_attr->send_cq = ibqp->send_cq; ++ qp_init_attr->srq = ibqp->srq; ++ qp_attr->cap.max_inline_data = qp->max_inline_data; + + qp_init_attr->cap = qp_attr->cap; + +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c +index 7d3281866ffc..942dffca6a9d 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c +@@ -1131,7 +1131,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) + neigh = NULL; + goto out_unlock; + } +- neigh->alive = jiffies; ++ ++ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)) ++ neigh->alive = jiffies; + goto out_unlock; + } + } +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index 2f589857a039..d15b33813021 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -4,7 +4,8 @@ + * Copyright (c) 2013 ELAN Microelectronics Corp. + * + * Author: 林政維 (Duson Lin) +- * Version: 1.6.0 ++ * Author: KT Liao ++ * Version: 1.6.2 + * + * Based on cyapa driver: + * copyright (c) 2011-2012 Cypress Semiconductor, Inc. 
+@@ -40,7 +41,7 @@ + #include "elan_i2c.h" + + #define DRIVER_NAME "elan_i2c" +-#define ELAN_DRIVER_VERSION "1.6.1" ++#define ELAN_DRIVER_VERSION "1.6.2" + #define ELAN_VENDOR_ID 0x04f3 + #define ETP_MAX_PRESSURE 255 + #define ETP_FWIDTH_REDUCE 90 +@@ -199,9 +200,41 @@ static int elan_sleep(struct elan_tp_data *data) + return error; + } + ++static int elan_query_product(struct elan_tp_data *data) ++{ ++ int error; ++ ++ error = data->ops->get_product_id(data->client, &data->product_id); ++ if (error) ++ return error; ++ ++ error = data->ops->get_sm_version(data->client, &data->ic_type, ++ &data->sm_version); ++ if (error) ++ return error; ++ ++ return 0; ++} ++ ++static int elan_check_ASUS_special_fw(struct elan_tp_data *data) ++{ ++ if (data->ic_type != 0x0E) ++ return false; ++ ++ switch (data->product_id) { ++ case 0x05 ... 0x07: ++ case 0x09: ++ case 0x13: ++ return true; ++ default: ++ return false; ++ } ++} ++ + static int __elan_initialize(struct elan_tp_data *data) + { + struct i2c_client *client = data->client; ++ bool woken_up = false; + int error; + + error = data->ops->initialize(client); +@@ -210,6 +243,27 @@ static int __elan_initialize(struct elan_tp_data *data) + return error; + } + ++ error = elan_query_product(data); ++ if (error) ++ return error; ++ ++ /* ++ * Some ASUS devices were shipped with firmware that requires ++ * touchpads to be woken up first, before attempting to switch ++ * them into absolute reporting mode. 
++ */ ++ if (elan_check_ASUS_special_fw(data)) { ++ error = data->ops->sleep_control(client, false); ++ if (error) { ++ dev_err(&client->dev, ++ "failed to wake device up: %d\n", error); ++ return error; ++ } ++ ++ msleep(200); ++ woken_up = true; ++ } ++ + data->mode |= ETP_ENABLE_ABS; + error = data->ops->set_mode(client, data->mode); + if (error) { +@@ -218,11 +272,13 @@ static int __elan_initialize(struct elan_tp_data *data) + return error; + } + +- error = data->ops->sleep_control(client, false); +- if (error) { +- dev_err(&client->dev, +- "failed to wake device up: %d\n", error); +- return error; ++ if (!woken_up) { ++ error = data->ops->sleep_control(client, false); ++ if (error) { ++ dev_err(&client->dev, ++ "failed to wake device up: %d\n", error); ++ return error; ++ } + } + + return 0; +@@ -248,10 +304,6 @@ static int elan_query_device_info(struct elan_tp_data *data) + { + int error; + +- error = data->ops->get_product_id(data->client, &data->product_id); +- if (error) +- return error; +- + error = data->ops->get_version(data->client, false, &data->fw_version); + if (error) + return error; +@@ -261,11 +313,6 @@ static int elan_query_device_info(struct elan_tp_data *data) + if (error) + return error; + +- error = data->ops->get_sm_version(data->client, &data->ic_type, +- &data->sm_version); +- if (error) +- return error; +- + error = data->ops->get_version(data->client, true, &data->iap_version); + if (error) + return error; +diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c +index d214f22ed305..45b466e3bbe8 100644 +--- a/drivers/input/touchscreen/sur40.c ++++ b/drivers/input/touchscreen/sur40.c +@@ -126,7 +126,7 @@ struct sur40_image_header { + #define VIDEO_PACKET_SIZE 16384 + + /* polling interval (ms) */ +-#define POLL_INTERVAL 4 ++#define POLL_INTERVAL 1 + + /* maximum number of contacts FIXME: this is a guess? 
*/ + #define MAX_CONTACTS 64 +@@ -441,7 +441,7 @@ static void sur40_process_video(struct sur40_state *sur40) + + /* return error if streaming was stopped in the meantime */ + if (sur40->sequence == -1) +- goto err_poll; ++ return; + + /* mark as finished */ + v4l2_get_timestamp(&new_buf->vb.timestamp); +@@ -730,6 +730,7 @@ static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count) + static void sur40_stop_streaming(struct vb2_queue *vq) + { + struct sur40_state *sur40 = vb2_get_drv_priv(vq); ++ vb2_wait_for_all_buffers(vq); + sur40->sequence = -1; + + /* Release all active buffers */ +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index b9319b76a8a1..0397985a2601 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -352,9 +352,11 @@ static void init_iommu_group(struct device *dev) + if (!domain) + goto out; + +- dma_domain = to_pdomain(domain)->priv; ++ if (to_pdomain(domain)->flags == PD_DMA_OPS_MASK) { ++ dma_domain = to_pdomain(domain)->priv; ++ init_unity_mappings_for_device(dev, dma_domain); ++ } + +- init_unity_mappings_for_device(dev, dma_domain); + out: + iommu_group_put(group); + } +@@ -2322,8 +2324,15 @@ static void update_device_table(struct protection_domain *domain) + { + struct iommu_dev_data *dev_data; + +- list_for_each_entry(dev_data, &domain->dev_list, list) ++ list_for_each_entry(dev_data, &domain->dev_list, list) { + set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); ++ ++ if (dev_data->devid == dev_data->alias) ++ continue; ++ ++ /* There is an alias, update device table entry for it */ ++ set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled); ++ } + } + + static void update_domain(struct protection_domain *domain) +@@ -2970,9 +2979,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) + static void amd_iommu_domain_free(struct iommu_domain *dom) + { + struct protection_domain *domain; +- +- if (!dom) +- return; ++ struct dma_ops_domain *dma_dom; + 
+ domain = to_pdomain(dom); + +@@ -2981,13 +2988,24 @@ static void amd_iommu_domain_free(struct iommu_domain *dom) + + BUG_ON(domain->dev_cnt != 0); + +- if (domain->mode != PAGE_MODE_NONE) +- free_pagetable(domain); ++ if (!dom) ++ return; ++ ++ switch (dom->type) { ++ case IOMMU_DOMAIN_DMA: ++ dma_dom = domain->priv; ++ dma_ops_domain_free(dma_dom); ++ break; ++ default: ++ if (domain->mode != PAGE_MODE_NONE) ++ free_pagetable(domain); + +- if (domain->flags & PD_IOMMUV2_MASK) +- free_gcr3_table(domain); ++ if (domain->flags & PD_IOMMUV2_MASK) ++ free_gcr3_table(domain); + +- protection_domain_free(domain); ++ protection_domain_free(domain); ++ break; ++ } + } + + static void amd_iommu_detach_device(struct iommu_domain *dom, +diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c +index 97c41b8ab5d9..29a31eb9ace3 100644 +--- a/drivers/iommu/exynos-iommu.c ++++ b/drivers/iommu/exynos-iommu.c +@@ -647,6 +647,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = { + .name = "exynos-sysmmu", + .of_match_table = sysmmu_of_match, + .pm = &sysmmu_pm_ops, ++ .suppress_bind_attrs = true, + } + }; + +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 6763a4dfed94..24d81308a1a6 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -2032,7 +2032,7 @@ out_unlock: + spin_unlock(&iommu->lock); + spin_unlock_irqrestore(&device_domain_lock, flags); + +- return 0; ++ return ret; + } + + struct domain_context_mapping_data { +diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c +index 09e2afcafd2d..cd0a93df4cb7 100644 +--- a/drivers/md/dm-flakey.c ++++ b/drivers/md/dm-flakey.c +@@ -289,10 +289,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) + pb->bio_submitted = true; + + /* +- * Map reads as normal. ++ * Map reads as normal only if corrupt_bio_byte set. 
+ */ +- if (bio_data_dir(bio) == READ) +- goto map_bio; ++ if (bio_data_dir(bio) == READ) { ++ /* If flags were specified, only corrupt those that match. */ ++ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && ++ all_corrupt_bio_flags_match(bio, fc)) ++ goto map_bio; ++ else ++ return -EIO; ++ } + + /* + * Drop writes? +@@ -330,12 +336,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) + + /* + * Corrupt successful READs while in down state. +- * If flags were specified, only corrupt those that match. + */ +- if (fc->corrupt_bio_byte && !error && pb->bio_submitted && +- (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) && +- all_corrupt_bio_flags_match(bio, fc)) +- corrupt_bio_data(bio, fc); ++ if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { ++ if (fc->corrupt_bio_byte) ++ corrupt_bio_data(bio, fc); ++ else ++ return -EIO; ++ } + + return error; + } +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index c338aebb4ccd..a42729ebf272 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -3078,7 +3078,8 @@ static void unlock_fs(struct mapped_device *md) + * Caller must hold md->suspend_lock + */ + static int __dm_suspend(struct mapped_device *md, struct dm_table *map, +- unsigned suspend_flags, int interruptible) ++ unsigned suspend_flags, int interruptible, ++ int dmf_suspended_flag) + { + bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; + bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; +@@ -3145,6 +3146,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, + * to finish. 
+ */ + r = dm_wait_for_completion(md, interruptible); ++ if (!r) ++ set_bit(dmf_suspended_flag, &md->flags); + + if (noflush) + clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); +@@ -3206,12 +3209,10 @@ retry: + + map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); + +- r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE); ++ r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); + if (r) + goto out_unlock; + +- set_bit(DMF_SUSPENDED, &md->flags); +- + dm_table_postsuspend_targets(map); + + out_unlock: +@@ -3305,9 +3306,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla + * would require changing .presuspend to return an error -- avoid this + * until there is a need for more elaborate variants of internal suspend. + */ +- (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE); +- +- set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); ++ (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, ++ DMF_SUSPENDED_INTERNALLY); + + dm_table_postsuspend_targets(map); + } +diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c +index 1100e98a7b1d..7df7fb3738a0 100644 +--- a/drivers/media/dvb-core/dvb_ringbuffer.c ++++ b/drivers/media/dvb-core/dvb_ringbuffer.c +@@ -55,7 +55,13 @@ void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len) + + int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf) + { +- return (rbuf->pread==rbuf->pwrite); ++ /* smp_load_acquire() to load write pointer on reader side ++ * this pairs with smp_store_release() in dvb_ringbuffer_write(), ++ * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset() ++ * ++ * for memory barriers also see Documentation/circular-buffers.txt ++ */ ++ return (rbuf->pread == smp_load_acquire(&rbuf->pwrite)); + } + + +@@ -64,7 +70,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf) + { + ssize_t free; + +- free = rbuf->pread - rbuf->pwrite; ++ /* 
ACCESS_ONCE() to load read pointer on writer side ++ * this pairs with smp_store_release() in dvb_ringbuffer_read(), ++ * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(), ++ * or dvb_ringbuffer_reset() ++ */ ++ free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite; + if (free <= 0) + free += rbuf->size; + return free-1; +@@ -76,7 +87,11 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf) + { + ssize_t avail; + +- avail = rbuf->pwrite - rbuf->pread; ++ /* smp_load_acquire() to load write pointer on reader side ++ * this pairs with smp_store_release() in dvb_ringbuffer_write(), ++ * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset() ++ */ ++ avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread; + if (avail < 0) + avail += rbuf->size; + return avail; +@@ -86,14 +101,25 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf) + + void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf) + { +- rbuf->pread = rbuf->pwrite; ++ /* dvb_ringbuffer_flush() counts as read operation ++ * smp_load_acquire() to load write pointer ++ * smp_store_release() to update read pointer, this ensures that the ++ * correct pointer is visible for subsequent dvb_ringbuffer_free() ++ * calls on other cpu cores ++ */ ++ smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite)); + rbuf->error = 0; + } + EXPORT_SYMBOL(dvb_ringbuffer_flush); + + void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf) + { +- rbuf->pread = rbuf->pwrite = 0; ++ /* dvb_ringbuffer_reset() counts as read and write operation ++ * smp_store_release() to update read pointer ++ */ ++ smp_store_release(&rbuf->pread, 0); ++ /* smp_store_release() to update write pointer */ ++ smp_store_release(&rbuf->pwrite, 0); + rbuf->error = 0; + } + +@@ -119,12 +145,17 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si + return -EFAULT; + buf += split; + todo -= split; +- rbuf->pread = 0; ++ /* smp_store_release() for read pointer update to ensure ++ * that buf is not overwritten until 
read is complete, ++ * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() ++ */ ++ smp_store_release(&rbuf->pread, 0); + } + if (copy_to_user(buf, rbuf->data+rbuf->pread, todo)) + return -EFAULT; + +- rbuf->pread = (rbuf->pread + todo) % rbuf->size; ++ /* smp_store_release() to update read pointer, see above */ ++ smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size); + + return len; + } +@@ -139,11 +170,16 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len) + memcpy(buf, rbuf->data+rbuf->pread, split); + buf += split; + todo -= split; +- rbuf->pread = 0; ++ /* smp_store_release() for read pointer update to ensure ++ * that buf is not overwritten until read is complete, ++ * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() ++ */ ++ smp_store_release(&rbuf->pread, 0); + } + memcpy(buf, rbuf->data+rbuf->pread, todo); + +- rbuf->pread = (rbuf->pread + todo) % rbuf->size; ++ /* smp_store_release() to update read pointer, see above */ ++ smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size); + } + + +@@ -158,10 +194,16 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t + memcpy(rbuf->data+rbuf->pwrite, buf, split); + buf += split; + todo -= split; +- rbuf->pwrite = 0; ++ /* smp_store_release() for write pointer update to ensure that ++ * written data is visible on other cpu cores before the pointer ++ * update, this pairs with smp_load_acquire() in ++ * dvb_ringbuffer_empty() or dvb_ringbuffer_avail() ++ */ ++ smp_store_release(&rbuf->pwrite, 0); + } + memcpy(rbuf->data+rbuf->pwrite, buf, todo); +- rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size; ++ /* smp_store_release() for write pointer update, see above */ ++ smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size); + + return len; + } +@@ -181,12 +223,18 @@ ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf, + return len - todo; + buf += split; + todo -= split; +- rbuf->pwrite = 0; ++ /* 
smp_store_release() for write pointer update to ensure that ++ * written data is visible on other cpu cores before the pointer ++ * update, this pairs with smp_load_acquire() in ++ * dvb_ringbuffer_empty() or dvb_ringbuffer_avail() ++ */ ++ smp_store_release(&rbuf->pwrite, 0); + } + status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo); + if (status) + return len - todo; +- rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size; ++ /* smp_store_release() for write pointer update, see above */ ++ smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size); + + return len; + } +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c +index 3ffe2ecfd5ef..c8946f98ced4 100644 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c +@@ -1029,6 +1029,11 @@ static int match_child(struct device *dev, void *data) + return !strcmp(dev_name(dev), (char *)data); + } + ++static void s5p_mfc_memdev_release(struct device *dev) ++{ ++ dma_release_declared_memory(dev); ++} ++ + static void *mfc_get_drv_data(struct platform_device *pdev); + + static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev) +@@ -1041,6 +1046,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev) + mfc_err("Not enough memory\n"); + return -ENOMEM; + } ++ ++ dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l"); ++ dev->mem_dev_l->release = s5p_mfc_memdev_release; + device_initialize(dev->mem_dev_l); + of_property_read_u32_array(dev->plat_dev->dev.of_node, + "samsung,mfc-l", mem_info, 2); +@@ -1058,6 +1066,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev) + mfc_err("Not enough memory\n"); + return -ENOMEM; + } ++ ++ dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r"); ++ dev->mem_dev_r->release = s5p_mfc_memdev_release; + device_initialize(dev->mem_dev_r); + of_property_read_u32_array(dev->plat_dev->dev.of_node, + "samsung,mfc-r", mem_info, 2); +diff --git a/drivers/media/rc/ir-rc5-decoder.c 
b/drivers/media/rc/ir-rc5-decoder.c +index 84fa6e9b59a1..67314c034cdb 100644 +--- a/drivers/media/rc/ir-rc5-decoder.c ++++ b/drivers/media/rc/ir-rc5-decoder.c +@@ -29,7 +29,7 @@ + #define RC5_BIT_START (1 * RC5_UNIT) + #define RC5_BIT_END (1 * RC5_UNIT) + #define RC5X_SPACE (4 * RC5_UNIT) +-#define RC5_TRAILER (10 * RC5_UNIT) /* In reality, approx 100 */ ++#define RC5_TRAILER (6 * RC5_UNIT) /* In reality, approx 100 */ + + enum rc5_state { + STATE_INACTIVE, +diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c +index 78c12d22dfbb..5dab02432e82 100644 +--- a/drivers/media/usb/usbtv/usbtv-audio.c ++++ b/drivers/media/usb/usbtv/usbtv-audio.c +@@ -278,6 +278,9 @@ static void snd_usbtv_trigger(struct work_struct *work) + { + struct usbtv *chip = container_of(work, struct usbtv, snd_trigger); + ++ if (!chip->snd) ++ return; ++ + if (atomic_read(&chip->snd_stream)) + usbtv_audio_start(chip); + else +@@ -378,6 +381,8 @@ err: + + void usbtv_audio_free(struct usbtv *usbtv) + { ++ cancel_work_sync(&usbtv->snd_trigger); ++ + if (usbtv->snd && usbtv->udev) { + snd_card_free(usbtv->snd); + usbtv->snd = NULL; +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c +index 11f39791ec33..47f37683893a 100644 +--- a/drivers/media/v4l2-core/videobuf2-core.c ++++ b/drivers/media/v4l2-core/videobuf2-core.c +@@ -1505,7 +1505,7 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, + void *pb, int nonblocking) + { + unsigned long flags; +- int ret; ++ int ret = 0; + + /* + * Wait for at least one buffer to become available on the done_list. +@@ -1521,10 +1521,12 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, + spin_lock_irqsave(&q->done_lock, flags); + *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); + /* +- * Only remove the buffer from done_list if v4l2_buffer can handle all +- * the planes. 
++ * Only remove the buffer from done_list if all planes can be ++ * handled. Some cases such as V4L2 file I/O and DVB have pb ++ * == NULL; skip the check then as there's nothing to verify. + */ +- ret = call_bufop(q, verify_planes_array, *vb, pb); ++ if (pb) ++ ret = call_bufop(q, verify_planes_array, *vb, pb); + if (!ret) + list_del(&(*vb)->done_entry); + spin_unlock_irqrestore(&q->done_lock, flags); +diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c +index 502984c724ff..6c441be8f893 100644 +--- a/drivers/media/v4l2-core/videobuf2-v4l2.c ++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c +@@ -67,6 +67,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer + return 0; + } + ++static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb) ++{ ++ return __verify_planes_array(vb, pb); ++} ++ + /** + * __verify_length() - Verify that the bytesused value for each plane fits in + * the plane length and that the data offset doesn't exceed the bytesused value. 
+@@ -432,6 +437,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, + } + + static const struct vb2_buf_ops v4l2_buf_ops = { ++ .verify_planes_array = __verify_planes_array_core, + .fill_user_buffer = __fill_v4l2_buffer, + .fill_vb2_buffer = __fill_vb2_buffer, + .set_timestamp = __set_timestamp, +diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c +index 207a3bd68559..a867cc91657e 100644 +--- a/drivers/mfd/qcom_rpm.c ++++ b/drivers/mfd/qcom_rpm.c +@@ -34,7 +34,13 @@ struct qcom_rpm_resource { + struct qcom_rpm_data { + u32 version; + const struct qcom_rpm_resource *resource_table; +- unsigned n_resources; ++ unsigned int n_resources; ++ unsigned int req_ctx_off; ++ unsigned int req_sel_off; ++ unsigned int ack_ctx_off; ++ unsigned int ack_sel_off; ++ unsigned int req_sel_size; ++ unsigned int ack_sel_size; + }; + + struct qcom_rpm { +@@ -61,11 +67,7 @@ struct qcom_rpm { + + #define RPM_REQUEST_TIMEOUT (5 * HZ) + +-#define RPM_REQUEST_CONTEXT 3 +-#define RPM_REQ_SELECT 11 +-#define RPM_ACK_CONTEXT 15 +-#define RPM_ACK_SELECTOR 23 +-#define RPM_SELECT_SIZE 7 ++#define RPM_MAX_SEL_SIZE 7 + + #define RPM_NOTIFICATION BIT(30) + #define RPM_REJECTED BIT(31) +@@ -157,6 +159,12 @@ static const struct qcom_rpm_data apq8064_template = { + .version = 3, + .resource_table = apq8064_rpm_resource_table, + .n_resources = ARRAY_SIZE(apq8064_rpm_resource_table), ++ .req_ctx_off = 3, ++ .req_sel_off = 11, ++ .ack_ctx_off = 15, ++ .ack_sel_off = 23, ++ .req_sel_size = 4, ++ .ack_sel_size = 7, + }; + + static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = { +@@ -240,6 +248,12 @@ static const struct qcom_rpm_data msm8660_template = { + .version = 2, + .resource_table = msm8660_rpm_resource_table, + .n_resources = ARRAY_SIZE(msm8660_rpm_resource_table), ++ .req_ctx_off = 3, ++ .req_sel_off = 11, ++ .ack_ctx_off = 19, ++ .ack_sel_off = 27, ++ .req_sel_size = 7, ++ .ack_sel_size = 7, + }; + + static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = 
{ +@@ -322,6 +336,12 @@ static const struct qcom_rpm_data msm8960_template = { + .version = 3, + .resource_table = msm8960_rpm_resource_table, + .n_resources = ARRAY_SIZE(msm8960_rpm_resource_table), ++ .req_ctx_off = 3, ++ .req_sel_off = 11, ++ .ack_ctx_off = 15, ++ .ack_sel_off = 23, ++ .req_sel_size = 4, ++ .ack_sel_size = 7, + }; + + static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = { +@@ -362,6 +382,12 @@ static const struct qcom_rpm_data ipq806x_template = { + .version = 3, + .resource_table = ipq806x_rpm_resource_table, + .n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table), ++ .req_ctx_off = 3, ++ .req_sel_off = 11, ++ .ack_ctx_off = 15, ++ .ack_sel_off = 23, ++ .req_sel_size = 4, ++ .ack_sel_size = 7, + }; + + static const struct of_device_id qcom_rpm_of_match[] = { +@@ -380,7 +406,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm, + { + const struct qcom_rpm_resource *res; + const struct qcom_rpm_data *data = rpm->data; +- u32 sel_mask[RPM_SELECT_SIZE] = { 0 }; ++ u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 }; + int left; + int ret = 0; + int i; +@@ -398,12 +424,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm, + writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i)); + + bitmap_set((unsigned long *)sel_mask, res->select_id, 1); +- for (i = 0; i < ARRAY_SIZE(sel_mask); i++) { ++ for (i = 0; i < rpm->data->req_sel_size; i++) { + writel_relaxed(sel_mask[i], +- RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i)); ++ RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i)); + } + +- writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT)); ++ writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off)); + + reinit_completion(&rpm->ack); + regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit)); +@@ -426,10 +452,11 @@ static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev) + u32 ack; + int i; + +- ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT)); +- for (i = 0; i < RPM_SELECT_SIZE; i++) +- writel_relaxed(0, RPM_CTRL_REG(rpm, 
RPM_ACK_SELECTOR + i)); +- writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT)); ++ ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off)); ++ for (i = 0; i < rpm->data->ack_sel_size; i++) ++ writel_relaxed(0, ++ RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i)); ++ writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off)); + + if (ack & RPM_NOTIFICATION) { + dev_warn(rpm->dev, "ignoring notification!\n"); +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c +index ce7b2cab5762..54ab48827258 100644 +--- a/drivers/mtd/nand/nand_base.c ++++ b/drivers/mtd/nand/nand_base.c +@@ -2586,7 +2586,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, + int cached = writelen > bytes && page != blockmask; + uint8_t *wbuf = buf; + int use_bufpoi; +- int part_pagewr = (column || writelen < (mtd->writesize - 1)); ++ int part_pagewr = (column || writelen < mtd->writesize); + + if (part_pagewr) + use_bufpoi = 1; +diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c +index 22fd19c0c5d3..27de0463226e 100644 +--- a/drivers/mtd/ubi/build.c ++++ b/drivers/mtd/ubi/build.c +@@ -869,7 +869,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, + for (i = 0; i < UBI_MAX_DEVICES; i++) { + ubi = ubi_devices[i]; + if (ubi && mtd->index == ubi->mtd->index) { +- ubi_err(ubi, "mtd%d is already attached to ubi%d", ++ pr_err("ubi: mtd%d is already attached to ubi%d", + mtd->index, i); + return -EEXIST; + } +@@ -884,7 +884,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, + * no sense to attach emulated MTD devices, so we prohibit this. 
+ */ + if (mtd->type == MTD_UBIVOLUME) { +- ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI", ++ pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI", + mtd->index); + return -EINVAL; + } +@@ -895,7 +895,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, + if (!ubi_devices[ubi_num]) + break; + if (ubi_num == UBI_MAX_DEVICES) { +- ubi_err(ubi, "only %d UBI devices may be created", ++ pr_err("ubi: only %d UBI devices may be created", + UBI_MAX_DEVICES); + return -ENFILE; + } +@@ -905,7 +905,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, + + /* Make sure ubi_num is not busy */ + if (ubi_devices[ubi_num]) { +- ubi_err(ubi, "already exists"); ++ pr_err("ubi: ubi%i already exists", ubi_num); + return -EEXIST; + } + } +@@ -987,6 +987,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, + goto out_detach; + } + ++ /* Make device "available" before it becomes accessible via sysfs */ ++ ubi_devices[ubi_num] = ubi; ++ + err = uif_init(ubi, &ref); + if (err) + goto out_detach; +@@ -1031,7 +1034,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, + wake_up_process(ubi->bgt_thread); + spin_unlock(&ubi->wl_lock); + +- ubi_devices[ubi_num] = ubi; + ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); + return ubi_num; + +@@ -1042,6 +1044,7 @@ out_uif: + ubi_assert(ref); + uif_close(ubi); + out_detach: ++ ubi_devices[ubi_num] = NULL; + ubi_wl_close(ubi); + ubi_free_internal_volumes(ubi); + vfree(ubi->vtbl); +diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c +index 1ae17bb9b889..3ea4c022cbb9 100644 +--- a/drivers/mtd/ubi/vmt.c ++++ b/drivers/mtd/ubi/vmt.c +@@ -488,13 +488,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) + spin_unlock(&ubi->volumes_lock); + } + +- /* Change volume table record */ +- vtbl_rec = ubi->vtbl[vol_id]; +- vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs); +- err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); +- if (err) +- 
goto out_acc; +- + if (pebs < 0) { + for (i = 0; i < -pebs; i++) { + err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i); +@@ -512,6 +505,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) + spin_unlock(&ubi->volumes_lock); + } + ++ /* ++ * When we shrink a volume we have to flush all pending (erase) work. ++ * Otherwise it can happen that upon next attach UBI finds a LEB with ++ * lnum > highest_lnum and refuses to attach. ++ */ ++ if (pebs < 0) { ++ err = ubi_wl_flush(ubi, vol_id, UBI_ALL); ++ if (err) ++ goto out_acc; ++ } ++ ++ /* Change volume table record */ ++ vtbl_rec = ubi->vtbl[vol_id]; ++ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs); ++ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); ++ if (err) ++ goto out_acc; ++ + vol->reserved_pebs = reserved_pebs; + if (vol->vol_type == UBI_DYNAMIC_VOLUME) { + vol->used_ebs = reserved_pebs; +diff --git a/drivers/of/base.c b/drivers/of/base.c +index 017dd94f16ea..942461f36616 100644 +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -112,6 +112,7 @@ static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj, + return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length); + } + ++/* always return newly allocated name, caller must free after use */ + static const char *safe_name(struct kobject *kobj, const char *orig_name) + { + const char *name = orig_name; +@@ -126,9 +127,12 @@ static const char *safe_name(struct kobject *kobj, const char *orig_name) + name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i); + } + +- if (name != orig_name) ++ if (name == orig_name) { ++ name = kstrdup(orig_name, GFP_KERNEL); ++ } else { + pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n", + kobject_name(kobj), name); ++ } + return name; + } + +@@ -159,6 +163,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp) + int __of_attach_node_sysfs(struct device_node *np) + { + const char *name; ++ struct kobject *parent; + struct 
property *pp; + int rc; + +@@ -171,15 +176,16 @@ int __of_attach_node_sysfs(struct device_node *np) + np->kobj.kset = of_kset; + if (!np->parent) { + /* Nodes without parents are new top level trees */ +- rc = kobject_add(&np->kobj, NULL, "%s", +- safe_name(&of_kset->kobj, "base")); ++ name = safe_name(&of_kset->kobj, "base"); ++ parent = NULL; + } else { + name = safe_name(&np->parent->kobj, kbasename(np->full_name)); +- if (!name || !name[0]) +- return -EINVAL; +- +- rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name); ++ parent = &np->parent->kobj; + } ++ if (!name) ++ return -ENOMEM; ++ rc = kobject_add(&np->kobj, parent, "%s", name); ++ kfree(name); + if (rc) + return rc; + +@@ -1753,6 +1759,12 @@ int __of_remove_property(struct device_node *np, struct property *prop) + return 0; + } + ++void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop) ++{ ++ sysfs_remove_bin_file(&np->kobj, &prop->attr); ++ kfree(prop->attr.attr.name); ++} ++ + void __of_remove_property_sysfs(struct device_node *np, struct property *prop) + { + if (!IS_ENABLED(CONFIG_SYSFS)) +@@ -1760,7 +1772,7 @@ void __of_remove_property_sysfs(struct device_node *np, struct property *prop) + + /* at early boot, bail here and defer setup to of_init() */ + if (of_kset && of_node_is_attached(np)) +- sysfs_remove_bin_file(&np->kobj, &prop->attr); ++ __of_sysfs_remove_bin_file(np, prop); + } + + /** +@@ -1830,7 +1842,7 @@ void __of_update_property_sysfs(struct device_node *np, struct property *newprop + return; + + if (oldprop) +- sysfs_remove_bin_file(&np->kobj, &oldprop->attr); ++ __of_sysfs_remove_bin_file(np, oldprop); + __of_add_property_sysfs(np, newprop); + } + +diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c +index 53826b84e0ec..2d72ddcf534f 100644 +--- a/drivers/of/dynamic.c ++++ b/drivers/of/dynamic.c +@@ -55,7 +55,7 @@ void __of_detach_node_sysfs(struct device_node *np) + /* only remove properties if on sysfs */ + if (of_node_is_attached(np)) { + 
for_each_property_of_node(np, pp) +- sysfs_remove_bin_file(&np->kobj, &pp->attr); ++ __of_sysfs_remove_bin_file(np, pp); + kobject_del(&np->kobj); + } + +diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h +index 8e882e706cd8..46ddbee22ce3 100644 +--- a/drivers/of/of_private.h ++++ b/drivers/of/of_private.h +@@ -81,6 +81,9 @@ extern int __of_attach_node_sysfs(struct device_node *np); + extern void __of_detach_node(struct device_node *np); + extern void __of_detach_node_sysfs(struct device_node *np); + ++extern void __of_sysfs_remove_bin_file(struct device_node *np, ++ struct property *prop); ++ + /* iterators for transactions, used for overlays */ + /* forward iterator */ + #define for_each_transaction_entry(_oft, _te) \ +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 7e327309cf69..3c4752a288e2 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3115,13 +3115,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev) + } + + /* +- * Atheros AR93xx chips do not behave after a bus reset. The device will +- * throw a Link Down error on AER-capable systems and regardless of AER, +- * config space of the device is never accessible again and typically +- * causes the system to hang or reset when access is attempted. ++ * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset. ++ * The device will throw a Link Down error on AER-capable systems and ++ * regardless of AER, config space of the device is never accessible again ++ * and typically causes the system to hang or reset when access is attempted. 
+ * http://www.spinics.net/lists/linux-pci/msg34797.html + */ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); + + static void quirk_no_pm_reset(struct pci_dev *dev) + { +diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c +index 84936bae6e5e..4e377599d266 100644 +--- a/drivers/pinctrl/intel/pinctrl-cherryview.c ++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c +@@ -160,7 +160,6 @@ struct chv_pin_context { + * @pctldev: Pointer to the pin controller device + * @chip: GPIO chip in this pin controller + * @regs: MMIO registers +- * @lock: Lock to serialize register accesses + * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO + * offset (in GPIO number space) + * @community: Community this pinctrl instance represents +@@ -174,7 +173,6 @@ struct chv_pinctrl { + struct pinctrl_dev *pctldev; + struct gpio_chip chip; + void __iomem *regs; +- raw_spinlock_t lock; + unsigned intr_lines[16]; + const struct chv_community *community; + u32 saved_intmask; +@@ -659,6 +657,17 @@ static const struct chv_community *chv_communities[] = { + &southeast_community, + }; + ++/* ++ * Lock to serialize register accesses ++ * ++ * Due to a silicon issue, a shared lock must be used to prevent ++ * concurrent accesses across the 4 GPIO controllers. ++ * ++ * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005), ++ * errata #CHT34, for further information. 
++ */ ++static DEFINE_RAW_SPINLOCK(chv_lock); ++ + static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset, + unsigned reg) + { +@@ -720,13 +729,13 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, + u32 ctrl0, ctrl1; + bool locked; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + + ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0)); + ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1)); + locked = chv_pad_locked(pctrl, offset); + +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + + if (ctrl0 & CHV_PADCTRL0_GPIOEN) { + seq_puts(s, "GPIO "); +@@ -789,14 +798,14 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, + + grp = &pctrl->community->groups[group]; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + + /* Check first that the pad is not locked */ + for (i = 0; i < grp->npins; i++) { + if (chv_pad_locked(pctrl, grp->pins[i])) { + dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n", + grp->pins[i]); +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + return -EBUSY; + } + } +@@ -839,7 +848,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, + pin, altfunc->mode, altfunc->invert_oe ? 
"" : "not "); + } + +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + + return 0; + } +@@ -853,13 +862,13 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev, + void __iomem *reg; + u32 value; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + + if (chv_pad_locked(pctrl, offset)) { + value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0)); + if (!(value & CHV_PADCTRL0_GPIOEN)) { + /* Locked so cannot enable */ +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + return -EBUSY; + } + } else { +@@ -899,7 +908,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev, + chv_writel(value, reg); + } + +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + + return 0; + } +@@ -913,13 +922,13 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev, + void __iomem *reg; + u32 value; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + + reg = chv_padreg(pctrl, offset, CHV_PADCTRL0); + value = readl(reg) & ~CHV_PADCTRL0_GPIOEN; + chv_writel(value, reg); + +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + } + + static int chv_gpio_set_direction(struct pinctrl_dev *pctldev, +@@ -931,7 +940,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev, + unsigned long flags; + u32 ctrl0; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + + ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK; + if (input) +@@ -940,7 +949,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev, + ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT; + chv_writel(ctrl0, reg); + +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + + return 0; + } +@@ -965,10 +974,10 
@@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin, + u16 arg = 0; + u32 term; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); + ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1)); +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + + term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT; + +@@ -1042,7 +1051,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, + unsigned long flags; + u32 ctrl0, pull; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + ctrl0 = readl(reg); + + switch (param) { +@@ -1065,7 +1074,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, + pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT; + break; + default: +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + return -EINVAL; + } + +@@ -1083,7 +1092,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, + pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT; + break; + default: +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + return -EINVAL; + } + +@@ -1091,12 +1100,12 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, + break; + + default: +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + return -EINVAL; + } + + chv_writel(ctrl0, reg); +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + + return 0; + } +@@ -1162,9 +1171,9 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned offset) + unsigned long flags; + u32 ctrl0, cfg; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + ctrl0 = readl(chv_padreg(pctrl, pin, 
CHV_PADCTRL0)); +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + + cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK; + cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT; +@@ -1182,7 +1191,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value) + void __iomem *reg; + u32 ctrl0; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + + reg = chv_padreg(pctrl, pin, CHV_PADCTRL0); + ctrl0 = readl(reg); +@@ -1194,7 +1203,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value) + + chv_writel(ctrl0, reg); + +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + } + + static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset) +@@ -1204,9 +1213,9 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset) + u32 ctrl0, direction; + unsigned long flags; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + + direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK; + direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT; +@@ -1244,14 +1253,14 @@ static void chv_gpio_irq_ack(struct irq_data *d) + int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d)); + u32 intr_line; + +- raw_spin_lock(&pctrl->lock); ++ raw_spin_lock(&chv_lock); + + intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); + intr_line &= CHV_PADCTRL0_INTSEL_MASK; + intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT; + chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT); + +- raw_spin_unlock(&pctrl->lock); ++ raw_spin_unlock(&chv_lock); + } + + static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) +@@ -1262,7 +1271,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) + u32 value, intr_line; + unsigned long flags; + 
+- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + + intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); + intr_line &= CHV_PADCTRL0_INTSEL_MASK; +@@ -1275,7 +1284,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) + value |= BIT(intr_line); + chv_writel(value, pctrl->regs + CHV_INTMASK); + +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + } + + static void chv_gpio_irq_mask(struct irq_data *d) +@@ -1309,7 +1318,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d) + unsigned long flags; + u32 intsel, value; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); + intsel &= CHV_PADCTRL0_INTSEL_MASK; + intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; +@@ -1324,7 +1333,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d) + irq_set_handler_locked(d, handler); + pctrl->intr_lines[intsel] = offset; + } +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + } + + chv_gpio_irq_unmask(d); +@@ -1340,7 +1349,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type) + unsigned long flags; + u32 value; + +- raw_spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&chv_lock, flags); + + /* + * Pins which can be used as shared interrupt are configured in +@@ -1389,7 +1398,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type) + else if (type & IRQ_TYPE_LEVEL_MASK) + irq_set_handler_locked(d, handle_level_irq); + +- raw_spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&chv_lock, flags); + + return 0; + } +@@ -1501,7 +1510,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev) + if (i == ARRAY_SIZE(chv_communities)) + return -ENODEV; + +- raw_spin_lock_init(&pctrl->lock); + pctrl->dev = &pdev->dev; + + #ifdef CONFIG_PM_SLEEP +diff --git 
a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c +index fb4dd7b3ee71..af2046c87806 100644 +--- a/drivers/platform/x86/hp-wmi.c ++++ b/drivers/platform/x86/hp-wmi.c +@@ -723,6 +723,11 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device) + if (err) + return err; + ++ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless, ++ sizeof(wireless), 0); ++ if (err) ++ return err; ++ + if (wireless & 0x1) { + wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, + RFKILL_TYPE_WLAN, +@@ -910,7 +915,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device) + gps_rfkill = NULL; + rfkill2_count = 0; + +- if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device)) ++ if (hp_wmi_rfkill_setup(device)) + hp_wmi_rfkill2_setup(device); + + err = device_create_file(&device->dev, &dev_attr_display); +diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c +index 9e03d158f411..4f7ce0097191 100644 +--- a/drivers/remoteproc/remoteproc_core.c ++++ b/drivers/remoteproc/remoteproc_core.c +@@ -1239,11 +1239,6 @@ int rproc_add(struct rproc *rproc) + if (ret < 0) + return ret; + +- /* expose to rproc_get_by_phandle users */ +- mutex_lock(&rproc_list_mutex); +- list_add(&rproc->node, &rproc_list); +- mutex_unlock(&rproc_list_mutex); +- + dev_info(dev, "%s is available\n", rproc->name); + + dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n"); +@@ -1251,8 +1246,16 @@ int rproc_add(struct rproc *rproc) + + /* create debugfs entries */ + rproc_create_debug_dir(rproc); ++ ret = rproc_add_virtio_devices(rproc); ++ if (ret < 0) ++ return ret; + +- return rproc_add_virtio_devices(rproc); ++ /* expose to rproc_get_by_phandle users */ ++ mutex_lock(&rproc_list_mutex); ++ list_add(&rproc->node, &rproc_list); ++ mutex_unlock(&rproc_list_mutex); ++ ++ return 0; + } + EXPORT_SYMBOL(rproc_add); + +diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c +index 
ffb860d18701..f92528822f06 100644 +--- a/drivers/rtc/rtc-s3c.c ++++ b/drivers/rtc/rtc-s3c.c +@@ -149,12 +149,14 @@ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq) + if (!is_power_of_2(freq)) + return -EINVAL; + ++ s3c_rtc_enable_clk(info); + spin_lock_irq(&info->pie_lock); + + if (info->data->set_freq) + info->data->set_freq(info, freq); + + spin_unlock_irq(&info->pie_lock); ++ s3c_rtc_disable_clk(info); + + return 0; + } +diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c +index b2afad5a5682..2a34eb5f6161 100644 +--- a/drivers/s390/cio/cmf.c ++++ b/drivers/s390/cio/cmf.c +@@ -753,6 +753,17 @@ static void reset_cmb(struct ccw_device *cdev) + cmf_generic_reset(cdev); + } + ++static int cmf_enabled(struct ccw_device *cdev) ++{ ++ int enabled; ++ ++ spin_lock_irq(cdev->ccwlock); ++ enabled = !!cdev->private->cmb; ++ spin_unlock_irq(cdev->ccwlock); ++ ++ return enabled; ++} ++ + static struct attribute_group cmf_attr_group; + + static struct cmb_operations cmbops_basic = { +@@ -1153,13 +1164,8 @@ static ssize_t cmb_enable_show(struct device *dev, + char *buf) + { + struct ccw_device *cdev = to_ccwdev(dev); +- int enabled; + +- spin_lock_irq(cdev->ccwlock); +- enabled = !!cdev->private->cmb; +- spin_unlock_irq(cdev->ccwlock); +- +- return sprintf(buf, "%d\n", enabled); ++ return sprintf(buf, "%d\n", cmf_enabled(cdev)); + } + + static ssize_t cmb_enable_store(struct device *dev, +@@ -1199,15 +1205,20 @@ int ccw_set_cmf(struct ccw_device *cdev, int enable) + * @cdev: The ccw device to be enabled + * + * Returns %0 for success or a negative error value. +- * ++ * Note: If this is called on a device for which channel measurement is already ++ * enabled a reset of the measurement data is triggered. 
+ * Context: + * non-atomic + */ + int enable_cmf(struct ccw_device *cdev) + { +- int ret; ++ int ret = 0; + + device_lock(&cdev->dev); ++ if (cmf_enabled(cdev)) { ++ cmbops->reset(cdev); ++ goto out_unlock; ++ } + get_device(&cdev->dev); + ret = cmbops->alloc(cdev); + if (ret) +@@ -1226,7 +1237,7 @@ int enable_cmf(struct ccw_device *cdev) + out: + if (ret) + put_device(&cdev->dev); +- ++out_unlock: + device_unlock(&cdev->dev); + return ret; + } +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c +index 4679ed4444a7..9e165bc05ee1 100644 +--- a/drivers/scsi/lpfc/lpfc_scsi.c ++++ b/drivers/scsi/lpfc/lpfc_scsi.c +@@ -3859,7 +3859,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, + uint32_t tag; + uint16_t hwq; + +- if (shost_use_blk_mq(cmnd->device->host)) { ++ if (cmnd && shost_use_blk_mq(cmnd->device->host)) { + tag = blk_mq_unique_tag(cmnd->request); + hwq = blk_mq_unique_tag_to_hwq(tag); + +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index 73c8ea0b1360..3cac73e4c3e4 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -548,7 +548,14 @@ static void reset_sccr1(struct driver_data *drv_data) + u32 sccr1_reg; + + sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1; +- sccr1_reg &= ~SSCR1_RFT; ++ switch (drv_data->ssp_type) { ++ case QUARK_X1000_SSP: ++ sccr1_reg &= ~QUARK_X1000_SSCR1_RFT; ++ break; ++ default: ++ sccr1_reg &= ~SSCR1_RFT; ++ break; ++ } + sccr1_reg |= chip->threshold; + pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg); + } +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 72204fbf2bb1..bd810c109277 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -492,7 +492,8 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) + bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); + + spin_lock_bh(&conn->cmd_lock); +- if (!list_empty(&cmd->i_conn_node)) 
++ if (!list_empty(&cmd->i_conn_node) && ++ !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP)) + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + +@@ -4194,6 +4195,7 @@ transport_err: + + static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) + { ++ LIST_HEAD(tmp_list); + struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL; + struct iscsi_session *sess = conn->sess; + /* +@@ -4202,18 +4204,26 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) + * has been reset -> returned sleeping pre-handler state. + */ + spin_lock_bh(&conn->cmd_lock); +- list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { ++ list_splice_init(&conn->conn_cmd_list, &tmp_list); + ++ list_for_each_entry(cmd, &tmp_list, i_conn_node) { ++ struct se_cmd *se_cmd = &cmd->se_cmd; ++ ++ if (se_cmd->se_tfo != NULL) { ++ spin_lock(&se_cmd->t_state_lock); ++ se_cmd->transport_state |= CMD_T_FABRIC_STOP; ++ spin_unlock(&se_cmd->t_state_lock); ++ } ++ } ++ spin_unlock_bh(&conn->cmd_lock); ++ ++ list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { + list_del_init(&cmd->i_conn_node); +- spin_unlock_bh(&conn->cmd_lock); + + iscsit_increment_maxcmdsn(cmd, sess); +- + iscsit_free_cmd(cmd, true); + +- spin_lock_bh(&conn->cmd_lock); + } +- spin_unlock_bh(&conn->cmd_lock); + } + + static void iscsit_stop_timers_for_cmds( +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index 96e78c823d13..316f66172335 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -1357,8 +1357,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) + } + login->zero_tsih = zero_tsih; + +- conn->sess->se_sess->sup_prot_ops = +- conn->conn_transport->iscsit_get_sup_prot_ops(conn); ++ if (conn->sess) ++ conn->sess->se_sess->sup_prot_ops = ++ conn->conn_transport->iscsit_get_sup_prot_ops(conn); + + tpg = conn->tpg; + if (!tpg) { +diff --git 
a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index 3436a83568ea..dcd5ed26eb18 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -832,13 +832,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) + * in ATA and we need to set TPE=1 + */ + bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, +- struct request_queue *q, int block_size) ++ struct request_queue *q) + { ++ int block_size = queue_logical_block_size(q); ++ + if (!blk_queue_discard(q)) + return false; + +- attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) / +- block_size; ++ attrib->max_unmap_lba_count = ++ q->limits.max_discard_sectors >> (ilog2(block_size) - 9); + /* + * Currently hardcoded to 1 in Linux/SCSI code.. + */ +diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c +index 75f0f08b2a34..79291869bce6 100644 +--- a/drivers/target/target_core_file.c ++++ b/drivers/target/target_core_file.c +@@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev) + dev_size, div_u64(dev_size, fd_dev->fd_block_size), + fd_dev->fd_block_size); + +- if (target_configure_unmap_from_queue(&dev->dev_attrib, q, +- fd_dev->fd_block_size)) ++ if (target_configure_unmap_from_queue(&dev->dev_attrib, q)) + pr_debug("IFILE: BLOCK Discard support available," + " disabled by default\n"); + /* +diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c +index 2c53dcefff3e..4620c1dcdbc7 100644 +--- a/drivers/target/target_core_iblock.c ++++ b/drivers/target/target_core_iblock.c +@@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev) + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); + dev->dev_attrib.hw_queue_depth = q->nr_requests; + +- if (target_configure_unmap_from_queue(&dev->dev_attrib, q, +- dev->dev_attrib.hw_block_size)) ++ if (target_configure_unmap_from_queue(&dev->dev_attrib, q)) 
+ pr_debug("IBLOCK: BLOCK Discard support available," + " disabled by default\n"); + +diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h +index dae0750c2032..253a91bff943 100644 +--- a/drivers/target/target_core_internal.h ++++ b/drivers/target/target_core_internal.h +@@ -148,6 +148,7 @@ sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); + void target_qf_do_work(struct work_struct *work); + bool target_check_wce(struct se_device *dev); + bool target_check_fua(struct se_device *dev); ++void __target_execute_cmd(struct se_cmd *, bool); + + /* target_core_stat.c */ + void target_stat_setup_dev_default_groups(struct se_device *); +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c +index 98698d875742..c220bb8dfa9d 100644 +--- a/drivers/target/target_core_sbc.c ++++ b/drivers/target/target_core_sbc.c +@@ -594,7 +594,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes + cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; + spin_unlock_irq(&cmd->t_state_lock); + +- __target_execute_cmd(cmd); ++ __target_execute_cmd(cmd, false); + + kfree(buf); + return ret; +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index d151bc3d6971..7bc3778a1ac9 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -1270,23 +1270,6 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) + + trace_target_sequencer_start(cmd); + +- /* +- * Check for an existing UNIT ATTENTION condition +- */ +- ret = target_scsi3_ua_check(cmd); +- if (ret) +- return ret; +- +- ret = target_alua_state_check(cmd); +- if (ret) +- return ret; +- +- ret = target_check_reservation(cmd); +- if (ret) { +- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; +- return ret; +- } +- + ret = dev->transport->parse_cdb(cmd); + if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) + 
pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", +@@ -1749,20 +1732,45 @@ queue_full: + } + EXPORT_SYMBOL(transport_generic_request_failure); + +-void __target_execute_cmd(struct se_cmd *cmd) ++void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) + { + sense_reason_t ret; + +- if (cmd->execute_cmd) { +- ret = cmd->execute_cmd(cmd); +- if (ret) { +- spin_lock_irq(&cmd->t_state_lock); +- cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); +- spin_unlock_irq(&cmd->t_state_lock); ++ if (!cmd->execute_cmd) { ++ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ++ goto err; ++ } ++ if (do_checks) { ++ /* ++ * Check for an existing UNIT ATTENTION condition after ++ * target_handle_task_attr() has done SAM task attr ++ * checking, and possibly have already defered execution ++ * out to target_restart_delayed_cmds() context. ++ */ ++ ret = target_scsi3_ua_check(cmd); ++ if (ret) ++ goto err; ++ ++ ret = target_alua_state_check(cmd); ++ if (ret) ++ goto err; + +- transport_generic_request_failure(cmd, ret); ++ ret = target_check_reservation(cmd); ++ if (ret) { ++ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; ++ goto err; + } + } ++ ++ ret = cmd->execute_cmd(cmd); ++ if (!ret) ++ return; ++err: ++ spin_lock_irq(&cmd->t_state_lock); ++ cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); ++ spin_unlock_irq(&cmd->t_state_lock); ++ ++ transport_generic_request_failure(cmd, ret); + } + + static int target_write_prot_action(struct se_cmd *cmd) +@@ -1807,6 +1815,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + return false; + ++ cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; ++ + /* + * Check for the existence of HEAD_OF_QUEUE, and if true return 1 + * to allow the passed struct se_cmd list of tasks to the front of the list. 
+@@ -1887,7 +1897,7 @@ void target_execute_cmd(struct se_cmd *cmd) + return; + } + +- __target_execute_cmd(cmd); ++ __target_execute_cmd(cmd, true); + } + EXPORT_SYMBOL(target_execute_cmd); + +@@ -1911,7 +1921,7 @@ static void target_restart_delayed_cmds(struct se_device *dev) + list_del(&cmd->se_delayed_node); + spin_unlock(&dev->delayed_cmd_lock); + +- __target_execute_cmd(cmd); ++ __target_execute_cmd(cmd, true); + + if (cmd->sam_task_attr == TCM_ORDERED_TAG) + break; +@@ -1929,6 +1939,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + return; + ++ if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) ++ goto restart; ++ + if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { + atomic_dec_mb(&dev->simple_cmds); + dev->dev_cur_ordered_id++; +@@ -1945,7 +1958,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) + pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", + dev->dev_cur_ordered_id); + } +- ++restart: + target_restart_delayed_cmds(dev); + } + +@@ -2533,15 +2546,10 @@ static void target_release_cmd_kref(struct kref *kref) + bool fabric_stop; + + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); +- if (list_empty(&se_cmd->se_cmd_list)) { +- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); +- target_free_cmd_mem(se_cmd); +- se_cmd->se_tfo->release_cmd(se_cmd); +- return; +- } + + spin_lock(&se_cmd->t_state_lock); +- fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); ++ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && ++ (se_cmd->transport_state & CMD_T_ABORTED); + spin_unlock(&se_cmd->t_state_lock); + + if (se_cmd->cmd_wait_set || fabric_stop) { +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index 7bbadd176c74..7b5462eb8388 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -485,19 +485,21 @@ static void atmel_start_tx(struct uart_port *port) + { + struct 
atmel_uart_port *atmel_port = to_atmel_uart_port(port); + +- if (atmel_use_pdc_tx(port)) { +- if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN) +- /* The transmitter is already running. Yes, we +- really need this.*/ +- return; ++ if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR) ++ & ATMEL_PDC_TXTEN)) ++ /* The transmitter is already running. Yes, we ++ really need this.*/ ++ return; + ++ if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) + if ((port->rs485.flags & SER_RS485_ENABLED) && + !(port->rs485.flags & SER_RS485_RX_DURING_TX)) + atmel_stop_rx(port); + ++ if (atmel_use_pdc_tx(port)) + /* re-enable PDC transmit */ + atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); +- } ++ + /* Enable interrupts */ + atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); + } +diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c +index dcde955475dc..e1de4944e0ce 100644 +--- a/drivers/tty/serial/msm_serial.c ++++ b/drivers/tty/serial/msm_serial.c +@@ -726,7 +726,7 @@ static void msm_handle_tx(struct uart_port *port) + return; + } + +- pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); ++ pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + + dma_min = 1; /* Always DMA */ +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c +index 8320173af846..237ef5573c18 100644 +--- a/drivers/tty/serial/samsung.c ++++ b/drivers/tty/serial/samsung.c +@@ -1676,7 +1676,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, + return -ENODEV; + + if (port->mapbase != 0) +- return 0; ++ return -EINVAL; + + /* setup info for port */ + port->dev = &platdev->dev; +@@ -1730,22 +1730,25 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, + ourport->dma = devm_kzalloc(port->dev, + sizeof(*ourport->dma), + GFP_KERNEL); +- if (!ourport->dma) +- return -ENOMEM; ++ if 
(!ourport->dma) { ++ ret = -ENOMEM; ++ goto err; ++ } + } + + ourport->clk = clk_get(&platdev->dev, "uart"); + if (IS_ERR(ourport->clk)) { + pr_err("%s: Controller clock not found\n", + dev_name(&platdev->dev)); +- return PTR_ERR(ourport->clk); ++ ret = PTR_ERR(ourport->clk); ++ goto err; + } + + ret = clk_prepare_enable(ourport->clk); + if (ret) { + pr_err("uart: clock failed to prepare+enable: %d\n", ret); + clk_put(ourport->clk); +- return ret; ++ goto err; + } + + /* Keep all interrupts masked and cleared */ +@@ -1761,7 +1764,12 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, + + /* reset the fifos (and setup the uart) */ + s3c24xx_serial_resetport(port, cfg); ++ + return 0; ++ ++err: ++ port->mapbase = 0; ++ return ret; + } + + /* Device driver serial port probe */ +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c +index 38ae877c46e3..3ffb01ff6549 100644 +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -1203,10 +1203,11 @@ static int proc_getdriver(struct usb_dev_state *ps, void __user *arg) + + static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg) + { +- struct usbdevfs_connectinfo ci = { +- .devnum = ps->dev->devnum, +- .slow = ps->dev->speed == USB_SPEED_LOW +- }; ++ struct usbdevfs_connectinfo ci; ++ ++ memset(&ci, 0, sizeof(ci)); ++ ci.devnum = ps->dev->devnum; ++ ci.slow = ps->dev->speed == USB_SPEED_LOW; + + if (copy_to_user(arg, &ci, sizeof(ci))) + return -EFAULT; +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 944a6dca0fcb..d2e50a27140c 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -128,6 +128,9 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x04f3, 0x016f), .driver_info = + USB_QUIRK_DEVICE_QUALIFIER }, + ++ { USB_DEVICE(0x04f3, 0x0381), .driver_info = ++ USB_QUIRK_NO_LPM }, ++ + { USB_DEVICE(0x04f3, 0x21b8), .driver_info = + USB_QUIRK_DEVICE_QUALIFIER }, + +diff --git 
a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 69ffe6e8d77f..70900e6ca9bc 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -1965,6 +1965,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, + return 1; + } + ++ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) ++ if ((event->status & DEPEVT_STATUS_IOC) && ++ (trb->ctrl & DWC3_TRB_CTRL_IOC)) ++ return 0; + return 1; + } + +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c +index 97ef75af9632..803c503a2e3d 100644 +--- a/drivers/usb/gadget/function/f_fs.c ++++ b/drivers/usb/gadget/function/f_fs.c +@@ -2740,6 +2740,7 @@ static int _ffs_func_bind(struct usb_configuration *c, + func->ffs->ss_descs_count; + + int fs_len, hs_len, ss_len, ret, i; ++ struct ffs_ep *eps_ptr; + + /* Make it a single chunk, less management later on */ + vla_group(d); +@@ -2788,12 +2789,9 @@ static int _ffs_func_bind(struct usb_configuration *c, + ffs->raw_descs_length); + + memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); +- for (ret = ffs->eps_count; ret; --ret) { +- struct ffs_ep *ptr; +- +- ptr = vla_ptr(vlabuf, d, eps); +- ptr[ret].num = -1; +- } ++ eps_ptr = vla_ptr(vlabuf, d, eps); ++ for (i = 0; i < ffs->eps_count; i++) ++ eps_ptr[i].num = -1; + + /* Save pointers + * d_eps == vlabuf, func->eps used to kfree vlabuf later +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c +index 044ca79d3cb5..12628dd36e55 100644 +--- a/drivers/usb/gadget/function/f_uac2.c ++++ b/drivers/usb/gadget/function/f_uac2.c +@@ -1291,6 +1291,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr) + + if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { + struct cntrl_cur_lay3 c; ++ memset(&c, 0, sizeof(struct cntrl_cur_lay3)); + + if (entity_id == USB_IN_CLK_ID) + c.dCUR = p_srate; +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c +index f1893e08e51a..db565f620f82 100644 
+--- a/drivers/usb/renesas_usbhs/fifo.c ++++ b/drivers/usb/renesas_usbhs/fifo.c +@@ -808,20 +808,27 @@ static void xfer_work(struct work_struct *work) + { + struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work); + struct usbhs_pipe *pipe = pkt->pipe; +- struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe); ++ struct usbhs_fifo *fifo; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct dma_async_tx_descriptor *desc; +- struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt); ++ struct dma_chan *chan; + struct device *dev = usbhs_priv_to_dev(priv); + enum dma_transfer_direction dir; ++ unsigned long flags; + ++ usbhs_lock(priv, flags); ++ fifo = usbhs_pipe_to_fifo(pipe); ++ if (!fifo) ++ goto xfer_work_end; ++ ++ chan = usbhsf_dma_chan_get(fifo, pkt); + dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; + + desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual, + pkt->trans, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) +- return; ++ goto xfer_work_end; + + desc->callback = usbhsf_dma_complete; + desc->callback_param = pipe; +@@ -829,7 +836,7 @@ static void xfer_work(struct work_struct *work) + pkt->cookie = dmaengine_submit(desc); + if (pkt->cookie < 0) { + dev_err(dev, "Failed to submit dma descriptor\n"); +- return; ++ goto xfer_work_end; + } + + dev_dbg(dev, " %s %d (%d/ %d)\n", +@@ -840,6 +847,9 @@ static void xfer_work(struct work_struct *work) + usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); + dma_async_issue_pending(chan); + usbhs_pipe_enable(pipe); ++ ++xfer_work_end: ++ usbhs_unlock(priv, flags); + } + + /* +diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c +index fa14198daf77..5a3abf56d56b 100644 +--- a/drivers/usb/renesas_usbhs/mod_gadget.c ++++ b/drivers/usb/renesas_usbhs/mod_gadget.c +@@ -586,6 +586,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep, + struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); + struct usbhs_pipe *pipe; + int ret = 
-EIO; ++ unsigned long flags; ++ ++ usbhs_lock(priv, flags); + + /* + * if it already have pipe, +@@ -594,7 +597,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep, + if (uep->pipe) { + usbhs_pipe_clear(uep->pipe); + usbhs_pipe_sequence_data0(uep->pipe); +- return 0; ++ ret = 0; ++ goto usbhsg_ep_enable_end; + } + + pipe = usbhs_pipe_malloc(priv, +@@ -622,6 +626,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep, + ret = 0; + } + ++usbhsg_ep_enable_end: ++ usbhs_unlock(priv, flags); ++ + return ret; + } + +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index d96d423d00e6..8e07536c233a 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 + #define TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 ++#define TELIT_PRODUCT_LE910_USBCFG4 0x1206 + + /* ZTE PRODUCTS */ + #define ZTE_VENDOR_ID 0x19d2 +@@ -1198,6 +1199,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), + .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c +index 8ab6238c9299..56f7e2521202 100644 +--- a/drivers/virtio/virtio_balloon.c ++++ b/drivers/virtio/virtio_balloon.c +@@ -196,6 +196,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num) + num = min(num, ARRAY_SIZE(vb->pfns)); + + mutex_lock(&vb->balloon_lock); ++ /* We can't release more pages than 
taken */ ++ num = min(num, (size_t)vb->num_pages); + for (vb->num_pfns = 0; vb->num_pfns < num; + vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { + page = balloon_page_dequeue(vb_dev_info); +diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c +index 0e2f43bccf1f..0c427d6a12d1 100644 +--- a/drivers/w1/masters/omap_hdq.c ++++ b/drivers/w1/masters/omap_hdq.c +@@ -390,8 +390,6 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val) + goto out; + } + +- hdq_data->hdq_irqstatus = 0; +- + if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { + hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, + OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO, +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 9abe18763a7f..257bbdcb5df6 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -2786,12 +2786,6 @@ struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask) + btrfs_bio->csum = NULL; + btrfs_bio->csum_allocated = NULL; + btrfs_bio->end_io = NULL; +- +-#ifdef CONFIG_BLK_CGROUP +- /* FIXME, put this into bio_clone_bioset */ +- if (bio->bi_css) +- bio_associate_blkcg(new, bio->bi_css); +-#endif + } + return new; + } +diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h +index 3182273a3407..1418daa03d95 100644 +--- a/fs/cifs/cifs_fs_sb.h ++++ b/fs/cifs/cifs_fs_sb.h +@@ -46,6 +46,9 @@ + #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */ + #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */ + #define CIFS_MOUNT_MAP_SFM_CHR 0x800000 /* SFM/MAC mapping for illegal chars */ ++#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible ++ * root mountable ++ */ + + struct cifs_sb_info { + struct rb_root tlink_tree; +@@ -67,5 +70,6 @@ struct cifs_sb_info { + struct backing_dev_info bdi; + struct delayed_work prune_tlinks; + struct rcu_head rcu; ++ char *prepath; + }; + #endif /* _CIFS_FS_SB_H */ +diff --git a/fs/cifs/cifsencrypt.c 
b/fs/cifs/cifsencrypt.c +index e682b36a210f..4acbc390a7d6 100644 +--- a/fs/cifs/cifsencrypt.c ++++ b/fs/cifs/cifsencrypt.c +@@ -731,24 +731,26 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) + + memcpy(ses->auth_key.response + baselen, tiblob, tilen); + ++ mutex_lock(&ses->server->srv_mutex); ++ + rc = crypto_hmacmd5_alloc(ses->server); + if (rc) { + cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc); +- goto setup_ntlmv2_rsp_ret; ++ goto unlock; + } + + /* calculate ntlmv2_hash */ + rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp); + if (rc) { + cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc); +- goto setup_ntlmv2_rsp_ret; ++ goto unlock; + } + + /* calculate first part of the client response (CR1) */ + rc = CalcNTLMv2_response(ses, ntlmv2_hash); + if (rc) { + cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc); +- goto setup_ntlmv2_rsp_ret; ++ goto unlock; + } + + /* now calculate the session key for NTLMv2 */ +@@ -757,13 +759,13 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) + if (rc) { + cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n", + __func__); +- goto setup_ntlmv2_rsp_ret; ++ goto unlock; + } + + rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash); + if (rc) { + cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__); +- goto setup_ntlmv2_rsp_ret; ++ goto unlock; + } + + rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, +@@ -771,7 +773,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) + CIFS_HMAC_MD5_HASH_SIZE); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with response\n", __func__); +- goto setup_ntlmv2_rsp_ret; ++ goto unlock; + } + + rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash, +@@ -779,6 +781,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) + if (rc) + cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__); + ++unlock: ++ mutex_unlock(&ses->server->srv_mutex); + 
setup_ntlmv2_rsp_ret: + kfree(tiblob); + +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index cbc0f4bca0c0..450578097fb7 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -686,6 +686,14 @@ cifs_do_mount(struct file_system_type *fs_type, + goto out_cifs_sb; + } + ++ if (volume_info->prepath) { ++ cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL); ++ if (cifs_sb->prepath == NULL) { ++ root = ERR_PTR(-ENOMEM); ++ goto out_cifs_sb; ++ } ++ } ++ + cifs_setup_cifs_sb(volume_info, cifs_sb); + + rc = cifs_mount(cifs_sb, volume_info); +@@ -724,7 +732,11 @@ cifs_do_mount(struct file_system_type *fs_type, + sb->s_flags |= MS_ACTIVE; + } + +- root = cifs_get_root(volume_info, sb); ++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) ++ root = dget(sb->s_root); ++ else ++ root = cifs_get_root(volume_info, sb); ++ + if (IS_ERR(root)) + goto out_super; + +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 5481a6eb9a95..61c3a5ab8637 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -3517,6 +3517,44 @@ cifs_get_volume_info(char *mount_data, const char *devname) + return volume_info; + } + ++static int ++cifs_are_all_path_components_accessible(struct TCP_Server_Info *server, ++ unsigned int xid, ++ struct cifs_tcon *tcon, ++ struct cifs_sb_info *cifs_sb, ++ char *full_path) ++{ ++ int rc; ++ char *s; ++ char sep, tmp; ++ ++ sep = CIFS_DIR_SEP(cifs_sb); ++ s = full_path; ++ ++ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ""); ++ while (rc == 0) { ++ /* skip separators */ ++ while (*s == sep) ++ s++; ++ if (!*s) ++ break; ++ /* next separator */ ++ while (*s && *s != sep) ++ s++; ++ ++ /* ++ * temporarily null-terminate the path at the end of ++ * the current component ++ */ ++ tmp = *s; ++ *s = 0; ++ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ++ full_path); ++ *s = tmp; ++ } ++ return rc; ++} ++ + int + cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) + { +@@ -3654,6 +3692,16 @@ 
remote_path_check: + kfree(full_path); + goto mount_fail_check; + } ++ ++ rc = cifs_are_all_path_components_accessible(server, ++ xid, tcon, cifs_sb, ++ full_path); ++ if (rc != 0) { ++ cifs_dbg(VFS, "cannot query dirs between root and final path, " ++ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); ++ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; ++ rc = 0; ++ } + kfree(full_path); + } + +@@ -3923,6 +3971,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb) + + bdi_destroy(&cifs_sb->bdi); + kfree(cifs_sb->mountdata); ++ kfree(cifs_sb->prepath); + call_rcu(&cifs_sb->rcu, delayed_free); + } + +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index c3eb998a99bd..26a3b389a265 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -84,6 +84,7 @@ build_path_from_dentry(struct dentry *direntry) + struct dentry *temp; + int namelen; + int dfsplen; ++ int pplen = 0; + char *full_path; + char dirsep; + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); +@@ -95,8 +96,12 @@ build_path_from_dentry(struct dentry *direntry) + dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); + else + dfsplen = 0; ++ ++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) ++ pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0; ++ + cifs_bp_rename_retry: +- namelen = dfsplen; ++ namelen = dfsplen + pplen; + seq = read_seqbegin(&rename_lock); + rcu_read_lock(); + for (temp = direntry; !IS_ROOT(temp);) { +@@ -137,7 +142,7 @@ cifs_bp_rename_retry: + } + } + rcu_read_unlock(); +- if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { ++ if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) { + cifs_dbg(FYI, "did not end path lookup where expected. 
namelen=%ddfsplen=%d\n", + namelen, dfsplen); + /* presumably this is only possible if racing with a rename +@@ -153,6 +158,17 @@ cifs_bp_rename_retry: + those safely to '/' if any are found in the middle of the prepath */ + /* BB test paths to Windows with '/' in the midst of prepath */ + ++ if (pplen) { ++ int i; ++ ++ cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath); ++ memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1); ++ full_path[dfsplen] = '\\'; ++ for (i = 0; i < pplen-1; i++) ++ if (full_path[dfsplen+1+i] == '/') ++ full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb); ++ } ++ + if (dfsplen) { + strncpy(full_path, tcon->treeName, dfsplen); + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { +@@ -229,6 +245,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, + goto cifs_create_get_file_info; + } + ++ if (S_ISDIR(newinode->i_mode)) { ++ CIFSSMBClose(xid, tcon, fid->netfid); ++ iput(newinode); ++ rc = -EISDIR; ++ goto out; ++ } ++ + if (!S_ISREG(newinode->i_mode)) { + /* + * The server may allow us to open things like +@@ -399,10 +422,14 @@ cifs_create_set_dentry: + if (rc != 0) { + cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n", + rc); +- if (server->ops->close) +- server->ops->close(xid, tcon, fid); +- goto out; ++ goto out_err; + } ++ ++ if (S_ISDIR(newinode->i_mode)) { ++ rc = -EISDIR; ++ goto out_err; ++ } ++ + d_drop(direntry); + d_add(direntry, newinode); + +@@ -410,6 +437,13 @@ out: + kfree(buf); + kfree(full_path); + return rc; ++ ++out_err: ++ if (server->ops->close) ++ server->ops->close(xid, tcon, fid); ++ if (newinode) ++ iput(newinode); ++ goto out; + } + + int +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index a329f5ba35aa..9cdeb0293267 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -982,10 +982,26 @@ struct inode *cifs_root_iget(struct super_block *sb) + struct inode *inode = NULL; + long rc; + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); ++ char *path 
= NULL; ++ int len; ++ ++ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) ++ && cifs_sb->prepath) { ++ len = strlen(cifs_sb->prepath); ++ path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL); ++ if (path == NULL) ++ return ERR_PTR(-ENOMEM); ++ path[0] = '/'; ++ memcpy(path+1, cifs_sb->prepath, len); ++ } else { ++ path = kstrdup("", GFP_KERNEL); ++ if (path == NULL) ++ return ERR_PTR(-ENOMEM); ++ } + + xid = get_xid(); + if (tcon->unix_ext) { +- rc = cifs_get_inode_info_unix(&inode, "", sb, xid); ++ rc = cifs_get_inode_info_unix(&inode, path, sb, xid); + /* some servers mistakenly claim POSIX support */ + if (rc != -EOPNOTSUPP) + goto iget_no_retry; +@@ -993,7 +1009,8 @@ struct inode *cifs_root_iget(struct super_block *sb) + tcon->unix_ext = false; + } + +- rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL); ++ convert_delimiter(path, CIFS_DIR_SEP(cifs_sb)); ++ rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL); + + iget_no_retry: + if (!inode) { +@@ -1022,6 +1039,7 @@ iget_no_retry: + } + + out: ++ kfree(path); + /* can not call macro free_xid here since in a void func + * TODO: This is no longer true + */ +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 53ccdde6ff18..dd8543caa56e 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -1039,6 +1039,9 @@ smb2_new_lease_key(struct cifs_fid *fid) + get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE); + } + ++#define SMB2_SYMLINK_STRUCT_SIZE \ ++ (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp)) ++ + static int + smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, + const char *full_path, char **target_path, +@@ -1051,7 +1054,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, + struct cifs_fid fid; + struct smb2_err_rsp *err_buf = NULL; + struct smb2_symlink_err_rsp *symlink; +- unsigned int sub_len, sub_offset; ++ unsigned int sub_len; ++ unsigned int sub_offset; ++ unsigned int print_len; ++ unsigned int 
print_offset; + + cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); + +@@ -1072,11 +1078,33 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, + kfree(utf16_path); + return -ENOENT; + } ++ ++ if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) || ++ get_rfc1002_length(err_buf) + 4 < SMB2_SYMLINK_STRUCT_SIZE) { ++ kfree(utf16_path); ++ return -ENOENT; ++ } ++ + /* open must fail on symlink - reset rc */ + rc = 0; + symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData; + sub_len = le16_to_cpu(symlink->SubstituteNameLength); + sub_offset = le16_to_cpu(symlink->SubstituteNameOffset); ++ print_len = le16_to_cpu(symlink->PrintNameLength); ++ print_offset = le16_to_cpu(symlink->PrintNameOffset); ++ ++ if (get_rfc1002_length(err_buf) + 4 < ++ SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) { ++ kfree(utf16_path); ++ return -ENOENT; ++ } ++ ++ if (get_rfc1002_length(err_buf) + 4 < ++ SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) { ++ kfree(utf16_path); ++ return -ENOENT; ++ } ++ + *target_path = cifs_strndup_from_utf16( + (char *)symlink->PathBuffer + sub_offset, + sub_len, true, cifs_sb->local_nls); +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c +index 36345fefa3ff..2d964ce45606 100644 +--- a/fs/jbd2/commit.c ++++ b/fs/jbd2/commit.c +@@ -124,7 +124,7 @@ static int journal_submit_commit_record(journal_t *journal, + struct commit_header *tmp; + struct buffer_head *bh; + int ret; +- struct timespec now = current_kernel_time(); ++ struct timespec64 now = current_kernel_time64(); + + *cbh = NULL; + +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 7b9316406930..7a9b6e347249 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -1261,6 +1261,9 @@ int nfs_updatepage(struct file *file, struct page *page, + dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n", + file, count, (long long)(page_file_offset(page) + offset)); + ++ if (!count) ++ goto out; ++ + if (nfs_can_extend_write(file, page, inode)) { + count = max(count + 
offset, nfs_page_length(page)); + offset = 0; +@@ -1271,7 +1274,7 @@ int nfs_updatepage(struct file *file, struct page *page, + nfs_set_pageerror(page); + else + __set_page_dirty_nobuffers(page); +- ++out: + dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", + status, (long long)i_size_read(inode)); + return status; +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index ed2f64ca49de..f7ea624780a7 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -4882,6 +4882,32 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + return nfs_ok; + } + ++static __be32 ++nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) ++{ ++ struct nfs4_ol_stateid *stp = openlockstateid(s); ++ __be32 ret; ++ ++ mutex_lock(&stp->st_mutex); ++ ++ ret = check_stateid_generation(stateid, &s->sc_stateid, 1); ++ if (ret) ++ goto out; ++ ++ ret = nfserr_locks_held; ++ if (check_for_locks(stp->st_stid.sc_file, ++ lockowner(stp->st_stateowner))) ++ goto out; ++ ++ release_lock_stateid(stp); ++ ret = nfs_ok; ++ ++out: ++ mutex_unlock(&stp->st_mutex); ++ nfs4_put_stid(s); ++ return ret; ++} ++ + __be32 + nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + struct nfsd4_free_stateid *free_stateid) +@@ -4889,7 +4915,6 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + stateid_t *stateid = &free_stateid->fr_stateid; + struct nfs4_stid *s; + struct nfs4_delegation *dp; +- struct nfs4_ol_stateid *stp; + struct nfs4_client *cl = cstate->session->se_client; + __be32 ret = nfserr_bad_stateid; + +@@ -4908,18 +4933,9 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + ret = nfserr_locks_held; + break; + case NFS4_LOCK_STID: +- ret = check_stateid_generation(stateid, &s->sc_stateid, 1); +- if (ret) +- break; +- stp = openlockstateid(s); +- ret = nfserr_locks_held; +- if (check_for_locks(stp->st_stid.sc_file, +- lockowner(stp->st_stateowner))) +- 
break; +- WARN_ON(!unhash_lock_stateid(stp)); ++ atomic_inc(&s->sc_count); + spin_unlock(&cl->cl_lock); +- nfs4_put_stid(s); +- ret = nfs_ok; ++ ret = nfsd4_free_lock_stateid(stateid, s); + goto out; + case NFS4_REVOKED_DELEG_STID: + dp = delegstateid(s); +@@ -5486,7 +5502,7 @@ static __be32 + lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, + struct nfs4_ol_stateid *ost, + struct nfsd4_lock *lock, +- struct nfs4_ol_stateid **lst, bool *new) ++ struct nfs4_ol_stateid **plst, bool *new) + { + __be32 status; + struct nfs4_file *fi = ost->st_stid.sc_file; +@@ -5494,7 +5510,9 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, + struct nfs4_client *cl = oo->oo_owner.so_client; + struct inode *inode = d_inode(cstate->current_fh.fh_dentry); + struct nfs4_lockowner *lo; ++ struct nfs4_ol_stateid *lst; + unsigned int strhashval; ++ bool hashed; + + lo = find_lockowner_str(cl, &lock->lk_new_owner); + if (!lo) { +@@ -5510,12 +5528,27 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, + goto out; + } + +- *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); +- if (*lst == NULL) { ++retry: ++ lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); ++ if (lst == NULL) { + status = nfserr_jukebox; + goto out; + } ++ ++ mutex_lock(&lst->st_mutex); ++ ++ /* See if it's still hashed to avoid race with FREE_STATEID */ ++ spin_lock(&cl->cl_lock); ++ hashed = !list_empty(&lst->st_perfile); ++ spin_unlock(&cl->cl_lock); ++ ++ if (!hashed) { ++ mutex_unlock(&lst->st_mutex); ++ nfs4_put_stid(&lst->st_stid); ++ goto retry; ++ } + status = nfs_ok; ++ *plst = lst; + out: + nfs4_put_stateowner(&lo->lo_owner); + return status; +@@ -5582,8 +5615,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + goto out; + status = lookup_or_create_lock_state(cstate, open_stp, lock, + &lock_stp, &new); +- if (status == nfs_ok) +- mutex_lock(&lock_stp->st_mutex); + } else { + status = nfs4_preprocess_seqid_op(cstate, + 
lock->lk_old_lock_seqid, +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c +index a1acc6004a91..70a7bbe199d0 100644 +--- a/fs/overlayfs/super.c ++++ b/fs/overlayfs/super.c +@@ -376,7 +376,8 @@ static struct ovl_entry *ovl_alloc_entry(unsigned int numlower) + static bool ovl_dentry_remote(struct dentry *dentry) + { + return dentry->d_flags & +- (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE); ++ (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE | ++ DCACHE_OP_REAL); + } + + static bool ovl_dentry_weird(struct dentry *dentry) +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h +index 1b4d69f68c33..140c29635069 100644 +--- a/include/linux/backing-dev-defs.h ++++ b/include/linux/backing-dev-defs.h +@@ -163,6 +163,7 @@ struct backing_dev_info { + wait_queue_head_t wb_waitq; + + struct device *dev; ++ struct device *owner; + + struct timer_list laptop_mode_wb_timer; + +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h +index c82794f20110..89d3de3e096b 100644 +--- a/include/linux/backing-dev.h ++++ b/include/linux/backing-dev.h +@@ -24,6 +24,7 @@ __printf(3, 4) + int bdi_register(struct backing_dev_info *bdi, struct device *parent, + const char *fmt, ...); + int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); ++int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner); + void bdi_unregister(struct backing_dev_info *bdi); + + int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); +diff --git a/include/linux/bio.h b/include/linux/bio.h +index fbe47bc700bd..42e4e3cbb001 100644 +--- a/include/linux/bio.h ++++ b/include/linux/bio.h +@@ -527,11 +527,14 @@ extern unsigned int bvec_nr_vecs(unsigned short idx); + int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); + int bio_associate_current(struct bio *bio); + void bio_disassociate_task(struct bio *bio); ++void bio_clone_blkcg_association(struct bio *dst, struct bio *src); + #else /* 
CONFIG_BLK_CGROUP */ + static inline int bio_associate_blkcg(struct bio *bio, + struct cgroup_subsys_state *blkcg_css) { return 0; } + static inline int bio_associate_current(struct bio *bio) { return -ENOENT; } + static inline void bio_disassociate_task(struct bio *bio) { } ++static inline void bio_clone_blkcg_association(struct bio *dst, ++ struct bio *src) { } + #endif /* CONFIG_BLK_CGROUP */ + + #ifdef CONFIG_HIGHMEM +diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h +index f079fb1a31f7..a8786d27ab81 100644 +--- a/include/linux/mlx5/qp.h ++++ b/include/linux/mlx5/qp.h +@@ -160,6 +160,7 @@ enum { + enum { + MLX5_FENCE_MODE_NONE = 0 << 5, + MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, ++ MLX5_FENCE_MODE_FENCE = 2 << 5, + MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, + MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, + }; +@@ -534,9 +535,9 @@ struct mlx5_destroy_qp_mbox_out { + struct mlx5_modify_qp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; +- u8 rsvd1[4]; +- __be32 optparam; + u8 rsvd0[4]; ++ __be32 optparam; ++ u8 rsvd1[4]; + struct mlx5_qp_context ctx; + }; + +diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h +index 28ee5c2e6bcd..711322a8ee35 100644 +--- a/include/target/target_core_backend.h ++++ b/include/target/target_core_backend.h +@@ -96,6 +96,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, + bool target_sense_desc_format(struct se_device *dev); + sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); + bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, +- struct request_queue *q, int block_size); ++ struct request_queue *q); + + #endif /* TARGET_CORE_BACKEND_H */ +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 689f4d207122..59081c73b296 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -139,6 +139,7 @@ enum se_cmd_flags_table { + SCF_COMPARE_AND_WRITE_POST = 0x00100000, + 
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000, + SCF_ACK_KREF = 0x00400000, ++ SCF_TASK_ATTR_SET = 0x01000000, + }; + + /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ +diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h +index 7fb2557a760e..ce9ea736f1d7 100644 +--- a/include/target/target_core_fabric.h ++++ b/include/target/target_core_fabric.h +@@ -163,7 +163,6 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t); + void core_tmr_release_req(struct se_tmr_req *); + int transport_generic_handle_tmr(struct se_cmd *); + void transport_generic_request_failure(struct se_cmd *, sense_reason_t); +-void __target_execute_cmd(struct se_cmd *); + int transport_lookup_tmr_lun(struct se_cmd *, u64); + void core_allocate_nexus_loss_ua(struct se_node_acl *acl); + +diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h +index 003dca933803..5664ca07c9c7 100644 +--- a/include/trace/events/sunrpc.h ++++ b/include/trace/events/sunrpc.h +@@ -529,20 +529,27 @@ TRACE_EVENT(svc_xprt_do_enqueue, + + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) +- __field_struct(struct sockaddr_storage, ss) + __field(int, pid) + __field(unsigned long, flags) ++ __dynamic_array(unsigned char, addr, xprt != NULL ? ++ xprt->xpt_remotelen : 0) + ), + + TP_fast_assign( + __entry->xprt = xprt; +- xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); + __entry->pid = rqst? rqst->rq_task->pid : 0; +- __entry->flags = xprt ? xprt->xpt_flags : 0; ++ if (xprt) { ++ memcpy(__get_dynamic_array(addr), ++ &xprt->xpt_remote, ++ xprt->xpt_remotelen); ++ __entry->flags = xprt->xpt_flags; ++ } else ++ __entry->flags = 0; + ), + + TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt, +- (struct sockaddr *)&__entry->ss, ++ __get_dynamic_array_len(addr) != 0 ? 
++ (struct sockaddr *)__get_dynamic_array(addr) : NULL, + __entry->pid, show_svc_xprt_flags(__entry->flags)) + ); + +@@ -553,18 +560,25 @@ TRACE_EVENT(svc_xprt_dequeue, + + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) +- __field_struct(struct sockaddr_storage, ss) + __field(unsigned long, flags) ++ __dynamic_array(unsigned char, addr, xprt != NULL ? ++ xprt->xpt_remotelen : 0) + ), + + TP_fast_assign( +- __entry->xprt = xprt, +- xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); +- __entry->flags = xprt ? xprt->xpt_flags : 0; ++ __entry->xprt = xprt; ++ if (xprt) { ++ memcpy(__get_dynamic_array(addr), ++ &xprt->xpt_remote, ++ xprt->xpt_remotelen); ++ __entry->flags = xprt->xpt_flags; ++ } else ++ __entry->flags = 0; + ), + + TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt, +- (struct sockaddr *)&__entry->ss, ++ __get_dynamic_array_len(addr) != 0 ? ++ (struct sockaddr *)__get_dynamic_array(addr) : NULL, + show_svc_xprt_flags(__entry->flags)) + ); + +@@ -592,19 +606,26 @@ TRACE_EVENT(svc_handle_xprt, + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) + __field(int, len) +- __field_struct(struct sockaddr_storage, ss) + __field(unsigned long, flags) ++ __dynamic_array(unsigned char, addr, xprt != NULL ? ++ xprt->xpt_remotelen : 0) + ), + + TP_fast_assign( + __entry->xprt = xprt; +- xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); + __entry->len = len; +- __entry->flags = xprt ? xprt->xpt_flags : 0; ++ if (xprt) { ++ memcpy(__get_dynamic_array(addr), ++ &xprt->xpt_remote, ++ xprt->xpt_remotelen); ++ __entry->flags = xprt->xpt_flags; ++ } else ++ __entry->flags = 0; + ), + + TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt, +- (struct sockaddr *)&__entry->ss, ++ __get_dynamic_array_len(addr) != 0 ? 
++ (struct sockaddr *)__get_dynamic_array(addr) : NULL, + __entry->len, show_svc_xprt_flags(__entry->flags)) + ); + #endif /* _TRACE_SUNRPC_H */ +diff --git a/kernel/auditsc.c b/kernel/auditsc.c +index b86cc04959de..48f45987dc6c 100644 +--- a/kernel/auditsc.c ++++ b/kernel/auditsc.c +@@ -73,6 +73,7 @@ + #include + #include + #include ++#include + #include + + #include "audit.h" +@@ -82,7 +83,8 @@ + #define AUDITSC_SUCCESS 1 + #define AUDITSC_FAILURE 2 + +-/* no execve audit message should be longer than this (userspace limits) */ ++/* no execve audit message should be longer than this (userspace limits), ++ * see the note near the top of audit_log_execve_info() about this value */ + #define MAX_EXECVE_AUDIT_LEN 7500 + + /* max length to print of cmdline/proctitle value during audit */ +@@ -988,184 +990,178 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid, + return rc; + } + +-/* +- * to_send and len_sent accounting are very loose estimates. We aren't +- * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being +- * within about 500 bytes (next page boundary) +- * +- * why snprintf? an int is up to 12 digits long. if we just assumed when +- * logging that a[%d]= was going to be 16 characters long we would be wasting +- * space in every audit message. In one 7500 byte message we can log up to +- * about 1000 min size arguments. That comes down to about 50% waste of space +- * if we didn't do the snprintf to find out how long arg_num_len was. +- */ +-static int audit_log_single_execve_arg(struct audit_context *context, +- struct audit_buffer **ab, +- int arg_num, +- size_t *len_sent, +- const char __user *p, +- char *buf) ++static void audit_log_execve_info(struct audit_context *context, ++ struct audit_buffer **ab) + { +- char arg_num_len_buf[12]; +- const char __user *tmp_p = p; +- /* how many digits are in arg_num? 
5 is the length of ' a=""' */ +- size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5; +- size_t len, len_left, to_send; +- size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN; +- unsigned int i, has_cntl = 0, too_long = 0; +- int ret; +- +- /* strnlen_user includes the null we don't want to send */ +- len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1; +- +- /* +- * We just created this mm, if we can't find the strings +- * we just copied into it something is _very_ wrong. Similar +- * for strings that are too long, we should not have created +- * any. +- */ +- if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) { +- send_sig(SIGKILL, current, 0); +- return -1; ++ long len_max; ++ long len_rem; ++ long len_full; ++ long len_buf; ++ long len_abuf; ++ long len_tmp; ++ bool require_data; ++ bool encode; ++ unsigned int iter; ++ unsigned int arg; ++ char *buf_head; ++ char *buf; ++ const char __user *p = (const char __user *)current->mm->arg_start; ++ ++ /* NOTE: this buffer needs to be large enough to hold all the non-arg ++ * data we put in the audit record for this argument (see the ++ * code below) ... 
at this point in time 96 is plenty */ ++ char abuf[96]; ++ ++ /* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the ++ * current value of 7500 is not as important as the fact that it ++ * is less than 8k, a setting of 7500 gives us plenty of wiggle ++ * room if we go over a little bit in the logging below */ ++ WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500); ++ len_max = MAX_EXECVE_AUDIT_LEN; ++ ++ /* scratch buffer to hold the userspace args */ ++ buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); ++ if (!buf_head) { ++ audit_panic("out of memory for argv string"); ++ return; + } ++ buf = buf_head; + +- /* walk the whole argument looking for non-ascii chars */ ++ audit_log_format(*ab, "argc=%d", context->execve.argc); ++ ++ len_rem = len_max; ++ len_buf = 0; ++ len_full = 0; ++ require_data = true; ++ encode = false; ++ iter = 0; ++ arg = 0; + do { +- if (len_left > MAX_EXECVE_AUDIT_LEN) +- to_send = MAX_EXECVE_AUDIT_LEN; +- else +- to_send = len_left; +- ret = copy_from_user(buf, tmp_p, to_send); +- /* +- * There is no reason for this copy to be short. We just +- * copied them here, and the mm hasn't been exposed to user- +- * space yet. +- */ +- if (ret) { +- WARN_ON(1); +- send_sig(SIGKILL, current, 0); +- return -1; +- } +- buf[to_send] = '\0'; +- has_cntl = audit_string_contains_control(buf, to_send); +- if (has_cntl) { +- /* +- * hex messages get logged as 2 bytes, so we can only +- * send half as much in each message +- */ +- max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2; +- break; +- } +- len_left -= to_send; +- tmp_p += to_send; +- } while (len_left > 0); +- +- len_left = len; +- +- if (len > max_execve_audit_len) +- too_long = 1; +- +- /* rewalk the argument actually logging the message */ +- for (i = 0; len_left > 0; i++) { +- int room_left; +- +- if (len_left > max_execve_audit_len) +- to_send = max_execve_audit_len; +- else +- to_send = len_left; +- +- /* do we have space left to send this argument in this ab? 
*/ +- room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent; +- if (has_cntl) +- room_left -= (to_send * 2); +- else +- room_left -= to_send; +- if (room_left < 0) { +- *len_sent = 0; +- audit_log_end(*ab); +- *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE); +- if (!*ab) +- return 0; +- } ++ /* NOTE: we don't ever want to trust this value for anything ++ * serious, but the audit record format insists we ++ * provide an argument length for really long arguments, ++ * e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but ++ * to use strncpy_from_user() to obtain this value for ++ * recording in the log, although we don't use it ++ * anywhere here to avoid a double-fetch problem */ ++ if (len_full == 0) ++ len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1; ++ ++ /* read more data from userspace */ ++ if (require_data) { ++ /* can we make more room in the buffer? */ ++ if (buf != buf_head) { ++ memmove(buf_head, buf, len_buf); ++ buf = buf_head; ++ } ++ ++ /* fetch as much as we can of the argument */ ++ len_tmp = strncpy_from_user(&buf_head[len_buf], p, ++ len_max - len_buf); ++ if (len_tmp == -EFAULT) { ++ /* unable to copy from userspace */ ++ send_sig(SIGKILL, current, 0); ++ goto out; ++ } else if (len_tmp == (len_max - len_buf)) { ++ /* buffer is not large enough */ ++ require_data = true; ++ /* NOTE: if we are going to span multiple ++ * buffers force the encoding so we stand ++ * a chance at a sane len_full value and ++ * consistent record encoding */ ++ encode = true; ++ len_full = len_full * 2; ++ p += len_tmp; ++ } else { ++ require_data = false; ++ if (!encode) ++ encode = audit_string_contains_control( ++ buf, len_tmp); ++ /* try to use a trusted value for len_full */ ++ if (len_full < len_max) ++ len_full = (encode ? ++ len_tmp * 2 : len_tmp); ++ p += len_tmp + 1; ++ } ++ len_buf += len_tmp; ++ buf_head[len_buf] = '\0'; + +- /* +- * first record needs to say how long the original string was +- * so we can be sure nothing was lost. 
+- */ +- if ((i == 0) && (too_long)) +- audit_log_format(*ab, " a%d_len=%zu", arg_num, +- has_cntl ? 2*len : len); +- +- /* +- * normally arguments are small enough to fit and we already +- * filled buf above when we checked for control characters +- * so don't bother with another copy_from_user +- */ +- if (len >= max_execve_audit_len) +- ret = copy_from_user(buf, p, to_send); +- else +- ret = 0; +- if (ret) { +- WARN_ON(1); +- send_sig(SIGKILL, current, 0); +- return -1; ++ /* length of the buffer in the audit record? */ ++ len_abuf = (encode ? len_buf * 2 : len_buf + 2); + } +- buf[to_send] = '\0'; +- +- /* actually log it */ +- audit_log_format(*ab, " a%d", arg_num); +- if (too_long) +- audit_log_format(*ab, "[%d]", i); +- audit_log_format(*ab, "="); +- if (has_cntl) +- audit_log_n_hex(*ab, buf, to_send); +- else +- audit_log_string(*ab, buf); +- +- p += to_send; +- len_left -= to_send; +- *len_sent += arg_num_len; +- if (has_cntl) +- *len_sent += to_send * 2; +- else +- *len_sent += to_send; +- } +- /* include the null we didn't log */ +- return len + 1; +-} + +-static void audit_log_execve_info(struct audit_context *context, +- struct audit_buffer **ab) +-{ +- int i, len; +- size_t len_sent = 0; +- const char __user *p; +- char *buf; ++ /* write as much as we can to the audit log */ ++ if (len_buf > 0) { ++ /* NOTE: some magic numbers here - basically if we ++ * can't fit a reasonable amount of data into the ++ * existing audit buffer, flush it and start with ++ * a new buffer */ ++ if ((sizeof(abuf) + 8) > len_rem) { ++ len_rem = len_max; ++ audit_log_end(*ab); ++ *ab = audit_log_start(context, ++ GFP_KERNEL, AUDIT_EXECVE); ++ if (!*ab) ++ goto out; ++ } + +- p = (const char __user *)current->mm->arg_start; ++ /* create the non-arg portion of the arg record */ ++ len_tmp = 0; ++ if (require_data || (iter > 0) || ++ ((len_abuf + sizeof(abuf)) > len_rem)) { ++ if (iter == 0) { ++ len_tmp += snprintf(&abuf[len_tmp], ++ sizeof(abuf) - len_tmp, ++ " a%d_len=%lu", 
++ arg, len_full); ++ } ++ len_tmp += snprintf(&abuf[len_tmp], ++ sizeof(abuf) - len_tmp, ++ " a%d[%d]=", arg, iter++); ++ } else ++ len_tmp += snprintf(&abuf[len_tmp], ++ sizeof(abuf) - len_tmp, ++ " a%d=", arg); ++ WARN_ON(len_tmp >= sizeof(abuf)); ++ abuf[sizeof(abuf) - 1] = '\0'; ++ ++ /* log the arg in the audit record */ ++ audit_log_format(*ab, "%s", abuf); ++ len_rem -= len_tmp; ++ len_tmp = len_buf; ++ if (encode) { ++ if (len_abuf > len_rem) ++ len_tmp = len_rem / 2; /* encoding */ ++ audit_log_n_hex(*ab, buf, len_tmp); ++ len_rem -= len_tmp * 2; ++ len_abuf -= len_tmp * 2; ++ } else { ++ if (len_abuf > len_rem) ++ len_tmp = len_rem - 2; /* quotes */ ++ audit_log_n_string(*ab, buf, len_tmp); ++ len_rem -= len_tmp + 2; ++ /* don't subtract the "2" because we still need ++ * to add quotes to the remaining string */ ++ len_abuf -= len_tmp; ++ } ++ len_buf -= len_tmp; ++ buf += len_tmp; ++ } + +- audit_log_format(*ab, "argc=%d", context->execve.argc); ++ /* ready to move to the next argument? */ ++ if ((len_buf == 0) && !require_data) { ++ arg++; ++ iter = 0; ++ len_full = 0; ++ require_data = true; ++ encode = false; ++ } ++ } while (arg < context->execve.argc); + +- /* +- * we need some kernel buffer to hold the userspace args. Just +- * allocate one big one rather than allocating one of the right size +- * for every single argument inside audit_log_single_execve_arg() +- * should be <8k allocation so should be pretty safe. 
+- */ +- buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); +- if (!buf) { +- audit_panic("out of memory for argv string"); +- return; +- } ++ /* NOTE: the caller handles the final audit_log_end() call */ + +- for (i = 0; i < context->execve.argc; i++) { +- len = audit_log_single_execve_arg(context, ab, i, +- &len_sent, p, buf); +- if (len <= 0) +- break; +- p += len; +- } +- kfree(buf); ++out: ++ kfree(buf_head); + } + + static void show_special(struct audit_context *context, int *call_panic) +diff --git a/kernel/module.c b/kernel/module.c +index 0e5c71195f18..b14a4f31221f 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -2606,13 +2606,18 @@ static inline void kmemleak_load_module(const struct module *mod, + #endif + + #ifdef CONFIG_MODULE_SIG +-static int module_sig_check(struct load_info *info) ++static int module_sig_check(struct load_info *info, int flags) + { + int err = -ENOKEY; + const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; + const void *mod = info->hdr; + +- if (info->len > markerlen && ++ /* ++ * Require flags == 0, as a module with version information ++ * removed is no longer the module that was signed ++ */ ++ if (flags == 0 && ++ info->len > markerlen && + memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { + /* We truncate the module to discard the signature */ + info->len -= markerlen; +@@ -2631,7 +2636,7 @@ static int module_sig_check(struct load_info *info) + return err; + } + #else /* !CONFIG_MODULE_SIG */ +-static int module_sig_check(struct load_info *info) ++static int module_sig_check(struct load_info *info, int flags) + { + return 0; + } +@@ -3444,7 +3449,7 @@ static int load_module(struct load_info *info, const char __user *uargs, + long err; + char *after_dashes; + +- err = module_sig_check(info); ++ err = module_sig_check(info, flags); + if (err) + goto free_copy; + +diff --git a/mm/backing-dev.c b/mm/backing-dev.c +index cbe6f0b96f29..9ef80bf441b3 100644 +--- a/mm/backing-dev.c ++++ 
b/mm/backing-dev.c +@@ -825,6 +825,20 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev) + } + EXPORT_SYMBOL(bdi_register_dev); + ++int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner) ++{ ++ int rc; ++ ++ rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt), ++ MINOR(owner->devt)); ++ if (rc) ++ return rc; ++ bdi->owner = owner; ++ get_device(owner); ++ return 0; ++} ++EXPORT_SYMBOL(bdi_register_owner); ++ + /* + * Remove bdi from bdi_list, and ensure that it is no longer visible + */ +@@ -849,6 +863,11 @@ void bdi_unregister(struct backing_dev_info *bdi) + device_unregister(bdi->dev); + bdi->dev = NULL; + } ++ ++ if (bdi->owner) { ++ put_device(bdi->owner); ++ bdi->owner = NULL; ++ } + } + + void bdi_exit(struct backing_dev_info *bdi) +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index ef6963b577fd..0c31f184daf8 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -2170,6 +2170,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, + * and reducing the surplus. 
+ */ + spin_unlock(&hugetlb_lock); ++ ++ /* yield cpu to avoid soft lockup */ ++ cond_resched(); ++ + if (hstate_is_gigantic(h)) + ret = alloc_fresh_gigantic_page(h, nodes_allowed); + else +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c +index 1bb551527044..d9bbbded49ef 100644 +--- a/net/bluetooth/l2cap_sock.c ++++ b/net/bluetooth/l2cap_sock.c +@@ -927,7 +927,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, + break; + } + +- if (get_user(opt, (u32 __user *) optval)) { ++ if (get_user(opt, (u16 __user *) optval)) { + err = -EFAULT; + break; + } +diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c +index 28cddc85b700..bfa2b6d5b5cf 100644 +--- a/net/netlabel/netlabel_kapi.c ++++ b/net/netlabel/netlabel_kapi.c +@@ -824,7 +824,11 @@ socket_setattr_return: + */ + void netlbl_sock_delattr(struct sock *sk) + { +- cipso_v4_sock_delattr(sk); ++ switch (sk->sk_family) { ++ case AF_INET: ++ cipso_v4_sock_delattr(sk); ++ break; ++ } + } + + /** +@@ -987,7 +991,11 @@ req_setattr_return: + */ + void netlbl_req_delattr(struct request_sock *req) + { +- cipso_v4_req_delattr(req); ++ switch (req->rsk_ops->family) { ++ case AF_INET: ++ cipso_v4_req_delattr(req); ++ break; ++ } + } + + /** +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c +index e167592793a7..42396a74405d 100644 +--- a/scripts/recordmcount.c ++++ b/scripts/recordmcount.c +@@ -33,10 +33,17 @@ + #include + #include + ++/* ++ * glibc synced up and added the metag number but didn't add the relocations. ++ * Work around this in a crude manner for now. ++ */ + #ifndef EM_METAG +-/* Remove this when these make it to the standard system elf.h. 
*/ + #define EM_METAG 174 ++#endif ++#ifndef R_METAG_ADDR32 + #define R_METAG_ADDR32 2 ++#endif ++#ifndef R_METAG_NONE + #define R_METAG_NONE 3 + #endif + +diff --git a/sound/hda/array.c b/sound/hda/array.c +index 516795baa7db..5dfa610e4471 100644 +--- a/sound/hda/array.c ++++ b/sound/hda/array.c +@@ -21,13 +21,15 @@ void *snd_array_new(struct snd_array *array) + return NULL; + if (array->used >= array->alloced) { + int num = array->alloced + array->alloc_align; ++ int oldsize = array->alloced * array->elem_size; + int size = (num + 1) * array->elem_size; + void *nlist; + if (snd_BUG_ON(num >= 4096)) + return NULL; +- nlist = krealloc(array->list, size, GFP_KERNEL | __GFP_ZERO); ++ nlist = krealloc(array->list, size, GFP_KERNEL); + if (!nlist) + return NULL; ++ memset(nlist + oldsize, 0, size - oldsize); + array->list = nlist; + array->alloced = num; + } +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 8218cace8fea..e769e5764cba 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2288,6 +2288,8 @@ static const struct pci_device_id azx_ids[] = { + { PCI_DEVICE(0x1022, 0x780d), + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, + /* ATI HDMI */ ++ { PCI_DEVICE(0x1002, 0x0002), ++ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0x1308), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0x157a), +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index abcb5a6a1cd9..f25479ba3981 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -4674,6 +4674,22 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec, + } + } + ++static void alc298_fixup_speaker_volume(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) { ++ /* The speaker is routed to the Node 0x06 by a mistake, as a result ++ we 
can't adjust the speaker's volume since this node does not has ++ Amp-out capability. we change the speaker's route to: ++ Node 0x02 (Audio Output) -> Node 0x0c (Audio Mixer) -> Node 0x17 ( ++ Pin Complex), since Node 0x02 has Amp-out caps, we can adjust ++ speaker's volume now. */ ++ ++ hda_nid_t conn1[1] = { 0x0c }; ++ snd_hda_override_conn_list(codec, 0x17, 1, conn1); ++ } ++} ++ + /* Hook to update amp GPIO4 for automute */ + static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, + struct hda_jack_callback *jack) +@@ -4823,6 +4839,7 @@ enum { + ALC280_FIXUP_HP_HEADSET_MIC, + ALC221_FIXUP_HP_FRONT_MIC, + ALC292_FIXUP_TPT460, ++ ALC298_FIXUP_SPK_VOLUME, + }; + + static const struct hda_fixup alc269_fixups[] = { +@@ -5478,6 +5495,12 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE, + }, ++ [ALC298_FIXUP_SPK_VOLUME] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc298_fixup_speaker_volume, ++ .chained = true, ++ .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -5524,6 +5547,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), + SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), ++ SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), +@@ -5799,6 +5823,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + {0x1b, 0x01014020}, + {0x21, 0x0221103f}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ++ {0x14, 
0x90170130}, ++ {0x1b, 0x02011020}, ++ {0x21, 0x0221103f}), ++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x14, 0x90170150}, + {0x1b, 0x02011020}, + {0x21, 0x0221105f}), +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 510df220d1b5..336ed267c407 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -142,6 +142,7 @@ int vcpu_load(struct kvm_vcpu *vcpu) + put_cpu(); + return 0; + } ++EXPORT_SYMBOL_GPL(vcpu_load); + + void vcpu_put(struct kvm_vcpu *vcpu) + { +@@ -151,6 +152,7 @@ void vcpu_put(struct kvm_vcpu *vcpu) + preempt_enable(); + mutex_unlock(&vcpu->mutex); + } ++EXPORT_SYMBOL_GPL(vcpu_put); + + static void ack_flush(void *_completed) + { diff --git a/patch/kernel/marvell-dev/patch-4.4.19-20.patch b/patch/kernel/marvell-dev/patch-4.4.19-20.patch new file mode 100644 index 000000000..f1c008e6e --- /dev/null +++ b/patch/kernel/marvell-dev/patch-4.4.19-20.patch @@ -0,0 +1,3676 @@ +diff --git a/Makefile b/Makefile +index 695c64ec160c..b74d60081a16 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 19 ++SUBLEVEL = 20 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/Makefile b/arch/arc/Makefile +index 209d8451e23d..c05ea2b54276 100644 +--- a/arch/arc/Makefile ++++ b/arch/arc/Makefile +@@ -18,6 +18,20 @@ cflags-y += -fno-common -pipe -fno-builtin -D__linux__ + cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 + cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs + ++is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0) ++ ++ifdef CONFIG_ISA_ARCOMPACT ++ifeq ($(is_700), 0) ++ $(error Toolchain not configured for ARCompact builds) ++endif ++endif ++ ++ifdef CONFIG_ISA_ARCV2 ++ifeq ($(is_700), 1) ++ $(error Toolchain not configured for ARCv2 builds) ++endif ++endif ++ + ifdef CONFIG_ARC_CURR_IN_REG + # For a global register defintion, make sure it gets passed to every file + # We had a customer reported bug where some code built 
in kernel was NOT using +diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h +index 7fac7d85ed6a..2c30a016cf15 100644 +--- a/arch/arc/include/asm/arcregs.h ++++ b/arch/arc/include/asm/arcregs.h +@@ -374,12 +374,6 @@ static inline int is_isa_arcompact(void) + return IS_ENABLED(CONFIG_ISA_ARCOMPACT); + } + +-#if defined(CONFIG_ISA_ARCOMPACT) && !defined(_CPU_DEFAULT_A7) +-#error "Toolchain not configured for ARCompact builds" +-#elif defined(CONFIG_ISA_ARCV2) && !defined(_CPU_DEFAULT_HS) +-#error "Toolchain not configured for ARCv2 builds" +-#endif +- + #endif /* __ASEMBLY__ */ + + #endif /* _ASM_ARC_ARCREGS_H */ +diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h +index ad7860c5ce15..51597f344a62 100644 +--- a/arch/arc/include/asm/entry.h ++++ b/arch/arc/include/asm/entry.h +@@ -142,7 +142,7 @@ + + #ifdef CONFIG_ARC_CURR_IN_REG + ; Retrieve orig r25 and save it with rest of callee_regs +- ld.as r12, [r12, PT_user_r25] ++ ld r12, [r12, PT_user_r25] + PUSH r12 + #else + PUSH r25 +@@ -198,7 +198,7 @@ + + ; SP is back to start of pt_regs + #ifdef CONFIG_ARC_CURR_IN_REG +- st.as r12, [sp, PT_user_r25] ++ st r12, [sp, PT_user_r25] + #endif + .endm + +diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h +index c1d36458bfb7..4c6eed80cd8b 100644 +--- a/arch/arc/include/asm/irqflags-compact.h ++++ b/arch/arc/include/asm/irqflags-compact.h +@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void) + .endm + + .macro IRQ_ENABLE scratch ++ TRACE_ASM_IRQ_ENABLE + lr \scratch, [status32] + or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK) + flag \scratch +- TRACE_ASM_IRQ_ENABLE + .endm + + #endif /* __ASSEMBLY__ */ +diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c +index ff7ff6cbb811..aaf1e2d1d900 100644 +--- a/arch/arc/mm/cache.c ++++ b/arch/arc/mm/cache.c +@@ -914,6 +914,15 @@ void arc_cache_init(void) + + printk(arc_cache_mumbojumbo(0, str, sizeof(str))); + ++ /* ++ * 
Only master CPU needs to execute rest of function: ++ * - Assume SMP so all cores will have same cache config so ++ * any geomtry checks will be same for all ++ * - IOC setup / dma callbacks only need to be setup once ++ */ ++ if (cpu) ++ return; ++ + if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) { + struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; + +diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi +index 8fe39e1b680e..e0ee2b00d573 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi +@@ -262,6 +262,8 @@ + #io-channel-cells = <1>; + clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>; + clock-names = "saradc", "apb_pclk"; ++ resets = <&cru SRST_SARADC>; ++ reset-names = "saradc-apb"; + status = "disabled"; + }; + +diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h +index bc6492b9a924..44dd892a4bbe 100644 +--- a/arch/arm64/include/asm/elf.h ++++ b/arch/arm64/include/asm/elf.h +@@ -136,6 +136,7 @@ typedef struct user_fpsimd_state elf_fpregset_t; + + #define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT); + ++/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ + #define ARCH_DLINFO \ + do { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ +diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h +index 22d6d8885854..4cf0c17787a8 100644 +--- a/arch/arm64/include/uapi/asm/auxvec.h ++++ b/arch/arm64/include/uapi/asm/auxvec.h +@@ -19,4 +19,6 @@ + /* vDSO location */ + #define AT_SYSINFO_EHDR 33 + ++#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ ++ + #endif +diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h +index c0ae62520d15..274d5bc6ecce 100644 +--- a/arch/parisc/include/uapi/asm/errno.h ++++ b/arch/parisc/include/uapi/asm/errno.h +@@ -97,10 +97,10 @@ + #define ENOTCONN 235 /* Transport endpoint is not connected */ + #define ESHUTDOWN 236 /* Cannot send 
after transport endpoint shutdown */ + #define ETOOMANYREFS 237 /* Too many references: cannot splice */ +-#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */ + #define ETIMEDOUT 238 /* Connection timed out */ + #define ECONNREFUSED 239 /* Connection refused */ +-#define EREMOTERELEASE 240 /* Remote peer released connection */ ++#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */ ++#define EREMOTERELEASE 240 /* Remote peer released connection */ + #define EHOSTDOWN 241 /* Host is down */ + #define EHOSTUNREACH 242 /* No route to host */ + +diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c +index b34e8a54f7db..98949b0df00a 100644 +--- a/arch/powerpc/kernel/eeh.c ++++ b/arch/powerpc/kernel/eeh.c +@@ -677,7 +677,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function) + /* Check if the request is finished successfully */ + if (active_flag) { + rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); +- if (rc <= 0) ++ if (rc < 0) + return rc; + + if (rc & active_flag) +diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S +index 1dd5bd8a8c59..133055311dce 100644 +--- a/arch/um/include/asm/common.lds.S ++++ b/arch/um/include/asm/common.lds.S +@@ -81,7 +81,7 @@ + .altinstr_replacement : { *(.altinstr_replacement) } + /* .exit.text is discard at runtime, not link time, to deal with references + from .altinstructions and .eh_frame */ +- .exit.text : { *(.exit.text) } ++ .exit.text : { EXIT_TEXT } + .exit.data : { *(.exit.data) } + + .preinit_array : { +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h +index 6df2029405a3..3142218e546f 100644 +--- a/arch/x86/include/asm/tlbflush.h ++++ b/arch/x86/include/asm/tlbflush.h +@@ -86,7 +86,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask) + + static inline void __native_flush_tlb(void) + { ++ /* ++ * If current->mm == NULL then we borrow a mm which may change during a ++ * task switch and therefore we must not be 
preempted while we write CR3 ++ * back: ++ */ ++ preempt_disable(); + native_write_cr3(native_read_cr3()); ++ preempt_enable(); + } + + static inline void __native_flush_tlb_global_irq_disabled(void) +diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c +index bf4db6eaec8f..c6aace2bbe08 100644 +--- a/arch/x86/kernel/uprobes.c ++++ b/arch/x86/kernel/uprobes.c +@@ -357,20 +357,22 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) + *cursor &= 0xfe; + } + /* +- * Similar treatment for VEX3 prefix. +- * TODO: add XOP/EVEX treatment when insn decoder supports them ++ * Similar treatment for VEX3/EVEX prefix. ++ * TODO: add XOP treatment when insn decoder supports them + */ +- if (insn->vex_prefix.nbytes == 3) { ++ if (insn->vex_prefix.nbytes >= 3) { + /* + * vex2: c5 rvvvvLpp (has no b bit) + * vex3/xop: c4/8f rxbmmmmm wvvvvLpp + * evex: 62 rxbR00mm wvvvv1pp zllBVaaa +- * (evex will need setting of both b and x since +- * in non-sib encoding evex.x is 4th bit of MODRM.rm) +- * Setting VEX3.b (setting because it has inverted meaning): ++ * Setting VEX3.b (setting because it has inverted meaning). ++ * Setting EVEX.x since (in non-SIB encoding) EVEX.x ++ * is the 4th bit of MODRM.rm, and needs the same treatment. ++ * For VEX3-encoded insns, VEX3.x value has no effect in ++ * non-SIB encoding, the change is superfluous but harmless. + */ + cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1; +- *cursor |= 0x20; ++ *cursor |= 0x60; + } + + /* +@@ -415,12 +417,10 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) + + reg = MODRM_REG(insn); /* Fetch modrm.reg */ + reg2 = 0xff; /* Fetch vex.vvvv */ +- if (insn->vex_prefix.nbytes == 2) +- reg2 = insn->vex_prefix.bytes[1]; +- else if (insn->vex_prefix.nbytes == 3) ++ if (insn->vex_prefix.nbytes) + reg2 = insn->vex_prefix.bytes[2]; + /* +- * TODO: add XOP, EXEV vvvv reading. ++ * TODO: add XOP vvvv reading. 
+ * + * vex.vvvv field is in bits 6-3, bits are inverted. + * But in 32-bit mode, high-order bit may be ignored. +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c +index 6730f965b379..0afd1981e350 100644 +--- a/drivers/acpi/cppc_acpi.c ++++ b/drivers/acpi/cppc_acpi.c +@@ -216,8 +216,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data) + continue; + + cpc_ptr = per_cpu(cpc_desc_ptr, i); +- if (!cpc_ptr) +- continue; ++ if (!cpc_ptr) { ++ retval = -EFAULT; ++ goto err_ret; ++ } + + pdomain = &(cpc_ptr->domain_info); + cpumask_set_cpu(i, pr->shared_cpu_map); +@@ -239,8 +241,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data) + continue; + + match_cpc_ptr = per_cpu(cpc_desc_ptr, j); +- if (!match_cpc_ptr) +- continue; ++ if (!match_cpc_ptr) { ++ retval = -EFAULT; ++ goto err_ret; ++ } + + match_pdomain = &(match_cpc_ptr->domain_info); + if (match_pdomain->domain != pdomain->domain) +@@ -270,8 +274,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data) + continue; + + match_cpc_ptr = per_cpu(cpc_desc_ptr, j); +- if (!match_cpc_ptr) +- continue; ++ if (!match_cpc_ptr) { ++ retval = -EFAULT; ++ goto err_ret; ++ } + + match_pdomain = &(match_cpc_ptr->domain_info); + if (match_pdomain->domain != pdomain->domain) +@@ -502,9 +508,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) + /* Store CPU Logical ID */ + cpc_ptr->cpu_id = pr->id; + +- /* Plug it into this CPUs CPC descriptor. */ +- per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr; +- + /* Parse PSD data for this CPU */ + ret = acpi_get_psd(cpc_ptr, handle); + if (ret) +@@ -517,6 +520,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) + goto out_free; + } + ++ /* Plug PSD data into this CPUs CPC descriptor. 
*/ ++ per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr; ++ + /* Everything looks okay */ + pr_debug("Parsed CPC struct for CPU: %d\n", pr->id); + +diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c +index 11d8209e6e5d..5230e8449d30 100644 +--- a/drivers/acpi/nfit.c ++++ b/drivers/acpi/nfit.c +@@ -1072,11 +1072,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) + { + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; + u64 offset = nfit_blk->stat_offset + mmio->size * bw; ++ const u32 STATUS_MASK = 0x80000037; + + if (mmio->num_lines) + offset = to_interleave_offset(offset, mmio); + +- return readl(mmio->addr.base + offset); ++ return readl(mmio->addr.base + offset) & STATUS_MASK; + } + + static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, +diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c +index 72b6e9ef0ae9..d176e0ece470 100644 +--- a/drivers/acpi/numa.c ++++ b/drivers/acpi/numa.c +@@ -327,10 +327,18 @@ int __init acpi_numa_init(void) + + /* SRAT: Static Resource Affinity Table */ + if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { +- acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY, +- acpi_parse_x2apic_affinity, 0); +- acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY, +- acpi_parse_processor_affinity, 0); ++ struct acpi_subtable_proc srat_proc[2]; ++ ++ memset(srat_proc, 0, sizeof(srat_proc)); ++ srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY; ++ srat_proc[0].handler = acpi_parse_processor_affinity; ++ srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY; ++ srat_proc[1].handler = acpi_parse_x2apic_affinity; ++ ++ acpi_table_parse_entries_array(ACPI_SIG_SRAT, ++ sizeof(struct acpi_table_srat), ++ srat_proc, ARRAY_SIZE(srat_proc), 0); ++ + cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, + acpi_parse_memory_affinity, + NR_NODE_MEMBLKS); +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 78d5f02a073b..dcb3d6245ca5 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -1958,7 
+1958,7 @@ int __init acpi_scan_init(void) + + static struct acpi_probe_entry *ape; + static int acpi_probe_count; +-static DEFINE_SPINLOCK(acpi_probe_lock); ++static DEFINE_MUTEX(acpi_probe_mutex); + + static int __init acpi_match_madt(struct acpi_subtable_header *header, + const unsigned long end) +@@ -1977,7 +1977,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) + if (acpi_disabled) + return 0; + +- spin_lock(&acpi_probe_lock); ++ mutex_lock(&acpi_probe_mutex); + for (ape = ap_head; nr; ape++, nr--) { + if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) { + acpi_probe_count = 0; +@@ -1990,7 +1990,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) + count++; + } + } +- spin_unlock(&acpi_probe_lock); ++ mutex_unlock(&acpi_probe_mutex); + + return count; + } +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c +index 0243d375c6fd..4b3a9e27f1b6 100644 +--- a/drivers/acpi/sysfs.c ++++ b/drivers/acpi/sysfs.c +@@ -555,23 +555,22 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device, + static int get_status(u32 index, acpi_event_status *status, + acpi_handle *handle) + { +- int result = 0; ++ int result; + + if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS) +- goto end; ++ return -EINVAL; + + if (index < num_gpes) { + result = acpi_get_gpe_device(index, handle); + if (result) { + ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND, + "Invalid GPE 0x%x", index)); +- goto end; ++ return result; + } + result = acpi_get_gpe_status(*handle, index, status); + } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS)) + result = acpi_get_event_status(index - num_gpes, status); + +-end: + return result; + } + +diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c +index ea8189f4b021..6dc597126b79 100644 +--- a/drivers/crypto/caam/caamalg.c ++++ b/drivers/crypto/caam/caamalg.c +@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) + OP_ALG_AAI_CTR_MOD128); + const bool 
is_rfc3686 = alg->caam.rfc3686; + ++ if (!ctx->authsize) ++ return 0; ++ + /* NULL encryption / decryption */ + if (!ctx->enckeylen) + return aead_null_set_sh_desc(aead); +@@ -614,7 +617,7 @@ skip_enc: + keys_fit_inline = true; + + /* aead_givencrypt shared descriptor */ +- desc = ctx->sh_desc_givenc; ++ desc = ctx->sh_desc_enc; + + /* Note: Context registers are saved. */ + init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); +@@ -645,13 +648,13 @@ copy_iv: + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + +- /* ivsize + cryptlen = seqoutlen - authsize */ +- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); +- + /* Read and write assoclen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + ++ /* ivsize + cryptlen = seqoutlen - authsize */ ++ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); ++ + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + +@@ -697,7 +700,7 @@ copy_iv: + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); +- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { ++ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c +index 49106ea42887..99d5e11db194 100644 +--- a/drivers/crypto/caam/caamhash.c ++++ b/drivers/crypto/caam/caamhash.c +@@ -1873,6 +1873,7 @@ caam_hash_alloc(struct caam_hash_template *template, + template->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->driver_name); ++ t_alg->ahash_alg.setkey = NULL; + } + alg->cra_module = THIS_MODULE; + alg->cra_init = caam_hash_cra_init; +diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c +index 0794f1cc0018..42f0f229f7f7 100644 +--- 
a/drivers/crypto/nx/nx.c ++++ b/drivers/crypto/nx/nx.c +@@ -392,7 +392,7 @@ static void nx_of_update_msc(struct device *dev, + ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) && + i < msc->triplets; + i++) { +- if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) { ++ if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) { + dev_err(dev, "unknown function code/mode " + "combo: %d/%d (ignored)\n", msc->fc, + msc->mode); +diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c +index 59e4c3af15ed..367b6661ee04 100644 +--- a/drivers/crypto/qat/qat_common/qat_algs.c ++++ b/drivers/crypto/qat/qat_common/qat_algs.c +@@ -1262,8 +1262,8 @@ static struct crypto_alg qat_algs[] = { { + .setkey = qat_alg_ablkcipher_xts_setkey, + .decrypt = qat_alg_ablkcipher_decrypt, + .encrypt = qat_alg_ablkcipher_encrypt, +- .min_keysize = AES_MIN_KEY_SIZE, +- .max_keysize = AES_MAX_KEY_SIZE, ++ .min_keysize = 2 * AES_MIN_KEY_SIZE, ++ .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + }, +diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c +index f1bcc2a163b3..b1bc945f008f 100644 +--- a/drivers/dma/sh/usb-dmac.c ++++ b/drivers/dma/sh/usb-dmac.c +@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev) + { + struct usb_dmac_chan *chan = dev; + irqreturn_t ret = IRQ_NONE; +- u32 mask = USB_DMACHCR_TE; +- u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP; ++ u32 mask = 0; + u32 chcr; ++ bool xfer_end = false; + + spin_lock(&chan->vc.lock); + + chcr = usb_dmac_chan_read(chan, USB_DMACHCR); +- if (chcr & check_bits) +- mask |= USB_DMACHCR_DE | check_bits; ++ if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) { ++ mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP; ++ if (chcr & USB_DMACHCR_DE) ++ xfer_end = true; ++ ret |= IRQ_HANDLED; ++ } + if (chcr & USB_DMACHCR_NULL) { + /* An interruption of TE will happen after we set FTE */ + mask |= USB_DMACHCR_NULL; + chcr |= USB_DMACHCR_FTE; + ret 
|= IRQ_HANDLED; + } +- usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask); ++ if (mask) ++ usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask); + +- if (chcr & check_bits) { ++ if (xfer_end) + usb_dmac_isr_transfer_end(chan); +- ret |= IRQ_HANDLED; +- } + + spin_unlock(&chan->vc.lock); + +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c +index 1b2c2187b347..dc68394da682 100644 +--- a/drivers/edac/edac_mc.c ++++ b/drivers/edac/edac_mc.c +@@ -966,7 +966,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci, + mci->ue_mc += count; + + if (!enable_per_layer_report) { +- mci->ce_noinfo_count += count; ++ mci->ue_noinfo_count += count; + return; + } + +diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig +index b18bea08ff25..469dc378adeb 100644 +--- a/drivers/gpio/Kconfig ++++ b/drivers/gpio/Kconfig +@@ -50,6 +50,7 @@ config GPIO_DEVRES + config OF_GPIO + def_bool y + depends on OF ++ depends on HAS_IOMEM + + config GPIO_ACPI + def_bool y +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h +index 053fc2f465df..ff5566c69f7d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h +@@ -710,9 +710,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); + void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); + int amdgpu_gart_init(struct amdgpu_device *adev); + void amdgpu_gart_fini(struct amdgpu_device *adev); +-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, ++void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, + int pages); +-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, ++int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, + int pages, struct page **pagelist, + dma_addr_t *dma_addr, uint32_t flags); + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +index 0aaa457a1710..51a9942cdb40 100644 +--- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +@@ -331,6 +331,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * + (le16_to_cpu(path->usConnObjectId) & + OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; + ++ /* Skip TV/CV support */ ++ if ((le16_to_cpu(path->usDeviceTag) == ++ ATOM_DEVICE_TV1_SUPPORT) || ++ (le16_to_cpu(path->usDeviceTag) == ++ ATOM_DEVICE_CV_SUPPORT)) ++ continue; ++ ++ if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) { ++ DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n", ++ con_obj_id, le16_to_cpu(path->usDeviceTag)); ++ continue; ++ } ++ + connector_type = + object_connector_convert[con_obj_id]; + connector_object_id = con_obj_id; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +index 7312d729d300..22a613a95bf0 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev) + * Unbinds the requested pages from the gart page table and + * replaces them with the dummy page (all asics). + */ +-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, ++void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, + int pages) + { + unsigned t; +@@ -269,7 +269,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, + * (all asics). + * Returns 0 for success, -EINVAL for failure. 
+ */ +-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, ++int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, + int pages, struct page **pagelist, dma_addr_t *dma_addr, + uint32_t flags) + { +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +index 9e25edafa721..c77a1ebfc632 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +@@ -288,7 +288,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev) + int amdgpu_ib_ring_tests(struct amdgpu_device *adev) + { + unsigned i; +- int r; ++ int r, ret = 0; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; +@@ -309,10 +309,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) + } else { + /* still not good, but we can live with it */ + DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r); ++ ret = r; + } + } + } +- return 0; ++ return ret; + } + + /* +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +index 1cbb16e15307..475c38fe9245 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +@@ -233,8 +233,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, + + adev = amdgpu_get_adev(bo->bdev); + ring = adev->mman.buffer_funcs_ring; +- old_start = old_mem->start << PAGE_SHIFT; +- new_start = new_mem->start << PAGE_SHIFT; ++ old_start = (u64)old_mem->start << PAGE_SHIFT; ++ new_start = (u64)new_mem->start << PAGE_SHIFT; + + switch (old_mem->mem_type) { + case TTM_PL_VRAM: +diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +index 5f712ceddf08..c568293cb6c1 100644 +--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c ++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev); + static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev); + static void 
cik_sdma_set_buffer_funcs(struct amdgpu_device *adev); + static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev); ++static int cik_sdma_soft_reset(void *handle); + + MODULE_FIRMWARE("radeon/bonaire_sdma.bin"); + MODULE_FIRMWARE("radeon/bonaire_sdma1.bin"); +@@ -1030,6 +1031,8 @@ static int cik_sdma_resume(void *handle) + { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + ++ cik_sdma_soft_reset(handle); ++ + return cik_sdma_hw_init(adev); + } + +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c +index 86c7500454b4..b37fe0df743e 100644 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.c ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c +@@ -2747,6 +2747,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev) + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + + ppgtt->base.cleanup(&ppgtt->base); ++ kfree(ppgtt); + } + + if (drm_mm_initialized(&vm->mm)) { +diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c +index 17ae2eb26ce2..d5c06f2764f4 100644 +--- a/drivers/hwmon/iio_hwmon.c ++++ b/drivers/hwmon/iio_hwmon.c +@@ -109,24 +109,24 @@ static int iio_hwmon_probe(struct platform_device *pdev) + + switch (type) { + case IIO_VOLTAGE: +- a->dev_attr.attr.name = kasprintf(GFP_KERNEL, +- "in%d_input", +- in_i++); ++ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, ++ "in%d_input", ++ in_i++); + break; + case IIO_TEMP: +- a->dev_attr.attr.name = kasprintf(GFP_KERNEL, +- "temp%d_input", +- temp_i++); ++ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, ++ "temp%d_input", ++ temp_i++); + break; + case IIO_CURRENT: +- a->dev_attr.attr.name = kasprintf(GFP_KERNEL, +- "curr%d_input", +- curr_i++); ++ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, ++ "curr%d_input", ++ curr_i++); + break; + case IIO_HUMIDITYRELATIVE: +- a->dev_attr.attr.name = kasprintf(GFP_KERNEL, +- "humidity%d_input", +- humidity_i++); ++ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, ++ "humidity%d_input", 
++ humidity_i++); + break; + default: + ret = -EINVAL; +diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c +index a0d95ff682ae..2d5ff86398d0 100644 +--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c ++++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c +@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[], + msg->outsize = request_len; + msg->insize = response_len; + +- result = cros_ec_cmd_xfer(bus->ec, msg); ++ result = cros_ec_cmd_xfer_status(bus->ec, msg); + if (result < 0) { + dev_err(dev, "Error transferring EC i2c message %d\n", result); + goto exit; +diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c +index 0f6f63b20263..7afd226a3321 100644 +--- a/drivers/iio/industrialio-buffer.c ++++ b/drivers/iio/industrialio-buffer.c +@@ -107,6 +107,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, + { + struct iio_dev *indio_dev = filp->private_data; + struct iio_buffer *rb = indio_dev->buffer; ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + size_t datum_size; + size_t to_wait; + int ret; +@@ -131,19 +132,29 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, + else + to_wait = min_t(size_t, n / datum_size, rb->watermark); + ++ add_wait_queue(&rb->pollq, &wait); + do { +- ret = wait_event_interruptible(rb->pollq, +- iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)); +- if (ret) +- return ret; ++ if (!indio_dev->info) { ++ ret = -ENODEV; ++ break; ++ } + +- if (!indio_dev->info) +- return -ENODEV; ++ if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) { ++ if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ ++ wait_woken(&wait, TASK_INTERRUPTIBLE, ++ MAX_SCHEDULE_TIMEOUT); ++ continue; ++ } + + ret = rb->access->read_first_n(rb, n, buf); + if (ret == 0 && (filp->f_flags & O_NONBLOCK)) + ret = -EAGAIN; + } while (ret == 0); ++ remove_wait_queue(&rb->pollq, &wait); + + 
return ret; + } +diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c +index acc5394afb03..29485bc4221c 100644 +--- a/drivers/input/keyboard/tegra-kbc.c ++++ b/drivers/input/keyboard/tegra-kbc.c +@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc) + /* Reset the KBC controller to clear all previous status.*/ + reset_control_assert(kbc->rst); + udelay(100); +- reset_control_assert(kbc->rst); ++ reset_control_deassert(kbc->rst); + udelay(100); + + tegra_kbc_config_pins(kbc); +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c +index 454195709a82..405252a884dd 100644 +--- a/drivers/input/serio/i8042.c ++++ b/drivers/input/serio/i8042.c +@@ -1277,6 +1277,7 @@ static int __init i8042_create_kbd_port(void) + serio->start = i8042_start; + serio->stop = i8042_stop; + serio->close = i8042_port_close; ++ serio->ps2_cmd_mutex = &i8042_mutex; + serio->port_data = port; + serio->dev.parent = &i8042_platform_device->dev; + strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name)); +@@ -1304,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx) + serio->write = i8042_aux_write; + serio->start = i8042_start; + serio->stop = i8042_stop; ++ serio->ps2_cmd_mutex = &i8042_mutex; + serio->port_data = port; + serio->dev.parent = &i8042_platform_device->dev; + if (idx < 0) { +@@ -1373,21 +1375,6 @@ static void i8042_unregister_ports(void) + } + } + +-/* +- * Checks whether port belongs to i8042 controller. 
+- */ +-bool i8042_check_port_owner(const struct serio *port) +-{ +- int i; +- +- for (i = 0; i < I8042_NUM_PORTS; i++) +- if (i8042_ports[i].serio == port) +- return true; +- +- return false; +-} +-EXPORT_SYMBOL(i8042_check_port_owner); +- + static void i8042_free_irqs(void) + { + if (i8042_aux_irq_registered) +diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c +index 316f2c897101..83e9c663aa67 100644 +--- a/drivers/input/serio/libps2.c ++++ b/drivers/input/serio/libps2.c +@@ -56,19 +56,17 @@ EXPORT_SYMBOL(ps2_sendbyte); + + void ps2_begin_command(struct ps2dev *ps2dev) + { +- mutex_lock(&ps2dev->cmd_mutex); ++ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex; + +- if (i8042_check_port_owner(ps2dev->serio)) +- i8042_lock_chip(); ++ mutex_lock(m); + } + EXPORT_SYMBOL(ps2_begin_command); + + void ps2_end_command(struct ps2dev *ps2dev) + { +- if (i8042_check_port_owner(ps2dev->serio)) +- i8042_unlock_chip(); ++ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex; + +- mutex_unlock(&ps2dev->cmd_mutex); ++ mutex_unlock(m); + } + EXPORT_SYMBOL(ps2_end_command); + +diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c +index 8487987458a1..00df3832faab 100644 +--- a/drivers/iommu/arm-smmu-v3.c ++++ b/drivers/iommu/arm-smmu-v3.c +@@ -870,7 +870,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) + * We may have concurrent producers, so we need to be careful + * not to touch any of the shadow cmdq state. 
+ */ +- queue_read(cmd, Q_ENT(q, idx), q->ent_dwords); ++ queue_read(cmd, Q_ENT(q, cons), q->ent_dwords); + dev_err(smmu->dev, "skipping command in error state:\n"); + for (i = 0; i < ARRAY_SIZE(cmd); ++i) + dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); +@@ -881,7 +881,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) + return; + } + +- queue_write(cmd, Q_ENT(q, idx), q->ent_dwords); ++ queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); + } + + static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, +@@ -1025,6 +1025,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, + case STRTAB_STE_0_CFG_S2_TRANS: + ste_live = true; + break; ++ case STRTAB_STE_0_CFG_ABORT: ++ if (disable_bypass) ++ break; + default: + BUG(); /* STE corruption */ + } +diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c +index 58f2fe687a24..347a3c17f73a 100644 +--- a/drivers/iommu/dma-iommu.c ++++ b/drivers/iommu/dma-iommu.c +@@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) + if (!iovad) + return; + +- put_iova_domain(iovad); ++ if (iovad->granule) ++ put_iova_domain(iovad); + kfree(iovad); + domain->iova_cookie = NULL; + } +diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig +index 1dee533634c9..2e6d2fff1096 100644 +--- a/drivers/mmc/host/Kconfig ++++ b/drivers/mmc/host/Kconfig +@@ -97,6 +97,7 @@ config MMC_RICOH_MMC + config MMC_SDHCI_ACPI + tristate "SDHCI support for ACPI enumerated SDHCI controllers" + depends on MMC_SDHCI && ACPI ++ select IOSF_MBI if X86 + help + This selects support for ACPI enumerated SDHCI controllers, + identified by ACPI Compatibility ID PNP0D40 or specific +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c +index 8aea3fa6938b..5a05bf400ca8 100644 +--- a/drivers/mmc/host/sdhci-acpi.c ++++ b/drivers/mmc/host/sdhci-acpi.c +@@ -41,6 +41,11 @@ + #include + #include + ++#ifdef CONFIG_X86 ++#include ++#include ++#endif ++ 
+ #include "sdhci.h" + + enum { +@@ -146,6 +151,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { + .ops = &sdhci_acpi_ops_int, + }; + ++#ifdef CONFIG_X86 ++ ++static bool sdhci_acpi_byt(void) ++{ ++ static const struct x86_cpu_id byt[] = { ++ { X86_VENDOR_INTEL, 6, 0x37 }, ++ {} ++ }; ++ ++ return x86_match_cpu(byt); ++} ++ ++#define BYT_IOSF_SCCEP 0x63 ++#define BYT_IOSF_OCP_NETCTRL0 0x1078 ++#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8) ++ ++static void sdhci_acpi_byt_setting(struct device *dev) ++{ ++ u32 val = 0; ++ ++ if (!sdhci_acpi_byt()) ++ return; ++ ++ if (iosf_mbi_read(BYT_IOSF_SCCEP, 0x06, BYT_IOSF_OCP_NETCTRL0, ++ &val)) { ++ dev_err(dev, "%s read error\n", __func__); ++ return; ++ } ++ ++ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE)) ++ return; ++ ++ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE; ++ ++ if (iosf_mbi_write(BYT_IOSF_SCCEP, 0x07, BYT_IOSF_OCP_NETCTRL0, ++ val)) { ++ dev_err(dev, "%s write error\n", __func__); ++ return; ++ } ++ ++ dev_dbg(dev, "%s completed\n", __func__); ++} ++ ++static bool sdhci_acpi_byt_defer(struct device *dev) ++{ ++ if (!sdhci_acpi_byt()) ++ return false; ++ ++ if (!iosf_mbi_available()) ++ return true; ++ ++ sdhci_acpi_byt_setting(dev); ++ ++ return false; ++} ++ ++#else ++ ++static inline void sdhci_acpi_byt_setting(struct device *dev) ++{ ++} ++ ++static inline bool sdhci_acpi_byt_defer(struct device *dev) ++{ ++ return false; ++} ++ ++#endif ++ + static int bxt_get_cd(struct mmc_host *mmc) + { + int gpio_cd = mmc_gpio_get_cd(mmc); +@@ -337,6 +411,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev) + if (acpi_bus_get_status(device) || !device->status.present) + return -ENODEV; + ++ if (sdhci_acpi_byt_defer(dev)) ++ return -EPROBE_DEFER; ++ + hid = acpi_device_hid(device); + uid = device->pnp.unique_id; + +@@ -460,6 +537,8 @@ static int sdhci_acpi_resume(struct device *dev) + { + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + ++ sdhci_acpi_byt_setting(&c->pdev->dev); ++ + return 
sdhci_resume_host(c->host); + } + +@@ -483,6 +562,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev) + { + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + ++ sdhci_acpi_byt_setting(&c->pdev->dev); ++ + return sdhci_runtime_resume_host(c->host); + } + +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 0c67b57be83c..289a5df0d44a 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -2672,10 +2672,10 @@ static int nvme_dev_add(struct nvme_dev *dev) + return 0; + } + +-static int nvme_dev_map(struct nvme_dev *dev) ++static int nvme_pci_enable(struct nvme_dev *dev) + { + u64 cap; +- int bars, result = -ENOMEM; ++ int result = -ENOMEM; + struct pci_dev *pdev = to_pci_dev(dev->dev); + + if (pci_enable_device_mem(pdev)) +@@ -2683,24 +2683,14 @@ static int nvme_dev_map(struct nvme_dev *dev) + + dev->entry[0].vector = pdev->irq; + pci_set_master(pdev); +- bars = pci_select_bars(pdev, IORESOURCE_MEM); +- if (!bars) +- goto disable_pci; +- +- if (pci_request_selected_regions(pdev, bars, "nvme")) +- goto disable_pci; + + if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && + dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) + goto disable; + +- dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); +- if (!dev->bar) +- goto disable; +- + if (readl(&dev->bar->csts) == -1) { + result = -ENODEV; +- goto unmap; ++ goto disable; + } + + /* +@@ -2710,7 +2700,7 @@ static int nvme_dev_map(struct nvme_dev *dev) + if (!pdev->irq) { + result = pci_enable_msix(pdev, dev->entry, 1); + if (result < 0) +- goto unmap; ++ goto disable; + } + + cap = lo_hi_readq(&dev->bar->cap); +@@ -2734,18 +2724,21 @@ static int nvme_dev_map(struct nvme_dev *dev) + + return 0; + +- unmap: +- iounmap(dev->bar); +- dev->bar = NULL; + disable: + pci_release_regions(pdev); +- disable_pci: +- pci_disable_device(pdev); ++ + return result; + } + + static void nvme_dev_unmap(struct nvme_dev *dev) + { ++ if (dev->bar) ++ iounmap(dev->bar); ++ 
pci_release_regions(to_pci_dev(dev->dev)); ++} ++ ++static void nvme_pci_disable(struct nvme_dev *dev) ++{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + + if (pdev->msi_enabled) +@@ -2753,12 +2746,6 @@ static void nvme_dev_unmap(struct nvme_dev *dev) + else if (pdev->msix_enabled) + pci_disable_msix(pdev); + +- if (dev->bar) { +- iounmap(dev->bar); +- dev->bar = NULL; +- pci_release_regions(pdev); +- } +- + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); + } +@@ -2962,7 +2949,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) + + nvme_dev_list_remove(dev); + +- if (dev->bar) { ++ if (pci_is_enabled(to_pci_dev(dev->dev))) { + nvme_freeze_queues(dev); + csts = readl(&dev->bar->csts); + } +@@ -2976,7 +2963,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) + nvme_shutdown_ctrl(dev); + nvme_disable_queue(dev, 0); + } +- nvme_dev_unmap(dev); ++ nvme_pci_disable(dev); + + for (i = dev->queue_count - 1; i >= 0; i--) + nvme_clear_queue(dev->queues[i]); +@@ -3136,7 +3123,7 @@ static void nvme_probe_work(struct work_struct *work) + bool start_thread = false; + int result; + +- result = nvme_dev_map(dev); ++ result = nvme_pci_enable(dev); + if (result) + goto out; + +@@ -3292,6 +3279,27 @@ static ssize_t nvme_sysfs_reset(struct device *dev, + } + static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); + ++static int nvme_dev_map(struct nvme_dev *dev) ++{ ++ int bars; ++ struct pci_dev *pdev = to_pci_dev(dev->dev); ++ ++ bars = pci_select_bars(pdev, IORESOURCE_MEM); ++ if (!bars) ++ return -ENODEV; ++ if (pci_request_selected_regions(pdev, bars, "nvme")) ++ return -ENODEV; ++ ++ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); ++ if (!dev->bar) ++ goto release; ++ ++ return 0; ++release: ++ pci_release_regions(pdev); ++ return -ENODEV; ++} ++ + static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) + { + int node, result = -ENOMEM; +@@ -3317,6 +3325,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct 
pci_device_id *id) + INIT_WORK(&dev->reset_work, nvme_reset_work); + dev->dev = get_device(&pdev->dev); + pci_set_drvdata(pdev, dev); ++ ++ result = nvme_dev_map(dev); ++ if (result) ++ goto free; ++ + result = nvme_set_instance(dev); + if (result) + goto put_pci; +@@ -3355,6 +3368,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) + nvme_release_instance(dev); + put_pci: + put_device(dev->dev); ++ nvme_dev_unmap(dev); + free: + kfree(dev->queues); + kfree(dev->entry); +@@ -3398,6 +3412,7 @@ static void nvme_remove(struct pci_dev *pdev) + nvme_free_queues(dev, 0); + nvme_release_cmb(dev); + nvme_release_prp_pools(dev); ++ nvme_dev_unmap(dev); + kref_put(&dev->kref, nvme_free_dev); + } + +diff --git a/drivers/of/base.c b/drivers/of/base.c +index 942461f36616..31341290cd91 100644 +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -2253,20 +2253,13 @@ struct device_node *of_graph_get_endpoint_by_regs( + const struct device_node *parent, int port_reg, int reg) + { + struct of_endpoint endpoint; +- struct device_node *node, *prev_node = NULL; +- +- while (1) { +- node = of_graph_get_next_endpoint(parent, prev_node); +- of_node_put(prev_node); +- if (!node) +- break; ++ struct device_node *node = NULL; + ++ for_each_endpoint_of_node(parent, node) { + of_graph_parse_endpoint(node, &endpoint); + if (((port_reg == -1) || (endpoint.port == port_reg)) && + ((reg == -1) || (endpoint.id == reg))) + return node; +- +- prev_node = node; + } + + return NULL; +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c +index 7eaa4c87fec7..10a6a8e5db88 100644 +--- a/drivers/pci/msi.c ++++ b/drivers/pci/msi.c +@@ -1278,6 +1278,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, + if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) + pci_msi_domain_update_chip_ops(info); + ++ info->flags |= MSI_FLAG_ACTIVATE_EARLY; ++ + domain = msi_create_irq_domain(fwnode, info, parent); + if (!domain) + return NULL; +diff --git a/drivers/pci/pci-sysfs.c 
b/drivers/pci/pci-sysfs.c +index eead54cd01b2..d7508704c992 100644 +--- a/drivers/pci/pci-sysfs.c ++++ b/drivers/pci/pci-sysfs.c +@@ -1372,10 +1372,10 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev) + if (!sysfs_initialized) + return -EACCES; + +- if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) +- retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr); +- else ++ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) + retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr); ++ else ++ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr); + if (retval) + goto err; + +@@ -1427,10 +1427,10 @@ err_rom_file: + err_resource_files: + pci_remove_resource_files(pdev); + err_config_file: +- if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) +- sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); +- else ++ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) + sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); ++ else ++ sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); + err: + return retval; + } +@@ -1464,10 +1464,10 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev) + + pci_remove_capabilities_sysfs(pdev); + +- if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) +- sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); +- else ++ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) + sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); ++ else ++ sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); + + pci_remove_resource_files(pdev); + +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 3c4752a288e2..42774bc39786 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -287,6 +287,18 @@ static void quirk_citrine(struct pci_dev *dev) + } + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine); + ++/* ++ * This chip can cause bus lockups if config addresses above 0x600 ++ * are read or written. 
++ */ ++static void quirk_nfp6000(struct pci_dev *dev) ++{ ++ dev->cfg_size = 0x600; ++} ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000); ++ + /* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */ + static void quirk_extend_bar_to_page(struct pci_dev *dev) + { +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c +index 3318f1d6193c..7340ff78839a 100644 +--- a/drivers/pinctrl/pinctrl-amd.c ++++ b/drivers/pinctrl/pinctrl-amd.c +@@ -48,17 +48,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset) + + spin_lock_irqsave(&gpio_dev->lock, flags); + pin_reg = readl(gpio_dev->base + offset * 4); +- /* +- * Suppose BIOS or Bootloader sets specific debounce for the +- * GPIO. if not, set debounce to be 2.75ms and remove glitch. +- */ +- if ((pin_reg & DB_TMR_OUT_MASK) == 0) { +- pin_reg |= 0xf; +- pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); +- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; +- pin_reg &= ~BIT(DB_TMR_LARGE_OFF); +- } +- + pin_reg &= ~BIT(OUTPUT_ENABLE_OFF); + writel(pin_reg, gpio_dev->base + offset * 4); + spin_unlock_irqrestore(&gpio_dev->lock, flags); +@@ -331,15 +320,6 @@ static void amd_gpio_irq_enable(struct irq_data *d) + + spin_lock_irqsave(&gpio_dev->lock, flags); + pin_reg = readl(gpio_dev->base + (d->hwirq)*4); +- /* +- Suppose BIOS or Bootloader sets specific debounce for the +- GPIO. if not, set debounce to be 2.75ms. 
+- */ +- if ((pin_reg & DB_TMR_OUT_MASK) == 0) { +- pin_reg |= 0xf; +- pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); +- pin_reg &= ~BIT(DB_TMR_LARGE_OFF); +- } + pin_reg |= BIT(INTERRUPT_ENABLE_OFF); + pin_reg |= BIT(INTERRUPT_MASK_OFF); + writel(pin_reg, gpio_dev->base + (d->hwirq)*4); +diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c +index 990308ca384f..92430f781eb7 100644 +--- a/drivers/platform/chrome/cros_ec_proto.c ++++ b/drivers/platform/chrome/cros_ec_proto.c +@@ -380,3 +380,20 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + return ret; + } + EXPORT_SYMBOL(cros_ec_cmd_xfer); ++ ++int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, ++ struct cros_ec_command *msg) ++{ ++ int ret; ++ ++ ret = cros_ec_cmd_xfer(ec_dev, msg); ++ if (ret < 0) { ++ dev_err(ec_dev->dev, "Command xfer error (err:%d)\n", ret); ++ } else if (msg->result != EC_RES_SUCCESS) { ++ dev_dbg(ec_dev->dev, "Command result (err: %d)\n", msg->result); ++ return -EPROTO; ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(cros_ec_cmd_xfer_status); +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c +index 4abfbdb285ec..84c13dffa3a8 100644 +--- a/drivers/s390/block/dasd.c ++++ b/drivers/s390/block/dasd.c +@@ -1584,9 +1584,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + unsigned long long now; + int expires; + ++ cqr = (struct dasd_ccw_req *) intparm; + if (IS_ERR(irb)) { + switch (PTR_ERR(irb)) { + case -EIO: ++ if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { ++ device = (struct dasd_device *) cqr->startdev; ++ cqr->status = DASD_CQR_CLEARED; ++ dasd_device_clear_timer(device); ++ wake_up(&dasd_flush_wq); ++ dasd_schedule_device_bh(device); ++ return; ++ } + break; + case -ETIMEDOUT: + DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " +@@ -1602,7 +1611,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + } + + now = get_tod_clock(); +- cqr = (struct dasd_ccw_req *) intparm; + /* check for 
conditions that should be handled immediately */ + if (!cqr || + !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && +diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c +index 54195a117f72..f78cc943d230 100644 +--- a/drivers/scsi/aacraid/commctrl.c ++++ b/drivers/scsi/aacraid/commctrl.c +@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) + struct fib *fibptr; + struct hw_fib * hw_fib = (struct hw_fib *)0; + dma_addr_t hw_fib_pa = (dma_addr_t)0LL; +- unsigned size; ++ unsigned int size, osize; + int retval; + + if (dev->in_reset) { +@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) + * will not overrun the buffer when we copy the memory. Return + * an error if we would. + */ +- size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); ++ osize = size = le16_to_cpu(kfib->header.Size) + ++ sizeof(struct aac_fibhdr); + if (size < le16_to_cpu(kfib->header.SenderSize)) + size = le16_to_cpu(kfib->header.SenderSize); + if (size > dev->max_fib_size) { +@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) + goto cleanup; + } + ++ /* Sanity check the second copy */ ++ if ((osize != le16_to_cpu(kfib->header.Size) + ++ sizeof(struct aac_fibhdr)) ++ || (size < le16_to_cpu(kfib->header.SenderSize))) { ++ retval = -EINVAL; ++ goto cleanup; ++ } ++ + if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { + aac_adapter_interrupt(dev); + /* +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c +index 00ce3e269a43..e994ff944091 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c +@@ -4669,7 +4669,7 @@ static int megasas_init_fw(struct megasas_instance *instance) + /* Find first memory bar */ + bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); + instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); +- if 
(pci_request_selected_regions(instance->pdev, instance->bar, ++ if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, + "megasas: LSI")) { + dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); + return -EBUSY; +@@ -4960,7 +4960,7 @@ fail_ready_state: + iounmap(instance->reg_set); + + fail_ioremap: +- pci_release_selected_regions(instance->pdev, instance->bar); ++ pci_release_selected_regions(instance->pdev, 1<<instance->bar); + + return -EINVAL; + } +@@ -4981,7 +4981,7 @@ static void megasas_release_mfi(struct megasas_instance *instance) + + iounmap(instance->reg_set); + +- pci_release_selected_regions(instance->pdev, instance->bar); ++ pci_release_selected_regions(instance->pdev, 1<<instance->bar); + } + + /** +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c +index 8d630a552b07..4f391e747be2 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c +@@ -2437,7 +2437,7 @@ megasas_release_fusion(struct megasas_instance *instance) + + iounmap(instance->reg_set); + +- pci_release_selected_regions(instance->pdev, instance->bar); ++ pci_release_selected_regions(instance->pdev, 1<<instance->bar); + } + + /** +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 11393ebf1a68..356233f86064 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -2155,6 +2155,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) + } else + ioc->msix96_vector = 0; + ++ if (ioc->is_warpdrive) { ++ ioc->reply_post_host_index[0] = (resource_size_t __iomem *) ++ &ioc->chip->ReplyPostHostIndex; ++ ++ for (i = 1; i < ioc->cpu_msix_table_sz; i++) ++ ioc->reply_post_host_index[i] = ++ (resource_size_t __iomem *) ++ ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) ++ * 4))); ++ } ++ + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) + pr_info(MPT3SAS_FMT "%s: IRQ %d\n", + reply_q->name, 
((ioc->msix_enable) ? "PCI-MSI-X enabled" : +@@ -5201,17 +5212,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) + if (r) + goto out_free_resources; + +- if (ioc->is_warpdrive) { +- ioc->reply_post_host_index[0] = (resource_size_t __iomem *) +- &ioc->chip->ReplyPostHostIndex; +- +- for (i = 1; i < ioc->cpu_msix_table_sz; i++) +- ioc->reply_post_host_index[i] = +- (resource_size_t __iomem *) +- ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) +- * 4))); +- } +- + pci_set_drvdata(ioc->pdev, ioc->shost); + r = _base_get_ioc_facts(ioc, CAN_SLEEP); + if (r) +diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c +index 4ab186669f0c..ec5b9a23494d 100644 +--- a/drivers/staging/comedi/drivers/comedi_test.c ++++ b/drivers/staging/comedi/drivers/comedi_test.c +@@ -56,11 +56,6 @@ + + #define N_CHANS 8 + +-enum waveform_state_bits { +- WAVEFORM_AI_RUNNING, +- WAVEFORM_AO_RUNNING +-}; +- + /* Data unique to this driver */ + struct waveform_private { + struct timer_list ai_timer; /* timer for AI commands */ +@@ -68,7 +63,6 @@ struct waveform_private { + unsigned int wf_amplitude; /* waveform amplitude in microvolts */ + unsigned int wf_period; /* waveform period in microseconds */ + unsigned int wf_current; /* current time in waveform period */ +- unsigned long state_bits; + unsigned int ai_scan_period; /* AI scan period in usec */ + unsigned int ai_convert_period; /* AI conversion period in usec */ + struct timer_list ao_timer; /* timer for AO commands */ +@@ -191,10 +185,6 @@ static void waveform_ai_timer(unsigned long arg) + unsigned int nsamples; + unsigned int time_increment; + +- /* check command is still active */ +- if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits)) +- return; +- + now = ktime_to_us(ktime_get()); + nsamples = comedi_nsamples_left(s, UINT_MAX); + +@@ -386,11 +376,6 @@ static int waveform_ai_cmd(struct comedi_device *dev, + */ + devpriv->ai_timer.expires = + jiffies + 
usecs_to_jiffies(devpriv->ai_convert_period) + 1; +- +- /* mark command as active */ +- smp_mb__before_atomic(); +- set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits); +- smp_mb__after_atomic(); + add_timer(&devpriv->ai_timer); + return 0; + } +@@ -400,11 +385,12 @@ static int waveform_ai_cancel(struct comedi_device *dev, + { + struct waveform_private *devpriv = dev->private; + +- /* mark command as no longer active */ +- clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits); +- smp_mb__after_atomic(); +- /* cannot call del_timer_sync() as may be called from timer routine */ +- del_timer(&devpriv->ai_timer); ++ if (in_softirq()) { ++ /* Assume we were called from the timer routine itself. */ ++ del_timer(&devpriv->ai_timer); ++ } else { ++ del_timer_sync(&devpriv->ai_timer); ++ } + return 0; + } + +@@ -436,10 +422,6 @@ static void waveform_ao_timer(unsigned long arg) + u64 scans_since; + unsigned int scans_avail = 0; + +- /* check command is still active */ +- if (!test_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits)) +- return; +- + /* determine number of scan periods since last time */ + now = ktime_to_us(ktime_get()); + scans_since = now - devpriv->ao_last_scan_time; +@@ -518,11 +500,6 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev, + devpriv->ao_last_scan_time = ktime_to_us(ktime_get()); + devpriv->ao_timer.expires = + jiffies + usecs_to_jiffies(devpriv->ao_scan_period); +- +- /* mark command as active */ +- smp_mb__before_atomic(); +- set_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits); +- smp_mb__after_atomic(); + add_timer(&devpriv->ao_timer); + + return 1; +@@ -608,11 +585,12 @@ static int waveform_ao_cancel(struct comedi_device *dev, + struct waveform_private *devpriv = dev->private; + + s->async->inttrig = NULL; +- /* mark command as no longer active */ +- clear_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits); +- smp_mb__after_atomic(); +- /* cannot call del_timer_sync() as may be called from timer routine */ +- 
del_timer(&devpriv->ao_timer); ++ if (in_softirq()) { ++ /* Assume we were called from the timer routine itself. */ ++ del_timer(&devpriv->ao_timer); ++ } else { ++ del_timer_sync(&devpriv->ao_timer); ++ } + return 0; + } + +diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c +index 57ab6680e3ae..e5fee6e0fb47 100644 +--- a/drivers/staging/comedi/drivers/daqboard2000.c ++++ b/drivers/staging/comedi/drivers/daqboard2000.c +@@ -636,7 +636,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev, + const struct daq200_boardtype *board; + int i; + +- if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH) ++ if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH) + return NULL; + + for (i = 0; i < ARRAY_SIZE(boardtypes); i++) { +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c +index 27fbf1a81097..35ab4a9ef95d 100644 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c +@@ -2823,7 +2823,15 @@ static int ni_ao_inttrig(struct comedi_device *dev, + int i; + static const int timeout = 1000; + +- if (trig_num != cmd->start_arg) ++ /* ++ * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT. ++ * For backwards compatibility, also allow trig_num == 0 when ++ * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT); ++ * in that case, the internal trigger is being used as a pre-trigger ++ * before the external trigger. ++ */ ++ if (!(trig_num == cmd->start_arg || ++ (trig_num == 0 && cmd->start_src != TRIG_INT))) + return -EINVAL; + + /* Null trig at beginning prevent ao start trigger from executing more than +@@ -5346,7 +5354,7 @@ static int ni_E_init(struct comedi_device *dev, + s->maxdata = (devpriv->is_m_series) ? 
0xffffffff + : 0x00ffffff; + s->insn_read = ni_tio_insn_read; +- s->insn_write = ni_tio_insn_read; ++ s->insn_write = ni_tio_insn_write; + s->insn_config = ni_tio_insn_config; + #ifdef PCIDMA + if (dev->irq && devpriv->mite) { +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c +index 391a1225b0ba..ca367b05e440 100644 +--- a/drivers/usb/chipidea/udc.c ++++ b/drivers/usb/chipidea/udc.c +@@ -1585,8 +1585,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on) + { + struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget); + +- /* Data+ pullup controlled by OTG state machine in OTG fsm mode */ +- if (ci_otg_is_fsm_mode(ci)) ++ /* ++ * Data+ pullup controlled by OTG state machine in OTG fsm mode; ++ * and don't touch Data+ in host mode for dual role config. ++ */ ++ if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST) + return 0; + + pm_runtime_get_sync(&ci->gadget.dev); +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index d37fdcc3143c..7f374369e539 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1336,7 +1336,6 @@ made_compressed_probe: + spin_lock_init(&acm->write_lock); + spin_lock_init(&acm->read_lock); + mutex_init(&acm->mutex); +- acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); + acm->is_int_ep = usb_endpoint_xfer_int(epread); + if (acm->is_int_ep) + acm->bInterval = epread->bInterval; +@@ -1376,14 +1375,14 @@ made_compressed_probe: + urb->transfer_dma = rb->dma; + if (acm->is_int_ep) { + usb_fill_int_urb(urb, acm->dev, +- acm->rx_endpoint, ++ usb_rcvintpipe(usb_dev, epread->bEndpointAddress), + rb->base, + acm->readsize, + acm_read_bulk_callback, rb, + acm->bInterval); + } else { + usb_fill_bulk_urb(urb, acm->dev, +- acm->rx_endpoint, ++ usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress), + rb->base, + acm->readsize, + acm_read_bulk_callback, rb); +diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h +index 
ccfaba9ab4e4..b30ac5fcde68 100644 +--- a/drivers/usb/class/cdc-acm.h ++++ b/drivers/usb/class/cdc-acm.h +@@ -95,7 +95,6 @@ struct acm { + struct urb *read_urbs[ACM_NR]; + struct acm_rb read_buffers[ACM_NR]; + int rx_buflimit; +- int rx_endpoint; + spinlock_t read_lock; + int write_used; /* number of non-empty write buffers */ + int transmitting; +diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c +index 673d53038ed2..a00bfb93acc3 100644 +--- a/drivers/usb/common/common.c ++++ b/drivers/usb/common/common.c +@@ -50,6 +50,7 @@ static const char *const speed_names[] = { + [USB_SPEED_HIGH] = "high-speed", + [USB_SPEED_WIRELESS] = "wireless", + [USB_SPEED_SUPER] = "super-speed", ++ [USB_SPEED_SUPER_PLUS] = "super-speed-plus", + }; + + const char *usb_speed_string(enum usb_device_speed speed) +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index 5050760f5e17..80c8d90d8b75 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -142,6 +142,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, + } + } + ++static const unsigned short low_speed_maxpacket_maxes[4] = { ++ [USB_ENDPOINT_XFER_CONTROL] = 8, ++ [USB_ENDPOINT_XFER_ISOC] = 0, ++ [USB_ENDPOINT_XFER_BULK] = 0, ++ [USB_ENDPOINT_XFER_INT] = 8, ++}; ++static const unsigned short full_speed_maxpacket_maxes[4] = { ++ [USB_ENDPOINT_XFER_CONTROL] = 64, ++ [USB_ENDPOINT_XFER_ISOC] = 1023, ++ [USB_ENDPOINT_XFER_BULK] = 64, ++ [USB_ENDPOINT_XFER_INT] = 64, ++}; ++static const unsigned short high_speed_maxpacket_maxes[4] = { ++ [USB_ENDPOINT_XFER_CONTROL] = 64, ++ [USB_ENDPOINT_XFER_ISOC] = 1024, ++ [USB_ENDPOINT_XFER_BULK] = 512, ++ [USB_ENDPOINT_XFER_INT] = 1024, ++}; ++static const unsigned short super_speed_maxpacket_maxes[4] = { ++ [USB_ENDPOINT_XFER_CONTROL] = 512, ++ [USB_ENDPOINT_XFER_ISOC] = 1024, ++ [USB_ENDPOINT_XFER_BULK] = 1024, ++ [USB_ENDPOINT_XFER_INT] = 1024, ++}; ++ + static int usb_parse_endpoint(struct device *ddev, int 
cfgno, int inum, + int asnum, struct usb_host_interface *ifp, int num_ep, + unsigned char *buffer, int size) +@@ -150,6 +175,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + struct usb_endpoint_descriptor *d; + struct usb_host_endpoint *endpoint; + int n, i, j, retval; ++ unsigned int maxp; ++ const unsigned short *maxpacket_maxes; + + d = (struct usb_endpoint_descriptor *) buffer; + buffer += d->bLength; +@@ -191,6 +218,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + if (usb_endpoint_xfer_int(d)) { + i = 1; + switch (to_usb_device(ddev)->speed) { ++ case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + case USB_SPEED_HIGH: + /* Many device manufacturers are using full-speed +@@ -256,6 +284,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + endpoint->desc.wMaxPacketSize = cpu_to_le16(8); + } + ++ /* Validate the wMaxPacketSize field */ ++ maxp = usb_endpoint_maxp(&endpoint->desc); ++ ++ /* Find the highest legal maxpacket size for this endpoint */ ++ i = 0; /* additional transactions per microframe */ ++ switch (to_usb_device(ddev)->speed) { ++ case USB_SPEED_LOW: ++ maxpacket_maxes = low_speed_maxpacket_maxes; ++ break; ++ case USB_SPEED_FULL: ++ maxpacket_maxes = full_speed_maxpacket_maxes; ++ break; ++ case USB_SPEED_HIGH: ++ /* Bits 12..11 are allowed only for HS periodic endpoints */ ++ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { ++ i = maxp & (BIT(12) | BIT(11)); ++ maxp &= ~i; ++ } ++ /* fallthrough */ ++ default: ++ maxpacket_maxes = high_speed_maxpacket_maxes; ++ break; ++ case USB_SPEED_SUPER: ++ case USB_SPEED_SUPER_PLUS: ++ maxpacket_maxes = super_speed_maxpacket_maxes; ++ break; ++ } ++ j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; ++ ++ if (maxp > j) { ++ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", ++ cfgno, inum, asnum, d->bEndpointAddress, maxp, j); ++ maxp = j; ++ 
endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); ++ } ++ + /* + * Some buggy high speed devices have bulk endpoints using + * maxpacket sizes other than 512. High speed HCDs may not +@@ -263,9 +327,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + */ + if (to_usb_device(ddev)->speed == USB_SPEED_HIGH + && usb_endpoint_xfer_bulk(d)) { +- unsigned maxp; +- +- maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; + if (maxp != 512) + dev_warn(ddev, "config %d interface %d altsetting %d " + "bulk endpoint 0x%X has invalid maxpacket %d\n", +@@ -274,7 +335,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + } + + /* Parse a possible SuperSpeed endpoint companion descriptor */ +- if (to_usb_device(ddev)->speed == USB_SPEED_SUPER) ++ if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER) + usb_parse_ss_endpoint_companion(ddev, cfgno, + inum, asnum, endpoint, buffer, size); + +diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c +index 2a3bbdf7eb94..332ed277a06c 100644 +--- a/drivers/usb/core/devices.c ++++ b/drivers/usb/core/devices.c +@@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, + break; + case USB_ENDPOINT_XFER_INT: + type = "Int."; +- if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER) ++ if (speed == USB_SPEED_HIGH || speed >= USB_SPEED_SUPER) + interval = 1 << (desc->bInterval - 1); + else + interval = desc->bInterval; +@@ -230,7 +230,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, + return start; + } + interval *= (speed == USB_SPEED_HIGH || +- speed == USB_SPEED_SUPER) ? 125 : 1000; ++ speed >= USB_SPEED_SUPER) ? 
125 : 1000; + if (interval % 1000) + unit = 'u'; + else { +@@ -322,7 +322,7 @@ static char *usb_dump_config_descriptor(char *start, char *end, + + if (start > end) + return start; +- if (speed == USB_SPEED_SUPER) ++ if (speed >= USB_SPEED_SUPER) + mul = 8; + else + mul = 2; +@@ -534,6 +534,8 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, + speed = "480"; break; + case USB_SPEED_SUPER: + speed = "5000"; break; ++ case USB_SPEED_SUPER_PLUS: ++ speed = "10000"; break; + default: + speed = "??"; + } +@@ -553,7 +555,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, + + /* super/high speed reserves 80%, full/low reserves 90% */ + if (usbdev->speed == USB_SPEED_HIGH || +- usbdev->speed == USB_SPEED_SUPER) ++ usbdev->speed >= USB_SPEED_SUPER) + max = 800; + else + max = FRAME_TIME_MAX_USECS_ALLOC; +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c +index 3ffb01ff6549..f5c92d904ded 100644 +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -1530,11 +1530,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb + as->urb->start_frame = uurb->start_frame; + as->urb->number_of_packets = number_of_packets; + as->urb->stream_id = stream_id; +- if (uurb->type == USBDEVFS_URB_TYPE_ISO || +- ps->dev->speed == USB_SPEED_HIGH) +- as->urb->interval = 1 << min(15, ep->desc.bInterval - 1); +- else +- as->urb->interval = ep->desc.bInterval; ++ ++ if (ep->desc.bInterval) { ++ if (uurb->type == USBDEVFS_URB_TYPE_ISO || ++ ps->dev->speed == USB_SPEED_HIGH || ++ ps->dev->speed >= USB_SPEED_SUPER) ++ as->urb->interval = 1 << ++ min(15, ep->desc.bInterval - 1); ++ else ++ as->urb->interval = ep->desc.bInterval; ++ } ++ + as->urb->context = as; + as->urb->complete = async_completed; + for (totlen = u = 0; u < number_of_packets; u++) { +diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c +index b8b580e5ae6e..40378487e023 100644 +--- a/drivers/usb/core/hcd-pci.c ++++ 
b/drivers/usb/core/hcd-pci.c +@@ -206,7 +206,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) + * The xHCI driver has its own irq management + * make sure irq setup is not touched for xhci in generic hcd code + */ +- if ((driver->flags & HCD_MASK) != HCD_USB3) { ++ if ((driver->flags & HCD_MASK) < HCD_USB3) { + if (!dev->irq) { + dev_err(&dev->dev, + "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index 1c102d60cd9f..f44ce09367bc 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -1078,7 +1078,7 @@ static int register_root_hub(struct usb_hcd *hcd) + retval = usb_get_bos_descriptor(usb_dev); + if (!retval) { + usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev); +- } else if (usb_dev->speed == USB_SPEED_SUPER) { ++ } else if (usb_dev->speed >= USB_SPEED_SUPER) { + mutex_unlock(&usb_bus_list_lock); + dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", + dev_name(&usb_dev->dev), retval); +@@ -2112,7 +2112,7 @@ int usb_alloc_streams(struct usb_interface *interface, + hcd = bus_to_hcd(dev->bus); + if (!hcd->driver->alloc_streams || !hcd->driver->free_streams) + return -EINVAL; +- if (dev->speed != USB_SPEED_SUPER) ++ if (dev->speed < USB_SPEED_SUPER) + return -EINVAL; + if (dev->state < USB_STATE_CONFIGURED) + return -ENODEV; +@@ -2160,7 +2160,7 @@ int usb_free_streams(struct usb_interface *interface, + + dev = interface_to_usbdev(interface); + hcd = bus_to_hcd(dev->bus); +- if (dev->speed != USB_SPEED_SUPER) ++ if (dev->speed < USB_SPEED_SUPER) + return -EINVAL; + + /* Double-free is not allowed */ +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 84df093639ac..bcc1e1b729ad 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -298,7 +298,7 @@ static void usb_set_lpm_parameters(struct usb_device *udev) + unsigned int hub_u1_del; + unsigned int hub_u2_del; + +- if (!udev->lpm_capable || udev->speed != 
USB_SPEED_SUPER) ++ if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER) + return; + + hub = usb_hub_to_struct_hub(udev->parent); +@@ -1036,14 +1036,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + + /* Continue a partial initialization */ + if (type == HUB_INIT2 || type == HUB_INIT3) { +- device_lock(hub->intfdev); ++ device_lock(&hdev->dev); + + /* Was the hub disconnected while we were waiting? */ +- if (hub->disconnected) { +- device_unlock(hub->intfdev); +- kref_put(&hub->kref, hub_release); +- return; +- } ++ if (hub->disconnected) ++ goto disconnected; + if (type == HUB_INIT2) + goto init2; + goto init3; +@@ -1246,7 +1243,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + queue_delayed_work(system_power_efficient_wq, + &hub->init_work, + msecs_to_jiffies(delay)); +- device_unlock(hub->intfdev); ++ device_unlock(&hdev->dev); + return; /* Continues at init3: below */ + } else { + msleep(delay); +@@ -1265,12 +1262,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + /* Scan all ports that need attention */ + kick_hub_wq(hub); + +- /* Allow autosuspend if it was suppressed */ +- if (type <= HUB_INIT3) ++ if (type == HUB_INIT2 || type == HUB_INIT3) { ++ /* Allow autosuspend if it was suppressed */ ++ disconnected: + usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); +- +- if (type == HUB_INIT2 || type == HUB_INIT3) +- device_unlock(hub->intfdev); ++ device_unlock(&hdev->dev); ++ } + + kref_put(&hub->kref, hub_release); + } +@@ -1299,8 +1296,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) + struct usb_device *hdev = hub->hdev; + int i; + +- cancel_delayed_work_sync(&hub->init_work); +- + /* hub_wq and related activity won't re-trigger */ + hub->quiescing = 1; + +@@ -2645,7 +2640,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub) + */ + static bool use_new_scheme(struct usb_device *udev, int retry) + { +- if 
(udev->speed == USB_SPEED_SUPER) ++ if (udev->speed >= USB_SPEED_SUPER) + return false; + + return USE_NEW_SCHEME(retry); +@@ -3985,7 +3980,7 @@ int usb_disable_lpm(struct usb_device *udev) + struct usb_hcd *hcd; + + if (!udev || !udev->parent || +- udev->speed != USB_SPEED_SUPER || ++ udev->speed < USB_SPEED_SUPER || + !udev->lpm_capable || + udev->state < USB_STATE_DEFAULT) + return 0; +@@ -4042,7 +4037,7 @@ void usb_enable_lpm(struct usb_device *udev) + struct usb_hcd *hcd; + + if (!udev || !udev->parent || +- udev->speed != USB_SPEED_SUPER || ++ udev->speed < USB_SPEED_SUPER || + !udev->lpm_capable || + udev->state < USB_STATE_DEFAULT) + return; +@@ -4308,7 +4303,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, + + retval = -ENODEV; + +- if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { ++ /* Don't allow speed changes at reset, except usb 3.0 to faster */ ++ if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed && ++ !(oldspeed == USB_SPEED_SUPER && udev->speed > oldspeed)) { + dev_dbg(&udev->dev, "device reset changed speed!\n"); + goto fail; + } +@@ -4320,6 +4317,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, + * reported as 0xff in the device descriptor). WUSB1.0[4.8.1]. + */ + switch (udev->speed) { ++ case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + case USB_SPEED_WIRELESS: /* fixed at 512 */ + udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); +@@ -4346,7 +4344,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, + else + speed = usb_speed_string(udev->speed); + +- if (udev->speed != USB_SPEED_SUPER) ++ if (udev->speed < USB_SPEED_SUPER) + dev_info(&udev->dev, + "%s %s USB device number %d using %s\n", + (udev->config) ? 
"reset" : "new", speed, +@@ -4476,11 +4474,12 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, + devnum, retval); + goto fail; + } +- if (udev->speed == USB_SPEED_SUPER) { ++ if (udev->speed >= USB_SPEED_SUPER) { + devnum = udev->devnum; + dev_info(&udev->dev, +- "%s SuperSpeed USB device number %d using %s\n", ++ "%s SuperSpeed%s USB device number %d using %s\n", + (udev->config) ? "reset" : "new", ++ (udev->speed == USB_SPEED_SUPER_PLUS) ? "Plus" : "", + devnum, udev->bus->controller->driver->name); + } + +@@ -4519,7 +4518,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, + * got from those devices show they aren't superspeed devices. Warm + * reset the port attached by the devices can fix them. + */ +- if ((udev->speed == USB_SPEED_SUPER) && ++ if ((udev->speed >= USB_SPEED_SUPER) && + (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) { + dev_err(&udev->dev, "got a wrong device descriptor, " + "warm reset device\n"); +@@ -4530,7 +4529,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, + } + + if (udev->descriptor.bMaxPacketSize0 == 0xff || +- udev->speed == USB_SPEED_SUPER) ++ udev->speed >= USB_SPEED_SUPER) + i = 512; + else + i = udev->descriptor.bMaxPacketSize0; +@@ -4740,7 +4739,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, + udev->level = hdev->level + 1; + udev->wusb = hub_is_wusb(hub); + +- /* Only USB 3.0 devices are connected to SuperSpeed hubs. 
*/ ++ /* Devices connected to SuperSpeed hubs are USB 3.0 or later */ + if (hub_is_superspeed(hub->hdev)) + udev->speed = USB_SPEED_SUPER; + else +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c +index 3d274778caaf..c601e25b609f 100644 +--- a/drivers/usb/core/urb.c ++++ b/drivers/usb/core/urb.c +@@ -401,7 +401,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) + /* SuperSpeed isoc endpoints have up to 16 bursts of up to + * 3 packets each + */ +- if (dev->speed == USB_SPEED_SUPER) { ++ if (dev->speed >= USB_SPEED_SUPER) { + int burst = 1 + ep->ss_ep_comp.bMaxBurst; + int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes); + max *= burst; +@@ -499,6 +499,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) + } + /* too big? */ + switch (dev->speed) { ++ case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: /* units are 125us */ + /* Handle up to 2^(16-1) microframes */ + if (urb->interval > (1 << 15)) +diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h +index 05b5e17abf92..53318126ed91 100644 +--- a/drivers/usb/core/usb.h ++++ b/drivers/usb/core/usb.h +@@ -45,7 +45,7 @@ static inline unsigned usb_get_max_power(struct usb_device *udev, + struct usb_host_config *c) + { + /* SuperSpeed power is in 8 mA units; others are in 2 mA units */ +- unsigned mul = (udev->speed == USB_SPEED_SUPER ? 8 : 2); ++ unsigned mul = (udev->speed >= USB_SPEED_SUPER ? 
8 : 2); + + return c->desc.bMaxPower * mul; + } +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c +index 009d83048c8c..3d731d1b5c60 100644 +--- a/drivers/usb/dwc3/dwc3-pci.c ++++ b/drivers/usb/dwc3/dwc3-pci.c +@@ -36,6 +36,7 @@ + #define PCI_DEVICE_ID_INTEL_SPTH 0xa130 + #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa + #define PCI_DEVICE_ID_INTEL_APL 0x5aaa ++#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 + + static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; + static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; +@@ -214,6 +215,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, + { } /* Terminating Entry */ + }; +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 70900e6ca9bc..fb79dca9484b 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -1892,14 +1892,6 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, + s_pkt = 1; + } + +- /* +- * We assume here we will always receive the entire data block +- * which we should receive. Meaning, if we program RX to +- * receive 4K but we receive only 2K, we assume that's all we +- * should receive and we simply bounce the request back to the +- * gadget driver for further processing. 
+- */ +- req->request.actual += req->request.length - count; + if (s_pkt) + return 1; + if ((event->status & DEPEVT_STATUS_LST) && +@@ -1919,6 +1911,7 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, + struct dwc3_trb *trb; + unsigned int slot; + unsigned int i; ++ int count = 0; + int ret; + + do { +@@ -1935,6 +1928,8 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, + slot++; + slot %= DWC3_TRB_NUM; + trb = &dep->trb_pool[slot]; ++ count += trb->size & DWC3_TRB_SIZE_MASK; ++ + + ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, + event, status); +@@ -1942,6 +1937,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, + break; + } while (++i < req->request.num_mapped_sgs); + ++ /* ++ * We assume here we will always receive the entire data block ++ * which we should receive. Meaning, if we program RX to ++ * receive 4K but we receive only 2K, we assume that's all we ++ * should receive and we simply bounce the request back to the ++ * gadget driver for further processing. 
++ */ ++ req->request.actual += req->request.length - count; + dwc3_gadget_giveback(dep, req, status); + + if (ret) +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c +index 55386619a0f1..e57f48f9528f 100644 +--- a/drivers/usb/gadget/legacy/inode.c ++++ b/drivers/usb/gadget/legacy/inode.c +@@ -541,7 +541,7 @@ static ssize_t ep_aio(struct kiocb *iocb, + */ + spin_lock_irq(&epdata->dev->lock); + value = -ENODEV; +- if (unlikely(epdata->ep)) ++ if (unlikely(epdata->ep == NULL)) + goto fail; + + req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); +diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c +index 5fb6f8b4f0b4..c73689b72f95 100644 +--- a/drivers/usb/gadget/udc/fsl_qe_udc.c ++++ b/drivers/usb/gadget/udc/fsl_qe_udc.c +@@ -2053,7 +2053,7 @@ static void setup_received_handle(struct qe_udc *udc, + struct qe_ep *ep; + + if (wValue != 0 || wLength != 0 +- || pipe > USB_MAX_ENDPOINTS) ++ || pipe >= USB_MAX_ENDPOINTS) + break; + ep = &udc->eps[pipe]; + +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c +index 48c92bf78bd0..f7661d9750fd 100644 +--- a/drivers/usb/host/ehci-hcd.c ++++ b/drivers/usb/host/ehci-hcd.c +@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci) + int port = HCS_N_PORTS(ehci->hcs_params); + + while (port--) { +- ehci_writel(ehci, PORT_RWC_BITS, +- &ehci->regs->port_status[port]); + spin_unlock_irq(&ehci->lock); + ehci_port_power(ehci, port, false); + spin_lock_irq(&ehci->lock); ++ ehci_writel(ehci, PORT_RWC_BITS, ++ &ehci->regs->port_status[port]); + } + } + +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index f980c239eded..1da876605e4d 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -377,6 +377,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) + + ret = 0; + virt_dev = xhci->devs[slot_id]; ++ if (!virt_dev) ++ return -ENODEV; ++ + cmd = 
xhci_alloc_command(xhci, false, true, GFP_NOIO); + if (!cmd) { + xhci_dbg(xhci, "Couldn't allocate command structure.\n"); +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index d8dbd7e5194b..8ea2c05beca2 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -1072,7 +1072,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, + struct usb_device *top_dev; + struct usb_hcd *hcd; + +- if (udev->speed == USB_SPEED_SUPER) ++ if (udev->speed >= USB_SPEED_SUPER) + hcd = xhci->shared_hcd; + else + hcd = xhci->main_hcd; +@@ -1107,6 +1107,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud + /* 3) Only the control endpoint is valid - one endpoint context */ + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); + switch (udev->speed) { ++ case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS); + max_packets = MAX_PACKET(512); +@@ -1294,6 +1295,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, + } + /* Fall through - SS and HS isoc/int have same decoding */ + ++ case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + if (usb_endpoint_xfer_int(&ep->desc) || + usb_endpoint_xfer_isoc(&ep->desc)) { +@@ -1334,7 +1336,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, + static u32 xhci_get_endpoint_mult(struct usb_device *udev, + struct usb_host_endpoint *ep) + { +- if (udev->speed != USB_SPEED_SUPER || ++ if (udev->speed < USB_SPEED_SUPER || + !usb_endpoint_xfer_isoc(&ep->desc)) + return 0; + return ep->ss_ep_comp.bmAttributes; +@@ -1384,7 +1386,7 @@ static u32 xhci_get_max_esit_payload(struct usb_device *udev, + usb_endpoint_xfer_bulk(&ep->desc)) + return 0; + +- if (udev->speed == USB_SPEED_SUPER) ++ if (udev->speed >= USB_SPEED_SUPER) + return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); + + max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); +@@ -1455,6 +1457,7 @@ int 
xhci_endpoint_init(struct xhci_hcd *xhci, + max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); + max_burst = 0; + switch (udev->speed) { ++ case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + /* dig out max burst from ep companion desc */ + max_burst = ep->ss_ep_comp.bMaxBurst; +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index de644e56aa3b..963867c2c1d5 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -311,11 +311,12 @@ static void xhci_pci_remove(struct pci_dev *dev) + usb_remove_hcd(xhci->shared_hcd); + usb_put_hcd(xhci->shared_hcd); + } +- usb_hcd_pci_remove(dev); + + /* Workaround for spurious wakeups at shutdown with HSW */ + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + pci_set_power_state(dev, PCI_D3hot); ++ ++ usb_hcd_pci_remove(dev); + } + + #ifdef CONFIG_PM +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 34cd23724bed..1f37b89e7267 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1331,12 +1331,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, + + cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); + +- if (cmd->command_trb != xhci->cmd_ring->dequeue) { +- xhci_err(xhci, +- "Command completion event does not match command\n"); +- return; +- } +- + del_timer(&xhci->cmd_timer); + + trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); +@@ -1348,6 +1342,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, + xhci_handle_stopped_cmd_ring(xhci, cmd); + return; + } ++ ++ if (cmd->command_trb != xhci->cmd_ring->dequeue) { ++ xhci_err(xhci, ++ "Command completion event does not match command\n"); ++ return; ++ } ++ + /* + * Host aborted the command ring, check if the current command was + * supposed to be aborted, otherwise continue normally. 
+@@ -3575,7 +3576,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, + { + unsigned int max_burst; + +- if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER) ++ if (xhci->hci_version < 0x100 || udev->speed < USB_SPEED_SUPER) + return 0; + + max_burst = urb->ep->ss_ep_comp.bMaxBurst; +@@ -3601,6 +3602,7 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, + return 0; + + switch (udev->speed) { ++ case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + /* bMaxBurst is zero based: 0 means 1 packet per burst */ + max_burst = urb->ep->ss_ep_comp.bMaxBurst; +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 6fe0174da226..adc169d2fd76 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -2073,6 +2073,7 @@ static unsigned int xhci_get_block_size(struct usb_device *udev) + case USB_SPEED_HIGH: + return HS_BLOCK; + case USB_SPEED_SUPER: ++ case USB_SPEED_SUPER_PLUS: + return SS_BLOCK; + case USB_SPEED_UNKNOWN: + case USB_SPEED_WIRELESS: +@@ -2198,7 +2199,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci, + unsigned int packets_remaining = 0; + unsigned int i; + +- if (virt_dev->udev->speed == USB_SPEED_SUPER) ++ if (virt_dev->udev->speed >= USB_SPEED_SUPER) + return xhci_check_ss_bw(xhci, virt_dev); + + if (virt_dev->udev->speed == USB_SPEED_HIGH) { +@@ -2399,7 +2400,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, + if (xhci_is_async_ep(ep_bw->type)) + return; + +- if (udev->speed == USB_SPEED_SUPER) { ++ if (udev->speed >= USB_SPEED_SUPER) { + if (xhci_is_sync_in_ep(ep_bw->type)) + xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= + xhci_get_ss_bw_consumed(ep_bw); +@@ -2437,6 +2438,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, + interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; + break; + case USB_SPEED_SUPER: ++ case USB_SPEED_SUPER_PLUS: + case USB_SPEED_UNKNOWN: + case USB_SPEED_WIRELESS: + /* Should never happen because only LS/FS/HS 
endpoints will get +@@ -2496,6 +2498,7 @@ static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, + interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; + break; + case USB_SPEED_SUPER: ++ case USB_SPEED_SUPER_PLUS: + case USB_SPEED_UNKNOWN: + case USB_SPEED_WIRELESS: + /* Should never happen because only LS/FS/HS endpoints will get +diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c +index 1a812eafe670..1624b09d9748 100644 +--- a/drivers/usb/misc/usbtest.c ++++ b/drivers/usb/misc/usbtest.c +@@ -558,7 +558,6 @@ static void sg_timeout(unsigned long _req) + { + struct usb_sg_request *req = (struct usb_sg_request *) _req; + +- req->status = -ETIMEDOUT; + usb_sg_cancel(req); + } + +@@ -589,8 +588,10 @@ static int perform_sglist( + mod_timer(&sg_timer, jiffies + + msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); + usb_sg_wait(req); +- del_timer_sync(&sg_timer); +- retval = req->status; ++ if (!del_timer_sync(&sg_timer)) ++ retval = -ETIMEDOUT; ++ else ++ retval = req->status; + + /* FIXME check resulting data pattern */ + +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c +index db565f620f82..36e5b5c530bd 100644 +--- a/drivers/usb/renesas_usbhs/fifo.c ++++ b/drivers/usb/renesas_usbhs/fifo.c +@@ -869,7 +869,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) + + /* use PIO if packet is less than pio_dma_border or pipe is DCP */ + if ((len < usbhs_get_dparam(priv, pio_dma_border)) || +- usbhs_pipe_is_dcp(pipe)) ++ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) + goto usbhsf_pio_prepare_push; + + /* check data length if this driver don't use USB-DMAC */ +@@ -974,7 +974,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, + + /* use PIO if packet is less than pio_dma_border or pipe is DCP */ + if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) || +- usbhs_pipe_is_dcp(pipe)) ++ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) + goto usbhsf_pio_prepare_pop; + + fifo = 
usbhsf_get_dma_fifo(priv, pkt); +diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c +index 5a3abf56d56b..efc4fae123a4 100644 +--- a/drivers/usb/renesas_usbhs/mod_gadget.c ++++ b/drivers/usb/renesas_usbhs/mod_gadget.c +@@ -618,10 +618,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep, + * use dmaengine if possible. + * It will use pio handler if impossible. + */ +- if (usb_endpoint_dir_in(desc)) ++ if (usb_endpoint_dir_in(desc)) { + pipe->handler = &usbhs_fifo_dma_push_handler; +- else ++ } else { + pipe->handler = &usbhs_fifo_dma_pop_handler; ++ usbhs_xxxsts_clear(priv, BRDYSTS, ++ usbhs_pipe_number(pipe)); ++ } + + ret = 0; + } +@@ -1072,7 +1075,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) + + gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); + dev_info(dev, "%stransceiver found\n", +- gpriv->transceiver ? "" : "no "); ++ !IS_ERR(gpriv->transceiver) ? "" : "no "); + + /* + * CAUTION +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index b61f12160d37..8c48c9d83d48 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, ++ { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) }, ++ { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) }, + { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, + { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, + { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, +@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, ++ { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/ftdi_sio_ids.h 
b/drivers/usb/serial/ftdi_sio_ids.h +index c5d6c1e73e8e..f87a938cf005 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -406,6 +406,12 @@ + #define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 + + /* ++ * Ivium Technologies product IDs ++ */ ++#define FTDI_PALMSENS_PID 0xf440 ++#define FTDI_IVIUM_XSTAT_PID 0xf441 ++ ++/* + * Linx Technologies product ids + */ + #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ +@@ -673,6 +679,12 @@ + #define INTREPID_NEOVI_PID 0x0701 + + /* ++ * WICED USB UART ++ */ ++#define WICED_VID 0x0A5C ++#define WICED_USB20706V2_PID 0x6422 ++ ++/* + * Definitions for ID TECH (www.idt-net.com) devices + */ + #define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c +index 06c7dbc1c802..63db004af21f 100644 +--- a/drivers/usb/serial/mos7720.c ++++ b/drivers/usb/serial/mos7720.c +@@ -1252,7 +1252,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port, + + if (urb->transfer_buffer == NULL) { + urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, +- GFP_KERNEL); ++ GFP_ATOMIC); + if (!urb->transfer_buffer) + goto exit; + } +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c +index 8ac9b55f05af..7f3ddd7ba2ce 100644 +--- a/drivers/usb/serial/mos7840.c ++++ b/drivers/usb/serial/mos7840.c +@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, + } + + if (urb->transfer_buffer == NULL) { +- urb->transfer_buffer = +- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); ++ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, ++ GFP_ATOMIC); + if (!urb->transfer_buffer) + goto exit; + } +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 8e07536c233a..9894e341c6ac 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -274,6 +274,12 @@ static void option_instat_callback(struct urb *urb); + #define 
TELIT_PRODUCT_LE920 0x1200 + #define TELIT_PRODUCT_LE910 0x1201 + #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 ++#define TELIT_PRODUCT_LE920A4_1207 0x1207 ++#define TELIT_PRODUCT_LE920A4_1208 0x1208 ++#define TELIT_PRODUCT_LE920A4_1211 0x1211 ++#define TELIT_PRODUCT_LE920A4_1212 0x1212 ++#define TELIT_PRODUCT_LE920A4_1213 0x1213 ++#define TELIT_PRODUCT_LE920A4_1214 0x1214 + + /* ZTE PRODUCTS */ + #define ZTE_VENDOR_ID 0x19d2 +@@ -519,6 +525,12 @@ static void option_instat_callback(struct urb *urb); + #define VIATELECOM_VENDOR_ID 0x15eb + #define VIATELECOM_PRODUCT_CDS7 0x0001 + ++/* WeTelecom products */ ++#define WETELECOM_VENDOR_ID 0x22de ++#define WETELECOM_PRODUCT_WMD200 0x6801 ++#define WETELECOM_PRODUCT_6802 0x6802 ++#define WETELECOM_PRODUCT_WMD300 0x6803 ++ + struct option_blacklist_info { + /* bitmask of interface numbers blacklisted for send_setup */ + const unsigned long sendsetup; +@@ -628,6 +640,11 @@ static const struct option_blacklist_info telit_le920_blacklist = { + .reserved = BIT(1) | BIT(5), + }; + ++static const struct option_blacklist_info telit_le920a4_blacklist_1 = { ++ .sendsetup = BIT(0), ++ .reserved = BIT(1), ++}; ++ + static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { + .sendsetup = BIT(2), + .reserved = BIT(0) | BIT(1) | BIT(3), +@@ -1203,6 +1220,16 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), + .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208), ++ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212), ++ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 
}, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, +@@ -1966,9 +1993,13 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, + { } /* Terminating entry */ + }; + MODULE_DEVICE_TABLE(usb, option_ids); +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c +index 46f1f13b41f1..a0ca291bc07f 100644 +--- a/drivers/usb/serial/usb-serial.c ++++ b/drivers/usb/serial/usb-serial.c +@@ -1432,7 +1432,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] + + rc = usb_register(udriver); + if (rc) +- return rc; ++ goto failed_usb_register; + + for (sd = serial_drivers; *sd; ++sd) { + (*sd)->usb_driver = udriver; +@@ -1450,6 +1450,8 @@ int usb_serial_register_drivers(struct 
usb_serial_driver *const serial_drivers[] + while (sd-- > serial_drivers) + usb_serial_deregister(*sd); + usb_deregister(udriver); ++failed_usb_register: ++ kfree(udriver); + return rc; + } + EXPORT_SYMBOL_GPL(usb_serial_register_drivers); +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c +index 3b3ba15558b7..20e9a86d2dcf 100644 +--- a/drivers/vfio/pci/vfio_pci_intrs.c ++++ b/drivers/vfio/pci/vfio_pci_intrs.c +@@ -563,67 +563,80 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev, + } + + static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx, +- uint32_t flags, void *data) ++ unsigned int count, uint32_t flags, ++ void *data) + { +- int32_t fd = *(int32_t *)data; +- +- if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK)) +- return -EINVAL; +- + /* DATA_NONE/DATA_BOOL enables loopback testing */ + if (flags & VFIO_IRQ_SET_DATA_NONE) { +- if (*ctx) +- eventfd_signal(*ctx, 1); +- return 0; ++ if (*ctx) { ++ if (count) { ++ eventfd_signal(*ctx, 1); ++ } else { ++ eventfd_ctx_put(*ctx); ++ *ctx = NULL; ++ } ++ return 0; ++ } + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { +- uint8_t trigger = *(uint8_t *)data; ++ uint8_t trigger; ++ ++ if (!count) ++ return -EINVAL; ++ ++ trigger = *(uint8_t *)data; + if (trigger && *ctx) + eventfd_signal(*ctx, 1); +- return 0; +- } + +- /* Handle SET_DATA_EVENTFD */ +- if (fd == -1) { +- if (*ctx) +- eventfd_ctx_put(*ctx); +- *ctx = NULL; + return 0; +- } else if (fd >= 0) { +- struct eventfd_ctx *efdctx; +- efdctx = eventfd_ctx_fdget(fd); +- if (IS_ERR(efdctx)) +- return PTR_ERR(efdctx); +- if (*ctx) +- eventfd_ctx_put(*ctx); +- *ctx = efdctx; ++ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { ++ int32_t fd; ++ ++ if (!count) ++ return -EINVAL; ++ ++ fd = *(int32_t *)data; ++ if (fd == -1) { ++ if (*ctx) ++ eventfd_ctx_put(*ctx); ++ *ctx = NULL; ++ } else if (fd >= 0) { ++ struct eventfd_ctx *efdctx; ++ ++ efdctx = eventfd_ctx_fdget(fd); ++ if (IS_ERR(efdctx)) ++ return 
PTR_ERR(efdctx); ++ ++ if (*ctx) ++ eventfd_ctx_put(*ctx); ++ ++ *ctx = efdctx; ++ } + return 0; +- } else +- return -EINVAL; ++ } ++ ++ return -EINVAL; + } + + static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) + { +- if (index != VFIO_PCI_ERR_IRQ_INDEX) ++ if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1) + return -EINVAL; + +- /* +- * We should sanitize start & count, but that wasn't caught +- * originally, so this IRQ index must forever ignore them :-( +- */ +- +- return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data); ++ return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, ++ count, flags, data); + } + + static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) + { +- if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1) ++ if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1) + return -EINVAL; + +- return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data); ++ return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, ++ count, flags, data); + } + + int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index ee663c458b20..dc2b94142f53 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -202,6 +202,8 @@ static inline int virtqueue_add(struct virtqueue *_vq, + * host should service the ring ASAP. 
*/ + if (out_sgs) + vq->notify(&vq->vq); ++ if (indirect) ++ kfree(desc); + END_USE(vq); + return -ENOSPC; + } +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 385b449fd7ed..1391f72c28c3 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -1770,6 +1770,7 @@ struct btrfs_fs_info { + struct btrfs_workqueue *qgroup_rescan_workers; + struct completion qgroup_rescan_completion; + struct btrfs_work qgroup_rescan_work; ++ bool qgroup_rescan_running; /* protected by qgroup_rescan_lock */ + + /* filesystem state */ + unsigned long fs_state; +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 41fb43183406..85b207d19aa5 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2276,6 +2276,7 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) + fs_info->quota_enabled = 0; + fs_info->pending_quota_state = 0; + fs_info->qgroup_ulist = NULL; ++ fs_info->qgroup_rescan_running = false; + mutex_init(&fs_info->qgroup_rescan_lock); + } + +@@ -3811,7 +3812,7 @@ void close_ctree(struct btrfs_root *root) + smp_mb(); + + /* wait for the qgroup rescan worker to stop */ +- btrfs_qgroup_wait_for_completion(fs_info); ++ btrfs_qgroup_wait_for_completion(fs_info, false); + + /* wait for the uuid_scan task to finish */ + down(&fs_info->uuid_tree_rescan_sem); +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index bfcd87ee8ff5..65f30b3b04f9 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -5121,7 +5121,7 @@ static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg) + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + +- return btrfs_qgroup_wait_for_completion(root->fs_info); ++ return btrfs_qgroup_wait_for_completion(root->fs_info, true); + } + + static long _btrfs_ioctl_set_received_subvol(struct file *file, +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 5279fdae7142..bcc965ed5fa1 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -995,7 +995,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans, 
+ goto out; + fs_info->quota_enabled = 0; + fs_info->pending_quota_state = 0; +- btrfs_qgroup_wait_for_completion(fs_info); ++ btrfs_qgroup_wait_for_completion(fs_info, false); + spin_lock(&fs_info->qgroup_lock); + quota_root = fs_info->quota_root; + fs_info->quota_root = NULL; +@@ -2283,6 +2283,10 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) + int err = -ENOMEM; + int ret = 0; + ++ mutex_lock(&fs_info->qgroup_rescan_lock); ++ fs_info->qgroup_rescan_running = true; ++ mutex_unlock(&fs_info->qgroup_rescan_lock); ++ + path = btrfs_alloc_path(); + if (!path) + goto out; +@@ -2349,6 +2353,9 @@ out: + } + + done: ++ mutex_lock(&fs_info->qgroup_rescan_lock); ++ fs_info->qgroup_rescan_running = false; ++ mutex_unlock(&fs_info->qgroup_rescan_lock); + complete_all(&fs_info->qgroup_rescan_completion); + } + +@@ -2467,20 +2474,26 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) + return 0; + } + +-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info) ++int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, ++ bool interruptible) + { + int running; + int ret = 0; + + mutex_lock(&fs_info->qgroup_rescan_lock); + spin_lock(&fs_info->qgroup_lock); +- running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN; ++ running = fs_info->qgroup_rescan_running; + spin_unlock(&fs_info->qgroup_lock); + mutex_unlock(&fs_info->qgroup_rescan_lock); + +- if (running) ++ if (!running) ++ return 0; ++ ++ if (interruptible) + ret = wait_for_completion_interruptible( + &fs_info->qgroup_rescan_completion); ++ else ++ wait_for_completion(&fs_info->qgroup_rescan_completion); + + return ret; + } +diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h +index ecb2c143ef75..3d73e4c9c7df 100644 +--- a/fs/btrfs/qgroup.h ++++ b/fs/btrfs/qgroup.h +@@ -46,7 +46,8 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans, + struct btrfs_fs_info *fs_info); + int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info); + void btrfs_qgroup_rescan_resume(struct 
btrfs_fs_info *fs_info); +-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info); ++int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, ++ bool interruptible); + int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, + struct btrfs_fs_info *fs_info, u64 src, u64 dst); + int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, +diff --git a/fs/seq_file.c b/fs/seq_file.c +index e85664b7c7d9..d672e2fec459 100644 +--- a/fs/seq_file.c ++++ b/fs/seq_file.c +@@ -222,8 +222,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) + size -= n; + buf += n; + copied += n; +- if (!m->count) ++ if (!m->count) { ++ m->from = 0; + m->index++; ++ } + if (!size) + goto Done; + } +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c +index f35523d4fa3a..b803213d1307 100644 +--- a/fs/sysfs/file.c ++++ b/fs/sysfs/file.c +@@ -114,9 +114,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf, + * If buf != of->prealloc_buf, we don't know how + * large it is, so cannot safely pass it to ->show + */ +- if (pos || WARN_ON_ONCE(buf != of->prealloc_buf)) ++ if (WARN_ON_ONCE(buf != of->prealloc_buf)) + return 0; + len = ops->show(kobj, of->kn->priv, buf); ++ if (pos) { ++ if (len <= pos) ++ return 0; ++ len -= pos; ++ memmove(buf, buf + pos, len); ++ } + return min(count, len); + } + +diff --git a/include/linux/acpi.h b/include/linux/acpi.h +index 1991aea2ec4c..3672893b275e 100644 +--- a/include/linux/acpi.h ++++ b/include/linux/acpi.h +@@ -920,7 +920,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev, + return NULL; + } + +-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \ ++#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ + static const void * __acpi_table_##name[] \ + __attribute__((unused)) \ + = { (void *) table_id, \ +diff --git a/include/linux/i8042.h b/include/linux/i8042.h +index 
0f9bafa17a02..d98780ca9604 100644 +--- a/include/linux/i8042.h ++++ b/include/linux/i8042.h +@@ -62,7 +62,6 @@ struct serio; + void i8042_lock_chip(void); + void i8042_unlock_chip(void); + int i8042_command(unsigned char *param, int command); +-bool i8042_check_port_owner(const struct serio *); + int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)); + int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, +@@ -83,11 +82,6 @@ static inline int i8042_command(unsigned char *param, int command) + return -ENODEV; + } + +-static inline bool i8042_check_port_owner(const struct serio *serio) +-{ +- return false; +-} +- + static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)) + { +diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h +index 494682ce4bf3..3ab3cede28ea 100644 +--- a/include/linux/mfd/cros_ec.h ++++ b/include/linux/mfd/cros_ec.h +@@ -224,6 +224,21 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + + /** ++ * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC ++ * ++ * This function is identical to cros_ec_cmd_xfer, except it returns success ++ * status only if both the command was transmitted successfully and the EC ++ * replied with success status. It's not necessary to check msg->result when ++ * using this function. ++ * ++ * @ec_dev: EC device ++ * @msg: Message to write ++ * @return: Num. of bytes transferred on success, <0 on failure ++ */ ++int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, ++ struct cros_ec_command *msg); ++ ++/** + * cros_ec_remove - Remove a ChromeOS EC + * + * Call this to deregister a ChromeOS EC, then clean up any private data. +diff --git a/include/linux/msi.h b/include/linux/msi.h +index f71a25e5fd25..f0f43ec45ee7 100644 +--- a/include/linux/msi.h ++++ b/include/linux/msi.h +@@ -254,12 +254,12 @@ enum { + * callbacks. 
+ */ + MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), +- /* Build identity map between hwirq and irq */ +- MSI_FLAG_IDENTITY_MAP = (1 << 2), + /* Support multiple PCI MSI interrupts */ +- MSI_FLAG_MULTI_PCI_MSI = (1 << 3), ++ MSI_FLAG_MULTI_PCI_MSI = (1 << 2), + /* Support PCI MSIX interrupts */ +- MSI_FLAG_PCI_MSIX = (1 << 4), ++ MSI_FLAG_PCI_MSIX = (1 << 3), ++ /* Needs early activate, required for PCI */ ++ MSI_FLAG_ACTIVATE_EARLY = (1 << 4), + }; + + int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h +index d9ba49cedc5d..37f05cb1dfd6 100644 +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -2495,6 +2495,13 @@ + #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 + #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff + ++#define PCI_VENDOR_ID_NETRONOME 0x19ee ++#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200 ++#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240 ++#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 ++#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000 ++#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003 ++ + #define PCI_VENDOR_ID_QMI 0x1a32 + + #define PCI_VENDOR_ID_AZWAVE 0x1a3b +diff --git a/include/linux/serio.h b/include/linux/serio.h +index df4ab5de1586..c733cff44e18 100644 +--- a/include/linux/serio.h ++++ b/include/linux/serio.h +@@ -31,7 +31,8 @@ struct serio { + + struct serio_device_id id; + +- spinlock_t lock; /* protects critical sections from port's interrupt handler */ ++ /* Protects critical sections from port's interrupt handler */ ++ spinlock_t lock; + + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); +@@ -40,16 +41,29 @@ struct serio { + void (*stop)(struct serio *); + + struct serio *parent; +- struct list_head child_node; /* Entry in parent->children list */ ++ /* Entry in parent->children list */ ++ struct list_head child_node; + struct list_head children; +- unsigned int depth; /* level of nesting in serio hierarchy */ ++ /* 
Level of nesting in serio hierarchy */ ++ unsigned int depth; + +- struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */ +- struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */ ++ /* ++ * serio->drv is accessed from interrupt handlers; when modifying ++ * caller should acquire serio->drv_mutex and serio->lock. ++ */ ++ struct serio_driver *drv; ++ /* Protects serio->drv so attributes can pin current driver */ ++ struct mutex drv_mutex; + + struct device dev; + + struct list_head node; ++ ++ /* ++ * For use by PS/2 layer when several ports share hardware and ++ * may get indigestion when exposed to concurrent access (i8042). ++ */ ++ struct mutex *ps2_cmd_mutex; + }; + #define to_serio_port(d) container_of(d, struct serio, dev) + +diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h +index 4338eb7b09b3..779a62aafafe 100644 +--- a/include/uapi/linux/usb/ch9.h ++++ b/include/uapi/linux/usb/ch9.h +@@ -954,6 +954,7 @@ enum usb_device_speed { + USB_SPEED_HIGH, /* usb 2.0 */ + USB_SPEED_WIRELESS, /* wireless (usb 2.5) */ + USB_SPEED_SUPER, /* usb 3.0 */ ++ USB_SPEED_SUPER_PLUS, /* usb 3.1 */ + }; + + +diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c +index 6b0c0b74a2a1..4b21779d5163 100644 +--- a/kernel/irq/msi.c ++++ b/kernel/irq/msi.c +@@ -268,7 +268,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + struct msi_domain_ops *ops = info->ops; + msi_alloc_info_t arg; + struct msi_desc *desc; +- int i, ret, virq = -1; ++ int i, ret, virq; + + ret = ops->msi_check(domain, info, dev); + if (ret == 0) +@@ -278,12 +278,8 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + + for_each_msi_entry(desc, dev) { + ops->set_desc(&arg, desc); +- if (info->flags & MSI_FLAG_IDENTITY_MAP) +- virq = (int)ops->get_hwirq(info, &arg); +- else +- virq = -1; + +- virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used, ++ virq = 
__irq_domain_alloc_irqs(domain, -1, desc->nvec_used, + dev_to_node(dev), &arg, false); + if (virq < 0) { + ret = -ENOSPC; +@@ -307,6 +303,17 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + else + dev_dbg(dev, "irq [%d-%d] for MSI\n", + virq, virq + desc->nvec_used - 1); ++ /* ++ * This flag is set by the PCI layer as we need to activate ++ * the MSI entries before the PCI layer enables MSI in the ++ * card. Otherwise the card latches a random msi message. ++ */ ++ if (info->flags & MSI_FLAG_ACTIVATE_EARLY) { ++ struct irq_data *irq_data; ++ ++ irq_data = irq_domain_get_irq_data(domain, desc->irq); ++ irq_domain_activate_irq(irq_data); ++ } + } + + return 0; +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 67d1e1597d9c..ea863bc22caf 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -627,7 +627,10 @@ int get_nohz_timer_target(void) + rcu_read_lock(); + for_each_domain(cpu, sd) { + for_each_cpu(i, sched_domain_span(sd)) { +- if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) { ++ if (cpu == i) ++ continue; ++ ++ if (!idle_cpu(i) && is_housekeeping_cpu(i)) { + cpu = i; + goto unlock; + } +diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c +index f74ea89e77a8..a1aecbedf5b1 100644 +--- a/kernel/sched/cputime.c ++++ b/kernel/sched/cputime.c +@@ -600,19 +600,25 @@ static void cputime_adjust(struct task_cputime *curr, + stime = curr->stime; + utime = curr->utime; + +- if (utime == 0) { +- stime = rtime; ++ /* ++ * If either stime or both stime and utime are 0, assume all runtime is ++ * userspace. Once a task gets some ticks, the monotonicy code at ++ * 'update' will ensure things converge to the observed ratio. 
++ */ ++ if (stime == 0) { ++ utime = rtime; + goto update; + } + +- if (stime == 0) { +- utime = rtime; ++ if (utime == 0) { ++ stime = rtime; + goto update; + } + + stime = scale_stime((__force u64)stime, (__force u64)rtime, + (__force u64)(stime + utime)); + ++update: + /* + * Make sure stime doesn't go backwards; this preserves monotonicity + * for utime because rtime is monotonic. +@@ -635,7 +641,6 @@ static void cputime_adjust(struct task_cputime *curr, + stime = rtime - utime; + } + +-update: + prev->stime = stime; + prev->utime = utime; + out: +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 0c31f184daf8..125c7dd55322 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -4213,7 +4213,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) + if (saddr) { + spte = huge_pte_offset(svma->vm_mm, saddr); + if (spte) { +- mm_inc_nr_pmds(mm); + get_page(virt_to_page(spte)); + break; + } +@@ -4228,9 +4227,9 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) + if (pud_none(*pud)) { + pud_populate(mm, pud, + (pmd_t *)((unsigned long)spte & PAGE_MASK)); ++ mm_inc_nr_pmds(mm); + } else { + put_page(virt_to_page(spte)); +- mm_inc_nr_pmds(mm); + } + spin_unlock(ptl); + out: +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index c12f348138ac..19322c047386 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -865,7 +865,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) + + /* free all potentially still buffered bcast frames */ + local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); +- skb_queue_purge(&sdata->u.ap.ps.bc_buf); ++ ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf); + + mutex_lock(&local->mtx); + ieee80211_vif_copy_chanctx_to_vlans(sdata, true); +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index bdc224d5053a..e1225b395415 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -365,7 +365,7 @@ static void 
purge_old_ps_buffers(struct ieee80211_local *local) + skb = skb_dequeue(&ps->bc_buf); + if (skb) { + purged++; +- dev_kfree_skb(skb); ++ ieee80211_free_txskb(&local->hw, skb); + } + total += skb_queue_len(&ps->bc_buf); + } +@@ -448,7 +448,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) + if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { + ps_dbg(tx->sdata, + "BC TX buffer full - dropping the oldest frame\n"); +- dev_kfree_skb(skb_dequeue(&ps->bc_buf)); ++ ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf)); + } else + tx->local->total_ps_buffered++; + +@@ -3781,7 +3781,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, + sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); + if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb)) + break; +- dev_kfree_skb_any(skb); ++ ieee80211_free_txskb(hw, skb); + } + + info = IEEE80211_SKB_CB(skb); +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c +index 799e65b944b9..06095cc8815e 100644 +--- a/net/sunrpc/auth_gss/auth_gss.c ++++ b/net/sunrpc/auth_gss/auth_gss.c +@@ -340,12 +340,14 @@ gss_release_msg(struct gss_upcall_msg *gss_msg) + } + + static struct gss_upcall_msg * +-__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid) ++__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth) + { + struct gss_upcall_msg *pos; + list_for_each_entry(pos, &pipe->in_downcall, list) { + if (!uid_eq(pos->uid, uid)) + continue; ++ if (auth && pos->auth->service != auth->service) ++ continue; + atomic_inc(&pos->count); + dprintk("RPC: %s found msg %p\n", __func__, pos); + return pos; +@@ -365,7 +367,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg) + struct gss_upcall_msg *old; + + spin_lock(&pipe->lock); +- old = __gss_find_upcall(pipe, gss_msg->uid); ++ old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth); + if (old == NULL) { + atomic_inc(&gss_msg->count); + list_add(&gss_msg->list, &pipe->in_downcall); +@@ -714,7 +716,7 @@ gss_pipe_downcall(struct file *filp, 
const char __user *src, size_t mlen) + err = -ENOENT; + /* Find a matching upcall */ + spin_lock(&pipe->lock); +- gss_msg = __gss_find_upcall(pipe, uid); ++ gss_msg = __gss_find_upcall(pipe, uid, NULL); + if (gss_msg == NULL) { + spin_unlock(&pipe->lock); + goto err_put_ctx; +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c +index 027c9ef8a263..1ba417207465 100644 +--- a/net/sunrpc/xprtsock.c ++++ b/net/sunrpc/xprtsock.c +@@ -2286,6 +2286,10 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) + /* SYN_SENT! */ + if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; ++ break; ++ case -EADDRNOTAVAIL: ++ /* Source port number is unavailable. Try a new one! */ ++ transport->srcport = 0; + } + out: + return ret; +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index e769e5764cba..12f7f6fdae4d 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -944,20 +944,23 @@ static int azx_resume(struct device *dev) + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_intel *hda; ++ struct hdac_bus *bus; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_intel, chip); ++ bus = azx_bus(chip); + if (chip->disabled || hda->init_failed || !chip->running) + return 0; + +- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL +- && hda->need_i915_power) { +- snd_hdac_display_power(azx_bus(chip), true); +- haswell_set_bclk(hda); ++ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { ++ snd_hdac_display_power(bus, true); ++ if (hda->need_i915_power) ++ haswell_set_bclk(hda); + } ++ + if (chip->msi) + if (pci_enable_msi(pci) < 0) + chip->msi = 0; +@@ -967,6 +970,11 @@ static int azx_resume(struct device *dev) + + hda_intel_init_chip(chip, true); + ++ /* power down again for link-controlled chips */ ++ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) && ++ !hda->need_i915_power) ++ 
snd_hdac_display_power(bus, false); ++ + snd_power_change_state(card, SNDRV_CTL_POWER_D0); + + trace_azx_resume(chip); +@@ -1046,6 +1054,7 @@ static int azx_runtime_resume(struct device *dev) + + chip = card->private_data; + hda = container_of(chip, struct hda_intel, chip); ++ bus = azx_bus(chip); + if (chip->disabled || hda->init_failed) + return 0; + +@@ -1053,15 +1062,9 @@ static int azx_runtime_resume(struct device *dev) + return 0; + + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { +- bus = azx_bus(chip); +- if (hda->need_i915_power) { +- snd_hdac_display_power(bus, true); ++ snd_hdac_display_power(bus, true); ++ if (hda->need_i915_power) + haswell_set_bclk(hda); +- } else { +- /* toggle codec wakeup bit for STATESTS read */ +- snd_hdac_set_codec_wakeup(bus, true); +- snd_hdac_set_codec_wakeup(bus, false); +- } + } + + /* Read STATESTS before controller reset */ +@@ -1081,6 +1084,11 @@ static int azx_runtime_resume(struct device *dev) + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & + ~STATESTS_INT_MASK); + ++ /* power down again for link-controlled chips */ ++ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) && ++ !hda->need_i915_power) ++ snd_hdac_display_power(bus, false); ++ + trace_azx_runtime_resume(chip); + return 0; + } +diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c +index 204cc074adb9..41aa3355e920 100644 +--- a/sound/usb/line6/pcm.c ++++ b/sound/usb/line6/pcm.c +@@ -55,7 +55,6 @@ static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol, + err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE); + if (err < 0) { + line6pcm->impulse_volume = 0; +- line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE); + return err; + } + } else { +@@ -211,7 +210,9 @@ static void line6_stream_stop(struct snd_line6_pcm *line6pcm, int direction, + spin_lock_irqsave(&pstr->lock, flags); + clear_bit(type, &pstr->running); + if (!pstr->running) { ++ spin_unlock_irqrestore(&pstr->lock, flags); + line6_unlink_audio_urbs(line6pcm, pstr); ++ 
spin_lock_irqsave(&pstr->lock, flags); + if (direction == SNDRV_PCM_STREAM_CAPTURE) { + line6pcm->prev_fbuf = NULL; + line6pcm->prev_fsize = 0; +diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c +index daf81d169a42..45dd34874f43 100644 +--- a/sound/usb/line6/pod.c ++++ b/sound/usb/line6/pod.c +@@ -244,8 +244,8 @@ static int pod_set_system_param_int(struct usb_line6_pod *pod, int value, + static ssize_t serial_number_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +- struct usb_interface *interface = to_usb_interface(dev); +- struct usb_line6_pod *pod = usb_get_intfdata(interface); ++ struct snd_card *card = dev_to_snd_card(dev); ++ struct usb_line6_pod *pod = card->private_data; + + return sprintf(buf, "%u\n", pod->serial_number); + } +@@ -256,8 +256,8 @@ static ssize_t serial_number_show(struct device *dev, + static ssize_t firmware_version_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +- struct usb_interface *interface = to_usb_interface(dev); +- struct usb_line6_pod *pod = usb_get_intfdata(interface); ++ struct snd_card *card = dev_to_snd_card(dev); ++ struct usb_line6_pod *pod = card->private_data; + + return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100, + pod->firmware_version % 100); +@@ -269,8 +269,8 @@ static ssize_t firmware_version_show(struct device *dev, + static ssize_t device_id_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +- struct usb_interface *interface = to_usb_interface(dev); +- struct usb_line6_pod *pod = usb_get_intfdata(interface); ++ struct snd_card *card = dev_to_snd_card(dev); ++ struct usb_line6_pod *pod = card->private_data; + + return sprintf(buf, "%d\n", pod->device_id); + } +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index db11ecf0b74d..a3e1252ce242 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -1129,6 +1129,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) + { + /* devices which do not 
support reading the sample rate. */ + switch (chip->usb_id) { ++ case USB_ID(0x041E, 0x4080): /* Creative Live Cam VF0610 */ + case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ + case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ + case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */ +@@ -1139,6 +1140,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) + case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ + case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ + case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ ++ case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ + case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ + case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */ + case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */ +diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c +index b02af064f0f9..c53f78767568 100644 +--- a/tools/perf/arch/x86/util/intel-pt.c ++++ b/tools/perf/arch/x86/util/intel-pt.c +@@ -499,7 +499,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, + struct intel_pt_recording *ptr = + container_of(itr, struct intel_pt_recording, itr); + struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu; +- bool have_timing_info; ++ bool have_timing_info, need_immediate = false; + struct perf_evsel *evsel, *intel_pt_evsel = NULL; + const struct cpu_map *cpus = evlist->cpus; + bool privileged = geteuid() == 0 || perf_event_paranoid() < 0; +@@ -653,6 +653,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, + ptr->have_sched_switch = 3; + } else { + opts->record_switch_events = true; ++ need_immediate = true; + if (cpu_wide) + ptr->have_sched_switch = 3; + else +@@ -698,6 +699,9 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, + tracking_evsel->attr.freq = 0; + tracking_evsel->attr.sample_period = 1; + ++ if (need_immediate) ++ tracking_evsel->immediate = true; ++ + /* In per-cpu case, always need the time of mmap 
events etc */ + if (!cpu_map__empty(cpus)) { + perf_evsel__set_sample_bit(tracking_evsel, TIME); +diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c +index 51cf8256c6cd..f0d1c8ff8e8a 100644 +--- a/tools/testing/nvdimm/test/nfit.c ++++ b/tools/testing/nvdimm/test/nfit.c +@@ -13,6 +13,7 @@ + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include + #include ++#include + #include + #include + #include +@@ -1246,6 +1247,7 @@ static int nfit_test_probe(struct platform_device *pdev) + if (nfit_test->setup != nfit_test0_setup) + return 0; + ++ flush_work(&acpi_desc->work); + nfit_test->setup_hotplug = 1; + nfit_test->setup(nfit_test); +