diff --git a/patch/kernel/rk3328-default/patch-4.4.70-71.patch b/patch/kernel/rk3328-default/patch-4.4.70-71.patch new file mode 100644 index 000000000..e72e5bfad --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.70-71.patch @@ -0,0 +1,2294 @@ +diff --git a/Makefile b/Makefile +index a5ecb29c6ed3..ad91a79aed51 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 70 ++SUBLEVEL = 71 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h +index 91b963a887b7..29c3b400f949 100644 +--- a/arch/sparc/include/asm/pgtable_32.h ++++ b/arch/sparc/include/asm/pgtable_32.h +@@ -91,9 +91,9 @@ extern unsigned long pfn_base; + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +-extern unsigned long empty_zero_page; ++extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; + +-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) ++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + + /* + * In general all page table modifications should use the V8 atomic +diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h +index 29d64b1758ed..be0cc1beed41 100644 +--- a/arch/sparc/include/asm/setup.h ++++ b/arch/sparc/include/asm/setup.h +@@ -16,7 +16,7 @@ extern char reboot_command[]; + */ + extern unsigned char boot_cpu_id; + +-extern unsigned long empty_zero_page; ++extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; + + extern int serial_console; + static inline int con_is_present(void) +diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c +index eb8287155279..3b7092d9ea8f 100644 +--- a/arch/sparc/mm/init_32.c ++++ b/arch/sparc/mm/init_32.c +@@ -301,7 +301,7 @@ void __init mem_init(void) + + + /* Saves us work later. 
*/ +- memset((void *)&empty_zero_page, 0, PAGE_SIZE); ++ memset((void *)empty_zero_page, 0, PAGE_SIZE); + + i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); + i += 1; +diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c +index fc061f7c2bd1..a7de8ae185a5 100644 +--- a/drivers/char/pcmcia/cm4040_cs.c ++++ b/drivers/char/pcmcia/cm4040_cs.c +@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, + + rc = write_sync_reg(SCR_HOST_TO_READER_START, dev); + if (rc <= 0) { +- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); ++ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); + DEBUGP(2, dev, "<- cm4040_write (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; +@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, + for (i = 0; i < bytes_to_write; i++) { + rc = wait_for_bulk_out_ready(dev); + if (rc <= 0) { +- DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n", ++ DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n", + rc); + DEBUGP(2, dev, "<- cm4040_write (failed)\n"); + if (rc == -ERESTARTSYS) +@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, + rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev); + + if (rc <= 0) { +- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); ++ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); + DEBUGP(2, dev, "<- cm4040_write (failed)\n"); + if (rc == -ERESTARTSYS) + return rc; +diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c +index ce0645d0c1e5..61e3a097a478 100644 +--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c ++++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c +@@ -783,20 +783,23 @@ void psb_intel_lvds_init(struct drm_device *dev, + if (scan->type & DRM_MODE_TYPE_PREFERRED) { + mode_dev->panel_fixed_mode = + drm_mode_duplicate(dev, scan); ++ DRM_DEBUG_KMS("Using mode from DDC\n"); + goto out; /* FIXME: check for quirks */ + } + } + + /* Failed to get EDID, what about VBT? do we need this? */ +- if (mode_dev->vbt_mode) ++ if (dev_priv->lfp_lvds_vbt_mode) { + mode_dev->panel_fixed_mode = +- drm_mode_duplicate(dev, mode_dev->vbt_mode); ++ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + +- if (!mode_dev->panel_fixed_mode) +- if (dev_priv->lfp_lvds_vbt_mode) +- mode_dev->panel_fixed_mode = +- drm_mode_duplicate(dev, +- dev_priv->lfp_lvds_vbt_mode); ++ if (mode_dev->panel_fixed_mode) { ++ mode_dev->panel_fixed_mode->type |= ++ DRM_MODE_TYPE_PREFERRED; ++ DRM_DEBUG_KMS("Using mode from VBT\n"); ++ goto out; ++ } ++ } + + /* + * If we didn't get EDID, try checking if the panel is already turned +@@ -813,6 +816,7 @@ void psb_intel_lvds_init(struct drm_device *dev, + if (mode_dev->panel_fixed_mode) { + mode_dev->panel_fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; ++ DRM_DEBUG_KMS("Using pre-programmed mode\n"); + goto out; /* FIXME: check for quirks */ + } + } +diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c +index 4a09947be244..3c32f095a873 100644 +--- a/drivers/gpu/drm/radeon/ci_dpm.c ++++ b/drivers/gpu/drm/radeon/ci_dpm.c +@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev) + u32 vblank_time = r600_dpm_get_vblank_time(rdev); + u32 switch_limit = pi->mem_gddr5 ? 
450 : 300; + ++ /* disable mclk switching if the refresh is >120Hz, even if the ++ * blanking period would allow it ++ */ ++ if (r600_dpm_get_vrefresh(rdev) > 120) ++ return true; ++ + if (vblank_time < switch_limit) + return true; + else +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c +index f81fb2641097..134874cab4c7 100644 +--- a/drivers/gpu/drm/radeon/cik.c ++++ b/drivers/gpu/drm/radeon/cik.c +@@ -7762,7 +7762,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +@@ -7792,7 +7792,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c +index 32491355a1d4..ba9e6ed4ae54 100644 +--- a/drivers/gpu/drm/radeon/evergreen.c ++++ b/drivers/gpu/drm/radeon/evergreen.c +@@ -4924,7 +4924,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +@@ -4955,7 +4955,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c +index cc2fdf0be37a..0e20c08f8977 100644 +--- a/drivers/gpu/drm/radeon/r600.c ++++ b/drivers/gpu/drm/radeon/r600.c +@@ -3945,7 +3945,7 @@ static void r600_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c +index f878d6962da5..5cf3a2cbc07e 100644 +--- a/drivers/gpu/drm/radeon/si.c ++++ b/drivers/gpu/drm/radeon/si.c +@@ -6335,7 +6335,7 @@ static inline void si_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +@@ -6366,7 +6366,7 @@ static inline void si_irq_ack(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { +- tmp = RREG32(DC_HPD5_INT_CONTROL); ++ tmp = RREG32(DC_HPD6_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index 35e3fd9fadf6..b62c50d1b1e4 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -1440,37 
+1440,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) + { + unsigned char *data = wacom->data; + +- if (wacom->pen_input) ++ if (wacom->pen_input) { + dev_dbg(wacom->pen_input->dev.parent, + "%s: received report #%d\n", __func__, data[0]); +- else if (wacom->touch_input) ++ ++ if (len == WACOM_PKGLEN_PENABLED || ++ data[0] == WACOM_REPORT_PENABLED) ++ return wacom_tpc_pen(wacom); ++ } ++ else if (wacom->touch_input) { + dev_dbg(wacom->touch_input->dev.parent, + "%s: received report #%d\n", __func__, data[0]); + +- switch (len) { +- case WACOM_PKGLEN_TPC1FG: +- return wacom_tpc_single_touch(wacom, len); ++ switch (len) { ++ case WACOM_PKGLEN_TPC1FG: ++ return wacom_tpc_single_touch(wacom, len); + +- case WACOM_PKGLEN_TPC2FG: +- return wacom_tpc_mt_touch(wacom); ++ case WACOM_PKGLEN_TPC2FG: ++ return wacom_tpc_mt_touch(wacom); + +- case WACOM_PKGLEN_PENABLED: +- return wacom_tpc_pen(wacom); ++ default: ++ switch (data[0]) { ++ case WACOM_REPORT_TPC1FG: ++ case WACOM_REPORT_TPCHID: ++ case WACOM_REPORT_TPCST: ++ case WACOM_REPORT_TPC1FGE: ++ return wacom_tpc_single_touch(wacom, len); + +- default: +- switch (data[0]) { +- case WACOM_REPORT_TPC1FG: +- case WACOM_REPORT_TPCHID: +- case WACOM_REPORT_TPCST: +- case WACOM_REPORT_TPC1FGE: +- return wacom_tpc_single_touch(wacom, len); +- +- case WACOM_REPORT_TPCMT: +- case WACOM_REPORT_TPCMT2: +- return wacom_mt_touch(wacom); ++ case WACOM_REPORT_TPCMT: ++ case WACOM_REPORT_TPCMT2: ++ return wacom_mt_touch(wacom); + +- case WACOM_REPORT_PENABLED: +- return wacom_tpc_pen(wacom); ++ } + } + } + +diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c +index 0ed77eeff31e..a2e3dd715380 100644 +--- a/drivers/i2c/busses/i2c-tiny-usb.c ++++ b/drivers/i2c/busses/i2c-tiny-usb.c +@@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd, + int value, int index, void *data, int len) + { + struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; ++ void *dmadata = kmalloc(len, GFP_KERNEL); ++ int ret; ++ ++ if (!dmadata) ++ return -ENOMEM; + + /* do control transfer */ +- return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), ++ ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), + cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | +- USB_DIR_IN, value, index, data, len, 2000); ++ USB_DIR_IN, value, index, dmadata, len, 2000); ++ ++ memcpy(data, dmadata, len); ++ kfree(dmadata); ++ return ret; + } + + static int usb_write(struct i2c_adapter *adapter, int cmd, + int value, int index, void *data, int len) + { + struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; ++ void *dmadata = kmemdup(data, len, GFP_KERNEL); ++ int ret; ++ ++ if (!dmadata) ++ return -ENOMEM; + + /* do control transfer */ +- return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), ++ ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), + cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE, +- value, index, data, len, 2000); ++ value, index, dmadata, len, 2000); ++ ++ kfree(dmadata); ++ return ret; + } + + static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev) +diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c +index 3b423b0ad8e7..f280744578e4 100644 +--- a/drivers/mmc/host/sdhci-iproc.c ++++ b/drivers/mmc/host/sdhci-iproc.c +@@ -156,7 +156,8 @@ static const struct sdhci_ops sdhci_iproc_ops = { + }; + + static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { +- .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, ++ 
.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | ++ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, + .ops = &sdhci_iproc_ops, + }; +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c +index 8a1d9fffd7d6..26255862d1cf 100644 +--- a/drivers/net/ethernet/emulex/benet/be_main.c ++++ b/drivers/net/ethernet/emulex/benet/be_main.c +@@ -5260,9 +5260,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb, + struct be_adapter *adapter = netdev_priv(dev); + u8 l4_hdr = 0; + +- /* The code below restricts offload features for some tunneled packets. ++ /* The code below restricts offload features for some tunneled and ++ * Q-in-Q packets. + * Offload features for normal (non tunnel) packets are unchanged. + */ ++ features = vlan_features_check(skb, features); + if (!skb->encapsulation || + !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) + return features; +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c +index 0240552b50f3..d2701c53ed68 100644 +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -203,34 +203,6 @@ static int marvell_config_aneg(struct phy_device *phydev) + { + int err; + +- /* The Marvell PHY has an errata which requires +- * that certain registers get written in order +- * to restart autonegotiation */ +- err = phy_write(phydev, MII_BMCR, BMCR_RESET); +- +- if (err < 0) +- return err; +- +- err = phy_write(phydev, 0x1d, 0x1f); +- if (err < 0) +- return err; +- +- err = phy_write(phydev, 0x1e, 0x200c); +- if (err < 0) +- return err; +- +- err = phy_write(phydev, 0x1d, 0x5); +- if (err < 0) +- return err; +- +- err = phy_write(phydev, 0x1e, 0); +- if (err < 0) +- return err; +- +- err = phy_write(phydev, 0x1e, 0x100); +- if (err < 0) +- return err; +- + err = marvell_set_polarity(phydev, phydev->mdix); + if (err < 0) + return err; +@@ -264,6 +236,42 @@ static int marvell_config_aneg(struct phy_device *phydev) + return 0; + } + ++static int m88e1101_config_aneg(struct phy_device *phydev) ++{ ++ int err; ++ ++ /* This Marvell PHY has an errata which requires ++ * that certain registers get written in order ++ * to restart autonegotiation ++ */ ++ err = phy_write(phydev, MII_BMCR, BMCR_RESET); ++ ++ if (err < 0) ++ return err; ++ ++ err = phy_write(phydev, 0x1d, 0x1f); ++ if (err < 0) ++ return err; ++ ++ err = phy_write(phydev, 0x1e, 0x200c); ++ if (err < 0) ++ return err; ++ ++ err = phy_write(phydev, 0x1d, 0x5); ++ if (err < 0) ++ return err; ++ ++ err = phy_write(phydev, 0x1e, 0); ++ if (err < 0) ++ return err; ++ ++ err = phy_write(phydev, 0x1e, 0x100); ++ if (err < 0) ++ return err; ++ ++ return marvell_config_aneg(phydev); ++} ++ + #ifdef CONFIG_OF_MDIO + /* + * Set and/or override some configuration registers based on the +@@ -993,7 +1001,7 @@ static struct phy_driver marvell_drivers[] = { + .name = "Marvell 88E1101", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, +- .config_aneg = &marvell_config_aneg, ++ .config_aneg = &m88e1101_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index c6f5d9a6bec6..582d8f0c6266 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -730,6 +730,8 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ + {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ + 
{QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ ++ {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ ++ {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ + {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 0e2a19e58923..7f7c87762bc6 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -1415,6 +1415,7 @@ static const struct net_device_ops virtnet_netdev = { + #ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = virtnet_busy_poll, + #endif ++ .ndo_features_check = passthru_features_check, + }; + + static void virtnet_config_changed_work(struct work_struct *work) +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h +index 1766a20ebcb1..741f3ee81cfe 100644 +--- a/drivers/s390/net/qeth_core.h ++++ b/drivers/s390/net/qeth_core.h +@@ -717,6 +717,7 @@ enum qeth_discipline_id { + }; + + struct qeth_discipline { ++ const struct device_type *devtype; + void (*start_poll)(struct ccw_device *, int, unsigned long); + qdio_handler_t *input_handler; + qdio_handler_t *output_handler; +@@ -881,6 +882,9 @@ extern struct qeth_discipline qeth_l2_discipline; + extern struct qeth_discipline qeth_l3_discipline; + extern const struct attribute_group *qeth_generic_attr_groups[]; + extern const struct attribute_group *qeth_osn_attr_groups[]; ++extern const struct attribute_group qeth_device_attr_group; ++extern const struct attribute_group qeth_device_blkt_group; ++extern const struct device_type qeth_generic_devtype; + extern struct workqueue_struct *qeth_wq; + + int qeth_card_hw_is_reachable(struct qeth_card *); +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c +index 31ac53fa5cee..d10bf3da8e5f 100644 +--- a/drivers/s390/net/qeth_core_main.c ++++ b/drivers/s390/net/qeth_core_main.c +@@ -5449,10 +5449,12 @@ void qeth_core_free_discipline(struct qeth_card *card) + card->discipline = NULL; + } + +-static const struct device_type qeth_generic_devtype = { ++const struct device_type qeth_generic_devtype = { + .name = "qeth_generic", + .groups = qeth_generic_attr_groups, + }; ++EXPORT_SYMBOL_GPL(qeth_generic_devtype); ++ + static const struct device_type qeth_osn_devtype = { + .name = "qeth_osn", + .groups = qeth_osn_attr_groups, +@@ -5578,23 +5580,22 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) + goto err_card; + } + +- if (card->info.type == QETH_CARD_TYPE_OSN) +- gdev->dev.type = &qeth_osn_devtype; +- else +- gdev->dev.type = &qeth_generic_devtype; +- + switch (card->info.type) { + case QETH_CARD_TYPE_OSN: + case QETH_CARD_TYPE_OSM: + rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); + if (rc) + goto err_card; ++ ++ gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN) ++ ? 
card->discipline->devtype ++ : &qeth_osn_devtype; + rc = card->discipline->setup(card->gdev); + if (rc) + goto err_disc; +- case QETH_CARD_TYPE_OSD: +- case QETH_CARD_TYPE_OSX: ++ break; + default: ++ gdev->dev.type = &qeth_generic_devtype; + break; + } + +@@ -5650,8 +5651,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev) + if (rc) + goto err; + rc = card->discipline->setup(card->gdev); +- if (rc) ++ if (rc) { ++ qeth_core_free_discipline(card); + goto err; ++ } + } + rc = card->discipline->set_online(gdev); + err: +diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c +index e6e5b9671bf2..fa844b0ff847 100644 +--- a/drivers/s390/net/qeth_core_sys.c ++++ b/drivers/s390/net/qeth_core_sys.c +@@ -409,12 +409,16 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, + + if (card->options.layer2 == newdis) + goto out; +- else { +- card->info.mac_bits = 0; +- if (card->discipline) { +- card->discipline->remove(card->gdev); +- qeth_core_free_discipline(card); +- } ++ if (card->info.type == QETH_CARD_TYPE_OSM) { ++ /* fixed layer, can't switch */ ++ rc = -EOPNOTSUPP; ++ goto out; ++ } ++ ++ card->info.mac_bits = 0; ++ if (card->discipline) { ++ card->discipline->remove(card->gdev); ++ qeth_core_free_discipline(card); + } + + rc = qeth_core_load_discipline(card, newdis); +@@ -422,6 +426,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, + goto out; + + rc = card->discipline->setup(card->gdev); ++ if (rc) ++ qeth_core_free_discipline(card); + out: + mutex_unlock(&card->discipline_mutex); + return rc ? rc : count; +@@ -699,10 +705,11 @@ static struct attribute *qeth_blkt_device_attrs[] = { + &dev_attr_inter_jumbo.attr, + NULL, + }; +-static struct attribute_group qeth_device_blkt_group = { ++const struct attribute_group qeth_device_blkt_group = { + .name = "blkt", + .attrs = qeth_blkt_device_attrs, + }; ++EXPORT_SYMBOL_GPL(qeth_device_blkt_group); + + static struct attribute *qeth_device_attrs[] = { + &dev_attr_state.attr, +@@ -722,9 +729,10 @@ static struct attribute *qeth_device_attrs[] = { + &dev_attr_switch_attrs.attr, + NULL, + }; +-static struct attribute_group qeth_device_attr_group = { ++const struct attribute_group qeth_device_attr_group = { + .attrs = qeth_device_attrs, + }; ++EXPORT_SYMBOL_GPL(qeth_device_attr_group); + + const struct attribute_group *qeth_generic_attr_groups[] = { + &qeth_device_attr_group, +diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h +index 0767556404bd..eb87bf97d38a 100644 +--- a/drivers/s390/net/qeth_l2.h ++++ b/drivers/s390/net/qeth_l2.h +@@ -8,6 +8,8 @@ + + #include "qeth_core.h" + ++extern const struct attribute_group *qeth_l2_attr_groups[]; ++ + int qeth_l2_create_device_attributes(struct device *); + void qeth_l2_remove_device_attributes(struct device *); + void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c +index df036b872b05..bf1e0e39334d 100644 +--- a/drivers/s390/net/qeth_l2_main.c ++++ b/drivers/s390/net/qeth_l2_main.c +@@ -1027,11 +1027,21 @@ static int qeth_l2_stop(struct net_device *dev) + return 0; + } + ++static const struct device_type qeth_l2_devtype = { ++ .name = "qeth_layer2", ++ .groups = qeth_l2_attr_groups, ++}; ++ + static int qeth_l2_probe_device(struct ccwgroup_device *gdev) + { + struct qeth_card *card = dev_get_drvdata(&gdev->dev); ++ int rc; + +- qeth_l2_create_device_attributes(&gdev->dev); ++ if (gdev->dev.type == &qeth_generic_devtype) { ++ rc = 
qeth_l2_create_device_attributes(&gdev->dev); ++ if (rc) ++ return rc; ++ } + INIT_LIST_HEAD(&card->vid_list); + hash_init(card->mac_htable); + card->options.layer2 = 1; +@@ -1043,7 +1053,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) + { + struct qeth_card *card = dev_get_drvdata(&cgdev->dev); + +- qeth_l2_remove_device_attributes(&cgdev->dev); ++ if (cgdev->dev.type == &qeth_generic_devtype) ++ qeth_l2_remove_device_attributes(&cgdev->dev); + qeth_set_allowed_threads(card, 0, 1); + wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); + +@@ -1101,7 +1112,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) + case QETH_CARD_TYPE_OSN: + card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, + ether_setup); +- card->dev->flags |= IFF_NOARP; + break; + default: + card->dev = alloc_etherdev(0); +@@ -1114,9 +1124,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) + card->dev->watchdog_timeo = QETH_TX_TIMEOUT; + card->dev->mtu = card->info.initial_mtu; + card->dev->netdev_ops = &qeth_l2_netdev_ops; +- card->dev->ethtool_ops = +- (card->info.type != QETH_CARD_TYPE_OSN) ? +- &qeth_l2_ethtool_ops : &qeth_l2_osn_ops; ++ if (card->info.type == QETH_CARD_TYPE_OSN) { ++ card->dev->ethtool_ops = &qeth_l2_osn_ops; ++ card->dev->flags |= IFF_NOARP; ++ } else { ++ card->dev->ethtool_ops = &qeth_l2_ethtool_ops; ++ } + card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { + card->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; +@@ -1429,6 +1442,7 @@ static int qeth_l2_control_event(struct qeth_card *card, + } + + struct qeth_discipline qeth_l2_discipline = { ++ .devtype = &qeth_l2_devtype, + .start_poll = qeth_qdio_start_poll, + .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, + .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, +diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c +index 692db49e3d2a..a48ed9e7e168 100644 +--- a/drivers/s390/net/qeth_l2_sys.c ++++ b/drivers/s390/net/qeth_l2_sys.c +@@ -272,3 +272,11 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) + } else + qeth_bridgeport_an_set(card, 0); + } ++ ++const struct attribute_group *qeth_l2_attr_groups[] = { ++ &qeth_device_attr_group, ++ &qeth_device_blkt_group, ++ /* l2 specific, see l2_{create,remove}_device_attributes(): */ ++ &qeth_l2_bridgeport_attr_group, ++ NULL, ++}; +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c +index cc4d3c3d8cc5..285fe0b2c753 100644 +--- a/drivers/s390/net/qeth_l3_main.c ++++ b/drivers/s390/net/qeth_l3_main.c +@@ -3227,8 +3227,11 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) + static int qeth_l3_probe_device(struct ccwgroup_device *gdev) + { + struct qeth_card *card = dev_get_drvdata(&gdev->dev); ++ int rc; + +- qeth_l3_create_device_attributes(&gdev->dev); ++ rc = qeth_l3_create_device_attributes(&gdev->dev); ++ if (rc) ++ return rc; + card->options.layer2 = 0; + card->info.hwtrap = 0; + return 0; +@@ -3519,6 +3522,7 @@ static int qeth_l3_control_event(struct qeth_card *card, + } + + struct qeth_discipline qeth_l3_discipline = { ++ .devtype = &qeth_generic_devtype, + .start_poll = qeth_qdio_start_poll, + .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, + .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index 8a5fbdb45cfd..e333029e4b6c 100644 +--- 
a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -4452,6 +4452,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) + struct MPT3SAS_DEVICE *sas_device_priv_data; + u32 response_code = 0; + unsigned long flags; ++ unsigned int sector_sz; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); +@@ -4510,6 +4511,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) + } + + xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); ++ ++ /* In case of bogus fw or device, we could end up having ++ * unaligned partial completion. We can force alignment here, ++ * then scsi-ml does not need to handle this misbehavior. ++ */ ++ sector_sz = scmd->device->sector_size; ++ if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz && ++ xfer_cnt % sector_sz)) { ++ sdev_printk(KERN_INFO, scmd->device, ++ "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n", ++ xfer_cnt, sector_sz); ++ xfer_cnt = round_down(xfer_cnt, sector_sz); ++ } ++ + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c +index 119c2422aac7..75884aecf920 100644 +--- a/fs/xfs/libxfs/xfs_bmap.c ++++ b/fs/xfs/libxfs/xfs_bmap.c +@@ -2179,8 +2179,10 @@ xfs_bmap_add_extent_delay_real( + } + temp = xfs_bmap_worst_indlen(bma->ip, temp); + temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); +- diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - +- (bma->cur ? bma->cur->bc_private.b.allocated : 0)); ++ diff = (int)(temp + temp2 - ++ (startblockval(PREV.br_startblock) - ++ (bma->cur ? ++ bma->cur->bc_private.b.allocated : 0))); + if (diff > 0) { + error = xfs_mod_fdblocks(bma->ip->i_mount, + -((int64_t)diff), false); +@@ -2232,7 +2234,6 @@ xfs_bmap_add_extent_delay_real( + temp = da_new; + if (bma->cur) + temp += bma->cur->bc_private.b.allocated; +- ASSERT(temp <= da_old); + if (temp < da_old) + xfs_mod_fdblocks(bma->ip->i_mount, + (int64_t)(da_old - temp), false); +diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c +index af1bbee5586e..28bc5e78b110 100644 +--- a/fs/xfs/libxfs/xfs_btree.c ++++ b/fs/xfs/libxfs/xfs_btree.c +@@ -4064,7 +4064,7 @@ xfs_btree_change_owner( + xfs_btree_readahead_ptr(cur, ptr, 1); + + /* save for the next iteration of the loop */ +- lptr = *ptr; ++ xfs_btree_copy_ptrs(cur, &lptr, ptr, 1); + } + + /* for each buffer in the level */ +diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h +index dd4824589470..234331227c0c 100644 +--- a/fs/xfs/xfs_attr.h ++++ b/fs/xfs/xfs_attr.h +@@ -112,6 +112,7 @@ typedef struct attrlist_cursor_kern { + *========================================================================*/ + + ++/* Return 0 on success, or -errno; other state communicated via *context */ + typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int, + unsigned char *, int, int, unsigned char *); + +diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c +index 4fa14820e2e2..c8be331a3196 100644 +--- a/fs/xfs/xfs_attr_list.c ++++ b/fs/xfs/xfs_attr_list.c +@@ -108,16 +108,14 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) + (int)sfe->namelen, + (int)sfe->valuelen, + &sfe->nameval[sfe->namelen]); +- ++ if (error) ++ return error; + /* + * Either search callback finished early or + * didn't fit it all in the buffer after all. 
+ */ + if (context->seen_enough) + break; +- +- if (error) +- return error; + sfe = XFS_ATTR_SF_NEXTENTRY(sfe); + } + trace_xfs_attr_list_sf_all(context); +@@ -581,7 +579,7 @@ xfs_attr_put_listent( + trace_xfs_attr_list_full(context); + alist->al_more = 1; + context->seen_enough = 1; +- return 1; ++ return 0; + } + + aep = (attrlist_ent_t *)&context->alist[context->firstu]; +diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c +index 832764ee035a..863e1bff403b 100644 +--- a/fs/xfs/xfs_bmap_util.c ++++ b/fs/xfs/xfs_bmap_util.c +@@ -682,7 +682,7 @@ xfs_getbmap( + * extents. + */ + if (map[i].br_startblock == DELAYSTARTBLOCK && +- map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip))) ++ map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip))) + ASSERT((iflags & BMV_IF_DELALLOC) != 0); + + if (map[i].br_startblock == HOLESTARTBLOCK && +diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c +index 8146b0cf20ce..dcb70969ff1c 100644 +--- a/fs/xfs/xfs_buf.c ++++ b/fs/xfs/xfs_buf.c +@@ -979,6 +979,8 @@ void + xfs_buf_unlock( + struct xfs_buf *bp) + { ++ ASSERT(xfs_buf_islocked(bp)); ++ + XB_CLEAR_OWNER(bp); + up(&bp->b_sema); + +@@ -1713,6 +1715,28 @@ error: + } + + /* ++ * Cancel a delayed write list. ++ * ++ * Remove each buffer from the list, clear the delwri queue flag and drop the ++ * associated buffer reference. ++ */ ++void ++xfs_buf_delwri_cancel( ++ struct list_head *list) ++{ ++ struct xfs_buf *bp; ++ ++ while (!list_empty(list)) { ++ bp = list_first_entry(list, struct xfs_buf, b_list); ++ ++ xfs_buf_lock(bp); ++ bp->b_flags &= ~_XBF_DELWRI_Q; ++ list_del_init(&bp->b_list); ++ xfs_buf_relse(bp); ++ } ++} ++ ++/* + * Add a buffer to the delayed write list. + * + * This queues a buffer for writeout if it hasn't already been. Note that +diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h +index c75721acd867..149bbd451731 100644 +--- a/fs/xfs/xfs_buf.h ++++ b/fs/xfs/xfs_buf.h +@@ -304,6 +304,7 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, + extern void *xfs_buf_offset(struct xfs_buf *, size_t); + + /* Delayed Write Buffer Routines */ ++extern void xfs_buf_delwri_cancel(struct list_head *); + extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *); + extern int xfs_buf_delwri_submit(struct list_head *); + extern int xfs_buf_delwri_submit_nowait(struct list_head *); +diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c +index 642d55d10075..2fbf643fa10a 100644 +--- a/fs/xfs/xfs_dir2_readdir.c ++++ b/fs/xfs/xfs_dir2_readdir.c +@@ -406,6 +406,7 @@ xfs_dir2_leaf_readbuf( + + /* + * Do we need more readahead? ++ * Each loop tries to process 1 full dir blk; last may be partial. + */ + blk_start_plug(&plug); + for (mip->ra_index = mip->ra_offset = i = 0; +@@ -416,7 +417,8 @@ xfs_dir2_leaf_readbuf( + * Read-ahead a contiguous directory block. + */ + if (i > mip->ra_current && +- map[mip->ra_index].br_blockcount >= geo->fsbcount) { ++ (map[mip->ra_index].br_blockcount - mip->ra_offset) >= ++ geo->fsbcount) { + xfs_dir3_data_readahead(dp, + map[mip->ra_index].br_startoff + mip->ra_offset, + XFS_FSB_TO_DADDR(dp->i_mount, +@@ -437,14 +439,19 @@ xfs_dir2_leaf_readbuf( + } + + /* +- * Advance offset through the mapping table. ++ * Advance offset through the mapping table, processing a full ++ * dir block even if it is fragmented into several extents. ++ * But stop if we have consumed all valid mappings, even if ++ * it's not yet a full directory block. 
+ */ +- for (j = 0; j < geo->fsbcount; j += length ) { ++ for (j = 0; ++ j < geo->fsbcount && mip->ra_index < mip->map_valid; ++ j += length ) { + /* + * The rest of this extent but not more than a dir + * block. + */ +- length = min_t(int, geo->fsbcount, ++ length = min_t(int, geo->fsbcount - j, + map[mip->ra_index].br_blockcount - + mip->ra_offset); + mip->ra_offset += length; +diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c +index f5392ab2def1..ceea444dafb4 100644 +--- a/fs/xfs/xfs_file.c ++++ b/fs/xfs/xfs_file.c +@@ -1208,7 +1208,7 @@ xfs_find_get_desired_pgoff( + unsigned nr_pages; + unsigned int i; + +- want = min_t(pgoff_t, end - index, PAGEVEC_SIZE); ++ want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1; + nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, + want); + /* +@@ -1235,17 +1235,6 @@ xfs_find_get_desired_pgoff( + break; + } + +- /* +- * At lease we found one page. If this is the first time we +- * step into the loop, and if the first page index offset is +- * greater than the given search offset, a hole was found. +- */ +- if (type == HOLE_OFF && lastoff == startoff && +- lastoff < page_offset(pvec.pages[0])) { +- found = true; +- break; +- } +- + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + loff_t b_offset; +@@ -1257,18 +1246,18 @@ xfs_find_get_desired_pgoff( + * file mapping. However, page->index will not change + * because we have a reference on the page. + * +- * Searching done if the page index is out of range. +- * If the current offset is not reaches the end of +- * the specified search range, there should be a hole +- * between them. ++ * If current page offset is beyond where we've ended, ++ * we've found a hole. + */ +- if (page->index > end) { +- if (type == HOLE_OFF && lastoff < endoff) { +- *offset = lastoff; +- found = true; +- } ++ if (type == HOLE_OFF && lastoff < endoff && ++ lastoff < page_offset(pvec.pages[i])) { ++ found = true; ++ *offset = lastoff; + goto out; + } ++ /* Searching done if the page index is out of range. */ ++ if (page->index > end) ++ goto out; + + lock_page(page); + /* +diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c +index d7a490f24ead..adbc1f59969a 100644 +--- a/fs/xfs/xfs_icache.c ++++ b/fs/xfs/xfs_icache.c +@@ -210,14 +210,17 @@ xfs_iget_cache_hit( + + error = inode_init_always(mp->m_super, inode); + if (error) { ++ bool wake; + /* + * Re-initializing the inode failed, and we are in deep + * trouble. Try to re-add it to the reclaim list. + */ + rcu_read_lock(); + spin_lock(&ip->i_flags_lock); +- ++ wake = !!__xfs_iflags_test(ip, XFS_INEW); + ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); ++ if (wake) ++ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); + ASSERT(ip->i_flags & XFS_IRECLAIMABLE); + trace_xfs_iget_reclaim_fail(ip); + goto out_error; +@@ -363,6 +366,22 @@ out_destroy: + return error; + } + ++static void ++xfs_inew_wait( ++ struct xfs_inode *ip) ++{ ++ wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); ++ DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT); ++ ++ do { ++ prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); ++ if (!xfs_iflags_test(ip, XFS_INEW)) ++ break; ++ schedule(); ++ } while (true); ++ finish_wait(wq, &wait.wait); ++} ++ + /* + * Look up an inode by number in the given file system. + * The inode is looked up in the cache held in each AG. 
+@@ -467,9 +486,11 @@ out_error_or_again: + + STATIC int + xfs_inode_ag_walk_grab( +- struct xfs_inode *ip) ++ struct xfs_inode *ip, ++ int flags) + { + struct inode *inode = VFS_I(ip); ++ bool newinos = !!(flags & XFS_AGITER_INEW_WAIT); + + ASSERT(rcu_read_lock_held()); + +@@ -487,7 +508,8 @@ xfs_inode_ag_walk_grab( + goto out_unlock_noent; + + /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ +- if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM)) ++ if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) || ++ __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM)) + goto out_unlock_noent; + spin_unlock(&ip->i_flags_lock); + +@@ -515,7 +537,8 @@ xfs_inode_ag_walk( + void *args), + int flags, + void *args, +- int tag) ++ int tag, ++ int iter_flags) + { + uint32_t first_index; + int last_error = 0; +@@ -557,7 +580,7 @@ restart: + for (i = 0; i < nr_found; i++) { + struct xfs_inode *ip = batch[i]; + +- if (done || xfs_inode_ag_walk_grab(ip)) ++ if (done || xfs_inode_ag_walk_grab(ip, iter_flags)) + batch[i] = NULL; + + /* +@@ -585,6 +608,9 @@ restart: + for (i = 0; i < nr_found; i++) { + if (!batch[i]) + continue; ++ if ((iter_flags & XFS_AGITER_INEW_WAIT) && ++ xfs_iflags_test(batch[i], XFS_INEW)) ++ xfs_inew_wait(batch[i]); + error = execute(batch[i], flags, args); + IRELE(batch[i]); + if (error == -EAGAIN) { +@@ -637,12 +663,13 @@ xfs_eofblocks_worker( + } + + int +-xfs_inode_ag_iterator( ++xfs_inode_ag_iterator_flags( + struct xfs_mount *mp, + int (*execute)(struct xfs_inode *ip, int flags, + void *args), + int flags, +- void *args) ++ void *args, ++ int iter_flags) + { + struct xfs_perag *pag; + int error = 0; +@@ -652,7 +679,8 @@ xfs_inode_ag_iterator( + ag = 0; + while ((pag = xfs_perag_get(mp, ag))) { + ag = pag->pag_agno + 1; +- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1); ++ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1, ++ iter_flags); + xfs_perag_put(pag); + if (error) { + last_error = error; +@@ -664,6 +692,17 @@ xfs_inode_ag_iterator( + } + + int ++xfs_inode_ag_iterator( ++ struct xfs_mount *mp, ++ int (*execute)(struct xfs_inode *ip, int flags, ++ void *args), ++ int flags, ++ void *args) ++{ ++ return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0); ++} ++ ++int + xfs_inode_ag_iterator_tag( + struct xfs_mount *mp, + int (*execute)(struct xfs_inode *ip, int flags, +@@ -680,7 +719,8 @@ xfs_inode_ag_iterator_tag( + ag = 0; + while ((pag = xfs_perag_get_tag(mp, ag, tag))) { + ag = pag->pag_agno + 1; +- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag); ++ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag, ++ 0); + xfs_perag_put(pag); + if (error) { + last_error = error; +diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h +index 62f1f91c32cb..147a79212e63 100644 +--- a/fs/xfs/xfs_icache.h ++++ b/fs/xfs/xfs_icache.h +@@ -48,6 +48,11 @@ struct xfs_eofblocks { + #define XFS_IGET_UNTRUSTED 0x2 + #define XFS_IGET_DONTCACHE 0x4 + ++/* ++ * flags for AG inode iterator ++ */ ++#define XFS_AGITER_INEW_WAIT 0x1 /* wait on new inodes */ ++ + int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino, + uint flags, uint lock_flags, xfs_inode_t **ipp); + +@@ -72,6 +77,9 @@ void xfs_eofblocks_worker(struct work_struct *); + int xfs_inode_ag_iterator(struct xfs_mount *mp, + int (*execute)(struct xfs_inode *ip, int flags, void *args), + int flags, void *args); ++int xfs_inode_ag_iterator_flags(struct xfs_mount *mp, ++ int (*execute)(struct xfs_inode *ip, int flags, void *args), ++ int 
flags, void *args, int iter_flags); + int xfs_inode_ag_iterator_tag(struct xfs_mount *mp, + int (*execute)(struct xfs_inode *ip, int flags, void *args), + int flags, void *args, int tag); +diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h +index ca9e11989cbd..ae1a49845744 100644 +--- a/fs/xfs/xfs_inode.h ++++ b/fs/xfs/xfs_inode.h +@@ -208,7 +208,8 @@ xfs_get_initial_prid(struct xfs_inode *dp) + #define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */ + #define XFS_ISTALE (1 << 1) /* inode has been staled */ + #define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */ +-#define XFS_INEW (1 << 3) /* inode has just been allocated */ ++#define __XFS_INEW_BIT 3 /* inode has just been allocated */ ++#define XFS_INEW (1 << __XFS_INEW_BIT) + #define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */ + #define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */ + #define __XFS_IFLOCK_BIT 7 /* inode is being flushed right now */ +@@ -453,6 +454,7 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip) + xfs_iflags_clear(ip, XFS_INEW); + barrier(); + unlock_new_inode(VFS_I(ip)); ++ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); + } + + static inline void xfs_setup_existing_inode(struct xfs_inode *ip) +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c +index d42738deec6d..e4a4f82ea13f 100644 +--- a/fs/xfs/xfs_ioctl.c ++++ b/fs/xfs/xfs_ioctl.c +@@ -403,6 +403,7 @@ xfs_attrlist_by_handle( + { + int error = -ENOMEM; + attrlist_cursor_kern_t *cursor; ++ struct xfs_fsop_attrlist_handlereq __user *p = arg; + xfs_fsop_attrlist_handlereq_t al_hreq; + struct dentry *dentry; + char *kbuf; +@@ -435,6 +436,11 @@ xfs_attrlist_by_handle( + if (error) + goto out_kfree; + ++ if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) { ++ error = -EFAULT; ++ goto out_kfree; ++ } ++ + if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen)) + error = -EFAULT; + +@@ -1379,10 +1385,11 @@ xfs_ioc_getbmap( + unsigned int cmd, + void __user *arg) + { +- struct getbmapx bmx; ++ struct getbmapx bmx = { 0 }; + int error; + +- if (copy_from_user(&bmx, arg, sizeof(struct getbmapx))) ++ /* struct getbmap is a strict subset of struct getbmapx. 
*/ ++ if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags))) + return -EFAULT; + + if (bmx.bmv_count < 2) +diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c +index 532ab79d38fe..572b64a135b3 100644 +--- a/fs/xfs/xfs_qm.c ++++ b/fs/xfs/xfs_qm.c +@@ -1355,12 +1355,7 @@ xfs_qm_quotacheck( + mp->m_qflags |= flags; + + error_return: +- while (!list_empty(&buffer_list)) { +- struct xfs_buf *bp = +- list_first_entry(&buffer_list, struct xfs_buf, b_list); +- list_del_init(&bp->b_list); +- xfs_buf_relse(bp); +- } ++ xfs_buf_delwri_cancel(&buffer_list); + + if (error) { + xfs_warn(mp, +diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c +index 3640c6e896af..4d334440bd94 100644 +--- a/fs/xfs/xfs_qm_syscalls.c ++++ b/fs/xfs/xfs_qm_syscalls.c +@@ -764,5 +764,6 @@ xfs_qm_dqrele_all_inodes( + uint flags) + { + ASSERT(mp->m_quotainfo); +- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL); ++ xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL, ++ XFS_AGITER_INEW_WAIT); + } +diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c +index 839b35ca21c6..e6dae28dfa1a 100644 +--- a/fs/xfs/xfs_xattr.c ++++ b/fs/xfs/xfs_xattr.c +@@ -180,7 +180,7 @@ xfs_xattr_put_listent( + arraytop = context->count + prefix_len + namelen + 1; + if (arraytop > context->firstu) { + context->count = -1; /* insufficient space */ +- return 1; ++ return 0; + } + offset = (char *)context->alist + context->count; + strncpy(offset, xfs_xattr_prefix(flags), prefix_len); +@@ -222,12 +222,15 @@ list_one_attr(const char *name, const size_t len, void *data, + } + + ssize_t +-xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size) ++xfs_vn_listxattr( ++ struct dentry *dentry, ++ char *data, ++ size_t size) + { + struct xfs_attr_list_context context; + struct attrlist_cursor_kern cursor = { 0 }; +- struct inode *inode = d_inode(dentry); +- int error; ++ struct inode *inode = d_inode(dentry); ++ int error; + + /* + * First read the regular on-disk attributes. +@@ -245,7 +248,9 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size) + else + context.put_listent = xfs_xattr_put_listent_sizes; + +- xfs_attr_list_int(&context); ++ error = xfs_attr_list_int(&context); ++ if (error) ++ return error; + if (context.count < 0) + return -ERANGE; + +diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h +index 67ce5bd3b56a..19db03dbbd00 100644 +--- a/include/linux/if_vlan.h ++++ b/include/linux/if_vlan.h +@@ -616,15 +616,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) + static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, + netdev_features_t features) + { +- if (skb_vlan_tagged_multi(skb)) +- features = netdev_intersect_features(features, +- NETIF_F_SG | +- NETIF_F_HIGHDMA | +- NETIF_F_FRAGLIST | +- NETIF_F_GEN_CSUM | +- NETIF_F_HW_VLAN_CTAG_TX | +- NETIF_F_HW_VLAN_STAG_TX); +- ++ if (skb_vlan_tagged_multi(skb)) { ++ /* In the case of multi-tagged packets, use a direct mask ++ * instead of using netdev_interesect_features(), to make ++ * sure that only devices supporting NETIF_F_HW_CSUM will ++ * have checksum offloading support. 
++ */ ++ features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | ++ NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | ++ NETIF_F_HW_VLAN_STAG_TX; ++ } + return features; + } + +diff --git a/include/net/dst.h b/include/net/dst.h +index c7329dcd90cc..e4f450617919 100644 +--- a/include/net/dst.h ++++ b/include/net/dst.h +@@ -110,10 +110,16 @@ struct dst_entry { + }; + }; + ++struct dst_metrics { ++ u32 metrics[RTAX_MAX]; ++ atomic_t refcnt; ++}; ++extern const struct dst_metrics dst_default_metrics; ++ + u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); +-extern const u32 dst_default_metrics[]; + + #define DST_METRICS_READ_ONLY 0x1UL ++#define DST_METRICS_REFCOUNTED 0x2UL + #define DST_METRICS_FLAGS 0x3UL + #define __DST_METRICS_PTR(Y) \ + ((u32 *)((Y) & ~DST_METRICS_FLAGS)) +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h +index 3f98233388fb..bda1721e9622 100644 +--- a/include/net/ip_fib.h ++++ b/include/net/ip_fib.h +@@ -112,11 +112,11 @@ struct fib_info { + unsigned char fib_type; + __be32 fib_prefsrc; + u32 fib_priority; +- u32 *fib_metrics; +-#define fib_mtu fib_metrics[RTAX_MTU-1] +-#define fib_window fib_metrics[RTAX_WINDOW-1] +-#define fib_rtt fib_metrics[RTAX_RTT-1] +-#define fib_advmss fib_metrics[RTAX_ADVMSS-1] ++ struct dst_metrics *fib_metrics; ++#define fib_mtu fib_metrics->metrics[RTAX_MTU-1] ++#define fib_window fib_metrics->metrics[RTAX_WINDOW-1] ++#define fib_rtt fib_metrics->metrics[RTAX_RTT-1] ++#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1] + int fib_nhs; + #ifdef CONFIG_IP_ROUTE_MULTIPATH + int fib_weight; +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 750b7893ee3a..43aee7ab143e 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1619,12 +1619,8 @@ static int soft_offline_huge_page(struct page *page, int flags) + if (ret) { + pr_info("soft offline: %#lx: migration failed %d, type %lx\n", + pfn, ret, page->flags); +- /* +- * We know that soft_offline_huge_page() tries to migrate +- * only one hugepage pointed to by hpage, so we need not +- * run through the pagelist here. 
+- */ +- putback_active_hugepage(hpage); ++ if (!list_empty(&pagelist)) ++ putback_movable_pages(&pagelist); + if (ret > 0) + ret = -EIO; + } else { +diff --git a/mm/mlock.c b/mm/mlock.c +index d6006b146fea..9d2e773f3a95 100644 +--- a/mm/mlock.c ++++ b/mm/mlock.c +@@ -277,7 +277,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) + { + int i; + int nr = pagevec_count(pvec); +- int delta_munlocked; ++ int delta_munlocked = -nr; + struct pagevec pvec_putback; + int pgrescued = 0; + +@@ -297,6 +297,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) + continue; + else + __munlock_isolation_failed(page); ++ } else { ++ delta_munlocked++; + } + + /* +@@ -308,7 +310,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) + pagevec_add(&pvec_putback, pvec->pages[i]); + pvec->pages[i] = NULL; + } +- delta_munlocked = -nr + pagevec_count(&pvec_putback); + __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); + spin_unlock_irq(&zone->lru_lock); + +diff --git a/mm/slub.c b/mm/slub.c +index 65d5f92d51d2..4cf3a9c768b1 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -5261,6 +5261,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) + char mbuf[64]; + char *buf; + struct slab_attribute *attr = to_slab_attr(slab_attrs[i]); ++ ssize_t len; + + if (!attr || !attr->store || !attr->show) + continue; +@@ -5285,8 +5286,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) + buf = buffer; + } + +- attr->show(root_cache, buf); +- attr->store(s, buf, strlen(buf)); ++ len = attr->show(root_cache, buf); ++ if (len > 0) ++ attr->store(s, buf, len); + } + + if (buffer) +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c +index 413d18e37083..ff8bb41d713f 100644 +--- a/net/bridge/br_netlink.c ++++ b/net/bridge/br_netlink.c +@@ -768,6 +768,13 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[]) + return -EPROTONOSUPPORT; + } + } ++ ++ if (data[IFLA_BR_VLAN_DEFAULT_PVID]) { ++ __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]); ++ ++ if (defpvid >= VLAN_VID_MASK) ++ return -EINVAL; ++ } + #endif + + return 0; +diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c +index 8a7ada8bb947..57be733a99bc 100644 +--- a/net/bridge/br_stp_if.c ++++ b/net/bridge/br_stp_if.c +@@ -166,6 +166,7 @@ static void br_stp_start(struct net_bridge *br) + br_debug(br, "using kernel STP\n"); + + /* To start timers on any ports left in blocking */ ++ mod_timer(&br->hello_timer, jiffies + br->hello_time); + br_port_state_selection(br); + } + +diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c +index 5f0f5af0ec35..7dbe6a5c31eb 100644 +--- a/net/bridge/br_stp_timer.c ++++ b/net/bridge/br_stp_timer.c +@@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg) + if (br->dev->flags & IFF_UP) { + br_config_bpdu_generation(br); + +- if (br->stp_enabled != BR_USER_STP) ++ if (br->stp_enabled == BR_KERNEL_STP) + mod_timer(&br->hello_timer, + round_jiffies(jiffies + br->hello_time)); + } +diff --git a/net/core/dst.c b/net/core/dst.c +index a1656e3b8d72..d7ad628bf64e 100644 +--- a/net/core/dst.c ++++ b/net/core/dst.c +@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) + } + EXPORT_SYMBOL(dst_discard_out); + +-const u32 dst_default_metrics[RTAX_MAX + 1] = { ++const struct dst_metrics dst_default_metrics = { + /* This initializer is needed to force linker to place this variable + * into const section. Otherwise it might end into bss section. 
+ * We really want to avoid false sharing on this variable, and catch + * any writes on it. + */ +- [RTAX_MAX] = 0xdeadbeef, ++ .refcnt = ATOMIC_INIT(1), + }; + + void dst_init(struct dst_entry *dst, struct dst_ops *ops, +@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops, + if (dev) + dev_hold(dev); + dst->ops = ops; +- dst_init_metrics(dst, dst_default_metrics, true); ++ dst_init_metrics(dst, dst_default_metrics.metrics, true); + dst->expires = 0UL; + dst->path = dst; + dst->from = NULL; +@@ -315,25 +315,30 @@ EXPORT_SYMBOL(dst_release); + + u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) + { +- u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC); ++ struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC); + + if (p) { +- u32 *old_p = __DST_METRICS_PTR(old); ++ struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old); + unsigned long prev, new; + +- memcpy(p, old_p, sizeof(u32) * RTAX_MAX); ++ atomic_set(&p->refcnt, 1); ++ memcpy(p->metrics, old_p->metrics, sizeof(p->metrics)); + + new = (unsigned long) p; + prev = cmpxchg(&dst->_metrics, old, new); + + if (prev != old) { + kfree(p); +- p = __DST_METRICS_PTR(prev); ++ p = (struct dst_metrics *)__DST_METRICS_PTR(prev); + if (prev & DST_METRICS_READ_ONLY) + p = NULL; ++ } else if (prev & DST_METRICS_REFCOUNTED) { ++ if (atomic_dec_and_test(&old_p->refcnt)) ++ kfree(old_p); + } + } +- return p; ++ BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0); ++ return (u32 *)p; + } + EXPORT_SYMBOL(dst_cow_metrics_generic); + +@@ -342,7 +347,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old) + { + unsigned long prev, new; + +- new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY; ++ new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY; + prev = cmpxchg(&dst->_metrics, old, new); + if (prev == old) + kfree(__DST_METRICS_PTR(old)); +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index fe38ef58997c..d43544ce7550 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -1458,13 +1458,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) + cb->nlh->nlmsg_seq, 0, + NLM_F_MULTI, + ext_filter_mask); +- /* If we ran out of room on the first message, +- * we're in trouble +- */ +- WARN_ON((err == -EMSGSIZE) && (skb->len == 0)); + +- if (err < 0) +- goto out; ++ if (err < 0) { ++ if (likely(skb->len)) ++ goto out; ++ ++ goto out_err; ++ } + + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); + cont: +@@ -1472,10 +1472,12 @@ cont: + } + } + out: ++ err = skb->len; ++out_err: + cb->args[1] = idx; + cb->args[0] = h; + +- return skb->len; ++ return err; + } + + int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len) +@@ -3127,8 +3129,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) + err = br_dev->netdev_ops->ndo_bridge_getlink( + skb, portid, seq, dev, + filter_mask, NLM_F_MULTI); +- if (err < 0 && err != -EOPNOTSUPP) +- break; ++ if (err < 0 && err != -EOPNOTSUPP) { ++ if (likely(skb->len)) ++ break; ++ ++ goto out_err; ++ } + } + idx++; + } +@@ -3139,16 +3145,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) + seq, dev, + filter_mask, + NLM_F_MULTI); +- if (err < 0 && err != -EOPNOTSUPP) +- break; ++ if (err < 0 && err != -EOPNOTSUPP) { ++ if (likely(skb->len)) ++ break; ++ ++ goto out_err; ++ } + } + idx++; + } + } ++ err = skb->len; ++out_err: + rcu_read_unlock(); + cb->args[0] = idx; + 
+- return skb->len; ++ return err; + } + + static inline size_t bridge_nlmsg_size(void) +diff --git a/net/core/sock.c b/net/core/sock.c +index 9c708a5fb751..bd2fad27891e 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1690,17 +1690,17 @@ EXPORT_SYMBOL(skb_set_owner_w); + + void skb_orphan_partial(struct sk_buff *skb) + { +- /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc, +- * so we do not completely orphan skb, but transfert all +- * accounted bytes but one, to avoid unexpected reorders. +- */ + if (skb->destructor == sock_wfree + #ifdef CONFIG_INET + || skb->destructor == tcp_wfree + #endif + ) { +- atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); +- skb->truesize = 1; ++ struct sock *sk = skb->sk; ++ ++ if (atomic_inc_not_zero(&sk->sk_refcnt)) { ++ atomic_sub(skb->truesize, &sk->sk_wmem_alloc); ++ skb->destructor = sock_efree; ++ } + } else { + skb_orphan(skb); + } +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 8113ad58fcb4..3470ad1843bb 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -422,6 +422,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, + newsk->sk_backlog_rcv = dccp_v4_do_rcv; + newnp->pktoptions = NULL; + newnp->opt = NULL; ++ newnp->ipv6_mc_list = NULL; ++ newnp->ipv6_ac_list = NULL; ++ newnp->ipv6_fl_list = NULL; + newnp->mcast_oif = inet6_iif(skb); + newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; + +@@ -486,6 +489,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, + /* Clone RX bits */ + newnp->rxopt.all = np->rxopt.all; + ++ newnp->ipv6_mc_list = NULL; ++ newnp->ipv6_ac_list = NULL; ++ newnp->ipv6_fl_list = NULL; + newnp->pktoptions = NULL; + newnp->opt = NULL; + newnp->mcast_oif = inet6_iif(skb); +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 1adba44f8fbc..66dcb529fd9c 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -757,7 +757,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) + unsigned int e = 0, s_e; + struct fib_table *tb; + struct hlist_head *head; +- int dumped = 0; ++ int dumped = 0, err; + + if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && + ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) +@@ -777,20 +777,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) + if (dumped) + memset(&cb->args[2], 0, sizeof(cb->args) - + 2 * sizeof(cb->args[0])); +- if (fib_table_dump(tb, skb, cb) < 0) +- goto out; ++ err = fib_table_dump(tb, skb, cb); ++ if (err < 0) { ++ if (likely(skb->len)) ++ goto out; ++ ++ goto out_err; ++ } + dumped = 1; + next: + e++; + } + } + out: ++ err = skb->len; ++out_err: + rcu_read_unlock(); + + cb->args[1] = e; + cb->args[0] = h; + +- return skb->len; ++ return err; + } + + /* Prepare and feed intra-kernel routing request. 
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index 67d44aa9e09f..b2504712259f 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -204,6 +204,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp) + static void free_fib_info_rcu(struct rcu_head *head) + { + struct fib_info *fi = container_of(head, struct fib_info, rcu); ++ struct dst_metrics *m; + + change_nexthops(fi) { + if (nexthop_nh->nh_dev) +@@ -214,8 +215,9 @@ static void free_fib_info_rcu(struct rcu_head *head) + rt_fibinfo_free(&nexthop_nh->nh_rth_input); + } endfor_nexthops(fi); + +- if (fi->fib_metrics != (u32 *) dst_default_metrics) +- kfree(fi->fib_metrics); ++ m = fi->fib_metrics; ++ if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt)) ++ kfree(m); + kfree(fi); + } + +@@ -982,11 +984,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg) + val = 255; + if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) + return -EINVAL; +- fi->fib_metrics[type - 1] = val; ++ fi->fib_metrics->metrics[type - 1] = val; + } + + if (ecn_ca) +- fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; ++ fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; + + return 0; + } +@@ -1044,11 +1046,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg) + goto failure; + fib_info_cnt++; + if (cfg->fc_mx) { +- fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); ++ fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); + if (!fi->fib_metrics) + goto failure; ++ atomic_set(&fi->fib_metrics->refcnt, 1); + } else +- fi->fib_metrics = (u32 *) dst_default_metrics; ++ fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; + + fi->fib_net = net; + fi->fib_protocol = cfg->fc_protocol; +@@ -1251,7 +1254,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, + if (fi->fib_priority && + nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority)) + goto nla_put_failure; +- if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) ++ if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0) + goto nla_put_failure; + + if (fi->fib_prefsrc && +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c +index 7c52afb98c42..5c598f99a500 100644 +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -1906,6 +1906,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, + + /* rcu_read_lock is hold by caller */ + hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { ++ int err; ++ + if (i < s_i) { + i++; + continue; +@@ -1916,17 +1918,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, + continue; + } + +- if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid, +- cb->nlh->nlmsg_seq, +- RTM_NEWROUTE, +- tb->tb_id, +- fa->fa_type, +- xkey, +- KEYLENGTH - fa->fa_slen, +- fa->fa_tos, +- fa->fa_info, NLM_F_MULTI) < 0) { ++ err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid, ++ cb->nlh->nlmsg_seq, RTM_NEWROUTE, ++ tb->tb_id, fa->fa_type, ++ xkey, KEYLENGTH - fa->fa_slen, ++ fa->fa_tos, fa->fa_info, NLM_F_MULTI); ++ if (err < 0) { + cb->args[4] = i; +- return -1; ++ return err; + } + i++; + } +@@ -1948,10 +1947,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, + t_key key = cb->args[3]; + + while ((l = leaf_walk_rcu(&tp, key)) != NULL) { +- if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { ++ int err; ++ ++ err = fn_trie_dump_leaf(l, tb, skb, cb); ++ if (err < 0) { + cb->args[3] = key; + cb->args[2] = count; +- return -1; ++ return err; + } + + ++count; +diff --git 
a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c +index 64148914803a..45fa2aaa3d3f 100644 +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -669,6 +669,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, + inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num); + newsk->sk_write_space = sk_stream_write_space; + ++ inet_sk(newsk)->mc_list = NULL; ++ + newsk->sk_mark = inet_rsk(req)->ir_mark; + atomic64_set(&newsk->sk_cookie, + atomic64_read(&inet_rsk(req)->ir_cookie)); +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 375248b900ba..c295d882c6e0 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1356,8 +1356,12 @@ static void rt_add_uncached_list(struct rtable *rt) + + static void ipv4_dst_destroy(struct dst_entry *dst) + { ++ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); + struct rtable *rt = (struct rtable *) dst; + ++ if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt)) ++ kfree(p); ++ + if (!list_empty(&rt->rt_uncached)) { + struct uncached_list *ul = rt->rt_uncached_list; + +@@ -1409,7 +1413,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, + rt->rt_gateway = nh->nh_gw; + rt->rt_uses_gateway = 1; + } +- dst_init_metrics(&rt->dst, fi->fib_metrics, true); ++ dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true); ++ if (fi->fib_metrics != &dst_default_metrics) { ++ rt->dst._metrics |= DST_METRICS_REFCOUNTED; ++ atomic_inc(&fi->fib_metrics->refcnt); ++ } + #ifdef CONFIG_IP_ROUTE_CLASSID + rt->dst.tclassid = nh->nh_tclassid; + #endif +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index e1d51370977b..4bd8678329d6 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -1071,9 +1071,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, + int *copied, size_t size) + { + struct tcp_sock *tp = tcp_sk(sk); ++ struct sockaddr *uaddr = msg->msg_name; + int err, flags; + +- if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) ++ if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) || ++ (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && ++ uaddr->sa_family == AF_UNSPEC)) + return -EOPNOTSUPP; + if (tp->fastopen_req) + return -EALREADY; /* Another Fast Open is in progress */ +@@ -1086,7 +1089,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, + tp->fastopen_req->size = size; + + flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; +- err = __inet_stream_connect(sk->sk_socket, msg->msg_name, ++ err = __inet_stream_connect(sk->sk_socket, uaddr, + msg->msg_namelen, flags); + *copied = tp->fastopen_req->copied; + tcp_free_fastopen_req(tp); +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 818630cec54f..87791f803627 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -1134,13 +1134,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, + */ + if (pkt_len > mss) { + unsigned int new_len = (pkt_len / mss) * mss; +- if (!in_sack && new_len < pkt_len) { ++ if (!in_sack && new_len < pkt_len) + new_len += mss; +- if (new_len >= skb->len) +- return 0; +- } + pkt_len = new_len; + } ++ ++ if (pkt_len >= skb->len && !in_sack) ++ return 0; ++ + err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); + if (err < 0) + return err; +@@ -3219,7 +3220,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, + int delta; + + /* Non-retransmitted hole got filled? 
That's reordering */ +- if (reord < prior_fackets) ++ if (reord < prior_fackets && reord <= tp->fackets_out) + tcp_update_reordering(sk, tp->fackets_out - reord, 0); + + delta = tcp_is_fack(tp) ? pkts_acked : +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c +index 225f5f7f26ba..568bc0a52ca1 100644 +--- a/net/ipv6/ip6_offload.c ++++ b/net/ipv6/ip6_offload.c +@@ -62,7 +62,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, + const struct net_offload *ops; + int proto; + struct frag_hdr *fptr; +- unsigned int unfrag_ip6hlen; + u8 *prevhdr; + int offset = 0; + bool encap, udpfrag; +@@ -121,8 +120,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, + skb->network_header = (u8 *)ipv6h - skb->head; + + if (udpfrag) { +- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); +- fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); ++ int err = ip6_find_1stfragopt(skb, &prevhdr); ++ if (err < 0) ++ return ERR_PTR(err); ++ fptr = (struct frag_hdr *)((u8 *)ipv6h + err); + fptr->frag_off = htons(offset); + if (skb->next) + fptr->frag_off |= htons(IP6_MF); +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 8004532fa882..1db17efe36c1 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -571,7 +571,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + int ptr, offset = 0, err = 0; + u8 *prevhdr, nexthdr = 0; + +- hlen = ip6_find_1stfragopt(skb, &prevhdr); ++ err = ip6_find_1stfragopt(skb, &prevhdr); ++ if (err < 0) ++ goto fail; ++ hlen = err; + nexthdr = *prevhdr; + + mtu = ip6_skb_dst_mtu(skb); +@@ -1429,6 +1432,11 @@ alloc_new_skb: + */ + alloclen += sizeof(struct frag_hdr); + ++ copy = datalen - transhdrlen - fraggap; ++ if (copy < 0) { ++ err = -EINVAL; ++ goto error; ++ } + if (transhdrlen) { + skb = sock_alloc_send_skb(sk, + alloclen + hh_len, +@@ -1478,13 +1486,9 @@ alloc_new_skb: + data += fraggap; + pskb_trim_unique(skb_prev, maxfraglen); + } +- copy = datalen - transhdrlen - fraggap; +- +- if (copy < 0) { +- err = -EINVAL; +- kfree_skb(skb); +- goto error; +- } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { ++ if (copy > 0 && ++ getfrag(from, data + transhdrlen, offset, ++ copy, fraggap, skb) < 0) { + err = -EFAULT; + kfree_skb(skb); + goto error; +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c +index 1d184322a7b1..8b56c5240429 100644 +--- a/net/ipv6/output_core.c ++++ b/net/ipv6/output_core.c +@@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident); + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) + { + u16 offset = sizeof(struct ipv6hdr); +- struct ipv6_opt_hdr *exthdr = +- (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); + unsigned int packet_len = skb_tail_pointer(skb) - + skb_network_header(skb); + int found_rhdr = 0; + *nexthdr = &ipv6_hdr(skb)->nexthdr; + +- while (offset + 1 <= packet_len) { ++ while (offset <= packet_len) { ++ struct ipv6_opt_hdr *exthdr; + + switch (**nexthdr) { + +@@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) + return offset; + } + +- offset += ipv6_optlen(exthdr); +- *nexthdr = &exthdr->nexthdr; ++ if (offset + sizeof(struct ipv6_opt_hdr) > packet_len) ++ return -EINVAL; ++ + exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + + offset); ++ offset += ipv6_optlen(exthdr); ++ *nexthdr = &exthdr->nexthdr; + } + +- return offset; ++ return -EINVAL; + } + EXPORT_SYMBOL(ip6_find_1stfragopt); + +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 
1a63c4deef26..8e958fde6e4b 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -1033,6 +1033,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * + newtp->af_specific = &tcp_sock_ipv6_mapped_specific; + #endif + ++ newnp->ipv6_mc_list = NULL; + newnp->ipv6_ac_list = NULL; + newnp->ipv6_fl_list = NULL; + newnp->pktoptions = NULL; +@@ -1102,6 +1103,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * + First: no IPv4 options. + */ + newinet->inet_opt = NULL; ++ newnp->ipv6_mc_list = NULL; + newnp->ipv6_ac_list = NULL; + newnp->ipv6_fl_list = NULL; + +diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c +index 7441e1e63893..01582966ffa0 100644 +--- a/net/ipv6/udp_offload.c ++++ b/net/ipv6/udp_offload.c +@@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, + u8 frag_hdr_sz = sizeof(struct frag_hdr); + __wsum csum; + int tnl_hlen; ++ int err; + + mss = skb_shinfo(skb)->gso_size; + if (unlikely(skb->len <= mss)) +@@ -97,7 +98,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, + /* Find the unfragmentable header and shift it left by frag_hdr_sz + * bytes to insert fragment header. + */ +- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); ++ err = ip6_find_1stfragopt(skb, &prevhdr); ++ if (err < 0) ++ return ERR_PTR(err); ++ unfrag_ip6hlen = err; + nexthdr = *prevhdr; + *prevhdr = NEXTHDR_FRAGMENT; + unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + +diff --git a/net/sctp/input.c b/net/sctp/input.c +index b6493b3f11a9..2d7859c03fd2 100644 +--- a/net/sctp/input.c ++++ b/net/sctp/input.c +@@ -472,15 +472,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, + struct sctp_association **app, + struct sctp_transport **tpp) + { ++ struct sctp_init_chunk *chunkhdr, _chunkhdr; + union sctp_addr saddr; + union sctp_addr daddr; + struct sctp_af *af; + struct sock *sk = NULL; + struct sctp_association *asoc; + struct sctp_transport *transport = NULL; +- struct sctp_init_chunk *chunkhdr; + __u32 vtag = ntohl(sctphdr->vtag); +- int len = skb->len - ((void *)sctphdr - (void *)skb->data); + + *app = NULL; *tpp = NULL; + +@@ -515,13 +514,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, + * discard the packet. 
+ */ + if (vtag == 0) { +- chunkhdr = (void *)sctphdr + sizeof(struct sctphdr); +- if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) +- + sizeof(__be32) || ++ /* chunk header + first 4 octects of init header */ ++ chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) + ++ sizeof(struct sctphdr), ++ sizeof(struct sctp_chunkhdr) + ++ sizeof(__be32), &_chunkhdr); ++ if (!chunkhdr || + chunkhdr->chunk_hdr.type != SCTP_CID_INIT || +- ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) { ++ ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) + goto out; +- } ++ + } else if (vtag != asoc->c.peer_vtag) { + goto out; + } +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c +index ce46f1c7f133..7527c168e471 100644 +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -239,12 +239,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + struct sctp_bind_addr *bp; + struct ipv6_pinfo *np = inet6_sk(sk); + struct sctp_sockaddr_entry *laddr; +- union sctp_addr *baddr = NULL; + union sctp_addr *daddr = &t->ipaddr; + union sctp_addr dst_saddr; + struct in6_addr *final_p, final; + __u8 matchlen = 0; +- __u8 bmatchlen; + sctp_scope_t scope; + + memset(fl6, 0, sizeof(struct flowi6)); +@@ -311,23 +309,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + */ + rcu_read_lock(); + list_for_each_entry_rcu(laddr, &bp->address_list, list) { +- if (!laddr->valid) ++ struct dst_entry *bdst; ++ __u8 bmatchlen; ++ ++ if (!laddr->valid || ++ laddr->state != SCTP_ADDR_SRC || ++ laddr->a.sa.sa_family != AF_INET6 || ++ scope > sctp_scope(&laddr->a)) + continue; +- if ((laddr->state == SCTP_ADDR_SRC) && +- (laddr->a.sa.sa_family == AF_INET6) && +- (scope <= sctp_scope(&laddr->a))) { +- bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); +- if (!baddr || (matchlen < bmatchlen)) { +- baddr = &laddr->a; +- matchlen = bmatchlen; +- } +- } +- } +- if (baddr) { +- fl6->saddr = baddr->v6.sin6_addr; +- fl6->fl6_sport = baddr->v6.sin6_port; ++ ++ fl6->saddr = laddr->a.v6.sin6_addr; ++ fl6->fl6_sport = laddr->a.v6.sin6_port; + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); +- dst = ip6_dst_lookup_flow(sk, fl6, final_p); ++ bdst = ip6_dst_lookup_flow(sk, fl6, final_p); ++ ++ if (!IS_ERR(bdst) && ++ ipv6_chk_addr(dev_net(bdst->dev), ++ &laddr->a.v6.sin6_addr, bdst->dev, 1)) { ++ if (!IS_ERR_OR_NULL(dst)) ++ dst_release(dst); ++ dst = bdst; ++ break; ++ } ++ ++ bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); ++ if (matchlen > bmatchlen) ++ continue; ++ ++ if (!IS_ERR_OR_NULL(dst)) ++ dst_release(dst); ++ dst = bdst; ++ matchlen = bmatchlen; + } + rcu_read_unlock(); + +@@ -662,6 +674,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, + newnp = inet6_sk(newsk); + + memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ++ newnp->ipv6_mc_list = NULL; ++ newnp->ipv6_ac_list = NULL; ++ newnp->ipv6_fl_list = NULL; + + rcu_read_lock(); + opt = rcu_dereference(np->opt); +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index 37b70f8e878f..0abab7926dca 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -1537,6 +1537,8 @@ static const struct snd_pci_quirk stac9200_fixup_tbl[] = { + "Dell Inspiron 1501", STAC_9200_DELL_M26), + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6, + "unknown Dell", STAC_9200_DELL_M26), ++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201, ++ "Dell Latitude D430", STAC_9200_DELL_M22), + /* Panasonic */ + SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", 
STAC_9200_PANASONIC), + /* Gateway machines needs EAPD to be set on resume */ diff --git a/patch/kernel/rk3328-default/patch-4.4.71-72.patch b/patch/kernel/rk3328-default/patch-4.4.71-72.patch new file mode 100644 index 000000000..684793d14 --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.71-72.patch @@ -0,0 +1,3244 @@ +diff --git a/Makefile b/Makefile +index ad91a79aed51..94d663c935c0 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 71 ++SUBLEVEL = 72 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S +index 3988e72d16ff..bfc5aae0c280 100644 +--- a/arch/arm/kvm/init.S ++++ b/arch/arm/kvm/init.S +@@ -110,7 +110,6 @@ __do_hyp_init: + @ - Write permission implies XN: disabled + @ - Instruction cache: enabled + @ - Data/Unified cache: enabled +- @ - Memory alignment checks: enabled + @ - MMU: enabled (this code must be run from an identity mapping) + mrc p15, 4, r0, c1, c0, 0 @ HSCR + ldr r2, =HSCTLR_MASK +@@ -118,8 +117,8 @@ __do_hyp_init: + mrc p15, 0, r1, c1, c0, 0 @ SCTLR + ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) + and r1, r1, r2 +- ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) ) +- THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) ) ++ ARM( ldr r2, =(HSCTLR_M) ) ++ THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) ) + orr r1, r1, r2 + orr r0, r0, r1 + isb +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c +index 01cf10556081..1f1ff7e7b9cf 100644 +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -869,6 +869,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache + pmd_t *pmd; + + pud = stage2_get_pud(kvm, cache, addr); ++ if (!pud) ++ return NULL; ++ + if (pud_none(*pud)) { + if (!cache) + return NULL; +diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h +new file mode 100644 +index 000000000000..be2d2347d995 +--- /dev/null ++++ b/arch/arm64/include/asm/asm-uaccess.h +@@ -0,0 +1,13 @@ ++#ifndef __ASM_ASM_UACCESS_H ++#define __ASM_ASM_UACCESS_H ++ ++/* ++ * Remove the address tag from a virtual address, if present. 
++ */ ++ .macro clear_address_tag, dst, addr ++ tst \addr, #(1 << 55) ++ bic \dst, \addr, #(0xff << 56) ++ csel \dst, \dst, \addr, eq ++ .endm ++ ++#endif +diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h +index 9622eb48f894..f2d2c0bbe21b 100644 +--- a/arch/arm64/include/asm/barrier.h ++++ b/arch/arm64/include/asm/barrier.h +@@ -41,23 +41,33 @@ + + #define smp_store_release(p, v) \ + do { \ ++ union { typeof(*p) __val; char __c[1]; } __u = \ ++ { .__val = (__force typeof(*p)) (v) }; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile ("stlrb %w1, %0" \ +- : "=Q" (*p) : "r" (v) : "memory"); \ ++ : "=Q" (*p) \ ++ : "r" (*(__u8 *)__u.__c) \ ++ : "memory"); \ + break; \ + case 2: \ + asm volatile ("stlrh %w1, %0" \ +- : "=Q" (*p) : "r" (v) : "memory"); \ ++ : "=Q" (*p) \ ++ : "r" (*(__u16 *)__u.__c) \ ++ : "memory"); \ + break; \ + case 4: \ + asm volatile ("stlr %w1, %0" \ +- : "=Q" (*p) : "r" (v) : "memory"); \ ++ : "=Q" (*p) \ ++ : "r" (*(__u32 *)__u.__c) \ ++ : "memory"); \ + break; \ + case 8: \ + asm volatile ("stlr %1, %0" \ +- : "=Q" (*p) : "r" (v) : "memory"); \ ++ : "=Q" (*p) \ ++ : "r" (*(__u64 *)__u.__c) \ ++ : "memory"); \ + break; \ + } \ + } while (0) +diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h +index d9ca1f2c0ea8..829fa6d3e561 100644 +--- a/arch/arm64/include/asm/uaccess.h ++++ b/arch/arm64/include/asm/uaccess.h +@@ -21,6 +21,7 @@ + /* + * User space memory access functions + */ ++#include + #include + #include + +@@ -103,6 +104,13 @@ static inline void set_fs(mm_segment_t fs) + flag; \ + }) + ++/* ++ * When dealing with data aborts, watchpoints, or instruction traps we may end ++ * up with a tagged userland pointer. Clear the tag to get a sane pointer to ++ * pass on to access_ok(), for instance. 
++ */ ++#define untagged_addr(addr) sign_extend64(addr, 55) ++ + #define access_ok(type, addr, size) __range_ok(addr, size) + #define user_addr_max get_fs + +diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c +index 884b317e..10d3642d 100644 +--- a/arch/arm64/kernel/armv8_deprecated.c ++++ b/arch/arm64/kernel/armv8_deprecated.c +@@ -299,7 +299,8 @@ do { \ + _ASM_EXTABLE(0b, 4b) \ + _ASM_EXTABLE(1b, 4b) \ + : "=&r" (res), "+r" (data), "=&r" (temp) \ +- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ ++ : "r" ((unsigned long)addr), "i" (-EAGAIN), \ ++ "i" (-EFAULT) \ + : "memory"); \ + uaccess_disable(); \ + } while (0) +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S +index bd14849beb73..dccd0c2e9023 100644 +--- a/arch/arm64/kernel/entry.S ++++ b/arch/arm64/kernel/entry.S +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -316,12 +317,13 @@ el1_da: + /* + * Data abort handling + */ +- mrs x0, far_el1 ++ mrs x3, far_el1 + enable_dbg + // re-enable interrupts if they were enabled in the aborted context + tbnz x23, #7, 1f // PSR_I_BIT + enable_irq + 1: ++ clear_address_tag x0, x3 + mov x2, sp // struct pt_regs + bl do_mem_abort + +@@ -483,7 +485,7 @@ el0_da: + // enable interrupts before calling the main handler + enable_dbg_and_irq + ct_user_exit +- bic x0, x26, #(0xff << 56) ++ clear_address_tag x0, x26 + mov x1, x25 + mov x2, sp + bl do_mem_abort +diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h +index 8b3b46b7b0f2..329771559cbb 100644 +--- a/arch/powerpc/include/asm/topology.h ++++ b/arch/powerpc/include/asm/topology.h +@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void); + extern int sysfs_add_device_to_node(struct device *dev, int nid); + extern void sysfs_remove_device_from_node(struct device *dev, int nid); + ++static inline int early_cpu_to_node(int cpu) ++{ ++ int nid; ++ ++ nid = numa_cpu_lookup_table[cpu]; ++ ++ /* ++ * Fall back to node 0 if nid is unset (it should be, except bugs). ++ * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)). ++ */ ++ return (nid < 0) ? 
0 : nid; ++} + #else + ++static inline int early_cpu_to_node(int cpu) { return 0; } ++ + static inline void dump_numa_cpu_topology(void) {} + + static inline int sysfs_add_device_to_node(struct device *dev, int nid) +diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c +index c314db8b798c..9837c98caabe 100644 +--- a/arch/powerpc/kernel/eeh_driver.c ++++ b/arch/powerpc/kernel/eeh_driver.c +@@ -655,7 +655,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) + */ + #define MAX_WAIT_FOR_RECOVERY 300 + +-static void eeh_handle_normal_event(struct eeh_pe *pe) ++static bool eeh_handle_normal_event(struct eeh_pe *pe) + { + struct pci_bus *frozen_bus; + int rc = 0; +@@ -665,7 +665,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe) + if (!frozen_bus) { + pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n", + __func__, pe->phb->global_number, pe->addr); +- return; ++ return false; + } + + eeh_pe_update_time_stamp(pe); +@@ -790,7 +790,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe) + pr_info("EEH: Notify device driver to resume\n"); + eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); + +- return; ++ return false; + + excess_failures: + /* +@@ -831,7 +831,11 @@ perm_error: + pci_lock_rescan_remove(); + pcibios_remove_pci_devices(frozen_bus); + pci_unlock_rescan_remove(); ++ ++ /* The passed PE should no longer be used */ ++ return true; + } ++ return false; + } + + static void eeh_handle_special_event(void) +@@ -897,7 +901,14 @@ static void eeh_handle_special_event(void) + */ + if (rc == EEH_NEXT_ERR_FROZEN_PE || + rc == EEH_NEXT_ERR_FENCED_PHB) { +- eeh_handle_normal_event(pe); ++ /* ++ * eeh_handle_normal_event() can make the PE stale if it ++ * determines that the PE cannot possibly be recovered. ++ * Don't modify the PE state if that's the case. 
++ */ ++ if (eeh_handle_normal_event(pe)) ++ continue; ++ + eeh_pe_state_clear(pe, EEH_PE_RECOVERING); + } else { + pci_lock_rescan_remove(); +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c +index a20823210ac0..fe6e800c1357 100644 +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -751,7 +751,7 @@ void __init setup_arch(char **cmdline_p) + + static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) + { +- return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align, ++ return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align, + __pa(MAX_DMA_ADDRESS)); + } + +@@ -762,7 +762,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size) + + static int pcpu_cpu_distance(unsigned int from, unsigned int to) + { +- if (cpu_to_node(from) == cpu_to_node(to)) ++ if (early_cpu_to_node(from) == early_cpu_to_node(to)) + return LOCAL_DISTANCE; + else + return REMOTE_DISTANCE; +diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c +index e9ff44cd5d86..e8b1027e1b5b 100644 +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c +@@ -110,6 +110,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn) + for (i = 0; i < num_lmbs; i++) { + lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr); + lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index); ++ lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index); + lmbs[i].flags = be32_to_cpu(lmbs[i].flags); + } + +@@ -553,6 +554,7 @@ static void dlpar_update_drconf_property(struct device_node *dn, + for (i = 0; i < num_lmbs; i++) { + lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr); + lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index); ++ lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index); + lmbs[i].flags = cpu_to_be32(lmbs[i].flags); + } + +diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig +index 56442d2d7bbc..eb9487470141 100644 +--- a/arch/sparc/Kconfig ++++ b/arch/sparc/Kconfig +@@ -182,9 +182,9 @@ config NR_CPUS + int "Maximum number of CPUs" + depends on SMP + range 2 32 if SPARC32 +- range 2 1024 if SPARC64 ++ range 2 4096 if SPARC64 + default 32 if SPARC32 +- default 64 if SPARC64 ++ default 4096 if SPARC64 + + source kernel/Kconfig.hz + +diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h +index f7de0dbc38af..83b36a5371ff 100644 +--- a/arch/sparc/include/asm/mmu_64.h ++++ b/arch/sparc/include/asm/mmu_64.h +@@ -52,7 +52,7 @@ + #define CTX_NR_MASK TAG_CONTEXT_BITS + #define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK) + +-#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL)) ++#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT) + #define CTX_VALID(__ctx) \ + (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK)) + #define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK) +diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h +index b84be675e507..349dd23e2876 100644 +--- a/arch/sparc/include/asm/mmu_context_64.h ++++ b/arch/sparc/include/asm/mmu_context_64.h +@@ -17,13 +17,8 @@ extern spinlock_t ctx_alloc_lock; + extern unsigned long tlb_context_cache; + extern unsigned long mmu_context_bmap[]; + ++DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm); + void get_new_mmu_context(struct mm_struct *mm); +-#ifdef CONFIG_SMP +-void smp_new_mmu_context_version(void); +-#else +-#define smp_new_mmu_context_version() do { } while (0) 
+-#endif +- + int init_new_context(struct task_struct *tsk, struct mm_struct *mm); + void destroy_context(struct mm_struct *mm); + +@@ -74,8 +69,9 @@ void __flush_tlb_mm(unsigned long, unsigned long); + static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) + { + unsigned long ctx_valid, flags; +- int cpu; ++ int cpu = smp_processor_id(); + ++ per_cpu(per_cpu_secondary_mm, cpu) = mm; + if (unlikely(mm == &init_mm)) + return; + +@@ -121,7 +117,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str + * for the first time, we must flush that context out of the + * local TLB. + */ +- cpu = smp_processor_id(); + if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) { + cpumask_set_cpu(cpu, mm_cpumask(mm)); + __flush_tlb_mm(CTX_HWBITS(mm->context), +@@ -131,26 +126,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str + } + + #define deactivate_mm(tsk,mm) do { } while (0) +- +-/* Activate a new MM instance for the current task. */ +-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) +-{ +- unsigned long flags; +- int cpu; +- +- spin_lock_irqsave(&mm->context.lock, flags); +- if (!CTX_VALID(mm->context)) +- get_new_mmu_context(mm); +- cpu = smp_processor_id(); +- if (!cpumask_test_cpu(cpu, mm_cpumask(mm))) +- cpumask_set_cpu(cpu, mm_cpumask(mm)); +- +- load_secondary_context(mm); +- __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); +- tsb_context_switch(mm); +- spin_unlock_irqrestore(&mm->context.lock, flags); +-} +- ++#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL) + #endif /* !(__ASSEMBLY__) */ + + #endif /* !(__SPARC64_MMU_CONTEXT_H) */ +diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h +index 266937030546..522b43db2ed3 100644 +--- a/arch/sparc/include/asm/pil.h ++++ b/arch/sparc/include/asm/pil.h +@@ -20,7 +20,6 @@ + #define PIL_SMP_CALL_FUNC 1 + #define PIL_SMP_RECEIVE_SIGNAL 2 + #define PIL_SMP_CAPTURE 3 +-#define PIL_SMP_CTX_NEW_VERSION 4 + #define PIL_DEVICE_IRQ 5 + #define PIL_SMP_CALL_FUNC_SNGL 6 + #define PIL_DEFERRED_PCR_WORK 7 +diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h +index 8174f6cdbbbb..9dca7a892978 100644 +--- a/arch/sparc/include/asm/vio.h ++++ b/arch/sparc/include/asm/vio.h +@@ -327,6 +327,7 @@ struct vio_dev { + int compat_len; + + u64 dev_no; ++ u64 id; + + unsigned long channel_id; + +diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c +index e22416ce56ea..bfbde8c4ffb2 100644 +--- a/arch/sparc/kernel/irq_64.c ++++ b/arch/sparc/kernel/irq_64.c +@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) + { + #ifdef CONFIG_SMP + unsigned long page; ++ void *mondo, *p; + +- BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); ++ BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE); ++ ++ /* Make sure mondo block is 64byte aligned */ ++ p = kzalloc(127, GFP_KERNEL); ++ if (!p) { ++ prom_printf("SUN4V: Error, cannot allocate mondo block.\n"); ++ prom_halt(); ++ } ++ mondo = (void *)(((unsigned long)p + 63) & ~0x3f); ++ tb->cpu_mondo_block_pa = __pa(mondo); + + page = get_zeroed_page(GFP_KERNEL); + if (!page) { +- prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); ++ prom_printf("SUN4V: Error, cannot allocate cpu list page.\n"); + prom_halt(); + } + +- tb->cpu_mondo_block_pa = __pa(page); +- tb->cpu_list_pa = __pa(page + 64); ++ tb->cpu_list_pa = __pa(page); + #endif + } + +diff --git 
a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h +index e7f652be9e61..44f32dd4477f 100644 +--- a/arch/sparc/kernel/kernel.h ++++ b/arch/sparc/kernel/kernel.h +@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr + /* smp_64.c */ + void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs); + void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs); +-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs); + void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs); + void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs); + +diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c +index 19cd08d18672..95a9fa0d2195 100644 +--- a/arch/sparc/kernel/smp_64.c ++++ b/arch/sparc/kernel/smp_64.c +@@ -959,37 +959,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) + preempt_enable(); + } + +-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) +-{ +- struct mm_struct *mm; +- unsigned long flags; +- +- clear_softint(1 << irq); +- +- /* See if we need to allocate a new TLB context because +- * the version of the one we are using is now out of date. +- */ +- mm = current->active_mm; +- if (unlikely(!mm || (mm == &init_mm))) +- return; +- +- spin_lock_irqsave(&mm->context.lock, flags); +- +- if (unlikely(!CTX_VALID(mm->context))) +- get_new_mmu_context(mm); +- +- spin_unlock_irqrestore(&mm->context.lock, flags); +- +- load_secondary_context(mm); +- __flush_tlb_mm(CTX_HWBITS(mm->context), +- SECONDARY_CONTEXT); +-} +- +-void smp_new_mmu_context_version(void) +-{ +- smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); +-} +- + #ifdef CONFIG_KGDB + void kgdb_roundup_cpus(unsigned long flags) + { +diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S +index d568c8207af7..395ec1800530 100644 +--- a/arch/sparc/kernel/tsb.S ++++ b/arch/sparc/kernel/tsb.S +@@ -470,13 +470,16 @@ __tsb_context_switch: + .type copy_tsb,#function + copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size + * %o2=new_tsb_base, %o3=new_tsb_size ++ * %o4=page_size_shift + */ + sethi %uhi(TSB_PASS_BITS), %g7 + srlx %o3, 4, %o3 +- add %o0, %o1, %g1 /* end of old tsb */ ++ add %o0, %o1, %o1 /* end of old tsb */ + sllx %g7, 32, %g7 + sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ + ++ mov %o4, %g1 /* page_size_shift */ ++ + 661: prefetcha [%o0] ASI_N, #one_read + .section .tsb_phys_patch, "ax" + .word 661b +@@ -501,9 +504,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size + /* This can definitely be computed faster... */ + srlx %o0, 4, %o5 /* Build index */ + and %o5, 511, %o5 /* Mask index */ +- sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ ++ sllx %o5, %g1, %o5 /* Put into vaddr position */ + or %o4, %o5, %o4 /* Full VADDR. 
*/ +- srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ ++ srlx %o4, %g1, %o4 /* Shift down to create index */ + and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ + sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ + TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ +@@ -511,7 +514,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size + TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ + + 80: add %o0, 16, %o0 +- cmp %o0, %g1 ++ cmp %o0, %o1 + bne,pt %xcc, 90b + nop + +diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S +index c6dfdaa29e20..170ead662f2a 100644 +--- a/arch/sparc/kernel/ttable_64.S ++++ b/arch/sparc/kernel/ttable_64.S +@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40) + tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) + tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) + tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) +-tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) ++tl0_irq4: BTRAP(0x44) + #else + tl0_irq1: BTRAP(0x41) + tl0_irq2: BTRAP(0x42) +diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c +index cb5789c9f961..34824ca396f0 100644 +--- a/arch/sparc/kernel/vio.c ++++ b/arch/sparc/kernel/vio.c +@@ -284,13 +284,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, + if (!id) { + dev_set_name(&vdev->dev, "%s", bus_id_name); + vdev->dev_no = ~(u64)0; ++ vdev->id = ~(u64)0; + } else if (!cfg_handle) { + dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); + vdev->dev_no = *id; ++ vdev->id = ~(u64)0; + } else { + dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, + *cfg_handle, *id); + vdev->dev_no = *cfg_handle; ++ vdev->id = *id; + } + + vdev->dev.parent = parent; +@@ -333,27 +336,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node) + (void) vio_create_one(hp, node, &root_vdev->dev); + } + ++struct vio_md_node_query { ++ const char *type; ++ u64 dev_no; ++ u64 id; ++}; ++ + static int vio_md_node_match(struct device *dev, void *arg) + { ++ struct vio_md_node_query *query = (struct vio_md_node_query *) arg; + struct vio_dev *vdev = to_vio_dev(dev); + +- if (vdev->mp == (u64) arg) +- return 1; ++ if (vdev->dev_no != query->dev_no) ++ return 0; ++ if (vdev->id != query->id) ++ return 0; ++ if (strcmp(vdev->type, query->type)) ++ return 0; + +- return 0; ++ return 1; + } + + static void vio_remove(struct mdesc_handle *hp, u64 node) + { ++ const char *type; ++ const u64 *id, *cfg_handle; ++ u64 a; ++ struct vio_md_node_query query; + struct device *dev; + +- dev = device_find_child(&root_vdev->dev, (void *) node, ++ type = mdesc_get_property(hp, node, "device-type", NULL); ++ if (!type) { ++ type = mdesc_get_property(hp, node, "name", NULL); ++ if (!type) ++ type = mdesc_node_name(hp, node); ++ } ++ ++ query.type = type; ++ ++ id = mdesc_get_property(hp, node, "id", NULL); ++ cfg_handle = NULL; ++ mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { ++ u64 target; ++ ++ target = mdesc_arc_target(hp, a); ++ cfg_handle = mdesc_get_property(hp, target, ++ "cfg-handle", NULL); ++ if (cfg_handle) ++ break; ++ } ++ ++ if (!id) { ++ query.dev_no = ~(u64)0; ++ query.id = ~(u64)0; ++ } else if (!cfg_handle) { ++ query.dev_no = *id; ++ query.id = ~(u64)0; ++ } else { ++ query.dev_no = *cfg_handle; ++ query.id = *id; ++ } ++ ++ dev = device_find_child(&root_vdev->dev, &query, + vio_md_node_match); + if (dev) { + printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); + + device_unregister(dev); + put_device(dev); ++ } else { ++ if (!id) ++ printk(KERN_ERR "VIO: Removed unknown %s node.\n", ++ 
type); ++ else if (!cfg_handle) ++ printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n", ++ type, *id); ++ else ++ printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n", ++ type, *cfg_handle, *id); + } + } + +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c +index 965655afdbb6..384aba109d7c 100644 +--- a/arch/sparc/mm/init_64.c ++++ b/arch/sparc/mm/init_64.c +@@ -656,10 +656,58 @@ EXPORT_SYMBOL(__flush_dcache_range); + + /* get_new_mmu_context() uses "cache + 1". */ + DEFINE_SPINLOCK(ctx_alloc_lock); +-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; ++unsigned long tlb_context_cache = CTX_FIRST_VERSION; + #define MAX_CTX_NR (1UL << CTX_NR_BITS) + #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) + DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); ++DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0}; ++ ++static void mmu_context_wrap(void) ++{ ++ unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK; ++ unsigned long new_ver, new_ctx, old_ctx; ++ struct mm_struct *mm; ++ int cpu; ++ ++ bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS); ++ ++ /* Reserve kernel context */ ++ set_bit(0, mmu_context_bmap); ++ ++ new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION; ++ if (unlikely(new_ver == 0)) ++ new_ver = CTX_FIRST_VERSION; ++ tlb_context_cache = new_ver; ++ ++ /* ++ * Make sure that any new mm that are added into per_cpu_secondary_mm, ++ * are going to go through get_new_mmu_context() path. ++ */ ++ mb(); ++ ++ /* ++ * Updated versions to current on those CPUs that had valid secondary ++ * contexts ++ */ ++ for_each_online_cpu(cpu) { ++ /* ++ * If a new mm is stored after we took this mm from the array, ++ * it will go into get_new_mmu_context() path, because we ++ * already bumped the version in tlb_context_cache. ++ */ ++ mm = per_cpu(per_cpu_secondary_mm, cpu); ++ ++ if (unlikely(!mm || mm == &init_mm)) ++ continue; ++ ++ old_ctx = mm->context.sparc64_ctx_val; ++ if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) { ++ new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver; ++ set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap); ++ mm->context.sparc64_ctx_val = new_ctx; ++ } ++ } ++} + + /* Caller does TLB context flushing on local CPU if necessary. + * The caller also ensures that CTX_VALID(mm->context) is false. +@@ -675,48 +723,30 @@ void get_new_mmu_context(struct mm_struct *mm) + { + unsigned long ctx, new_ctx; + unsigned long orig_pgsz_bits; +- int new_version; + + spin_lock(&ctx_alloc_lock); ++retry: ++ /* wrap might have happened, test again if our context became valid */ ++ if (unlikely(CTX_VALID(mm->context))) ++ goto out; + orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); + ctx = (tlb_context_cache + 1) & CTX_NR_MASK; + new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); +- new_version = 0; + if (new_ctx >= (1 << CTX_NR_BITS)) { + new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); + if (new_ctx >= ctx) { +- int i; +- new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + +- CTX_FIRST_VERSION; +- if (new_ctx == 1) +- new_ctx = CTX_FIRST_VERSION; +- +- /* Don't call memset, for 16 entries that's just +- * plain silly... 
+- */ +- mmu_context_bmap[0] = 3; +- mmu_context_bmap[1] = 0; +- mmu_context_bmap[2] = 0; +- mmu_context_bmap[3] = 0; +- for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { +- mmu_context_bmap[i + 0] = 0; +- mmu_context_bmap[i + 1] = 0; +- mmu_context_bmap[i + 2] = 0; +- mmu_context_bmap[i + 3] = 0; +- } +- new_version = 1; +- goto out; ++ mmu_context_wrap(); ++ goto retry; + } + } ++ if (mm->context.sparc64_ctx_val) ++ cpumask_clear(mm_cpumask(mm)); + mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); + new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); +-out: + tlb_context_cache = new_ctx; + mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; ++out: + spin_unlock(&ctx_alloc_lock); +- +- if (unlikely(new_version)) +- smp_new_mmu_context_version(); + } + + static int numa_enabled = 1; +diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c +index 9cdeca0fa955..266411291634 100644 +--- a/arch/sparc/mm/tsb.c ++++ b/arch/sparc/mm/tsb.c +@@ -451,7 +451,8 @@ retry_tsb_alloc: + extern void copy_tsb(unsigned long old_tsb_base, + unsigned long old_tsb_size, + unsigned long new_tsb_base, +- unsigned long new_tsb_size); ++ unsigned long new_tsb_size, ++ unsigned long page_size_shift); + unsigned long old_tsb_base = (unsigned long) old_tsb; + unsigned long new_tsb_base = (unsigned long) new_tsb; + +@@ -459,7 +460,9 @@ retry_tsb_alloc: + old_tsb_base = __pa(old_tsb_base); + new_tsb_base = __pa(new_tsb_base); + } +- copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); ++ copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size, ++ tsb_index == MM_TSB_BASE ? ++ PAGE_SHIFT : REAL_HPAGE_SHIFT); + } + + mm->context.tsb_block[tsb_index].tsb = new_tsb; +diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S +index 5d2fd6cd3189..fcf4d27a38fb 100644 +--- a/arch/sparc/mm/ultra.S ++++ b/arch/sparc/mm/ultra.S +@@ -971,11 +971,6 @@ xcall_capture: + wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint + retry + +- .globl xcall_new_mmu_context_version +-xcall_new_mmu_context_version: +- wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint +- retry +- + #ifdef CONFIG_KGDB + .globl xcall_kgdb_capture + xcall_kgdb_capture: +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c +index 47190bd399e7..cec49ecf5f31 100644 +--- a/arch/x86/kernel/kvm.c ++++ b/arch/x86/kernel/kvm.c +@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token) + */ + rcu_irq_exit(); + native_safe_halt(); +- rcu_irq_enter(); + local_irq_disable(); ++ rcu_irq_enter(); + } + } + if (!n.halted) +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index 642e9c93a097..9357b29de9bc 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -737,18 +737,20 @@ out: + static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) + { + struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; +- int j, nent = vcpu->arch.cpuid_nent; ++ struct kvm_cpuid_entry2 *ej; ++ int j = i; ++ int nent = vcpu->arch.cpuid_nent; + + e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; + /* when no next entry is found, the current entry[i] is reselected */ +- for (j = i + 1; ; j = (j + 1) % nent) { +- struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; +- if (ej->function == e->function) { +- ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; +- return j; +- } +- } +- return 0; /* silence gcc, even though control never reaches here */ ++ do { ++ j = (j + 1) % nent; ++ ej = &vcpu->arch.cpuid_entries[j]; ++ } while (ej->function != e->function); ++ ++ ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; ++ ++ return j; + } + + /* find an entry with matching 
function, matching index (if needed), and that +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index 8eb8a934b531..1049c3c9b877 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -3433,12 +3433,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) + return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); + } + +-static bool can_do_async_pf(struct kvm_vcpu *vcpu) ++bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) + { + if (unlikely(!lapic_in_kernel(vcpu) || + kvm_event_needs_reinjection(vcpu))) + return false; + ++ if (is_guest_mode(vcpu)) ++ return false; ++ + return kvm_x86_ops->interrupt_allowed(vcpu); + } + +@@ -3454,7 +3457,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, + if (!async) + return false; /* *pfn has correct page already */ + +- if (!prefault && can_do_async_pf(vcpu)) { ++ if (!prefault && kvm_can_do_async_pf(vcpu)) { + trace_kvm_try_async_get_page(gva, gfn); + if (kvm_find_async_pf_gfn(vcpu, gfn)) { + trace_kvm_async_pf_doublefault(gva, gfn); +diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h +index 55ffb7b0f95e..e60fc80f8a9c 100644 +--- a/arch/x86/kvm/mmu.h ++++ b/arch/x86/kvm/mmu.h +@@ -74,6 +74,7 @@ enum { + int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct); + void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); + void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly); ++bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); + + static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) + { +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index ae2b9cd358f2..6c82792487e9 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -8245,8 +8245,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) + if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) + return true; + else +- return !kvm_event_needs_reinjection(vcpu) && +- kvm_x86_ops->interrupt_allowed(vcpu); ++ return kvm_can_do_async_pf(vcpu); + } + + void kvm_arch_start_assignment(struct kvm *kvm) +diff --git a/crypto/gcm.c b/crypto/gcm.c +index 1238b3c5a321..0a12c09d7cb2 100644 +--- a/crypto/gcm.c ++++ b/crypto/gcm.c +@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, + + err = crypto_ablkcipher_encrypt(&data->req); + if (err == -EINPROGRESS || err == -EBUSY) { +- err = wait_for_completion_interruptible( +- &data->result.completion); +- if (!err) +- err = data->result.err; ++ wait_for_completion(&data->result.completion); ++ err = data->result.err; + } + + if (err) +diff --git a/drivers/char/mem.c b/drivers/char/mem.c +index 0975d23031ea..2898d19fadf5 100644 +--- a/drivers/char/mem.c ++++ b/drivers/char/mem.c +@@ -346,7 +346,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma) + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + + /* It's illegal to wrap around the end of the physical address space. 
*/ +- if (offset + (phys_addr_t)size < offset) ++ if (offset + (phys_addr_t)size - 1 < offset) + return -EINVAL; + + if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 38b363f4316b..ebed319657e7 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -2451,6 +2451,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) + if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && + list_empty(&cpufreq_policy_list)) { + /* if all ->init() calls failed, unregister */ ++ ret = -ENODEV; + pr_debug("%s: No CPU initialized for driver %s\n", __func__, + driver_data->name); + goto err_if_unreg; +diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c +index 57ff46284f15..c97336a2ba92 100644 +--- a/drivers/dma/ep93xx_dma.c ++++ b/drivers/dma/ep93xx_dma.c +@@ -325,6 +325,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) + | M2P_CONTROL_ENABLE; + m2p_set_control(edmac, control); + ++ edmac->buffer = 0; ++ + return 0; + } + +diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c +index b1bc945f008f..56410ea75ac5 100644 +--- a/drivers/dma/sh/usb-dmac.c ++++ b/drivers/dma/sh/usb-dmac.c +@@ -117,7 +117,7 @@ struct usb_dmac { + #define USB_DMASWR 0x0008 + #define USB_DMASWR_SWR (1 << 0) + #define USB_DMAOR 0x0060 +-#define USB_DMAOR_AE (1 << 2) ++#define USB_DMAOR_AE (1 << 1) + #define USB_DMAOR_DME (1 << 0) + + #define USB_DMASAR 0x0000 +diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +index 57a2e347f04d..0f0094b58d1f 100644 +--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c ++++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +@@ -893,6 +893,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev) + u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); + u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 
450 : 300; + ++ /* disable mclk switching if the refresh is >120Hz, even if the ++ * blanking period would allow it ++ */ ++ if (amdgpu_dpm_get_vrefresh(adev) > 120) ++ return true; ++ + if (vblank_time < switch_limit) + return true; + else +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c +index b88ce514eb8e..24d45fc7716c 100644 +--- a/drivers/gpu/drm/msm/msm_drv.c ++++ b/drivers/gpu/drm/msm/msm_drv.c +@@ -986,6 +986,7 @@ static struct drm_driver msm_driver = { + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, ++ .gem_prime_res_obj = msm_gem_prime_res_obj, + .gem_prime_pin = msm_gem_prime_pin, + .gem_prime_unpin = msm_gem_prime_unpin, + .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, +diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h +index 3be7a56b14f1..026e156e519c 100644 +--- a/drivers/gpu/drm/msm/msm_drv.h ++++ b/drivers/gpu/drm/msm/msm_drv.h +@@ -212,6 +212,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); + void *msm_gem_prime_vmap(struct drm_gem_object *obj); + void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); + int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); ++struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj); + struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sg); + int msm_gem_prime_pin(struct drm_gem_object *obj); +diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c +index 121975b07cd4..1fbddc5c7281 100644 +--- a/drivers/gpu/drm/msm/msm_gem_prime.c ++++ b/drivers/gpu/drm/msm/msm_gem_prime.c +@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj) + if (!obj->import_attach) + msm_gem_put_pages(obj); + } ++ ++struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) ++{ ++ struct msm_gem_object *msm_obj = to_msm_bo(obj); ++ ++ return msm_obj->resv; ++} +diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h +index 82d3e28918fd..7e4f24ae7de8 100644 +--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h ++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h +@@ -4,6 +4,7 @@ + + struct nvkm_alarm { + struct list_head head; ++ struct list_head exec; + u64 timestamp; + void (*func)(struct nvkm_alarm *); + }; +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c +index 79fcdb43e174..46033909d950 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c +@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) + /* Move to completed list. We'll drop the lock before + * executing the callback so it can reschedule itself. + */ +- list_move_tail(&alarm->head, &exec); ++ list_del_init(&alarm->head); ++ list_add(&alarm->exec, &exec); + } + + /* Shut down interrupt if no more pending alarms. */ +@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) + spin_unlock_irqrestore(&tmr->lock, flags); + + /* Execute completed callbacks. 
*/ +- list_for_each_entry_safe(alarm, atemp, &exec, head) { +- list_del_init(&alarm->head); ++ list_for_each_entry_safe(alarm, atemp, &exec, exec) { ++ list_del(&alarm->exec); + alarm->func(alarm); + } + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +index b6a0806b06bf..a1c68e6a689e 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, + return fifo_state->static_buffer; + else { + fifo_state->dynamic_buffer = vmalloc(bytes); ++ if (!fifo_state->dynamic_buffer) ++ goto out_err; + return fifo_state->dynamic_buffer; + } + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +index c9c04ccccdd9..027987023400 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +@@ -1288,11 +1288,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret; + uint32_t size; +- uint32_t backup_handle; ++ uint32_t backup_handle = 0; + + if (req->multisample_count != 0) + return -EINVAL; + ++ if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS) ++ return -EINVAL; ++ + if (unlikely(vmw_user_surface_size == 0)) + vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + + 128; +@@ -1328,12 +1331,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, + ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, + &res->backup, + &user_srf->backup_base); +- if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < +- res->backup_size) { +- DRM_ERROR("Surface backup buffer is too small.\n"); +- vmw_dmabuf_unreference(&res->backup); +- ret = -EINVAL; +- goto out_unlock; ++ if (ret == 0) { ++ if (res->backup->base.num_pages * PAGE_SIZE < ++ res->backup_size) { ++ DRM_ERROR("Surface backup buffer is too small.\n"); ++ vmw_dmabuf_unreference(&res->backup); ++ ret = -EINVAL; ++ goto out_unlock; ++ } else { ++ backup_handle = req->buffer_handle; ++ } + } + } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) + ret = vmw_user_dmabuf_alloc(dev_priv, tfile, +diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c +index 6bf89d8f3741..b9d1e5c58ec5 100644 +--- a/drivers/iio/light/ltr501.c ++++ b/drivers/iio/light/ltr501.c +@@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000}; + static const struct reg_field reg_field_it = + REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4); + static const struct reg_field reg_field_als_intr = +- REG_FIELD(LTR501_INTR, 0, 0); +-static const struct reg_field reg_field_ps_intr = + REG_FIELD(LTR501_INTR, 1, 1); ++static const struct reg_field reg_field_ps_intr = ++ REG_FIELD(LTR501_INTR, 0, 0); + static const struct reg_field reg_field_als_rate = + REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2); + static const struct reg_field reg_field_ps_rate = +diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c +index bf0bd7e03aff..9e6d1cdb7fcd 100644 +--- a/drivers/iio/proximity/as3935.c ++++ b/drivers/iio/proximity/as3935.c +@@ -40,9 +40,9 @@ + #define AS3935_AFE_PWR_BIT BIT(0) + + #define AS3935_INT 0x03 +-#define AS3935_INT_MASK 0x07 ++#define AS3935_INT_MASK 0x0f + #define AS3935_EVENT_INT BIT(3) +-#define AS3935_NOISE_INT BIT(1) ++#define AS3935_NOISE_INT BIT(0) + + #define AS3935_DATA 0x07 + #define AS3935_DATA_MASK 0x3F +diff --git a/drivers/infiniband/hw/qib/qib_rc.c 
b/drivers/infiniband/hw/qib/qib_rc.c +index e6b7556d5221..cbc4216091c9 100644 +--- a/drivers/infiniband/hw/qib/qib_rc.c ++++ b/drivers/infiniband/hw/qib/qib_rc.c +@@ -2088,8 +2088,10 @@ send_last: + ret = qib_get_rwqe(qp, 1); + if (ret < 0) + goto nack_op_err; +- if (!ret) ++ if (!ret) { ++ qib_put_ss(&qp->r_sge); + goto rnr_nak; ++ } + wc.ex.imm_data = ohdr->u.rc.imm_data; + hdrsize += 4; + wc.wc_flags = IB_WC_WITH_IMM; +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c +index 1a2b2620421e..6f4dc0fd2ca3 100644 +--- a/drivers/input/mouse/elantech.c ++++ b/drivers/input/mouse/elantech.c +@@ -1122,8 +1122,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, + * Asus UX32VD 0x361f02 00, 15, 0e clickpad + * Avatar AVIU-145A2 0x361f00 ? clickpad + * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons ++ * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons + * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons + * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons ++ * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons + * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons + * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) + * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons +@@ -1529,6 +1531,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { + }, + }, + { ++ /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"), ++ }, ++ }, ++ { + /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), +@@ -1550,6 +1559,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { + }, + }, + { ++ /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"), ++ }, ++ }, ++ { + /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), +diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c +index 783337d22f36..10a02934bfc0 100644 +--- a/drivers/misc/cxl/file.c ++++ b/drivers/misc/cxl/file.c +@@ -158,11 +158,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, + + /* Do this outside the status_mutex to avoid a circular dependency with + * the locking in cxl_mmap_fault() */ +- if (copy_from_user(&work, uwork, +- sizeof(struct cxl_ioctl_start_work))) { +- rc = -EFAULT; +- goto out; +- } ++ if (copy_from_user(&work, uwork, sizeof(work))) ++ return -EFAULT; + + mutex_lock(&ctx->status_mutex); + if (ctx->status != OPENED) { +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +index c82ab87fcbe8..e5911ccb2148 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +@@ -1949,7 +1949,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + } + + /* select a non-FCoE queue */ +- return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); ++ return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); + } + + void bnx2x_set_num_queues(struct bnx2x *bp) +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +index 0d147610a06f..090e00650601 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +@@ -2714,10 
+2714,14 @@ static int cxgb_up(struct adapter *adap) + if (err) + goto irq_err; + } ++ ++ mutex_lock(&uld_mutex); + enable_rx(adap); + t4_sge_start(adap); + t4_intr_enable(adap); + adap->flags |= FULL_INIT_DONE; ++ mutex_unlock(&uld_mutex); ++ + notify_ulds(adap, CXGB4_STATE_UP); + #if IS_ENABLED(CONFIG_IPV6) + update_clip(adap); +diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c +index ff665493ca97..52f2230062e7 100644 +--- a/drivers/net/ethernet/ethoc.c ++++ b/drivers/net/ethernet/ethoc.c +@@ -713,6 +713,8 @@ static int ethoc_open(struct net_device *dev) + if (ret) + return ret; + ++ napi_enable(&priv->napi); ++ + ethoc_init_ring(priv, dev->mem_start); + ethoc_reset(priv); + +@@ -725,7 +727,6 @@ static int ethoc_open(struct net_device *dev) + } + + phy_start(priv->phy); +- napi_enable(&priv->napi); + + if (netif_msg_ifup(priv)) { + dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 590750ab6564..9a986ccd42e5 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -77,6 +77,8 @@ static const u8 all_zeros_mac[ETH_ALEN]; + + static int vxlan_sock_add(struct vxlan_dev *vxlan); + ++static void vxlan_vs_del_dev(struct vxlan_dev *vxlan); ++ + /* per-network namespace private data for this module */ + struct vxlan_net { + struct list_head vxlan_list; +@@ -1052,6 +1054,8 @@ static void __vxlan_sock_release(struct vxlan_sock *vs) + + static void vxlan_sock_release(struct vxlan_dev *vxlan) + { ++ vxlan_vs_del_dev(vxlan); ++ + __vxlan_sock_release(vxlan->vn4_sock); + #if IS_ENABLED(CONFIG_IPV6) + __vxlan_sock_release(vxlan->vn6_sock); +@@ -2255,6 +2259,15 @@ static void vxlan_cleanup(unsigned long arg) + mod_timer(&vxlan->age_timer, next_timer); + } + ++static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) ++{ ++ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); ++ ++ spin_lock(&vn->sock_lock); ++ hlist_del_init_rcu(&vxlan->hlist); ++ spin_unlock(&vn->sock_lock); ++} ++ + static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) + { + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); +@@ -3028,12 +3041,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, + static void vxlan_dellink(struct net_device *dev, struct list_head *head) + { + struct vxlan_dev *vxlan = netdev_priv(dev); +- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); +- +- spin_lock(&vn->sock_lock); +- if (!hlist_unhashed(&vxlan->hlist)) +- hlist_del_rcu(&vxlan->hlist); +- spin_unlock(&vn->sock_lock); + + gro_cells_destroy(&vxlan->gro_cells); + list_del(&vxlan->next); +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index 1f445f357da1..888e9cfef51a 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) + queue->rx_skbs[id] = skb; + + ref = gnttab_claim_grant_reference(&queue->gref_rx_head); +- BUG_ON((signed short)ref < 0); ++ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); + queue->grant_rx_ref[id] = ref; + + page = skb_frag_page(&skb_shinfo(skb)->frags[0]); +@@ -437,7 +437,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, + id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); + tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); + ref = gnttab_claim_grant_reference(&queue->gref_tx_head); +- BUG_ON((signed short)ref < 0); ++ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); + 
+ gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, + gfn, GNTMAP_readonly); +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c +index 3588a56aabb4..5cbf20ab94aa 100644 +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -2311,10 +2311,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) + + if (mem_only) { + if (pci_enable_device_mem(pdev)) +- goto probe_out; ++ return ret; + } else { + if (pci_enable_device(pdev)) +- goto probe_out; ++ return ret; + } + + /* This may fail but that's ok */ +@@ -2324,7 +2324,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) + if (!ha) { + ql_log_pci(ql_log_fatal, pdev, 0x0009, + "Unable to allocate memory for ha.\n"); +- goto probe_out; ++ goto disable_device; + } + ql_dbg_pci(ql_dbg_init, pdev, 0x000a, + "Memory allocated for ha=%p.\n", ha); +@@ -2923,7 +2923,7 @@ iospace_config_failed: + kfree(ha); + ha = NULL; + +-probe_out: ++disable_device: + pci_disable_device(pdev); + return ret; + } +diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c +index 2fb1e974cc70..e11b1001d1f6 100644 +--- a/drivers/staging/lustre/lustre/lov/lov_pack.c ++++ b/drivers/staging/lustre/lustre/lov/lov_pack.c +@@ -399,18 +399,10 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, + struct lov_mds_md *lmmk = NULL; + int rc, lmm_size; + int lum_size; +- mm_segment_t seg; + + if (!lsm) + return -ENODATA; + +- /* +- * "Switch to kernel segment" to allow copying from kernel space by +- * copy_{to,from}_user(). +- */ +- seg = get_fs(); +- set_fs(KERNEL_DS); +- + /* we only need the header part from user space to get lmm_magic and + * lmm_stripe_count, (the header part is common to v1 and v3) */ + lum_size = sizeof(struct lov_user_md_v1); +@@ -485,6 +477,5 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, + + obd_free_diskmd(exp, &lmmk); + out_set: +- set_fs(seg); + return rc; + } +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index af301414a9f3..60743bf27f37 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -1154,15 +1154,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) + if (cmd->unknown_data_length) { + cmd->data_length = size; + } else if (size != cmd->data_length) { +- pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" ++ pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" + " %u does not match SCSI CDB Length: %u for SAM Opcode:" + " 0x%02x\n", cmd->se_tfo->get_fabric_name(), + cmd->data_length, size, cmd->t_task_cdb[0]); + +- if (cmd->data_direction == DMA_TO_DEVICE && +- cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { +- pr_err("Rejecting underflow/overflow WRITE data\n"); +- return TCM_INVALID_CDB_FIELD; ++ if (cmd->data_direction == DMA_TO_DEVICE) { ++ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { ++ pr_err_ratelimited("Rejecting underflow/overflow" ++ " for WRITE data CDB\n"); ++ return TCM_INVALID_CDB_FIELD; ++ } ++ /* ++ * Some fabric drivers like iscsi-target still expect to ++ * always reject overflow writes. Reject this case until ++ * full fabric driver level support for overflow writes ++ * is introduced tree-wide. 
++ */ ++ if (size > cmd->data_length) { ++ pr_err_ratelimited("Rejecting overflow for" ++ " WRITE control CDB\n"); ++ return TCM_INVALID_CDB_FIELD; ++ } + } + /* + * Reject READ_* or WRITE_* with overflow/underflow for +diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c +index 88246f7e435a..0f23dda60011 100644 +--- a/drivers/tty/serial/ifx6x60.c ++++ b/drivers/tty/serial/ifx6x60.c +@@ -1378,9 +1378,9 @@ static struct spi_driver ifx_spi_driver = { + static void __exit ifx_spi_exit(void) + { + /* unregister */ ++ spi_unregister_driver(&ifx_spi_driver); + tty_unregister_driver(tty_drv); + put_tty_driver(tty_drv); +- spi_unregister_driver(&ifx_spi_driver); + unregister_reboot_notifier(&ifx_modem_reboot_notifier_block); + } + +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c +index 63a06ab6ba03..235e150d7b81 100644 +--- a/drivers/tty/serial/sh-sci.c ++++ b/drivers/tty/serial/sh-sci.c +@@ -1800,11 +1800,13 @@ static int sci_startup(struct uart_port *port) + + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); + ++ sci_request_dma(port); ++ + ret = sci_request_irq(s); +- if (unlikely(ret < 0)) ++ if (unlikely(ret < 0)) { ++ sci_free_dma(port); + return ret; +- +- sci_request_dma(port); ++ } + + spin_lock_irqsave(&port->lock, flags); + sci_start_tx(port); +@@ -1834,8 +1836,8 @@ static void sci_shutdown(struct uart_port *port) + } + #endif + +- sci_free_dma(port); + sci_free_irq(s); ++ sci_free_dma(port); + } + + static unsigned int sci_scbrr_calc(struct sci_port *s, unsigned int bps, +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c +index 7cef54334b12..1bb629ab8ecc 100644 +--- a/drivers/tty/tty_io.c ++++ b/drivers/tty/tty_io.c +@@ -2070,13 +2070,12 @@ retry_open: + if (tty) { + mutex_unlock(&tty_mutex); + retval = tty_lock_interruptible(tty); ++ tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */ + if (retval) { + if (retval == -EINTR) + retval = -ERESTARTSYS; + goto err_unref; + } +- /* safe to drop the kref from tty_driver_lookup_tty() */ +- tty_kref_put(tty); + retval = tty_reopen(tty); + if (retval < 0) { + tty_unlock(tty); +diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c +index d09293bc0e04..cff304abb619 100644 +--- a/drivers/tty/tty_mutex.c ++++ b/drivers/tty/tty_mutex.c +@@ -24,10 +24,15 @@ EXPORT_SYMBOL(tty_lock); + + int tty_lock_interruptible(struct tty_struct *tty) + { ++ int ret; ++ + if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty)) + return -EIO; + tty_kref_get(tty); +- return mutex_lock_interruptible(&tty->legacy_mutex); ++ ret = mutex_lock_interruptible(&tty->legacy_mutex); ++ if (ret) ++ tty_kref_put(tty); ++ return ret; + } + + void __lockfunc tty_unlock(struct tty_struct *tty) +diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c +index 58c8485a0715..923379972707 100644 +--- a/drivers/usb/chipidea/debug.c ++++ b/drivers/usb/chipidea/debug.c +@@ -295,7 +295,8 @@ static int ci_role_show(struct seq_file *s, void *data) + { + struct ci_hdrc *ci = s->private; + +- seq_printf(s, "%s\n", ci_role(ci)->name); ++ if (ci->role != CI_ROLE_END) ++ seq_printf(s, "%s\n", ci_role(ci)->name); + + return 0; + } +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c +index d8a045fc1fdb..aff086ca97e4 100644 +--- a/drivers/usb/chipidea/udc.c ++++ b/drivers/usb/chipidea/udc.c +@@ -1982,6 +1982,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci) + int ci_hdrc_gadget_init(struct ci_hdrc *ci) + { + struct ci_role_driver *rdrv; ++ int ret; + + if (!hw_read(ci, CAP_DCCPARAMS, 
DCCPARAMS_DC)) + return -ENXIO; +@@ -1994,7 +1995,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci) + rdrv->stop = udc_id_switch_for_host; + rdrv->irq = udc_irq; + rdrv->name = "gadget"; +- ci->roles[CI_ROLE_GADGET] = rdrv; + +- return udc_start(ci); ++ ret = udc_start(ci); ++ if (!ret) ++ ci->roles[CI_ROLE_GADGET] = rdrv; ++ ++ return ret; + } +diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c +index a4f664062e0c..a069726da72a 100644 +--- a/drivers/usb/gadget/function/f_mass_storage.c ++++ b/drivers/usb/gadget/function/f_mass_storage.c +@@ -399,7 +399,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) + /* Caller must hold fsg->lock */ + static void wakeup_thread(struct fsg_common *common) + { +- smp_wmb(); /* ensure the write of bh->state is complete */ ++ /* ++ * Ensure the reading of thread_wakeup_needed ++ * and the writing of bh->state are completed ++ */ ++ smp_mb(); + /* Tell the main thread that something has happened */ + common->thread_wakeup_needed = 1; + if (common->thread_task) +@@ -630,7 +634,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze) + } + __set_current_state(TASK_RUNNING); + common->thread_wakeup_needed = 0; +- smp_rmb(); /* ensure the latest bh->state is visible */ ++ ++ /* ++ * Ensure the writing of thread_wakeup_needed ++ * and the reading of bh->state are completed ++ */ ++ smp_mb(); + return rc; + } + +diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c +index df2e6f783318..527de56f832f 100644 +--- a/drivers/xen/privcmd.c ++++ b/drivers/xen/privcmd.c +@@ -335,8 +335,8 @@ static int mmap_batch_fn(void *data, int nr, void *state) + st->global_error = 1; + } + } +- st->va += PAGE_SIZE * nr; +- st->index += nr; ++ st->va += XEN_PAGE_SIZE * nr; ++ st->index += nr / XEN_PFN_PER_PAGE; + + return 0; + } +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 2a2e370399ba..c36a03fa7678 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -3854,6 +3854,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, + info->space_info_kobj, "%s", + alloc_name(found->flags)); + if (ret) { ++ percpu_counter_destroy(&found->total_bytes_pinned); + kfree(found); + return ret; + } +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 353f4bae658c..d4a6eef31854 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -2771,7 +2771,7 @@ static long btrfs_fallocate(struct file *file, int mode, + if (!ret) + ret = btrfs_prealloc_file_range(inode, mode, + range->start, +- range->len, 1 << inode->i_blkbits, ++ range->len, i_blocksize(inode), + offset + len, &alloc_hint); + list_del(&range->list); + kfree(range); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 3cff6523f27d..863fa0f1972b 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -7318,8 +7318,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) + int found = false; + void **pagep = NULL; + struct page *page = NULL; +- int start_idx; +- int end_idx; ++ unsigned long start_idx; ++ unsigned long end_idx; + + start_idx = start >> PAGE_CACHE_SHIFT; + +diff --git a/fs/buffer.c b/fs/buffer.c +index 4f4cd959da7c..6f7d519a093b 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -2298,7 +2298,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, + loff_t pos, loff_t *bytes) + { + struct inode *inode = mapping->host; +- unsigned blocksize = 1 << inode->i_blkbits; ++ unsigned int blocksize = i_blocksize(inode); + struct 
page *page; + void *fsdata; + pgoff_t index, curidx; +@@ -2378,8 +2378,8 @@ int cont_write_begin(struct file *file, struct address_space *mapping, + get_block_t *get_block, loff_t *bytes) + { + struct inode *inode = mapping->host; +- unsigned blocksize = 1 << inode->i_blkbits; +- unsigned zerofrom; ++ unsigned int blocksize = i_blocksize(inode); ++ unsigned int zerofrom; + int err; + + err = cont_expand_zero(file, mapping, pos, bytes); +@@ -2741,7 +2741,7 @@ int nobh_truncate_page(struct address_space *mapping, + struct buffer_head map_bh; + int err; + +- blocksize = 1 << inode->i_blkbits; ++ blocksize = i_blocksize(inode); + length = offset & (blocksize - 1); + + /* Block boundary? Nothing to do */ +@@ -2819,7 +2819,7 @@ int block_truncate_page(struct address_space *mapping, + struct buffer_head *bh; + int err; + +- blocksize = 1 << inode->i_blkbits; ++ blocksize = i_blocksize(inode); + length = offset & (blocksize - 1); + + /* Block boundary? Nothing to do */ +@@ -2931,7 +2931,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block, + struct inode *inode = mapping->host; + tmp.b_state = 0; + tmp.b_blocknr = 0; +- tmp.b_size = 1 << inode->i_blkbits; ++ tmp.b_size = i_blocksize(inode); + get_block(inode, block, &tmp, 0); + return tmp.b_blocknr; + } +diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c +index b7d218a168fb..c6a1ec110c01 100644 +--- a/fs/ceph/addr.c ++++ b/fs/ceph/addr.c +@@ -697,7 +697,7 @@ static int ceph_writepages_start(struct address_space *mapping, + struct pagevec pvec; + int done = 0; + int rc = 0; +- unsigned wsize = 1 << inode->i_blkbits; ++ unsigned int wsize = i_blocksize(inode); + struct ceph_osd_request *req = NULL; + int do_sync = 0; + loff_t snap_size, i_size; +diff --git a/fs/direct-io.c b/fs/direct-io.c +index 01171d8a6ee9..c772fdf36cd9 100644 +--- a/fs/direct-io.c ++++ b/fs/direct-io.c +@@ -575,7 +575,7 @@ static int dio_set_defer_completion(struct dio *dio) + /* + * Call into the fs to map some more disk blocks. We record the current number + * of available blocks at sdio->blocks_available. These are in units of the +- * fs blocksize, (1 << inode->i_blkbits). ++ * fs blocksize, i_blocksize(inode). + * + * The fs is allowed to map lots of blocks at once. If it wants to do that, + * it uses the passed inode-relative block number as the file offset, as usual. 
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 8a456f9b8a44..61d5bfc7318c 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -4902,6 +4902,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, + + /* Zero out partial block at the edges of the range */ + ret = ext4_zero_partial_blocks(handle, inode, offset, len); ++ if (ret >= 0) ++ ext4_update_inode_fsync_trans(handle, inode, 1); + + if (file->f_flags & O_SYNC) + ext4_handle_sync(handle); +@@ -5597,6 +5599,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) + ext4_handle_sync(handle); + inode->i_mtime = inode->i_ctime = ext4_current_time(inode); + ext4_mark_inode_dirty(handle, inode); ++ ext4_update_inode_fsync_trans(handle, inode, 1); + + out_stop: + ext4_journal_stop(handle); +@@ -5770,6 +5773,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) + up_write(&EXT4_I(inode)->i_data_sem); + if (IS_SYNC(inode)) + ext4_handle_sync(handle); ++ if (ret >= 0) ++ ext4_update_inode_fsync_trans(handle, inode, 1); + + out_stop: + ext4_journal_stop(handle); +diff --git a/fs/ext4/file.c b/fs/ext4/file.c +index 0d24ebcd7c9e..8772bfc3415b 100644 +--- a/fs/ext4/file.c ++++ b/fs/ext4/file.c +@@ -463,47 +463,27 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, + num = min_t(pgoff_t, end - index, PAGEVEC_SIZE); + nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, + (pgoff_t)num); +- if (nr_pages == 0) { +- if (whence == SEEK_DATA) +- break; +- +- BUG_ON(whence != SEEK_HOLE); +- /* +- * If this is the first time to go into the loop and +- * offset is not beyond the end offset, it will be a +- * hole at this offset +- */ +- if (lastoff == startoff || lastoff < endoff) +- found = 1; ++ if (nr_pages == 0) + break; +- } +- +- /* +- * If this is the first time to go into the loop and +- * offset is smaller than the first page offset, it will be a +- * hole at this offset. +- */ +- if (lastoff == startoff && whence == SEEK_HOLE && +- lastoff < page_offset(pvec.pages[0])) { +- found = 1; +- break; +- } + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + struct buffer_head *bh, *head; + + /* +- * If the current offset is not beyond the end of given +- * range, it will be a hole. ++ * If current offset is smaller than the page offset, ++ * there is a hole at this offset. + */ +- if (lastoff < endoff && whence == SEEK_HOLE && +- page->index > end) { ++ if (whence == SEEK_HOLE && lastoff < endoff && ++ lastoff < page_offset(pvec.pages[i])) { + found = 1; + *offset = lastoff; + goto out; + } + ++ if (page->index > end) ++ goto out; ++ + lock_page(page); + + if (unlikely(page->mapping != inode->i_mapping)) { +@@ -543,20 +523,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, + unlock_page(page); + } + +- /* +- * The no. of pages is less than our desired, that would be a +- * hole in there. +- */ +- if (nr_pages < num && whence == SEEK_HOLE) { +- found = 1; +- *offset = lastoff; ++ /* The no. of pages is less than our desired, we are done. 
*/ ++ if (nr_pages < num) + break; +- } + + index = pvec.pages[i - 1]->index + 1; + pagevec_release(&pvec); + } while (index <= end); + ++ if (whence == SEEK_HOLE && lastoff < endoff) { ++ found = 1; ++ *offset = lastoff; ++ } + out: + pagevec_release(&pvec); + return found; +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index ccae64dad40c..1796d1bd9a1d 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -2044,7 +2044,7 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd, + { + struct inode *inode = mpd->inode; + int err; +- ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) ++ ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1) + >> inode->i_blkbits; + + do { +@@ -3793,6 +3793,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) + + inode->i_mtime = inode->i_ctime = ext4_current_time(inode); + ext4_mark_inode_dirty(handle, inode); ++ if (ret >= 0) ++ ext4_update_inode_fsync_trans(handle, inode, 1); + out_stop: + ext4_journal_stop(handle); + out_dio: +@@ -5162,8 +5164,9 @@ static int ext4_expand_extra_isize(struct inode *inode, + /* No extended attributes present */ + if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || + header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { +- memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, +- new_extra_isize); ++ memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + ++ EXT4_I(inode)->i_extra_isize, 0, ++ new_extra_isize - EXT4_I(inode)->i_extra_isize); + EXT4_I(inode)->i_extra_isize = new_extra_isize; + return 0; + } +diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c +index 7861d801b048..05048fcfd602 100644 +--- a/fs/ext4/move_extent.c ++++ b/fs/ext4/move_extent.c +@@ -187,7 +187,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to) + if (PageUptodate(page)) + return 0; + +- blocksize = 1 << inode->i_blkbits; ++ blocksize = i_blocksize(inode); + if (!page_has_buffers(page)) + create_empty_buffers(page, blocksize, 0); + +diff --git a/fs/jfs/super.c b/fs/jfs/super.c +index 8f9176caf098..c8d58c5ac8ae 100644 +--- a/fs/jfs/super.c ++++ b/fs/jfs/super.c +@@ -758,7 +758,7 @@ static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, + sb->s_blocksize - offset : toread; + + tmp_bh.b_state = 0; +- tmp_bh.b_size = 1 << inode->i_blkbits; ++ tmp_bh.b_size = i_blocksize(inode); + err = jfs_get_block(inode, blk, &tmp_bh, 0); + if (err) + return err; +@@ -798,7 +798,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type, + sb->s_blocksize - offset : towrite; + + tmp_bh.b_state = 0; +- tmp_bh.b_size = 1 << inode->i_blkbits; ++ tmp_bh.b_size = i_blocksize(inode); + err = jfs_get_block(inode, blk, &tmp_bh, 1); + if (err) + goto out; +diff --git a/fs/mpage.c b/fs/mpage.c +index 1480d3a18037..6ade29b19494 100644 +--- a/fs/mpage.c ++++ b/fs/mpage.c +@@ -111,7 +111,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) + SetPageUptodate(page); + return; + } +- create_empty_buffers(page, 1 << inode->i_blkbits, 0); ++ create_empty_buffers(page, i_blocksize(inode), 0); + } + head = page_buffers(page); + page_bh = head; +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index 52ee0b73ab4a..5b21b1ca2341 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -2421,6 +2421,20 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags) + } + EXPORT_SYMBOL_GPL(nfs_may_open); + ++static int nfs_execute_ok(struct inode *inode, int mask) ++{ ++ struct nfs_server *server = NFS_SERVER(inode); ++ int ret; ++ ++ if 
(mask & MAY_NOT_BLOCK) ++ ret = nfs_revalidate_inode_rcu(server, inode); ++ else ++ ret = nfs_revalidate_inode(server, inode); ++ if (ret == 0 && !execute_ok(inode)) ++ ret = -EACCES; ++ return ret; ++} ++ + int nfs_permission(struct inode *inode, int mask) + { + struct rpc_cred *cred; +@@ -2438,6 +2452,9 @@ int nfs_permission(struct inode *inode, int mask) + case S_IFLNK: + goto out; + case S_IFREG: ++ if ((mask & MAY_OPEN) && ++ nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)) ++ return 0; + break; + case S_IFDIR: + /* +@@ -2470,8 +2487,8 @@ force_lookup: + res = PTR_ERR(cred); + } + out: +- if (!res && (mask & MAY_EXEC) && !execute_ok(inode)) +- res = -EACCES; ++ if (!res && (mask & MAY_EXEC)) ++ res = nfs_execute_ok(inode, mask); + + dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n", + inode->i_sb->s_id, inode->i_ino, mask, res); +diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c +index c29d9421bd5e..0976f8dad4ce 100644 +--- a/fs/nfsd/blocklayout.c ++++ b/fs/nfsd/blocklayout.c +@@ -50,7 +50,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, + { + struct nfsd4_layout_seg *seg = &args->lg_seg; + struct super_block *sb = inode->i_sb; +- u32 block_size = (1 << inode->i_blkbits); ++ u32 block_size = i_blocksize(inode); + struct pnfs_block_extent *bex; + struct iomap iomap; + u32 device_generation = 0; +@@ -151,7 +151,7 @@ nfsd4_block_proc_layoutcommit(struct inode *inode, + int error; + + nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout, +- lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits); ++ lcp->lc_up_len, &iomaps, i_blocksize(inode)); + if (nr_iomaps < 0) + return nfserrno(nr_iomaps); + +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c +index 7d5351cd67fb..209dbfc50cd4 100644 +--- a/fs/nfsd/nfs4proc.c ++++ b/fs/nfsd/nfs4proc.c +@@ -1690,6 +1690,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, + opdesc->op_get_currentstateid(cstate, &op->u); + op->status = opdesc->op_func(rqstp, cstate, &op->u); + ++ /* Only from SEQUENCE */ ++ if (cstate->status == nfserr_replay_cache) { ++ dprintk("%s NFS4.1 replay from cache\n", __func__); ++ status = op->status; ++ goto out; ++ } + if (!op->status) { + if (opdesc->op_set_currentstateid) + opdesc->op_set_currentstateid(cstate, &op->u); +@@ -1700,14 +1706,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, + if (need_wrongsec_check(rqstp)) + op->status = check_nfsd_access(current_fh->fh_export, rqstp); + } +- + encode_op: +- /* Only from SEQUENCE */ +- if (cstate->status == nfserr_replay_cache) { +- dprintk("%s NFS4.1 replay from cache\n", __func__); +- status = op->status; +- goto out; +- } + if (op->status == nfserr_replay_me) { + op->replay = &cstate->replay_owner->so_replay; + nfsd4_encode_replay(&resp->xdr, op); +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index c3e1cb481fe0..3f68a25f2169 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -2753,9 +2753,16 @@ out_acl: + } + #endif /* CONFIG_NFSD_PNFS */ + if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) { +- status = nfsd4_encode_bitmap(xdr, NFSD_SUPPATTR_EXCLCREAT_WORD0, +- NFSD_SUPPATTR_EXCLCREAT_WORD1, +- NFSD_SUPPATTR_EXCLCREAT_WORD2); ++ u32 supp[3]; ++ ++ supp[0] = nfsd_suppattrs0(minorversion); ++ supp[1] = nfsd_suppattrs1(minorversion); ++ supp[2] = nfsd_suppattrs2(minorversion); ++ supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0; ++ supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1; ++ supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2; ++ ++ status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]); + if (status) + goto out; + } +diff 
--git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c +index a35ae35e6932..cd39b57288c2 100644 +--- a/fs/nilfs2/btnode.c ++++ b/fs/nilfs2/btnode.c +@@ -55,7 +55,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) + brelse(bh); + BUG(); + } +- memset(bh->b_data, 0, 1 << inode->i_blkbits); ++ memset(bh->b_data, 0, i_blocksize(inode)); + bh->b_bdev = inode->i_sb->s_bdev; + bh->b_blocknr = blocknr; + set_buffer_mapped(bh); +diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c +index ac2f64943ff4..00877ef0b120 100644 +--- a/fs/nilfs2/inode.c ++++ b/fs/nilfs2/inode.c +@@ -55,7 +55,7 @@ void nilfs_inode_add_blocks(struct inode *inode, int n) + { + struct nilfs_root *root = NILFS_I(inode)->i_root; + +- inode_add_bytes(inode, (1 << inode->i_blkbits) * n); ++ inode_add_bytes(inode, i_blocksize(inode) * n); + if (root) + atomic64_add(n, &root->blocks_count); + } +@@ -64,7 +64,7 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n) + { + struct nilfs_root *root = NILFS_I(inode)->i_root; + +- inode_sub_bytes(inode, (1 << inode->i_blkbits) * n); ++ inode_sub_bytes(inode, i_blocksize(inode) * n); + if (root) + atomic64_sub(n, &root->blocks_count); + } +diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c +index 1125f40233ff..612a2457243d 100644 +--- a/fs/nilfs2/mdt.c ++++ b/fs/nilfs2/mdt.c +@@ -60,7 +60,7 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block, + set_buffer_mapped(bh); + + kaddr = kmap_atomic(bh->b_page); +- memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits); ++ memset(kaddr + bh_offset(bh), 0, i_blocksize(inode)); + if (init_block) + init_block(inode, bh, kaddr); + flush_dcache_page(bh->b_page); +@@ -503,7 +503,7 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size, + struct nilfs_mdt_info *mi = NILFS_MDT(inode); + + mi->mi_entry_size = entry_size; +- mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size; ++ mi->mi_entries_per_block = i_blocksize(inode) / entry_size; + mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size); + } + +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index 3b65adaae7e4..2f27c935bd57 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -719,7 +719,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, + + lock_page(page); + if (!page_has_buffers(page)) +- create_empty_buffers(page, 1 << inode->i_blkbits, 0); ++ create_empty_buffers(page, i_blocksize(inode), 0); + unlock_page(page); + + bh = head = page_buffers(page); +diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c +index e6795c7c76a8..e4184bd2a954 100644 +--- a/fs/ocfs2/aops.c ++++ b/fs/ocfs2/aops.c +@@ -1103,7 +1103,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, + int ret = 0; + struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; + unsigned int block_end, block_start; +- unsigned int bsize = 1 << inode->i_blkbits; ++ unsigned int bsize = i_blocksize(inode); + + if (!page_has_buffers(page)) + create_empty_buffers(page, bsize, 0); +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index 56dd3957cc91..1d738723a41a 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -808,7 +808,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, + /* We know that zero_from is block aligned */ + for (block_start = zero_from; block_start < zero_to; + block_start = block_end) { +- block_end = block_start + (1 << inode->i_blkbits); ++ block_end = block_start + i_blocksize(inode); + + /* + * block_start is block-aligned. 
Bump it by one to force +diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c +index 8f5ccdf81c25..38187300a2b4 100644 +--- a/fs/reiserfs/file.c ++++ b/fs/reiserfs/file.c +@@ -189,7 +189,7 @@ int reiserfs_commit_page(struct inode *inode, struct page *page, + int ret = 0; + + th.t_trans_id = 0; +- blocksize = 1 << inode->i_blkbits; ++ blocksize = i_blocksize(inode); + + if (logit) { + reiserfs_write_lock(s); +diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c +index 3d8e7e671d5b..60ba35087d12 100644 +--- a/fs/reiserfs/inode.c ++++ b/fs/reiserfs/inode.c +@@ -524,7 +524,7 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode, + * referenced in convert_tail_for_hole() that may be called from + * reiserfs_get_block() + */ +- bh_result->b_size = (1 << inode->i_blkbits); ++ bh_result->b_size = i_blocksize(inode); + + ret = reiserfs_get_block(inode, iblock, bh_result, + create | GET_BLOCK_NO_DANGLE); +diff --git a/fs/stat.c b/fs/stat.c +index d4a61d8dc021..004dd77c3b93 100644 +--- a/fs/stat.c ++++ b/fs/stat.c +@@ -31,7 +31,7 @@ void generic_fillattr(struct inode *inode, struct kstat *stat) + stat->atime = inode->i_atime; + stat->mtime = inode->i_mtime; + stat->ctime = inode->i_ctime; +- stat->blksize = (1 << inode->i_blkbits); ++ stat->blksize = i_blocksize(inode); + stat->blocks = inode->i_blocks; + } + +@@ -454,6 +454,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes) + inode->i_bytes -= 512; + } + } ++EXPORT_SYMBOL(__inode_add_bytes); + + void inode_add_bytes(struct inode *inode, loff_t bytes) + { +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index 566df9b5a6cb..7be3166ba553 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -1206,7 +1206,7 @@ int udf_setsize(struct inode *inode, loff_t newsize) + { + int err; + struct udf_inode_info *iinfo; +- int bsize = 1 << inode->i_blkbits; ++ int bsize = i_blocksize(inode); + + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode))) +diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c +index dc5fae601c24..637e17cb0edd 100644 +--- a/fs/ufs/balloc.c ++++ b/fs/ufs/balloc.c +@@ -81,7 +81,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) + ufs_error (sb, "ufs_free_fragments", + "bit already cleared for fragment %u", i); + } +- ++ ++ inode_sub_bytes(inode, count << uspi->s_fshift); + fs32_add(sb, &ucg->cg_cs.cs_nffree, count); + uspi->cs_total.cs_nffree += count; + fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); +@@ -183,6 +184,7 @@ do_more: + ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); + } + ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); ++ inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift); + if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) + ufs_clusteracct (sb, ucpi, blkno, 1); + +@@ -494,6 +496,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, + return 0; + } + ++static bool try_add_frags(struct inode *inode, unsigned frags) ++{ ++ unsigned size = frags * i_blocksize(inode); ++ spin_lock(&inode->i_lock); ++ __inode_add_bytes(inode, size); ++ if (unlikely((u32)inode->i_blocks != inode->i_blocks)) { ++ __inode_sub_bytes(inode, size); ++ spin_unlock(&inode->i_lock); ++ return false; ++ } ++ spin_unlock(&inode->i_lock); ++ return true; ++} ++ + static u64 ufs_add_fragments(struct inode *inode, u64 fragment, + unsigned oldcount, unsigned newcount) + { +@@ -530,6 +546,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment, + for (i = oldcount; i < newcount; i++) + if (ubh_isclr (UCPI_UBH(ucpi), 
ucpi->c_freeoff, fragno + i)) + return 0; ++ ++ if (!try_add_frags(inode, count)) ++ return 0; + /* + * Block can be extended + */ +@@ -647,6 +666,7 @@ cg_found: + ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); + i = uspi->s_fpb - count; + ++ inode_sub_bytes(inode, i << uspi->s_fshift); + fs32_add(sb, &ucg->cg_cs.cs_nffree, i); + uspi->cs_total.cs_nffree += i; + fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); +@@ -657,6 +677,8 @@ cg_found: + result = ufs_bitmap_search (sb, ucpi, goal, allocsize); + if (result == INVBLOCK) + return 0; ++ if (!try_add_frags(inode, count)) ++ return 0; + for (i = 0; i < count; i++) + ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i); + +@@ -716,6 +738,8 @@ norot: + return INVBLOCK; + ucpi->c_rotor = result; + gotit: ++ if (!try_add_frags(inode, uspi->s_fpb)) ++ return 0; + blkno = ufs_fragstoblks(result); + ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); + if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) +diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c +index a064cf44b143..1f69bb9b1e9d 100644 +--- a/fs/ufs/inode.c ++++ b/fs/ufs/inode.c +@@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to, + + p = ufs_get_direct_data_ptr(uspi, ufsi, block); + tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p), +- new_size, err, locked_page); ++ new_size - (lastfrag & uspi->s_fpbmask), err, ++ locked_page); + return tmp != 0; + } + +@@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index, + goal += uspi->s_fpb; + } + tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), +- goal, uspi->s_fpb, err, locked_page); ++ goal, nfrags, err, locked_page); + + if (!tmp) { + *err = -ENOSPC; +@@ -402,7 +403,9 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff + + if (!create) { + phys64 = ufs_frag_map(inode, offsets, depth); +- goto out; ++ if (phys64) ++ map_bh(bh_result, sb, phys64 + frag); ++ return 0; + } + + /* This code entered only while writing ....? 
*/ +diff --git a/fs/ufs/super.c b/fs/ufs/super.c +index f6390eec02ca..10f364490833 100644 +--- a/fs/ufs/super.c ++++ b/fs/ufs/super.c +@@ -746,6 +746,23 @@ static void ufs_put_super(struct super_block *sb) + return; + } + ++static u64 ufs_max_bytes(struct super_block *sb) ++{ ++ struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; ++ int bits = uspi->s_apbshift; ++ u64 res; ++ ++ if (bits > 21) ++ res = ~0ULL; ++ else ++ res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) + ++ (1LL << (3*bits)); ++ ++ if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift)) ++ return MAX_LFS_FILESIZE; ++ return res << uspi->s_bshift; ++} ++ + static int ufs_fill_super(struct super_block *sb, void *data, int silent) + { + struct ufs_sb_info * sbi; +@@ -1212,6 +1229,7 @@ magic_found: + "fast symlink size (%u)\n", uspi->s_maxsymlinklen); + uspi->s_maxsymlinklen = maxsymlen; + } ++ sb->s_maxbytes = ufs_max_bytes(sb); + sb->s_max_links = UFS_LINK_MAX; + + inode = ufs_iget(sb, UFS_ROOTINO); +diff --git a/fs/ufs/util.h b/fs/ufs/util.h +index 954175928240..3f9463f8cf2f 100644 +--- a/fs/ufs/util.h ++++ b/fs/ufs/util.h +@@ -473,15 +473,19 @@ static inline unsigned _ubh_find_last_zero_bit_( + static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi, + struct ufs_buffer_head * ubh, unsigned begin, unsigned block) + { ++ u8 mask; + switch (uspi->s_fpb) { + case 8: + return (*ubh_get_addr (ubh, begin + block) == 0xff); + case 4: +- return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2))); ++ mask = 0x0f << ((block & 0x01) << 2); ++ return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask; + case 2: +- return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1))); ++ mask = 0x03 << ((block & 0x03) << 1); ++ return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask; + case 1: +- return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07))); ++ mask = 0x01 << (block & 0x07); ++ return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask; + } + return 0; + } +diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c +index 29e7e5dd5178..187b80267ff9 100644 +--- a/fs/xfs/xfs_aops.c ++++ b/fs/xfs/xfs_aops.c +@@ -288,7 +288,7 @@ xfs_map_blocks( + { + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; +- ssize_t count = 1 << inode->i_blkbits; ++ ssize_t count = i_blocksize(inode); + xfs_fileoff_t offset_fsb, end_fsb; + int error = 0; + int bmapi_flags = XFS_BMAPI_ENTIRE; +@@ -921,7 +921,7 @@ xfs_aops_discard_page( + break; + } + next_buffer: +- offset += 1 << inode->i_blkbits; ++ offset += i_blocksize(inode); + + } while ((bh = bh->b_this_page) != head); + +@@ -1363,7 +1363,7 @@ xfs_map_trim_size( + offset + mapping_size >= i_size_read(inode)) { + /* limit mapping to block that spans EOF */ + mapping_size = roundup_64(i_size_read(inode) - offset, +- 1 << inode->i_blkbits); ++ i_blocksize(inode)); + } + if (mapping_size > LONG_MAX) + mapping_size = LONG_MAX; +@@ -1395,7 +1395,7 @@ __xfs_get_blocks( + return -EIO; + + offset = (xfs_off_t)iblock << inode->i_blkbits; +- ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); ++ ASSERT(bh_result->b_size >= i_blocksize(inode)); + size = bh_result->b_size; + + if (!create && direct && offset >= i_size_read(inode)) +@@ -1968,7 +1968,7 @@ xfs_vm_set_page_dirty( + if (offset < end_offset) + set_buffer_dirty(bh); + bh = bh->b_this_page; +- offset += 1 << inode->i_blkbits; ++ offset += i_blocksize(inode); + } while (bh != head); + } + /* +diff --git a/fs/xfs/xfs_file.c 
b/fs/xfs/xfs_file.c +index ceea444dafb4..3dd47307363f 100644 +--- a/fs/xfs/xfs_file.c ++++ b/fs/xfs/xfs_file.c +@@ -947,7 +947,7 @@ xfs_file_fallocate( + if (error) + goto out_unlock; + } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { +- unsigned blksize_mask = (1 << inode->i_blkbits) - 1; ++ unsigned int blksize_mask = i_blocksize(inode) - 1; + + if (offset & blksize_mask || len & blksize_mask) { + error = -EINVAL; +@@ -969,7 +969,7 @@ xfs_file_fallocate( + if (error) + goto out_unlock; + } else if (mode & FALLOC_FL_INSERT_RANGE) { +- unsigned blksize_mask = (1 << inode->i_blkbits) - 1; ++ unsigned int blksize_mask = i_blocksize(inode) - 1; + + new_size = i_size_read(inode) + len; + if (offset & blksize_mask || len & blksize_mask) { +diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c +index e6dae28dfa1a..9beaf192b4bb 100644 +--- a/fs/xfs/xfs_xattr.c ++++ b/fs/xfs/xfs_xattr.c +@@ -180,6 +180,7 @@ xfs_xattr_put_listent( + arraytop = context->count + prefix_len + namelen + 1; + if (arraytop > context->firstu) { + context->count = -1; /* insufficient space */ ++ context->seen_enough = 1; + return 0; + } + offset = (char *)context->alist + context->count; +diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h +index ad2bcf647b9a..210ccc4ea44b 100644 +--- a/include/linux/cgroup.h ++++ b/include/linux/cgroup.h +@@ -340,6 +340,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css) + } + + /** ++ * css_is_dying - test whether the specified css is dying ++ * @css: target css ++ * ++ * Test whether @css is in the process of offlining or already offline. In ++ * most cases, ->css_online() and ->css_offline() callbacks should be ++ * enough; however, the actual offline operations are RCU delayed and this ++ * test returns %true also when @css is scheduled to be offlined. ++ * ++ * This is useful, for example, when the use case requires synchronous ++ * behavior with respect to cgroup removal. cgroup removal schedules css ++ * offlining but the css can seem alive while the operation is being ++ * delayed. If the delay affects user visible semantics, this test can be ++ * used to resolve the situation. 
++ */ ++static inline bool css_is_dying(struct cgroup_subsys_state *css) ++{ ++ return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); ++} ++ ++/** + * css_put - put a css reference + * @css: target css + * +diff --git a/include/linux/fs.h b/include/linux/fs.h +index e1a123760dbf..c8decb7075d6 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -680,6 +680,11 @@ struct inode { + void *i_private; /* fs or device private pointer */ + }; + ++static inline unsigned int i_blocksize(const struct inode *node) ++{ ++ return (1 << node->i_blkbits); ++} ++ + static inline int inode_unhashed(struct inode *inode) + { + return hlist_unhashed(&inode->i_hash); +diff --git a/include/linux/memblock.h b/include/linux/memblock.h +index 24daf8fc4d7c..76b502c6258f 100644 +--- a/include/linux/memblock.h ++++ b/include/linux/memblock.h +@@ -408,12 +408,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) + } + #endif + ++extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, ++ phys_addr_t end_addr); + #else + static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) + { + return 0; + } + ++static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, ++ phys_addr_t end_addr) ++{ ++ return 0; ++} ++ + #endif /* CONFIG_HAVE_MEMBLOCK */ + + #endif /* __KERNEL__ */ +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index e23a9e704536..5b609a3ce3d7 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -688,6 +688,7 @@ typedef struct pglist_data { + * is the first PFN that needs to be initialised. + */ + unsigned long first_deferred_pfn; ++ unsigned long static_init_size; + #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ + } pg_data_t; + +diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h +index e13bfdf7f314..81fdf4b8aba4 100644 +--- a/include/linux/ptrace.h ++++ b/include/linux/ptrace.h +@@ -50,7 +50,8 @@ extern int ptrace_request(struct task_struct *child, long request, + unsigned long addr, unsigned long data); + extern void ptrace_notify(int exit_code); + extern void __ptrace_link(struct task_struct *child, +- struct task_struct *new_parent); ++ struct task_struct *new_parent, ++ const struct cred *ptracer_cred); + extern void __ptrace_unlink(struct task_struct *child); + extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); + #define PTRACE_MODE_READ 0x01 +@@ -202,7 +203,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) + + if (unlikely(ptrace) && current->ptrace) { + child->ptrace = current->ptrace; +- __ptrace_link(child, current->parent); ++ __ptrace_link(child, current->parent, current->ptracer_cred); + + if (child->ptrace & PT_SEIZED) + task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); +@@ -211,6 +212,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) + + set_tsk_thread_flag(child, TIF_SIGPENDING); + } ++ else ++ child->ptracer_cred = NULL; + } + + /** +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index d443d9ab0236..3f61c647fc5c 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -1084,9 +1084,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) + + static inline void skb_sender_cpu_clear(struct sk_buff *skb) + { +-#ifdef CONFIG_XPS +- skb->sender_cpu = 0; +-#endif + } + + #ifdef NET_SKBUFF_DATA_USES_OFFSET +diff --git a/include/net/ipv6.h b/include/net/ipv6.h +index 9a5c9f013784..ad1d6039185d 100644 +--- 
a/include/net/ipv6.h ++++ b/include/net/ipv6.h +@@ -958,6 +958,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row, + */ + extern const struct proto_ops inet6_stream_ops; + extern const struct proto_ops inet6_dgram_ops; ++extern const struct proto_ops inet6_sockraw_ops; + + struct group_source_req; + struct group_filter; +diff --git a/kernel/cpuset.c b/kernel/cpuset.c +index b271353d5202..3b5e5430f5d0 100644 +--- a/kernel/cpuset.c ++++ b/kernel/cpuset.c +@@ -173,9 +173,9 @@ typedef enum { + } cpuset_flagbits_t; + + /* convenient tests for these bits */ +-static inline bool is_cpuset_online(const struct cpuset *cs) ++static inline bool is_cpuset_online(struct cpuset *cs) + { +- return test_bit(CS_ONLINE, &cs->flags); ++ return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); + } + + static inline int is_cpu_exclusive(const struct cpuset *cs) +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 784ab8fe8714..22350b15b4e7 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -6410,6 +6410,21 @@ static void perf_log_itrace_start(struct perf_event *event) + perf_output_end(&handle); + } + ++static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) ++{ ++ /* ++ * Due to interrupt latency (AKA "skid"), we may enter the ++ * kernel before taking an overflow, even if the PMU is only ++ * counting user events. ++ * To avoid leaking information to userspace, we must always ++ * reject kernel samples when exclude_kernel is set. ++ */ ++ if (event->attr.exclude_kernel && !user_mode(regs)) ++ return false; ++ ++ return true; ++} ++ + /* + * Generic event overflow handling, sampling. + */ +@@ -6457,6 +6472,12 @@ static int __perf_event_overflow(struct perf_event *event, + } + + /* ++ * For security, drop the skid kernel samples if necessary. ++ */ ++ if (!sample_is_allowed(event, regs)) ++ return ret; ++ ++ /* + * XXX event_limit might not quite work as expected on inherited + * events + */ +diff --git a/kernel/fork.c b/kernel/fork.c +index 0ee630f3ad4b..68cfda1c1800 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -368,7 +368,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) + set_task_stack_end_magic(tsk); + + #ifdef CONFIG_CC_STACKPROTECTOR +- tsk->stack_canary = get_random_int(); ++ tsk->stack_canary = get_random_long(); + #endif + + /* +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index c7e8ed99c953..5e2cd1030702 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -28,19 +28,25 @@ + #include + + ++void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, ++ const struct cred *ptracer_cred) ++{ ++ BUG_ON(!list_empty(&child->ptrace_entry)); ++ list_add(&child->ptrace_entry, &new_parent->ptraced); ++ child->parent = new_parent; ++ child->ptracer_cred = get_cred(ptracer_cred); ++} ++ + /* + * ptrace a task: make the debugger its new parent and + * move it to the ptrace list. + * + * Must be called with the tasklist lock write-held. 
+ */ +-void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) ++static void ptrace_link(struct task_struct *child, struct task_struct *new_parent) + { +- BUG_ON(!list_empty(&child->ptrace_entry)); +- list_add(&child->ptrace_entry, &new_parent->ptraced); +- child->parent = new_parent; + rcu_read_lock(); +- child->ptracer_cred = get_cred(__task_cred(new_parent)); ++ __ptrace_link(child, new_parent, __task_cred(new_parent)); + rcu_read_unlock(); + } + +@@ -353,7 +359,7 @@ static int ptrace_attach(struct task_struct *task, long request, + flags |= PT_SEIZED; + task->ptrace = flags; + +- __ptrace_link(task, current); ++ ptrace_link(task, current); + + /* SEIZE doesn't trap tracee on attach */ + if (!seize) +@@ -420,7 +426,7 @@ static int ptrace_traceme(void) + */ + if (!ret && !(current->real_parent->flags & PF_EXITING)) { + current->ptrace = PT_PTRACED; +- __ptrace_link(current, current->real_parent); ++ ptrace_link(current, current->real_parent); + } + } + write_unlock_irq(&tasklist_lock); +diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c +index 0ecef3e4690e..5e6db6b1e3bd 100644 +--- a/lib/test_user_copy.c ++++ b/lib/test_user_copy.c +@@ -58,7 +58,9 @@ static int __init test_user_copy_init(void) + usermem = (char __user *)user_addr; + bad_usermem = (char *)user_addr; + +- /* Legitimate usage: none of these should fail. */ ++ /* ++ * Legitimate usage: none of these copies should fail. ++ */ + ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE), + "legitimate copy_from_user failed"); + ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE), +@@ -68,19 +70,33 @@ static int __init test_user_copy_init(void) + ret |= test(put_user(value, (unsigned long __user *)usermem), + "legitimate put_user failed"); + +- /* Invalid usage: none of these should succeed. */ ++ /* ++ * Invalid usage: none of these copies should succeed. ++ */ ++ ++ /* Reject kernel-to-kernel copies through copy_from_user(). */ + ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), + PAGE_SIZE), + "illegal all-kernel copy_from_user passed"); ++ ++#if 0 ++ /* ++ * When running with SMAP/PAN/etc, this will Oops the kernel ++ * due to the zeroing of userspace memory on failure. This needs ++ * to be tested in LKDTM instead, since this test module does not ++ * expect to explode. 
++ */ + ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem, + PAGE_SIZE), + "illegal reversed copy_from_user passed"); ++#endif + ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE, + PAGE_SIZE), + "illegal all-kernel copy_to_user passed"); + ret |= test(!copy_to_user((char __user *)kmem, bad_usermem, + PAGE_SIZE), + "illegal reversed copy_to_user passed"); ++ + ret |= test(!get_user(value, (unsigned long __user *)kmem), + "illegal get_user passed"); + ret |= test(!put_user(value, (unsigned long __user *)kmem), +diff --git a/mm/memblock.c b/mm/memblock.c +index d300f1329814..f8fab45bfdb7 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -1634,6 +1634,30 @@ static void __init_memblock memblock_dump(struct memblock_type *type, char *name + } + } + ++extern unsigned long __init_memblock ++memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr) ++{ ++ struct memblock_type *type = &memblock.reserved; ++ unsigned long size = 0; ++ int idx; ++ ++ for (idx = 0; idx < type->cnt; idx++) { ++ struct memblock_region *rgn = &type->regions[idx]; ++ phys_addr_t start, end; ++ ++ if (rgn->base + rgn->size < start_addr) ++ continue; ++ if (rgn->base > end_addr) ++ continue; ++ ++ start = rgn->base; ++ end = start + rgn->size; ++ size += end - start; ++ } ++ ++ return size; ++} ++ + void __init_memblock __memblock_dump_all(void) + { + pr_info("MEMBLOCK configuration:\n"); +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 6f9005dcca2e..bd17a6bdf131 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -269,6 +269,26 @@ int page_group_by_mobility_disabled __read_mostly; + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT + static inline void reset_deferred_meminit(pg_data_t *pgdat) + { ++ unsigned long max_initialise; ++ unsigned long reserved_lowmem; ++ ++ /* ++ * Initialise at least 2G of a node but also take into account that ++ * two large system hashes that can take up 1GB for 0.25TB/node. ++ */ ++ max_initialise = max(2UL << (30 - PAGE_SHIFT), ++ (pgdat->node_spanned_pages >> 8)); ++ ++ /* ++ * Compensate the all the memblock reservations (e.g. crash kernel) ++ * from the initial estimation to make sure we will initialize enough ++ * memory to boot. 
++ */ ++ reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn, ++ pgdat->node_start_pfn + max_initialise); ++ max_initialise += reserved_lowmem; ++ ++ pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages); + pgdat->first_deferred_pfn = ULONG_MAX; + } + +@@ -302,10 +322,9 @@ static inline bool update_defer_init(pg_data_t *pgdat, + /* Always populate low zones for address-contrained allocations */ + if (zone_end < pgdat_end_pfn(pgdat)) + return true; +- + /* Initialise at least 2G of the highest zone */ + (*nr_initialised)++; +- if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) && ++ if ((*nr_initialised > pgdat->static_init_size) && + (pfn & (PAGES_PER_SECTION - 1)) == 0) { + pgdat->first_deferred_pfn = pfn; + return false; +@@ -5343,7 +5362,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, + /* pg_data_t should be reset to zero when it's allocated */ + WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); + +- reset_deferred_meminit(pgdat); + pgdat->node_id = nid; + pgdat->node_start_pfn = node_start_pfn; + #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +@@ -5362,6 +5380,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, + (unsigned long)pgdat->node_mem_map); + #endif + ++ reset_deferred_meminit(pgdat); + free_area_init_core(pgdat); + } + +diff --git a/mm/truncate.c b/mm/truncate.c +index 76e35ad97102..f4c8270f7b84 100644 +--- a/mm/truncate.c ++++ b/mm/truncate.c +@@ -732,7 +732,7 @@ EXPORT_SYMBOL(truncate_setsize); + */ + void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) + { +- int bsize = 1 << inode->i_blkbits; ++ int bsize = i_blocksize(inode); + loff_t rounded_from; + struct page *page; + pgoff_t index; +diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c +index 57be733a99bc..bcb4559e735d 100644 +--- a/net/bridge/br_stp_if.c ++++ b/net/bridge/br_stp_if.c +@@ -166,7 +166,8 @@ static void br_stp_start(struct net_bridge *br) + br_debug(br, "using kernel STP\n"); + + /* To start timers on any ports left in blocking */ +- mod_timer(&br->hello_timer, jiffies + br->hello_time); ++ if (br->dev->flags & IFF_UP) ++ mod_timer(&br->hello_timer, jiffies + br->hello_time); + br_port_state_selection(br); + } + +diff --git a/net/core/dev.c b/net/core/dev.c +index 48399d8ce614..87b8754f34ac 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -182,7 +182,7 @@ EXPORT_SYMBOL(dev_base_lock); + /* protects napi_hash addition/deletion and napi_gen_id */ + static DEFINE_SPINLOCK(napi_hash_lock); + +-static unsigned int napi_gen_id; ++static unsigned int napi_gen_id = NR_CPUS; + static DEFINE_HASHTABLE(napi_hash, 8); + + static seqcount_t devnet_rename_seq; +@@ -3049,7 +3049,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, + int queue_index = 0; + + #ifdef CONFIG_XPS +- if (skb->sender_cpu == 0) ++ u32 sender_cpu = skb->sender_cpu - 1; ++ ++ if (sender_cpu >= (u32)NR_CPUS) + skb->sender_cpu = raw_smp_processor_id() + 1; + #endif + +@@ -4726,25 +4728,22 @@ EXPORT_SYMBOL_GPL(napi_by_id); + + void napi_hash_add(struct napi_struct *napi) + { +- if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) { ++ if (test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) ++ return; + +- spin_lock(&napi_hash_lock); ++ spin_lock(&napi_hash_lock); + +- /* 0 is not a valid id, we also skip an id that is taken +- * we expect both events to be extremely rare +- */ +- napi->napi_id = 0; +- while (!napi->napi_id) { +- napi->napi_id = ++napi_gen_id; +- if (napi_by_id(napi->napi_id)) +- napi->napi_id = 0; 
+- } ++ /* 0..NR_CPUS+1 range is reserved for sender_cpu use */ ++ do { ++ if (unlikely(++napi_gen_id < NR_CPUS + 1)) ++ napi_gen_id = NR_CPUS + 1; ++ } while (napi_by_id(napi_gen_id)); ++ napi->napi_id = napi_gen_id; + +- hlist_add_head_rcu(&napi->napi_hash_node, +- &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); ++ hlist_add_head_rcu(&napi->napi_hash_node, ++ &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); + +- spin_unlock(&napi_hash_lock); +- } ++ spin_unlock(&napi_hash_lock); + } + EXPORT_SYMBOL_GPL(napi_hash_add); + +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c +index afc18e9ca94a..967a47ff78a4 100644 +--- a/net/ipv4/af_inet.c ++++ b/net/ipv4/af_inet.c +@@ -1014,7 +1014,7 @@ static struct inet_protosw inetsw_array[] = + .type = SOCK_DGRAM, + .protocol = IPPROTO_ICMP, + .prot = &ping_prot, +- .ops = &inet_dgram_ops, ++ .ops = &inet_sockraw_ops, + .flags = INET_PROTOSW_REUSE, + }, + +diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c +index 882caa4e72bc..aafe68134763 100644 +--- a/net/ipv4/tcp_cong.c ++++ b/net/ipv4/tcp_cong.c +@@ -183,6 +183,7 @@ void tcp_init_congestion_control(struct sock *sk) + { + const struct inet_connection_sock *icsk = inet_csk(sk); + ++ tcp_sk(sk)->prior_ssthresh = 0; + if (icsk->icsk_ca_ops->init) + icsk->icsk_ca_ops->init(sk); + if (tcp_ca_needs_ecn(sk)) +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c +index 568bc0a52ca1..9e2ea4ae840d 100644 +--- a/net/ipv6/ip6_offload.c ++++ b/net/ipv6/ip6_offload.c +@@ -121,8 +121,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, + + if (udpfrag) { + int err = ip6_find_1stfragopt(skb, &prevhdr); +- if (err < 0) ++ if (err < 0) { ++ kfree_skb_list(segs); + return ERR_PTR(err); ++ } + fptr = (struct frag_hdr *)((u8 *)ipv6h + err); + fptr->frag_off = htons(offset); + if (skb->next) +diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c +index 3e55447b63a4..a830b68e63c9 100644 +--- a/net/ipv6/ping.c ++++ b/net/ipv6/ping.c +@@ -50,7 +50,7 @@ static struct inet_protosw pingv6_protosw = { + .type = SOCK_DGRAM, + .protocol = IPPROTO_ICMPV6, + .prot = &pingv6_prot, +- .ops = &inet6_dgram_ops, ++ .ops = &inet6_sockraw_ops, + .flags = INET_PROTOSW_REUSE, + }; + +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index c93ede16795d..4d52a0e2f60d 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -1303,7 +1303,7 @@ void raw6_proc_exit(void) + #endif /* CONFIG_PROC_FS */ + + /* Same as inet6_dgram_ops, sans udp_poll. 
*/ +-static const struct proto_ops inet6_sockraw_ops = { ++const struct proto_ops inet6_sockraw_ops = { + .family = PF_INET6, + .owner = THIS_MODULE, + .release = inet6_release, +diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c +index 0e015906f9ca..07d36573f50b 100644 +--- a/net/ipv6/xfrm6_mode_ro.c ++++ b/net/ipv6/xfrm6_mode_ro.c +@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb) + iph = ipv6_hdr(skb); + + hdr_len = x->type->hdr_offset(x, skb, &prevhdr); ++ if (hdr_len < 0) ++ return hdr_len; + skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); + skb_set_network_header(skb, -x->props.header_len); + skb->transport_header = skb->network_header + hdr_len; +diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c +index 4e344105b3fd..1d3bbe6e1183 100644 +--- a/net/ipv6/xfrm6_mode_transport.c ++++ b/net/ipv6/xfrm6_mode_transport.c +@@ -28,6 +28,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) + iph = ipv6_hdr(skb); + + hdr_len = x->type->hdr_offset(x, skb, &prevhdr); ++ if (hdr_len < 0) ++ return hdr_len; + skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); + skb_set_network_header(skb, -x->props.header_len); + skb->transport_header = skb->network_header + hdr_len; +diff --git a/security/keys/key.c b/security/keys/key.c +index 534808915371..09c10b181881 100644 +--- a/security/keys/key.c ++++ b/security/keys/key.c +@@ -934,12 +934,11 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen) + /* the key must be writable */ + ret = key_permission(key_ref, KEY_NEED_WRITE); + if (ret < 0) +- goto error; ++ return ret; + + /* attempt to update it if supported */ +- ret = -EOPNOTSUPP; + if (!key->type->update) +- goto error; ++ return -EOPNOTSUPP; + + memset(&prep, 0, sizeof(prep)); + prep.data = payload; +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index 442e350c209d..671709d8610d 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -97,7 +97,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, + /* pull the payload in if one was supplied */ + payload = NULL; + +- if (_payload) { ++ if (plen) { + ret = -ENOMEM; + payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN); + if (!payload) { +@@ -327,7 +327,7 @@ long keyctl_update_key(key_serial_t id, + + /* pull the payload in if one was supplied */ + payload = NULL; +- if (_payload) { ++ if (plen) { + ret = -ENOMEM; + payload = kmalloc(plen, GFP_KERNEL); + if (!payload) +diff --git a/sound/core/timer.c b/sound/core/timer.c +index 278a332f97bd..48eaccba82a3 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -1621,6 +1621,7 @@ static int snd_timer_user_tselect(struct file *file, + if (err < 0) + goto __err; + ++ tu->qhead = tu->qtail = tu->qused = 0; + kfree(tu->queue); + tu->queue = NULL; + kfree(tu->tqueue); +@@ -1958,6 +1959,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, + + tu = file->private_data; + unit = tu->tread ? 
sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); ++ mutex_lock(&tu->ioctl_lock); + spin_lock_irq(&tu->qlock); + while ((long)count - result >= unit) { + while (!tu->qused) { +@@ -1973,7 +1975,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, + add_wait_queue(&tu->qchange_sleep, &wait); + + spin_unlock_irq(&tu->qlock); ++ mutex_unlock(&tu->ioctl_lock); + schedule(); ++ mutex_lock(&tu->ioctl_lock); + spin_lock_irq(&tu->qlock); + + remove_wait_queue(&tu->qchange_sleep, &wait); +@@ -1993,7 +1997,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, + tu->qused--; + spin_unlock_irq(&tu->qlock); + +- mutex_lock(&tu->ioctl_lock); + if (tu->tread) { + if (copy_to_user(buffer, &tu->tqueue[qhead], + sizeof(struct snd_timer_tread))) +@@ -2003,7 +2006,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, + sizeof(struct snd_timer_read))) + err = -EFAULT; + } +- mutex_unlock(&tu->ioctl_lock); + + spin_lock_irq(&tu->qlock); + if (err < 0) +@@ -2013,6 +2015,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, + } + _error: + spin_unlock_irq(&tu->qlock); ++ mutex_unlock(&tu->ioctl_lock); + return result > 0 ? result : err; + } + +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c +index a1305f827a98..fa6b74a304a7 100644 +--- a/sound/soc/soc-core.c ++++ b/sound/soc/soc-core.c +@@ -1775,6 +1775,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card) + for (i = 0; i < card->num_aux_devs; i++) + soc_remove_aux_dev(card, i); + ++ /* free the ALSA card at first; this syncs with pending operations */ ++ snd_card_free(card->snd_card); ++ + /* remove and free each DAI */ + soc_remove_dai_links(card); + +@@ -1786,9 +1789,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card) + + snd_soc_dapm_free(&card->dapm); + +- snd_card_free(card->snd_card); + return 0; +- + } + + /* removes a socdev */ diff --git a/patch/kernel/rk3328-default/patch-4.4.72-73.patch b/patch/kernel/rk3328-default/patch-4.4.72-73.patch new file mode 100644 index 000000000..43d7eb9c8 --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.72-73.patch @@ -0,0 +1,2192 @@ +diff --git a/Makefile b/Makefile +index 94d663c935c0..ba5a70b6e32c 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 72 ++SUBLEVEL = 73 + EXTRAVERSION = + NAME = Blurry Fish Butt + +@@ -789,7 +789,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time) + KBUILD_ARFLAGS := $(call ar-option,D) + + # check for 'asm goto' +-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y) ++ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) + KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO + KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO + endif +diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi +index 4b0ec0703825..8ca9217204a0 100644 +--- a/arch/arm/boot/dts/imx6dl.dtsi ++++ b/arch/arm/boot/dts/imx6dl.dtsi +@@ -30,7 +30,7 @@ + /* kHz uV */ + 996000 1250000 + 792000 1175000 +- 396000 1075000 ++ 396000 1150000 + >; + fsl,soc-operating-points = < + /* ARM kHz SOC-PU uV */ +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S +index 7460df3eec6b..4612ed7ec2e5 100644 +--- a/arch/s390/kernel/entry.S ++++ b/arch/s390/kernel/entry.S +@@ -229,12 +229,17 @@ ENTRY(sie64a) + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce + .Lsie_done: + # some program checks are suppressing. C code (e.g. 
do_protection_exception) +-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other +-# instructions between sie64a and .Lsie_done should not cause program +-# interrupts. So lets use a nop (47 00 00 00) as a landing pad. ++# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There ++# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. ++# Other instructions between sie64a and .Lsie_done should not cause program ++# interrupts. So lets use 3 nops as a landing pad for all possible rewinds. + # See also .Lcleanup_sie +-.Lrewind_pad: +- nop 0 ++.Lrewind_pad6: ++ nopr 7 ++.Lrewind_pad4: ++ nopr 7 ++.Lrewind_pad2: ++ nopr 7 + .globl sie_exit + sie_exit: + lg %r14,__SF_EMPTY+8(%r15) # load guest register save area +@@ -247,7 +252,9 @@ sie_exit: + stg %r14,__SF_EMPTY+16(%r15) # set exit reason code + j sie_exit + +- EX_TABLE(.Lrewind_pad,.Lsie_fault) ++ EX_TABLE(.Lrewind_pad6,.Lsie_fault) ++ EX_TABLE(.Lrewind_pad4,.Lsie_fault) ++ EX_TABLE(.Lrewind_pad2,.Lsie_fault) + EX_TABLE(sie_exit,.Lsie_fault) + #endif + +diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c +index ef7d6c8fea66..f354fd84adeb 100644 +--- a/arch/s390/mm/vmem.c ++++ b/arch/s390/mm/vmem.c +@@ -372,7 +372,7 @@ void __init vmem_map_init(void) + ro_end = (unsigned long)&_eshared & PAGE_MASK; + for_each_memblock(memory, reg) { + start = reg->base; +- end = reg->base + reg->size - 1; ++ end = reg->base + reg->size; + if (start >= ro_end || end <= ro_start) + vmem_add_mem(start, end - start, 0); + else if (start >= ro_start && end <= ro_end) +diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c +index d21cd625c0de..cc97a43268ee 100644 +--- a/arch/sparc/kernel/traps_64.c ++++ b/arch/sparc/kernel/traps_64.c +@@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) + + void bad_trap(struct pt_regs *regs, long lvl) + { +- char buffer[32]; ++ char buffer[36]; + siginfo_t info; + + if (notify_die(DIE_TRAP, "bad trap", regs, +@@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl) + + void bad_trap_tl1(struct pt_regs *regs, long lvl) + { +- char buffer[32]; ++ char buffer[36]; + + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, + 0, lvl, SIGTRAP) == NOTIFY_STOP) +diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h +index f71f88ea7646..19707db966f1 100644 +--- a/arch/xtensa/include/asm/irq.h ++++ b/arch/xtensa/include/asm/irq.h +@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { } + # define PLATFORM_NR_IRQS 0 + #endif + #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS +-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS) ++#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1) ++#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1) + + #if VARIANT_NR_IRQS == 0 + static inline void variant_init_irq(void) { } +diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c +index 4ac3d23161cf..441694464b1e 100644 +--- a/arch/xtensa/kernel/irq.c ++++ b/arch/xtensa/kernel/irq.c +@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) + { + int irq = irq_find_mapping(NULL, hwirq); + +- if (hwirq >= NR_IRQS) { +- printk(KERN_EMERG "%s: cannot handle IRQ %d\n", +- __func__, hwirq); +- } +- + #ifdef CONFIG_DEBUG_STACKOVERFLOW + /* Debugging check for stack overflow: is there less than 1KB free? 
*/ + { +diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +index dbeea2b440a1..1fda7e20dfcb 100644 +--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h ++++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +@@ -24,16 +24,18 @@ + + /* Interrupt configuration. */ + +-#define PLATFORM_NR_IRQS 10 ++#define PLATFORM_NR_IRQS 0 + + /* Default assignment of LX60 devices to external interrupts. */ + + #ifdef CONFIG_XTENSA_MX + #define DUART16552_INTNUM XCHAL_EXTINT3_NUM + #define OETH_IRQ XCHAL_EXTINT4_NUM ++#define C67X00_IRQ XCHAL_EXTINT8_NUM + #else + #define DUART16552_INTNUM XCHAL_EXTINT0_NUM + #define OETH_IRQ XCHAL_EXTINT1_NUM ++#define C67X00_IRQ XCHAL_EXTINT5_NUM + #endif + + /* +@@ -63,5 +65,5 @@ + + #define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) + #define C67X00_SIZE 0x10 +-#define C67X00_IRQ 5 ++ + #endif /* __XTENSA_XTAVNET_HARDWARE_H */ +diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c +index e9f65f79cf2e..d1e9439fad45 100644 +--- a/arch/xtensa/platforms/xtfpga/setup.c ++++ b/arch/xtensa/platforms/xtfpga/setup.c +@@ -209,8 +209,8 @@ static struct resource ethoc_res[] = { + .flags = IORESOURCE_MEM, + }, + [2] = { /* IRQ number */ +- .start = OETH_IRQ, +- .end = OETH_IRQ, ++ .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), ++ .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), + .flags = IORESOURCE_IRQ, + }, + }; +@@ -246,8 +246,8 @@ static struct resource c67x00_res[] = { + .flags = IORESOURCE_MEM, + }, + [1] = { /* IRQ number */ +- .start = C67X00_IRQ, +- .end = C67X00_IRQ, ++ .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), ++ .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), + .flags = IORESOURCE_IRQ, + }, + }; +@@ -280,7 +280,7 @@ static struct resource serial_resource = { + static struct plat_serial8250_port serial_platform_data[] = { + [0] = { + .mapbase = DUART16552_PADDR, +- .irq = DUART16552_INTNUM, ++ .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM), + .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | + UPF_IOREMAP, + .iotype = UPIO_MEM32, +diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c +index 93e7c1b32edd..5610cd537da7 100644 +--- a/block/partitions/msdos.c ++++ b/block/partitions/msdos.c +@@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state, + continue; + bsd_start = le32_to_cpu(p->p_offset); + bsd_size = le32_to_cpu(p->p_size); ++ if (memcmp(flavour, "bsd\0", 4) == 0) ++ bsd_start += offset; + if (offset == bsd_start && size == bsd_size) + /* full parent partition, we have it already */ + continue; +diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c +index 3252429f96af..3a20dc594338 100644 +--- a/drivers/base/power/runtime.c ++++ b/drivers/base/power/runtime.c +@@ -889,13 +889,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) + unsigned long flags; + int retval; + +- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); +- + if (rpmflags & RPM_GET_PUT) { + if (!atomic_dec_and_test(&dev->power.usage_count)) + return 0; + } + ++ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); ++ + spin_lock_irqsave(&dev->power.lock, flags); + retval = rpm_idle(dev, rpmflags); + spin_unlock_irqrestore(&dev->power.lock, flags); +@@ -921,13 +921,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) + unsigned long flags; + int retval; + +- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); +- + if (rpmflags & RPM_GET_PUT) { + if 
(!atomic_dec_and_test(&dev->power.usage_count)) + return 0; + } + ++ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); ++ + spin_lock_irqsave(&dev->power.lock, flags); + retval = rpm_suspend(dev, rpmflags); + spin_unlock_irqrestore(&dev->power.lock, flags); +@@ -952,7 +952,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) + unsigned long flags; + int retval; + +- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); ++ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && ++ dev->power.runtime_status != RPM_ACTIVE); + + if (rpmflags & RPM_GET_PUT) + atomic_inc(&dev->power.usage_count); +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h +index 05f6522c0457..b92139e9b9d8 100644 +--- a/drivers/gpu/drm/ast/ast_drv.h ++++ b/drivers/gpu/drm/ast/ast_drv.h +@@ -113,6 +113,7 @@ struct ast_private { + struct ttm_bo_kmap_obj cache_kmap; + int next_cursor; + bool support_wide_screen; ++ bool DisableP2A; + + enum ast_tx_chip tx_chip_type; + u8 dp501_maxclk; +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index 9b8f0b975ca6..6c021165ca67 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) + } else + *need_post = false; + ++ /* Check P2A Access */ ++ ast->DisableP2A = true; ++ data = ast_read32(ast, 0xf004); ++ if (data != 0xFFFFFFFF) ++ ast->DisableP2A = false; ++ + /* Check if we support wide screen */ + switch (ast->chip) { + case AST1180: +@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) + ast->support_wide_screen = true; + else { + ast->support_wide_screen = false; +- /* Read SCU7c (silicon revision register) */ +- ast_write32(ast, 0xf004, 0x1e6e0000); +- ast_write32(ast, 0xf000, 0x1); +- data = ast_read32(ast, 0x1207c); +- data &= 0x300; +- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ +- ast->support_wide_screen = true; +- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ +- ast->support_wide_screen = true; ++ if (ast->DisableP2A == false) { ++ /* Read SCU7c (silicon revision register) */ ++ ast_write32(ast, 0xf004, 0x1e6e0000); ++ ast_write32(ast, 0xf000, 0x1); ++ data = ast_read32(ast, 0x1207c); ++ data &= 0x300; ++ if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ ++ ast->support_wide_screen = true; ++ if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ ++ ast->support_wide_screen = true; ++ } + } + break; + } +@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev) + uint32_t data, data2; + uint32_t denum, num, div, ref_pll; + +- ast_write32(ast, 0xf004, 0x1e6e0000); +- ast_write32(ast, 0xf000, 0x1); +- +- +- ast_write32(ast, 0x10000, 0xfc600309); +- +- do { +- if (pci_channel_offline(dev->pdev)) +- return -EIO; +- } while (ast_read32(ast, 0x10000) != 0x01); +- data = ast_read32(ast, 0x10004); +- +- if (data & 0x40) ++ if (ast->DisableP2A) ++ { + ast->dram_bus_width = 16; ++ ast->dram_type = AST_DRAM_1Gx16; ++ ast->mclk = 396; ++ } + else +- ast->dram_bus_width = 32; ++ { ++ ast_write32(ast, 0xf004, 0x1e6e0000); ++ ast_write32(ast, 0xf000, 0x1); ++ data = ast_read32(ast, 0x10004); ++ ++ if (data & 0x40) ++ ast->dram_bus_width = 16; ++ else ++ ast->dram_bus_width = 32; ++ ++ if (ast->chip == AST2300 || ast->chip == AST2400) { ++ switch (data & 0x03) { ++ case 0: ++ ast->dram_type = AST_DRAM_512Mx16; ++ break; ++ default: ++ case 1: ++ ast->dram_type = AST_DRAM_1Gx16; ++ break; ++ case 
2: ++ ast->dram_type = AST_DRAM_2Gx16; ++ break; ++ case 3: ++ ast->dram_type = AST_DRAM_4Gx16; ++ break; ++ } ++ } else { ++ switch (data & 0x0c) { ++ case 0: ++ case 4: ++ ast->dram_type = AST_DRAM_512Mx16; ++ break; ++ case 8: ++ if (data & 0x40) ++ ast->dram_type = AST_DRAM_1Gx16; ++ else ++ ast->dram_type = AST_DRAM_512Mx32; ++ break; ++ case 0xc: ++ ast->dram_type = AST_DRAM_1Gx32; ++ break; ++ } ++ } + +- if (ast->chip == AST2300 || ast->chip == AST2400) { +- switch (data & 0x03) { +- case 0: +- ast->dram_type = AST_DRAM_512Mx16; +- break; +- default: +- case 1: +- ast->dram_type = AST_DRAM_1Gx16; +- break; +- case 2: +- ast->dram_type = AST_DRAM_2Gx16; +- break; ++ data = ast_read32(ast, 0x10120); ++ data2 = ast_read32(ast, 0x10170); ++ if (data2 & 0x2000) ++ ref_pll = 14318; ++ else ++ ref_pll = 12000; ++ ++ denum = data & 0x1f; ++ num = (data & 0x3fe0) >> 5; ++ data = (data & 0xc000) >> 14; ++ switch (data) { + case 3: +- ast->dram_type = AST_DRAM_4Gx16; +- break; +- } +- } else { +- switch (data & 0x0c) { +- case 0: +- case 4: +- ast->dram_type = AST_DRAM_512Mx16; ++ div = 0x4; + break; +- case 8: +- if (data & 0x40) +- ast->dram_type = AST_DRAM_1Gx16; +- else +- ast->dram_type = AST_DRAM_512Mx32; ++ case 2: ++ case 1: ++ div = 0x2; + break; +- case 0xc: +- ast->dram_type = AST_DRAM_1Gx32; ++ default: ++ div = 0x1; + break; + } ++ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); + } +- +- data = ast_read32(ast, 0x10120); +- data2 = ast_read32(ast, 0x10170); +- if (data2 & 0x2000) +- ref_pll = 14318; +- else +- ref_pll = 12000; +- +- denum = data & 0x1f; +- num = (data & 0x3fe0) >> 5; +- data = (data & 0xc000) >> 14; +- switch (data) { +- case 3: +- div = 0x4; +- break; +- case 2: +- case 1: +- div = 0x2; +- break; +- default: +- div = 0x1; +- break; +- } +- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); + return 0; + } + +diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c +index 30672a3df8a9..270e8fb2803f 100644 +--- a/drivers/gpu/drm/ast/ast_post.c ++++ b/drivers/gpu/drm/ast/ast_post.c +@@ -375,12 +375,20 @@ void ast_post_gpu(struct drm_device *dev) + ast_enable_mmio(dev); + ast_set_def_ext_reg(dev); + +- if (ast->chip == AST2300 || ast->chip == AST2400) +- ast_init_dram_2300(dev); +- else +- ast_init_dram_reg(dev); ++ if (ast->DisableP2A == false) ++ { ++ if (ast->chip == AST2300 || ast->chip == AST2400) ++ ast_init_dram_2300(dev); ++ else ++ ast_init_dram_reg(dev); + +- ast_init_3rdtx(dev); ++ ast_init_3rdtx(dev); ++ } ++ else ++ { ++ if (ast->tx_chip_type != AST_TX_NONE) ++ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ ++ } + } + + /* AST 2300 DRAM settings */ +diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c +index 58a3f7cf2fb3..00de1bf81519 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_display.c ++++ b/drivers/gpu/drm/nouveau/nouveau_display.c +@@ -370,7 +370,8 @@ nouveau_display_init(struct drm_device *dev) + return ret; + + /* enable polling for external displays */ +- drm_kms_helper_poll_enable(dev); ++ if (!dev->mode_config.poll_enabled) ++ drm_kms_helper_poll_enable(dev); + + /* enable hotplug interrupts */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c +index d236fc7c425b..91a61d2cca88 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c +@@ -743,7 +743,10 @@ 
nouveau_pmops_runtime_resume(struct device *dev) + pci_set_master(pdev); + + ret = nouveau_do_resume(drm_dev, true); +- drm_kms_helper_poll_enable(drm_dev); ++ ++ if (!drm_dev->mode_config.poll_enabled) ++ drm_kms_helper_poll_enable(drm_dev); ++ + /* do magic */ + nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); + vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); +diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h +index 2e3a62d38fe9..1621c8ae0fa0 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fence.h ++++ b/drivers/gpu/drm/nouveau/nouveau_fence.h +@@ -99,6 +99,7 @@ struct nv84_fence_priv { + struct nouveau_bo *bo; + struct nouveau_bo *bo_gart; + u32 *suspend; ++ struct mutex mutex; + }; + + u64 nv84_fence_crtc(struct nouveau_channel *, int); +diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c +index 6ae1b3494bcd..b7b961233949 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_usif.c ++++ b/drivers/gpu/drm/nouveau/nouveau_usif.c +@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) + if (nvif_unpack(argv->v0, 0, 0, true)) { + /* block access to objects not created via this interface */ + owner = argv->v0.owner; +- if (argv->v0.object == 0ULL) ++ if (argv->v0.object == 0ULL && ++ argv->v0.type != NVIF_IOCTL_V0_DEL) + argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ + else + argv->v0.owner = NVDRM_OBJECT_USIF; +diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c +index 412c5be5a9ca..7bc26eceda66 100644 +--- a/drivers/gpu/drm/nouveau/nv84_fence.c ++++ b/drivers/gpu/drm/nouveau/nv84_fence.c +@@ -121,8 +121,10 @@ nv84_fence_context_del(struct nouveau_channel *chan) + } + + nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); ++ mutex_lock(&priv->mutex); + nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); + nouveau_bo_vma_del(priv->bo, &fctx->vma); ++ mutex_unlock(&priv->mutex); + nouveau_fence_context_del(&fctx->base); + chan->fence = NULL; + nouveau_fence_context_free(&fctx->base); +@@ -148,11 +150,13 @@ nv84_fence_context_new(struct nouveau_channel *chan) + fctx->base.sync32 = nv84_fence_sync32; + fctx->base.sequence = nv84_fence_read(chan); + ++ mutex_lock(&priv->mutex); + ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); + if (ret == 0) { + ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, + &fctx->vma_gart); + } ++ mutex_unlock(&priv->mutex); + + /* map display semaphore buffers into channel's vm */ + for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { +@@ -232,6 +236,8 @@ nv84_fence_create(struct nouveau_drm *drm) + priv->base.context_base = fence_context_alloc(priv->base.contexts); + priv->base.uevent = true; + ++ mutex_init(&priv->mutex); ++ + /* Use VRAM if there is any ; otherwise fallback to system memory */ + domain = drm->device.info.ram_size != 0 ? 
TTM_PL_FLAG_VRAM : + /* +diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c +index 630bce68bf38..b61db9db3ca5 100644 +--- a/drivers/i2c/busses/i2c-piix4.c ++++ b/drivers/i2c/busses/i2c-piix4.c +@@ -54,7 +54,7 @@ + #define SMBSLVDAT (0xC + piix4_smba) + + /* count for request_region */ +-#define SMBIOSIZE 8 ++#define SMBIOSIZE 9 + + /* PCI Address Constants */ + #define SMBBA 0x090 +diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c +index bb3ac5fe5846..72a391e01011 100644 +--- a/drivers/irqchip/irq-xtensa-mx.c ++++ b/drivers/irqchip/irq-xtensa-mx.c +@@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = { + int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) + { + struct irq_domain *root_domain = +- irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, ++ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, + &xtensa_mx_irq_domain_ops, + &xtensa_mx_irq_chip); + irq_set_default_host(root_domain); +diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c +index 472ae1770964..f728755fa292 100644 +--- a/drivers/irqchip/irq-xtensa-pic.c ++++ b/drivers/irqchip/irq-xtensa-pic.c +@@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = { + int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) + { + struct irq_domain *root_domain = +- irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, ++ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, + &xtensa_irq_domain_ops, &xtensa_irq_chip); + irq_set_default_host(root_domain); + return 0; +diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c +index ac7288240d55..f089fa954f42 100644 +--- a/drivers/net/ethernet/adaptec/starfire.c ++++ b/drivers/net/ethernet/adaptec/starfire.c +@@ -1153,6 +1153,12 @@ static void init_ring(struct net_device *dev) + if (skb == NULL) + break; + np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); ++ if (pci_dma_mapping_error(np->pci_dev, ++ np->rx_info[i].mapping)) { ++ dev_kfree_skb(skb); ++ np->rx_info[i].skb = NULL; ++ break; ++ } + /* Grrr, we cannot offset to correctly align the IP header. 
*/ + np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); + } +@@ -1183,8 +1189,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) + { + struct netdev_private *np = netdev_priv(dev); + unsigned int entry; ++ unsigned int prev_tx; + u32 status; +- int i; ++ int i, j; + + /* + * be cautious here, wrapping the queue has weird semantics +@@ -1202,6 +1209,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) + } + #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ + ++ prev_tx = np->cur_tx; + entry = np->cur_tx % TX_RING_SIZE; + for (i = 0; i < skb_num_frags(skb); i++) { + int wrap_ring = 0; +@@ -1235,6 +1243,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) + skb_frag_size(this_frag), + PCI_DMA_TODEVICE); + } ++ if (pci_dma_mapping_error(np->pci_dev, ++ np->tx_info[entry].mapping)) { ++ dev->stats.tx_dropped++; ++ goto err_out; ++ } + + np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); + np->tx_ring[entry].status = cpu_to_le32(status); +@@ -1269,8 +1282,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) + netif_stop_queue(dev); + + return NETDEV_TX_OK; +-} + ++err_out: ++ entry = prev_tx % TX_RING_SIZE; ++ np->tx_info[entry].skb = NULL; ++ if (i > 0) { ++ pci_unmap_single(np->pci_dev, ++ np->tx_info[entry].mapping, ++ skb_first_frag_len(skb), ++ PCI_DMA_TODEVICE); ++ np->tx_info[entry].mapping = 0; ++ entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; ++ for (j = 1; j < i; j++) { ++ pci_unmap_single(np->pci_dev, ++ np->tx_info[entry].mapping, ++ skb_frag_size( ++ &skb_shinfo(skb)->frags[j-1]), ++ PCI_DMA_TODEVICE); ++ entry++; ++ } ++ } ++ dev_kfree_skb_any(skb); ++ np->cur_tx = prev_tx; ++ return NETDEV_TX_OK; ++} + + /* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +@@ -1570,6 +1605,12 @@ static void refill_rx_ring(struct net_device *dev) + break; /* Better luck next round. 
*/
+ np->rx_info[entry].mapping =
+ pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
++ if (pci_dma_mapping_error(np->pci_dev,
++ np->rx_info[entry].mapping)) {
++ dev_kfree_skb(skb);
++ np->rx_info[entry].skb = NULL;
++ break;
++ }
+ np->rx_ring[entry].rxaddr =
+ cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
+ }
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 3e233d924cce..6a061f17a44f 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -1999,8 +1999,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
+ if (!rxb->page)
+ continue;
+
+- dma_unmap_single(rx_queue->dev, rxb->dma,
+- PAGE_SIZE, DMA_FROM_DEVICE);
++ dma_unmap_page(rx_queue->dev, rxb->dma,
++ PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(rxb->page);
+
+ rxb->page = NULL;
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index 08cef0dfb5db..2fa54b0b0679 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -105,8 +105,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct hns_nic_ring_data *ring_data)
+ {
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+- struct device *dev = priv->dev;
+ struct hnae_ring *ring = ring_data->ring;
++ struct device *dev = ring_to_dev(ring);
+ struct netdev_queue *dev_queue;
+ struct skb_frag_struct *frag;
+ int buf_num;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
+index 715de8affcc9..e203d0c4e5a3 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
++++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
+@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
+ return -ETIMEDOUT;
+ }
+
+-static int mlx4_comm_internal_err(u32 slave_read)
++int mlx4_comm_internal_err(u32 slave_read)
+ {
+ return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
+ (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
+index 0472941af820..1a134e08f010 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
++++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
+@@ -218,6 +218,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
+ struct mlx4_interface *intf;
+
+ mlx4_stop_catas_poll(dev);
++ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
++ mlx4_is_slave(dev)) {
++ /* In mlx4_remove_one on a VF */
++ u32 slave_read =
++ swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
++
++ if (mlx4_comm_internal_err(slave_read)) {
++ mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
++ __func__);
++ mlx4_enter_error_state(dev->persist);
++ }
++ }
+ mutex_lock(&intf_mutex);
+
+ list_for_each_entry(intf, &intf_list, list)
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+index e1cf9036af22..f5fdbd53d052 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+@@ -1205,6 +1205,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
+ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
+
+ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
++int mlx4_comm_internal_err(u32 slave_read);
+
+ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+ enum mlx4_port_type *type);
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 549ad2018e7f..1e61d4da72db 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -171,6 +171,49 @@ static struct mdiobb_ops bb_ops = {
+ .get_mdio_data = ravb_get_mdio_data,
+ };
+
++/* Free TX skb function for AVB-IP */
++static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
++{
++ struct ravb_private *priv = netdev_priv(ndev);
++ struct net_device_stats *stats = &priv->stats[q];
++ struct ravb_tx_desc *desc;
++ int free_num = 0;
++ int entry;
++ u32 size;
++
++ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
++ bool txed;
++
++ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
++ NUM_TX_DESC);
++ desc = &priv->tx_ring[q][entry];
++ txed = desc->die_dt == DT_FEMPTY;
++ if (free_txed_only && !txed)
++ break;
++ /* Descriptor type must be checked before all other reads */
++ dma_rmb();
++ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
++ /* Free the original skb. */
++ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
++ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
++ size, DMA_TO_DEVICE);
++ /* Last packet descriptor?
*/ ++ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { ++ entry /= NUM_TX_DESC; ++ dev_kfree_skb_any(priv->tx_skb[q][entry]); ++ priv->tx_skb[q][entry] = NULL; ++ if (txed) ++ stats->tx_packets++; ++ } ++ free_num++; ++ } ++ if (txed) ++ stats->tx_bytes += size; ++ desc->die_dt = DT_EEMPTY; ++ } ++ return free_num; ++} ++ + /* Free skb's and DMA buffers for Ethernet AVB */ + static void ravb_ring_free(struct net_device *ndev, int q) + { +@@ -186,19 +229,21 @@ static void ravb_ring_free(struct net_device *ndev, int q) + kfree(priv->rx_skb[q]); + priv->rx_skb[q] = NULL; + +- /* Free TX skb ringbuffer */ +- if (priv->tx_skb[q]) { +- for (i = 0; i < priv->num_tx_ring[q]; i++) +- dev_kfree_skb(priv->tx_skb[q][i]); +- } +- kfree(priv->tx_skb[q]); +- priv->tx_skb[q] = NULL; +- + /* Free aligned TX buffers */ + kfree(priv->tx_align[q]); + priv->tx_align[q] = NULL; + + if (priv->rx_ring[q]) { ++ for (i = 0; i < priv->num_rx_ring[q]; i++) { ++ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; ++ ++ if (!dma_mapping_error(ndev->dev.parent, ++ le32_to_cpu(desc->dptr))) ++ dma_unmap_single(ndev->dev.parent, ++ le32_to_cpu(desc->dptr), ++ PKT_BUF_SZ, ++ DMA_FROM_DEVICE); ++ } + ring_size = sizeof(struct ravb_ex_rx_desc) * + (priv->num_rx_ring[q] + 1); + dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], +@@ -207,12 +252,20 @@ static void ravb_ring_free(struct net_device *ndev, int q) + } + + if (priv->tx_ring[q]) { ++ ravb_tx_free(ndev, q, false); ++ + ring_size = sizeof(struct ravb_tx_desc) * + (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], + priv->tx_desc_dma[q]); + priv->tx_ring[q] = NULL; + } ++ ++ /* Free TX skb ringbuffer. ++ * SKBs are freed by ravb_tx_free() call above. ++ */ ++ kfree(priv->tx_skb[q]); ++ priv->tx_skb[q] = NULL; + } + + /* Format skb and descriptor buffer for Ethernet AVB */ +@@ -420,44 +473,6 @@ static int ravb_dmac_init(struct net_device *ndev) + return 0; + } + +-/* Free TX skb function for AVB-IP */ +-static int ravb_tx_free(struct net_device *ndev, int q) +-{ +- struct ravb_private *priv = netdev_priv(ndev); +- struct net_device_stats *stats = &priv->stats[q]; +- struct ravb_tx_desc *desc; +- int free_num = 0; +- int entry; +- u32 size; +- +- for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { +- entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * +- NUM_TX_DESC); +- desc = &priv->tx_ring[q][entry]; +- if (desc->die_dt != DT_FEMPTY) +- break; +- /* Descriptor type must be checked before all other reads */ +- dma_rmb(); +- size = le16_to_cpu(desc->ds_tagl) & TX_DS; +- /* Free the original skb. */ +- if (priv->tx_skb[q][entry / NUM_TX_DESC]) { +- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), +- size, DMA_TO_DEVICE); +- /* Last packet descriptor? 
*/ +- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { +- entry /= NUM_TX_DESC; +- dev_kfree_skb_any(priv->tx_skb[q][entry]); +- priv->tx_skb[q][entry] = NULL; +- stats->tx_packets++; +- } +- free_num++; +- } +- stats->tx_bytes += size; +- desc->die_dt = DT_EEMPTY; +- } +- return free_num; +-} +- + static void ravb_get_tx_tstamp(struct net_device *ndev) + { + struct ravb_private *priv = netdev_priv(ndev); +@@ -797,7 +812,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) + spin_lock_irqsave(&priv->lock, flags); + /* Clear TX interrupt */ + ravb_write(ndev, ~mask, TIS); +- ravb_tx_free(ndev, q); ++ ravb_tx_free(ndev, q, true); + netif_wake_subqueue(ndev, q); + mmiowb(); + spin_unlock_irqrestore(&priv->lock, flags); +@@ -1393,7 +1408,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) + + priv->cur_tx[q] += NUM_TX_DESC; + if (priv->cur_tx[q] - priv->dirty_tx[q] > +- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q)) ++ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && ++ !ravb_tx_free(ndev, q, true)) + netif_stop_subqueue(ndev, q); + + exit: +diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c +index cf468c87ce57..4cb8b85cbf2c 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c ++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c +@@ -100,6 +100,14 @@ + /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */ + #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) + ++#ifdef __BIG_ENDIAN ++#define xemaclite_readl ioread32be ++#define xemaclite_writel iowrite32be ++#else ++#define xemaclite_readl ioread32 ++#define xemaclite_writel iowrite32 ++#endif ++ + /** + * struct net_local - Our private per device data + * @ndev: instance of the network device +@@ -158,15 +166,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) + u32 reg_data; + + /* Enable the Tx interrupts for the first Buffer */ +- reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); +- __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, +- drvdata->base_addr + XEL_TSR_OFFSET); ++ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET); ++ xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK, ++ drvdata->base_addr + XEL_TSR_OFFSET); + + /* Enable the Rx interrupts for the first buffer */ +- __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); ++ xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); + + /* Enable the Global Interrupt Enable */ +- __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); ++ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); + } + + /** +@@ -181,17 +189,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) + u32 reg_data; + + /* Disable the Global Interrupt Enable */ +- __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); ++ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); + + /* Disable the Tx interrupts for the first buffer */ +- reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); +- __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), +- drvdata->base_addr + XEL_TSR_OFFSET); ++ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET); ++ xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), ++ drvdata->base_addr + XEL_TSR_OFFSET); + + /* Disable the Rx interrupts for the first buffer */ +- reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); +- 
__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), +- drvdata->base_addr + XEL_RSR_OFFSET); ++ reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET); ++ xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), ++ drvdata->base_addr + XEL_RSR_OFFSET); + } + + /** +@@ -323,7 +331,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, + byte_count = ETH_FRAME_LEN; + + /* Check if the expected buffer is available */ +- reg_data = __raw_readl(addr + XEL_TSR_OFFSET); ++ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); + if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | + XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { + +@@ -336,7 +344,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, + + addr = (void __iomem __force *)((u32 __force)addr ^ + XEL_BUFFER_OFFSET); +- reg_data = __raw_readl(addr + XEL_TSR_OFFSET); ++ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); + + if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | + XEL_TSR_XMIT_ACTIVE_MASK)) != 0) +@@ -347,16 +355,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, + /* Write the frame to the buffer */ + xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); + +- __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK), +- addr + XEL_TPLR_OFFSET); ++ xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK), ++ addr + XEL_TPLR_OFFSET); + + /* Update the Tx Status Register to indicate that there is a + * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which + * is used by the interrupt handler to check whether a frame + * has been transmitted */ +- reg_data = __raw_readl(addr + XEL_TSR_OFFSET); ++ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); + reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); +- __raw_writel(reg_data, addr + XEL_TSR_OFFSET); ++ xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET); + + return 0; + } +@@ -371,7 +379,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, + * + * Return: Total number of bytes received + */ +-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) ++static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) + { + void __iomem *addr; + u16 length, proto_type; +@@ -381,7 +389,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) + addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); + + /* Verify which buffer has valid data */ +- reg_data = __raw_readl(addr + XEL_RSR_OFFSET); ++ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); + + if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { + if (drvdata->rx_ping_pong != 0) +@@ -398,27 +406,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) + return 0; /* No data was available */ + + /* Verify that buffer has valid data */ +- reg_data = __raw_readl(addr + XEL_RSR_OFFSET); ++ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); + if ((reg_data & XEL_RSR_RECV_DONE_MASK) != + XEL_RSR_RECV_DONE_MASK) + return 0; /* No data was available */ + } + + /* Get the protocol type of the ethernet frame that arrived */ +- proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET + ++ proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET + + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & + XEL_RPLR_LENGTH_MASK); + + /* Check if received ethernet frame is a raw ethernet frame + * or an IP packet or an ARP packet */ +- if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { ++ if (proto_type > ETH_DATA_LEN) { + + if (proto_type == ETH_P_IP) { +- length = ((ntohl(__raw_readl(addr + ++ length = 
((ntohl(xemaclite_readl(addr + + XEL_HEADER_IP_LENGTH_OFFSET + + XEL_RXBUFF_OFFSET)) >> + XEL_HEADER_SHIFT) & + XEL_RPLR_LENGTH_MASK); ++ length = min_t(u16, length, ETH_DATA_LEN); + length += ETH_HLEN + ETH_FCS_LEN; + + } else if (proto_type == ETH_P_ARP) +@@ -431,14 +440,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) + /* Use the length in the frame, plus the header and trailer */ + length = proto_type + ETH_HLEN + ETH_FCS_LEN; + ++ if (WARN_ON(length > maxlen)) ++ length = maxlen; ++ + /* Read from the EmacLite device */ + xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), + data, length); + + /* Acknowledge the frame */ +- reg_data = __raw_readl(addr + XEL_RSR_OFFSET); ++ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); + reg_data &= ~XEL_RSR_RECV_DONE_MASK; +- __raw_writel(reg_data, addr + XEL_RSR_OFFSET); ++ xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET); + + return length; + } +@@ -465,14 +477,14 @@ static void xemaclite_update_address(struct net_local *drvdata, + + xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); + +- __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); ++ xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); + + /* Update the MAC address in the EmacLite */ +- reg_data = __raw_readl(addr + XEL_TSR_OFFSET); +- __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); ++ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); ++ xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); + + /* Wait for EmacLite to finish with the MAC address update */ +- while ((__raw_readl(addr + XEL_TSR_OFFSET) & ++ while ((xemaclite_readl(addr + XEL_TSR_OFFSET) & + XEL_TSR_PROG_MAC_ADDR) != 0) + ; + } +@@ -605,7 +617,7 @@ static void xemaclite_rx_handler(struct net_device *dev) + + skb_reserve(skb, 2); + +- len = xemaclite_recv_data(lp, (u8 *) skb->data); ++ len = xemaclite_recv_data(lp, (u8 *) skb->data, len); + + if (!len) { + dev->stats.rx_errors++; +@@ -642,32 +654,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) + u32 tx_status; + + /* Check if there is Rx Data available */ +- if ((__raw_readl(base_addr + XEL_RSR_OFFSET) & ++ if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) & + XEL_RSR_RECV_DONE_MASK) || +- (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) ++ (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) + & XEL_RSR_RECV_DONE_MASK)) + + xemaclite_rx_handler(dev); + + /* Check if the Transmission for the first buffer is completed */ +- tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET); ++ tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET); + if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && + (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { + + tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; +- __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET); ++ xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET); + + tx_complete = true; + } + + /* Check if the Transmission for the second buffer is completed */ +- tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); ++ tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); + if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && + (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { + + tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; +- __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + +- XEL_TSR_OFFSET); ++ xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + ++ XEL_TSR_OFFSET); + + tx_complete = true; + } +@@ -700,7 +712,7 @@ static int xemaclite_mdio_wait(struct 
net_local *lp) + /* wait for the MDIO interface to not be busy or timeout + after some time. + */ +- while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & ++ while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & + XEL_MDIOCTRL_MDIOSTS_MASK) { + if (time_before_eq(end, jiffies)) { + WARN_ON(1); +@@ -736,17 +748,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) + * MDIO Address register. Set the Status bit in the MDIO Control + * register to start a MDIO read transaction. + */ +- ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); +- __raw_writel(XEL_MDIOADDR_OP_MASK | +- ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), +- lp->base_addr + XEL_MDIOADDR_OFFSET); +- __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, +- lp->base_addr + XEL_MDIOCTRL_OFFSET); ++ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); ++ xemaclite_writel(XEL_MDIOADDR_OP_MASK | ++ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), ++ lp->base_addr + XEL_MDIOADDR_OFFSET); ++ xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, ++ lp->base_addr + XEL_MDIOCTRL_OFFSET); + + if (xemaclite_mdio_wait(lp)) + return -ETIMEDOUT; + +- rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); ++ rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET); + + dev_dbg(&lp->ndev->dev, + "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", +@@ -783,13 +795,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, + * Data register. Finally, set the Status bit in the MDIO Control + * register to start a MDIO write transaction. + */ +- ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); +- __raw_writel(~XEL_MDIOADDR_OP_MASK & +- ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), +- lp->base_addr + XEL_MDIOADDR_OFFSET); +- __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); +- __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, +- lp->base_addr + XEL_MDIOCTRL_OFFSET); ++ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); ++ xemaclite_writel(~XEL_MDIOADDR_OP_MASK & ++ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), ++ lp->base_addr + XEL_MDIOADDR_OFFSET); ++ xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); ++ xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, ++ lp->base_addr + XEL_MDIOCTRL_OFFSET); + + return 0; + } +@@ -836,8 +848,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) + /* Enable the MDIO bus by asserting the enable bit in MDIO Control + * register. + */ +- __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, +- lp->base_addr + XEL_MDIOCTRL_OFFSET); ++ xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK, ++ lp->base_addr + XEL_MDIOCTRL_OFFSET); + + bus = mdiobus_alloc(); + if (!bus) { +@@ -1141,8 +1153,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) + dev_warn(dev, "No MAC address found\n"); + + /* Clear the Tx CSR's in case this is a restart */ +- __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); +- __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); ++ xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET); ++ xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); + + /* Set the MAC address in the EmacLite device */ + xemaclite_update_address(lp, ndev->dev_addr); +diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c +index 85828f153445..0758d0816840 100644 +--- a/drivers/net/hamradio/mkiss.c ++++ b/drivers/net/hamradio/mkiss.c +@@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev) + { + /* Finish setting up the DEVICE info. 
*/ + dev->mtu = AX_MTU; +- dev->hard_header_len = 0; +- dev->addr_len = 0; ++ dev->hard_header_len = AX25_MAX_HEADER_LEN; ++ dev->addr_len = AX25_ADDR_LEN; + dev->type = ARPHRD_AX25; + dev->tx_queue_len = 10; + dev->header_ops = &ax25_header_ops; +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c +index fbb1867ff25c..1c27e6fb99f9 100644 +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -1851,6 +1851,9 @@ static int r8152_poll(struct napi_struct *napi, int budget) + napi_complete(napi); + if (!list_empty(&tp->rx_done)) + napi_schedule(napi); ++ else if (!skb_queue_empty(&tp->tx_queue) && ++ !list_empty(&tp->tx_free)) ++ napi_schedule(napi); + } + + return work_done; +@@ -2990,10 +2993,13 @@ static void set_carrier(struct r8152 *tp) + if (!netif_carrier_ok(netdev)) { + tp->rtl_ops.enable(tp); + set_bit(RTL8152_SET_RX_MODE, &tp->flags); ++ netif_stop_queue(netdev); + napi_disable(&tp->napi); + netif_carrier_on(netdev); + rtl_start_rx(tp); + napi_enable(&tp->napi); ++ netif_wake_queue(netdev); ++ netif_info(tp, link, netdev, "carrier on\n"); + } + } else { + if (netif_carrier_ok(netdev)) { +@@ -3001,6 +3007,7 @@ static void set_carrier(struct r8152 *tp) + napi_disable(&tp->napi); + tp->rtl_ops.disable(tp); + napi_enable(&tp->napi); ++ netif_info(tp, link, netdev, "carrier off\n"); + } + } + } +@@ -3385,12 +3392,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf) + if (!netif_running(netdev)) + return 0; + ++ netif_stop_queue(netdev); + napi_disable(&tp->napi); + clear_bit(WORK_ENABLE, &tp->flags); + usb_kill_urb(tp->intr_urb); + cancel_delayed_work_sync(&tp->schedule); + if (netif_carrier_ok(netdev)) { +- netif_stop_queue(netdev); + mutex_lock(&tp->control); + tp->rtl_ops.disable(tp); + mutex_unlock(&tp->control); +@@ -3415,12 +3422,14 @@ static int rtl8152_post_reset(struct usb_interface *intf) + if (netif_carrier_ok(netdev)) { + mutex_lock(&tp->control); + tp->rtl_ops.enable(tp); ++ rtl_start_rx(tp); + rtl8152_set_rx_mode(netdev); + mutex_unlock(&tp->control); +- netif_wake_queue(netdev); + } + + napi_enable(&tp->napi); ++ netif_wake_queue(netdev); ++ usb_submit_urb(tp->intr_urb, GFP_KERNEL); + + return 0; + } +diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c +index a251588762ec..0b5a84c9022c 100644 +--- a/drivers/net/usb/sierra_net.c ++++ b/drivers/net/usb/sierra_net.c +@@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0); + /* Private data structure */ + struct sierra_net_data { + +- u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */ +- + u16 link_up; /* air link up or down */ + u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ + +@@ -122,6 +120,7 @@ struct param { + + /* LSI Protocol types */ + #define SIERRA_NET_PROTOCOL_UMTS 0x01 ++#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04 + /* LSI Coverage */ + #define SIERRA_NET_COVERAGE_NONE 0x00 + #define SIERRA_NET_COVERAGE_NOPACKET 0x01 +@@ -129,7 +128,8 @@ struct param { + /* LSI Session */ + #define SIERRA_NET_SESSION_IDLE 0x00 + /* LSI Link types */ +-#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 ++#define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00 ++#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02 + + struct lsi_umts { + u8 protocol; +@@ -137,9 +137,14 @@ struct lsi_umts { + __be16 length; + /* eventually use a union for the rest - assume umts for now */ + u8 coverage; +- u8 unused2[41]; ++ u8 network_len; /* network name len */ ++ u8 network[40]; /* network name (UCS2, bigendian) */ + u8 session_state; + u8 unused3[33]; ++} __packed; ++ ++struct 
lsi_umts_single { ++ struct lsi_umts lsi; + u8 link_type; + u8 pdp_addr_len; /* NW-supplied PDP address len */ + u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */ +@@ -158,10 +163,31 @@ struct lsi_umts { + u8 reserved[8]; + } __packed; + ++struct lsi_umts_dual { ++ struct lsi_umts lsi; ++ u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */ ++ u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */ ++ u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */ ++ u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */ ++ u8 unused4[23]; ++ u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */ ++ u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */ ++ u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */ ++ u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/ ++ u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */ ++ u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */ ++ u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */ ++ u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/ ++ u8 unused5[68]; ++} __packed; ++ + #define SIERRA_NET_LSI_COMMON_LEN 4 +-#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) ++#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single)) + #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ + (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) ++#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual)) ++#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \ ++ (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN) + + /* Forward definitions */ + static void sierra_sync_timer(unsigned long syncdata); +@@ -191,10 +217,11 @@ static inline void sierra_net_set_private(struct usbnet *dev, + dev->data[0] = (unsigned long)priv; + } + +-/* is packet IPv4 */ ++/* is packet IPv4/IPv6 */ + static inline int is_ip(struct sk_buff *skb) + { +- return skb->protocol == cpu_to_be16(ETH_P_IP); ++ return skb->protocol == cpu_to_be16(ETH_P_IP) || ++ skb->protocol == cpu_to_be16(ETH_P_IPV6); + } + + /* +@@ -350,49 +377,54 @@ static inline int sierra_net_is_valid_addrlen(u8 len) + static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) + { + struct lsi_umts *lsi = (struct lsi_umts *)data; ++ u32 expected_length; + +- if (datalen < sizeof(struct lsi_umts)) { +- netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", +- __func__, datalen, +- sizeof(struct lsi_umts)); ++ if (datalen < sizeof(struct lsi_umts_single)) { ++ netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n", ++ __func__, datalen, sizeof(struct lsi_umts_single)); + return -1; + } + +- if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) { +- netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", +- __func__, be16_to_cpu(lsi->length), +- (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN); +- return -1; ++ /* Validate the session state */ ++ if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { ++ netdev_err(dev->net, "Session idle, 0x%02x\n", ++ lsi->session_state); ++ return 0; + } + + /* Validate the protocol - only support UMTS for now */ +- if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { ++ if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) { ++ struct lsi_umts_single *single = (struct lsi_umts_single *)lsi; ++ ++ /* Validate the link type */ ++ if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 && ++ single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) { ++ netdev_err(dev->net, "Link type unsupported: 0x%02x\n", ++ single->link_type); ++ return -1; ++ } ++ expected_length = 
SIERRA_NET_LSI_UMTS_STATUS_LEN;
++ } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
++ expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
++ } else {
+ netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
+- lsi->protocol);
++ lsi->protocol);
+ return -1;
+ }
+
+- /* Validate the link type */
+- if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
+- netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+- lsi->link_type);
++ if (be16_to_cpu(lsi->length) != expected_length) {
++ netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
++ __func__, be16_to_cpu(lsi->length), expected_length);
+ return -1;
+ }
+
+ /* Validate the coverage */
+- if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
+- || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
++ if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
++ lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+ netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
+ return 0;
+ }
+
+- /* Validate the session state */
+- if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
+- netdev_err(dev->net, "Session idle, 0x%02x\n",
+- lsi->session_state);
+- return 0;
+- }
+-
+ /* Set link_sense true */
+ return 1;
+ }
+@@ -662,7 +694,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+ u8 numendpoints;
+ u16 fwattr = 0;
+ int status;
+- struct ethhdr *eth;
+ struct sierra_net_data *priv;
+ static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
+ 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
+@@ -700,11 +731,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
+ dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+
+- /* we will have to manufacture ethernet headers, prepare template */
+- eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
+- memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
+- eth->h_proto = cpu_to_be16(ETH_P_IP);
+-
+ /* prepare shutdown message template */
+ memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
+ /* set context index initially to 0 - prepares tx hdr template */
+@@ -833,9 +859,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+
+ skb_pull(skb, hh.hdrlen);
+
+- /* We are going to accept this packet, prepare it */
+- memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
+- ETH_HLEN);
++ /* We are going to accept this packet, prepare it.
++ * In case protocol is IPv6, keep it, otherwise force IPv4.
++ */ ++ skb_reset_mac_header(skb); ++ if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6)) ++ eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP); ++ eth_zero_addr(eth_hdr(skb)->h_source); ++ memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); + + /* Last packet in batch handled by usbnet */ + if (hh.payload_len.word == skb->len) +diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c +index 6e3a60c78873..50f3bb0dd1f1 100644 +--- a/drivers/parport/parport_gsc.c ++++ b/drivers/parport/parport_gsc.c +@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, + p->irq = PARPORT_IRQ_NONE; + } + if (p->irq != PARPORT_IRQ_NONE) { +- printk(", irq %d", p->irq); ++ pr_cont(", irq %d", p->irq); + + if (p->dma == PARPORT_DMA_AUTO) { + p->dma = PARPORT_DMA_NONE; +@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base, + is mandatory (see above) */ + p->dma = PARPORT_DMA_NONE; + +- printk(" ["); +-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}} ++ pr_cont(" ["); ++#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}} + { + int f = 0; + printmode(PCSPP); +@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, + // printmode(DMA); + } + #undef printmode +- printk("]\n"); ++ pr_cont("]\n"); + + if (p->irq != PARPORT_IRQ_NONE) { + if (request_irq (p->irq, parport_irq_handler, +diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c +index 09172043d589..c617ec49e9ed 100644 +--- a/drivers/pinctrl/berlin/berlin-bg4ct.c ++++ b/drivers/pinctrl/berlin/berlin-bg4ct.c +@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = { + BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15, + BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */ + BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */ +- BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */ ++ BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */ + BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18, + BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ + BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ +diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c +index 79bf13f5c0d1..7a8ceb961bb6 100644 +--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c ++++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c +@@ -1185,8 +1185,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, + struct cb_desc *cb_desc, struct sk_buff *skb) + { + struct r8192_priv *priv = rtllib_priv(dev); +- dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len, +- PCI_DMA_TODEVICE); ++ dma_addr_t mapping; + struct tx_fwinfo_8190pci *pTxFwInfo = NULL; + + pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data; +@@ -1197,8 +1196,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, + pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT, + pTxFwInfo->TxRate, cb_desc); + +- if (pci_dma_mapping_error(priv->pdev, mapping)) +- netdev_err(dev, "%s(): DMA Mapping error\n", __func__); + if (cb_desc->bAMPDUEnable) { + pTxFwInfo->AllowAggregation = 1; + pTxFwInfo->RxMF = cb_desc->ampdu_factor; +@@ -1233,6 +1230,14 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, + } + + memset((u8 *)pdesc, 0, 12); ++ ++ mapping = pci_map_single(priv->pdev, skb->data, skb->len, ++ PCI_DMA_TODEVICE); ++ if (pci_dma_mapping_error(priv->pdev, mapping)) { ++ 
netdev_err(dev, "%s(): DMA Mapping error\n", __func__); ++ return; ++ } ++ + pdesc->LINIP = 0; + pdesc->CmdInit = 1; + pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8; +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 156bc18eac69..53a827c6d8b1 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -412,6 +412,9 @@ cifs_reconnect(struct TCP_Server_Info *server) + } + } while (server->tcpStatus == CifsNeedReconnect); + ++ if (server->tcpStatus == CifsNeedNegotiate) ++ mod_delayed_work(cifsiod_wq, &server->echo, 0); ++ + return rc; + } + +@@ -421,18 +424,27 @@ cifs_echo_request(struct work_struct *work) + int rc; + struct TCP_Server_Info *server = container_of(work, + struct TCP_Server_Info, echo.work); ++ unsigned long echo_interval; ++ ++ /* ++ * If we need to renegotiate, set echo interval to zero to ++ * immediately call echo service where we can renegotiate. ++ */ ++ if (server->tcpStatus == CifsNeedNegotiate) ++ echo_interval = 0; ++ else ++ echo_interval = SMB_ECHO_INTERVAL; + + /* +- * We cannot send an echo if it is disabled or until the +- * NEGOTIATE_PROTOCOL request is done, which is indicated by +- * server->ops->need_neg() == true. Also, no need to ping if +- * we got a response recently. ++ * We cannot send an echo if it is disabled. ++ * Also, no need to ping if we got a response recently. + */ + + if (server->tcpStatus == CifsNeedReconnect || +- server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew || ++ server->tcpStatus == CifsExiting || ++ server->tcpStatus == CifsNew || + (server->ops->can_echo && !server->ops->can_echo(server)) || +- time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) ++ time_before(jiffies, server->lstrp + echo_interval - HZ)) + goto requeue_echo; + + rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS; +diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c +index 4304072161aa..40d61077bead 100644 +--- a/fs/fscache/cookie.c ++++ b/fs/fscache/cookie.c +@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) + hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { + if (invalidate) + set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); ++ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); + fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); + } + } else { +@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) + wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, + TASK_UNINTERRUPTIBLE); + ++ /* Make sure any pending writes are cancelled. 
*/ ++ if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) ++ fscache_invalidate_writes(cookie); ++ + /* Reset the cookie state if it wasn't relinquished */ + if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) { + atomic_inc(&cookie->n_active); +diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c +index 9b28649df3a1..a8aa00be4444 100644 +--- a/fs/fscache/netfs.c ++++ b/fs/fscache/netfs.c +@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs) + cookie->flags = 1 << FSCACHE_COOKIE_ENABLED; + + spin_lock_init(&cookie->lock); ++ spin_lock_init(&cookie->stores_lock); + INIT_HLIST_HEAD(&cookie->backing_objects); + + /* check the netfs type is not already present */ +diff --git a/fs/fscache/object.c b/fs/fscache/object.c +index 9e792e30f4db..7a182c87f378 100644 +--- a/fs/fscache/object.c ++++ b/fs/fscache/object.c +@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object + static const struct fscache_state *fscache_object_available(struct fscache_object *, int); + static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); + static const struct fscache_state *fscache_update_object(struct fscache_object *, int); ++static const struct fscache_state *fscache_object_dead(struct fscache_object *, int); + + #define __STATE_NAME(n) fscache_osm_##n + #define STATE(n) (&__STATE_NAME(n)) +@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure); + static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object); + static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents); + static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object); +-static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL); ++static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead); + + static WAIT_STATE(WAIT_FOR_INIT, "?INI", + TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); +@@ -229,6 +230,10 @@ execute_work_state: + event = -1; + if (new_state == NO_TRANSIT) { + _debug("{OBJ%x} %s notrans", object->debug_id, state->name); ++ if (unlikely(state == STATE(OBJECT_DEAD))) { ++ _leave(" [dead]"); ++ return; ++ } + fscache_enqueue_object(object); + event_mask = object->oob_event_mask; + goto unmask_events; +@@ -239,7 +244,7 @@ execute_work_state: + object->state = state = new_state; + + if (state->work) { +- if (unlikely(state->work == ((void *)2UL))) { ++ if (unlikely(state == STATE(OBJECT_DEAD))) { + _leave(" [dead]"); + return; + } +@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob + fscache_mark_object_dead(object); + object->oob_event_mask = 0; + ++ if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) { ++ /* Reject any new read/write ops and abort any that are pending. */ ++ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); ++ fscache_cancel_all_ops(object); ++ } ++ + if (list_empty(&object->dependents) && + object->n_ops == 0 && + object->n_children == 0) +@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object, + } + } + EXPORT_SYMBOL(fscache_object_mark_killed); ++ ++/* ++ * The object is dead. We can get here if an object gets queued by an event ++ * that would lead to its death (such as EV_KILL) when the dispatcher is ++ * already running (and so can be requeued) but hasn't yet cleared the event ++ * mask. 
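The OBJECT_DEAD rework above replaces the old (void *)2UL sentinel with a genuine state handler: one stray dispatch after death is absorbed, and any further dispatch trips a warning via FSCACHE_OBJECT_RUN_AFTER_DEAD. The underlying one-shot flag idiom can be modelled with C11 atomics; this is a sketch with illustrative names, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag run_after_dead = ATOMIC_FLAG_INIT;

/* The first call after death is absorbed silently (like returning
 * NO_TRANSIT); every later dispatch indicates a bug and is reported. */
static void dispatch_dead_object(void)
{
	if (!atomic_flag_test_and_set(&run_after_dead))
		return; /* first redispatch: tolerated */

	fprintf(stderr, "object redispatched after death\n");
}

int main(void)
{
	dispatch_dead_object(); /* tolerated */
	dispatch_dead_object(); /* warns */
	return 0;
}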
++ */ ++static const struct fscache_state *fscache_object_dead(struct fscache_object *object, ++ int event) ++{ ++ if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD, ++ &object->flags)) ++ return NO_TRANSIT; ++ ++ WARN(true, "FS-Cache object redispatched after death"); ++ return NO_TRANSIT; ++} +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index 82dc3035ea45..e8d1d6c5000c 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1072,6 +1072,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) + case -NFS4ERR_BADXDR: + case -NFS4ERR_RESOURCE: + case -NFS4ERR_NOFILEHANDLE: ++ case -NFS4ERR_MOVED: + /* Non-seqid mutating errors */ + return; + }; +diff --git a/fs/proc/base.c b/fs/proc/base.c +index d2b8c754f627..dd732400578e 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -3058,6 +3058,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) + iter.tgid += 1, iter = next_tgid(ns, iter)) { + char name[PROC_NUMBUF]; + int len; ++ ++ cond_resched(); + if (!has_pid_permissions(ns, iter.task, 2)) + continue; + +diff --git a/fs/romfs/super.c b/fs/romfs/super.c +index 268733cda397..5f4f1882dc7d 100644 +--- a/fs/romfs/super.c ++++ b/fs/romfs/super.c +@@ -74,6 +74,7 @@ + #include + #include + #include ++#include + #include "internal.h" + + static struct kmem_cache *romfs_inode_cachep; +@@ -415,7 +416,22 @@ static void romfs_destroy_inode(struct inode *inode) + static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) + { + struct super_block *sb = dentry->d_sb; +- u64 id = huge_encode_dev(sb->s_bdev->bd_dev); ++ u64 id = 0; ++ ++ /* When calling huge_encode_dev(), ++ * use sb->s_bdev->bd_dev when, ++ * - CONFIG_ROMFS_ON_BLOCK defined ++ * use sb->s_dev when, ++ * - CONFIG_ROMFS_ON_BLOCK undefined and ++ * - CONFIG_ROMFS_ON_MTD defined ++ * leave id as 0 when, ++ * - CONFIG_ROMFS_ON_BLOCK undefined and ++ * - CONFIG_ROMFS_ON_MTD undefined ++ */ ++ if (sb->s_bdev) ++ id = huge_encode_dev(sb->s_bdev->bd_dev); ++ else if (sb->s_dev) ++ id = huge_encode_dev(sb->s_dev); + + buf->f_type = ROMFS_MAGIC; + buf->f_namelen = ROMFS_MAXFN; +@@ -488,6 +504,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent) + sb->s_flags |= MS_RDONLY | MS_NOATIME; + sb->s_op = &romfs_super_ops; + ++#ifdef CONFIG_ROMFS_ON_MTD ++ /* Use same dev ID from the underlying mtdblock device */ ++ if (sb->s_mtd) ++ sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index); ++#endif + /* read the image superblock and check it */ + rsb = kmalloc(512, GFP_KERNEL); + if (!rsb) +diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h +index 604e1526cd00..eb19bf2b2a81 100644 +--- a/include/linux/fscache-cache.h ++++ b/include/linux/fscache-cache.h +@@ -360,6 +360,7 @@ struct fscache_object { + #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ + #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ + #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ ++#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */ + + struct list_head cache_link; /* link in cache->object_list */ + struct hlist_node cookie_link; /* link in cookie->backing_objects */ +diff --git a/include/linux/log2.h b/include/linux/log2.h +index f38fae23bdac..c373295f359f 100644 +--- a/include/linux/log2.h ++++ b/include/linux/log2.h +@@ -194,6 +194,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n) + * ... and so on. 
+ */ + +-#define order_base_2(n) ilog2(roundup_pow_of_two(n)) ++static inline __attribute_const__ ++int __order_base_2(unsigned long n) ++{ ++ return n > 1 ? ilog2(n - 1) + 1 : 0; ++} + ++#define order_base_2(n) \ ++( \ ++ __builtin_constant_p(n) ? ( \ ++ ((n) == 0 || (n) == 1) ? 0 : \ ++ ilog2((n) - 1) + 1) : \ ++ __order_base_2(n) \ ++) + #endif /* _LINUX_LOG2_H */ +diff --git a/include/net/ipv6.h b/include/net/ipv6.h +index ad1d6039185d..7a8066b90289 100644 +--- a/include/net/ipv6.h ++++ b/include/net/ipv6.h +@@ -744,6 +744,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, + { + u32 hash; + ++ /* @flowlabel may include more than a flow label, eg, the traffic class. ++ * Here we want only the flow label value. ++ */ ++ flowlabel &= IPV6_FLOWLABEL_MASK; ++ + if (flowlabel || + net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || + (!autolabel && +diff --git a/mm/kasan/report.c b/mm/kasan/report.c +index 12f222d0224b..b4e31f78ae69 100644 +--- a/mm/kasan/report.c ++++ b/mm/kasan/report.c +@@ -13,6 +13,7 @@ + * + */ + ++#include + #include + #include + #include +@@ -251,6 +252,8 @@ void kasan_report(unsigned long addr, size_t size, + if (likely(!kasan_report_enabled())) + return; + ++ disable_trace_on_warning(); ++ + info.access_addr = (void *)addr; + info.access_size = size; + info.is_write = is_write; +diff --git a/net/core/ethtool.c b/net/core/ethtool.c +index 29edf74846fc..b6bca625b0d2 100644 +--- a/net/core/ethtool.c ++++ b/net/core/ethtool.c +@@ -886,9 +886,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) + if (regs.len > reglen) + regs.len = reglen; + +- regbuf = vzalloc(reglen); +- if (reglen && !regbuf) +- return -ENOMEM; ++ regbuf = NULL; ++ if (reglen) { ++ regbuf = vzalloc(reglen); ++ if (!regbuf) ++ return -ENOMEM; ++ } + + ops->get_regs(dev, ®s, regbuf); + +diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c +index 59b3e0e8fd51..711b4dfa17c3 100644 +--- a/net/ipv4/arp.c ++++ b/net/ipv4/arp.c +@@ -1250,7 +1250,7 @@ void __init arp_init(void) + /* + * ax25 -> ASCII conversion + */ +-static char *ax2asc2(ax25_address *a, char *buf) ++static void ax2asc2(ax25_address *a, char *buf) + { + char c, *s; + int n; +@@ -1272,10 +1272,10 @@ static char *ax2asc2(ax25_address *a, char *buf) + *s++ = n + '0'; + *s++ = '\0'; + +- if (*buf == '\0' || *buf == '-') +- return "*"; +- +- return buf; ++ if (*buf == '\0' || *buf == '-') { ++ buf[0] = '*'; ++ buf[1] = '\0'; ++ } + } + #endif /* CONFIG_AX25 */ + +@@ -1309,7 +1309,7 @@ static void arp_format_neigh_entry(struct seq_file *seq, + } + #endif + sprintf(tbuf, "%pI4", n->primary_key); +- seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", ++ seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n", + tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); + read_unlock(&n->lock); + } +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 7090fef372cc..8cf3fc7c2932 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -3211,9 +3211,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, + } + + if (idev) { +- if (idev->if_flags & IF_READY) +- /* device is already configured. 
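The order_base_2() rework above makes the helper well-defined for n == 0 and n == 1 (both yield 0) and keeps it usable in constant expressions, instead of detouring through roundup_pow_of_two(). A userspace model of the new semantics, assuming a GCC-style __builtin_clzl() is available:

#include <stdio.h>

/* Userspace model of the reworked helper: the smallest order such that
 * (1UL << order) >= n, with order_base_2(0) == order_base_2(1) == 0. */
static int ilog2_ul(unsigned long n)
{
	return (int)(sizeof(unsigned long) * 8 - 1) - __builtin_clzl(n);
}

static int order_base_2(unsigned long n)
{
	return n > 1 ? ilog2_ul(n - 1) + 1 : 0;
}

int main(void)
{
	unsigned long tests[] = { 0, 1, 2, 3, 4, 5, 8, 9 };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("order_base_2(%lu) = %d\n", tests[i],
		       order_base_2(tests[i]));
	/* expected: 0 0 1 2 2 3 3 4 */
	return 0;
}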
*/ ++ if (idev->if_flags & IF_READY) { ++ /* device is already configured - ++ * but resend MLD reports, we might ++ * have roamed and need to update ++ * multicast snooping switches ++ */ ++ ipv6_mc_up(idev); + break; ++ } + idev->if_flags |= IF_READY; + } + +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c +index 428162155280..cae37bfd12ab 100644 +--- a/net/ipv6/datagram.c ++++ b/net/ipv6/datagram.c +@@ -76,18 +76,22 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a + } + } + +- addr_type = ipv6_addr_type(&usin->sin6_addr); +- +- if (addr_type == IPV6_ADDR_ANY) { ++ if (ipv6_addr_any(&usin->sin6_addr)) { + /* + * connect to self + */ +- usin->sin6_addr.s6_addr[15] = 0x01; ++ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) ++ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), ++ &usin->sin6_addr); ++ else ++ usin->sin6_addr = in6addr_loopback; + } + ++ addr_type = ipv6_addr_type(&usin->sin6_addr); ++ + daddr = &usin->sin6_addr; + +- if (addr_type == IPV6_ADDR_MAPPED) { ++ if (addr_type & IPV6_ADDR_MAPPED) { + struct sockaddr_in sin; + + if (__ipv6_only_sock(sk)) { +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 1db17efe36c1..19c0d67ce8c4 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1004,6 +1004,9 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, + } + } + #endif ++ if (ipv6_addr_v4mapped(&fl6->saddr) && ++ !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) ++ return -EAFNOSUPPORT; + + return 0; + +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 8e958fde6e4b..59c908ff251a 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -149,8 +149,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, + * connect() to INADDR_ANY means loopback (BSD'ism). 
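The __ip6_datagram_connect() change above does two things: connect-to-:: now resolves to a loopback address of the same flavour as the socket's source (v4-mapped sockets get ::ffff:127.0.0.1, native v6 sockets get ::1), and the mapped-address test uses `addr_type & IPV6_ADDR_MAPPED` rather than an exact compare. The address selection can be sketched in userspace with standard netinet/in.h macros; pick_loopback() is an illustrative name:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* Choose the destination for connect() to :: the way the patched
 * __ip6_datagram_connect() does: stay v4-mapped if the socket is. */
static struct in6_addr pick_loopback(const struct in6_addr *rcv_saddr)
{
	struct in6_addr dst;

	if (IN6_IS_ADDR_V4MAPPED(rcv_saddr)) {
		memset(&dst, 0, sizeof(dst));
		dst.s6_addr[10] = 0xff;
		dst.s6_addr[11] = 0xff;
		dst.s6_addr[12] = 127;
		dst.s6_addr[15] = 1;    /* ::ffff:127.0.0.1 */
	} else {
		dst = in6addr_loopback; /* ::1 */
	}
	return dst;
}

int main(void)
{
	char buf[INET6_ADDRSTRLEN];
	struct in6_addr any = IN6ADDR_ANY_INIT;
	struct in6_addr dst = pick_loopback(&any);

	inet_ntop(AF_INET6, &dst, buf, sizeof(buf));
	printf("loopback chosen: %s\n", buf);
	return 0;
}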
+ */ + +- if (ipv6_addr_any(&usin->sin6_addr)) +- usin->sin6_addr.s6_addr[15] = 0x1; ++ if (ipv6_addr_any(&usin->sin6_addr)) { ++ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) ++ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), ++ &usin->sin6_addr); ++ else ++ usin->sin6_addr = in6addr_loopback; ++ } + + addr_type = ipv6_addr_type(&usin->sin6_addr); + +@@ -189,7 +194,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, + * TCP over IPv4 + */ + +- if (addr_type == IPV6_ADDR_MAPPED) { ++ if (addr_type & IPV6_ADDR_MAPPED) { + u32 exthdrlen = icsk->icsk_ext_hdr_len; + struct sockaddr_in sin; + +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c +index dfa85e7264df..6fd4af3b5b79 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -1136,6 +1136,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + if (addr_len < SIN6_LEN_RFC2133) + return -EINVAL; + daddr = &sin6->sin6_addr; ++ if (ipv6_addr_any(daddr) && ++ ipv6_addr_v4mapped(&np->saddr)) ++ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), ++ daddr); + break; + case AF_INET: + goto do_udp_sendmsg; +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index c96d666cef29..956141b71619 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, + sctp_assoc_t id) + { + struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; +- struct sctp_transport *transport; ++ struct sctp_af *af = sctp_get_af_specific(addr->ss_family); + union sctp_addr *laddr = (union sctp_addr *)addr; ++ struct sctp_transport *transport; ++ ++ if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) ++ return NULL; + + addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, + laddr, +diff --git a/net/tipc/server.c b/net/tipc/server.c +index 922e04a43396..50f5b0ca7b3c 100644 +--- a/net/tipc/server.c ++++ b/net/tipc/server.c +@@ -452,6 +452,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, + if (!con) + return -EINVAL; + ++ if (!test_bit(CF_CONNECTED, &con->flags)) { ++ conn_put(con); ++ return 0; ++ } ++ + e = tipc_alloc_entry(data, len); + if (!e) { + conn_put(con); +@@ -465,12 +470,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, + list_add_tail(&e->list, &con->outqueue); + spin_unlock_bh(&con->outqueue_lock); + +- if (test_bit(CF_CONNECTED, &con->flags)) { +- if (!queue_work(s->send_wq, &con->swork)) +- conn_put(con); +- } else { ++ if (!queue_work(s->send_wq, &con->swork)) + conn_put(con); +- } + return 0; + } + +@@ -494,7 +495,7 @@ static void tipc_send_to_sock(struct tipc_conn *con) + int ret; + + spin_lock_bh(&con->outqueue_lock); +- while (1) { ++ while (test_bit(CF_CONNECTED, &con->flags)) { + e = list_entry(con->outqueue.next, struct outqueue_entry, + list); + if ((struct list_head *) e == &con->outqueue) diff --git a/patch/kernel/rk3328-default/patch-4.4.73-74.patch b/patch/kernel/rk3328-default/patch-4.4.73-74.patch new file mode 100644 index 000000000..4c15d4deb --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.73-74.patch @@ -0,0 +1,1488 @@ +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index ca64ca566099..7c77d7edb851 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -3580,6 +3580,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + spia_pedr= + spia_peddr= + ++ stack_guard_gap= [MM] ++ override the default stack gap protection. 
The value ++ is in page units and it defines how many pages prior ++ to (for stacks growing down) resp. after (for stacks ++ growing up) the main stack are reserved for no other ++ mapping. Default value is 256 pages. ++ + stacktrace [FTRACE] + Enabled the stack tracer on boot up. + +diff --git a/Makefile b/Makefile +index ba5a70b6e32c..1f75507acbf4 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 73 ++SUBLEVEL = 74 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c +index 2e06d56e987b..cf4ae6958240 100644 +--- a/arch/arc/mm/mmap.c ++++ b/arch/arc/mm/mmap.c +@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c +index 407dc786583a..c469c0665752 100644 +--- a/arch/arm/mm/mmap.c ++++ b/arch/arm/mm/mmap.c +@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c +index 836f14707a62..efa59f1f8022 100644 +--- a/arch/frv/mm/elf-fdpic.c ++++ b/arch/frv/mm/elf-fdpic.c +@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + addr = PAGE_ALIGN(addr); + vma = find_vma(current->mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + goto success; + } + +diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c +index d8f9b357b222..e9fed8ca9b42 100644 +--- a/arch/mips/kernel/branch.c ++++ b/arch/mips/kernel/branch.c +@@ -816,8 +816,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + break; + } + /* Compact branch: BNEZC || JIALC */ +- if (insn.i_format.rs) ++ if (!insn.i_format.rs) { ++ /* JIALC: set $31/ra */ + regs->regs[31] = epc + 4; ++ } + regs->cp0_epc += 8; + break; + #endif +diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c +index 5c81fdd032c3..025cb31aa0a2 100644 +--- a/arch/mips/mm/mmap.c ++++ b/arch/mips/mm/mmap.c +@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c +index 5aba01ac457f..4dda73c44fee 100644 +--- a/arch/parisc/kernel/sys_parisc.c ++++ b/arch/parisc/kernel/sys_parisc.c +@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) + { + struct mm_struct *mm = current->mm; +- struct vm_area_struct *vma; ++ struct vm_area_struct *vma, *prev; + unsigned long task_size = TASK_SIZE; + int do_color_align, last_mmap; + struct vm_unmapped_area_info info; +@@ -115,9 +115,10 @@ 
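For scale: with the documented default of 256 pages and a typical 4 KiB page size, the reserved gap works out to 1 MiB. The quick arithmetic below assumes 4 KiB pages:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096; /* assumed PAGE_SIZE */
	unsigned long gap_pages = 256;  /* default stack_guard_gap */

	printf("guard gap = %lu KiB\n", gap_pages * page_size / 1024);
	/* prints: guard gap = 1024 KiB, i.e. 1 MiB */
	return 0;
}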
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + else + addr = PAGE_ALIGN(addr); + +- vma = find_vma(mm, addr); ++ vma = find_vma_prev(mm, addr, &prev); + if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma)) && ++ (!prev || addr >= vm_end_gap(prev))) + goto found_addr; + } + +@@ -141,7 +142,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) + { +- struct vm_area_struct *vma; ++ struct vm_area_struct *vma, *prev; + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + int do_color_align, last_mmap; +@@ -175,9 +176,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = COLOR_ALIGN(addr, last_mmap, pgoff); + else + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); ++ ++ vma = find_vma_prev(mm, addr, &prev); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma)) && ++ (!prev || addr >= vm_end_gap(prev))) + goto found_addr; + } + +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c +index 0f432a702870..6ad12b244770 100644 +--- a/arch/powerpc/mm/slice.c ++++ b/arch/powerpc/mm/slice.c +@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, + if ((mm->task_size - len) < addr) + return 0; + vma = find_vma(mm, addr); +- return (!vma || (addr + len) <= vma->vm_start); ++ return (!vma || (addr + len) <= vm_start_gap(vma)); + } + + static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) +diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c +index f2b6b1d9c804..126c4a9b9bf9 100644 +--- a/arch/s390/mm/mmap.c ++++ b/arch/s390/mm/mmap.c +@@ -97,7 +97,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +@@ -135,7 +135,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c +index 6777177807c2..7df7d5944188 100644 +--- a/arch/sh/mm/mmap.c ++++ b/arch/sh/mm/mmap.c +@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c +index c690c8e16a96..7f0f7c01b297 100644 +--- a/arch/sparc/kernel/sys_sparc_64.c ++++ b/arch/sparc/kernel/sys_sparc_64.c +@@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + + vma = find_vma(mm, addr); + if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ 
(!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +@@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + + vma = find_vma(mm, addr); + if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c +index da1142401bf4..ffa842b4d7d4 100644 +--- a/arch/sparc/mm/hugetlbpage.c ++++ b/arch/sparc/mm/hugetlbpage.c +@@ -115,7 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + addr = ALIGN(addr, HPAGE_SIZE); + vma = find_vma(mm, addr); + if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) +diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c +index c034dc3fe2d4..c97ee6c7f949 100644 +--- a/arch/tile/mm/hugetlbpage.c ++++ b/arch/tile/mm/hugetlbpage.c +@@ -232,7 +232,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + if (current->mm->get_unmapped_area == arch_get_unmapped_area) +diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c +index 10e0272d789a..136ad7c1ce7b 100644 +--- a/arch/x86/kernel/sys_x86_64.c ++++ b/arch/x86/kernel/sys_x86_64.c +@@ -143,7 +143,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (end - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +@@ -186,7 +186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c +index 42982b26e32b..39bdaf3ac44a 100644 +--- a/arch/x86/mm/hugetlbpage.c ++++ b/arch/x86/mm/hugetlbpage.c +@@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) +diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c +index 47b6436e41c2..3686a1db25b2 100644 +--- a/arch/x86/mm/numa_32.c ++++ b/arch/x86/mm/numa_32.c +@@ -100,5 +100,6 @@ void __init initmem_init(void) + printk(KERN_DEBUG "High memory starts at vaddr %08lx\n", + (ulong) pfn_to_kaddr(highstart_pfn)); + ++ __vmalloc_start_set = true; + setup_bootmem_allocator(); + } +diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c +index 83cf49685373..3aaaae18417c 100644 +--- a/arch/xtensa/kernel/syscall.c ++++ b/arch/xtensa/kernel/syscall.c +@@ -87,7 +87,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + /* At this point: (!vmm || addr < vmm->vm_end). 
*/ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vmm || addr + len <= vmm->vm_start) ++ if (!vmm || addr + len <= vm_start_gap(vmm)) + return addr; + addr = vmm->vm_end; + if (flags & MAP_SHARED) +diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c +index 1fa1deb6e91f..c395f9198fd2 100644 +--- a/drivers/cpufreq/cpufreq_conservative.c ++++ b/drivers/cpufreq/cpufreq_conservative.c +@@ -212,8 +212,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf, + int ret; + ret = sscanf(buf, "%u", &input); + +- /* cannot be lower than 11 otherwise freq will not fall */ +- if (ret != 1 || input < 11 || input > 100 || ++ /* cannot be lower than 1 otherwise freq will not fall */ ++ if (ret != 1 || input < 1 || input > 100 || + input >= cs_tuners->up_threshold) + return -EINVAL; + +diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c +index 9e6d1cdb7fcd..420478924a0c 100644 +--- a/drivers/iio/proximity/as3935.c ++++ b/drivers/iio/proximity/as3935.c +@@ -263,8 +263,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private) + + static void calibrate_as3935(struct as3935_state *st) + { +- mutex_lock(&st->lock); +- + /* mask disturber interrupt bit */ + as3935_write(st, AS3935_INT, BIT(5)); + +@@ -274,8 +272,6 @@ static void calibrate_as3935(struct as3935_state *st) + + mdelay(2); + as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); +- +- mutex_unlock(&st->lock); + } + + #ifdef CONFIG_PM_SLEEP +@@ -312,6 +308,8 @@ static int as3935_resume(struct device *dev) + val &= ~AS3935_AFE_PWR_BIT; + ret = as3935_write(st, AS3935_AFE_GAIN, val); + ++ calibrate_as3935(st); ++ + err_resume: + mutex_unlock(&st->lock); + +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c +index e1907cd0c3b7..7613d1fee104 100644 +--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c ++++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c +@@ -123,15 +123,10 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw) + memset(&tvdata,0,sizeof(tvdata)); + + eeprom = pvr2_eeprom_fetch(hdw); +- if (!eeprom) return -EINVAL; +- +- { +- struct i2c_client fake_client; +- /* Newer version expects a useless client interface */ +- fake_client.addr = hdw->eeprom_addr; +- fake_client.adapter = &hdw->i2c_adap; +- tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom); +- } ++ if (!eeprom) ++ return -EINVAL; ++ ++ tveeprom_hauppauge_analog(NULL, &tvdata, eeprom); + + trace_eeprom("eeprom assumed v4l tveeprom module"); + trace_eeprom("eeprom direct call results:"); +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c +index 47f37683893a..3dc9ed2e0774 100644 +--- a/drivers/media/v4l2-core/videobuf2-core.c ++++ b/drivers/media/v4l2-core/videobuf2-core.c +@@ -793,7 +793,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs); + */ + void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) + { +- if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) ++ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) + return NULL; + + return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); +diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c +index c30290f33430..fe51e9709210 100644 +--- a/drivers/mfd/omap-usb-tll.c ++++ b/drivers/mfd/omap-usb-tll.c +@@ -375,8 +375,8 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata) + * and use SDR Mode + */ + reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE +- | 
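The vb2_plane_vaddr() change above is a plain off-by-one: a buffer with num_planes planes has valid indices 0..num_planes-1, so the out-of-range test must be `plane_no >= vb->num_planes`, not `>`. Reduced to its essence:

#include <stdio.h>

#define NUM_PLANES 2

/* Valid plane indices are 0 .. NUM_PLANES - 1. */
static int plane_valid(unsigned int plane_no)
{
	return plane_no < NUM_PLANES; /* equivalent to rejecting >= */
}

int main(void)
{
	for (unsigned int i = 0; i <= NUM_PLANES; i++)
		printf("plane %u: %s\n", i, plane_valid(i) ? "ok" : "rejected");
	return 0;
}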
OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF + | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); ++ reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF; + } else if (pdata->port_mode[i] == + OMAP_EHCI_PORT_MODE_HSIC) { + /* +diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c +index 5484301d57d9..3dc61ea7dc64 100644 +--- a/drivers/misc/c2port/c2port-duramar2150.c ++++ b/drivers/misc/c2port/c2port-duramar2150.c +@@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void) + + duramar2150_c2port_dev = c2port_device_register("uc", + &duramar2150_c2port_ops, NULL); +- if (!duramar2150_c2port_dev) { +- ret = -ENODEV; ++ if (IS_ERR(duramar2150_c2port_dev)) { ++ ret = PTR_ERR(duramar2150_c2port_dev); + goto free_region; + } + +diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c +index cbc99d5649af..ae5709354546 100644 +--- a/drivers/net/can/usb/gs_usb.c ++++ b/drivers/net/can/usb/gs_usb.c +@@ -246,6 +246,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev) + sizeof(*dm), + 1000); + ++ kfree(dm); ++ + return rc; + } + +diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c +index 3cdb40fea5ee..f5cedbbc552a 100644 +--- a/drivers/staging/rtl8188eu/core/rtw_ap.c ++++ b/drivers/staging/rtl8188eu/core/rtw_ap.c +@@ -894,7 +894,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) + return _FAIL; + + +- if (len > MAX_IE_SZ) ++ if (len < 0 || len > MAX_IE_SZ) + return _FAIL; + + pbss_network->IELength = len; +diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c +index 195acc868763..5d476916191b 100644 +--- a/drivers/tty/serial/efm32-uart.c ++++ b/drivers/tty/serial/efm32-uart.c +@@ -27,6 +27,7 @@ + #define UARTn_FRAME 0x04 + #define UARTn_FRAME_DATABITS__MASK 0x000f + #define UARTn_FRAME_DATABITS(n) ((n) - 3) ++#define UARTn_FRAME_PARITY__MASK 0x0300 + #define UARTn_FRAME_PARITY_NONE 0x0000 + #define UARTn_FRAME_PARITY_EVEN 0x0200 + #define UARTn_FRAME_PARITY_ODD 0x0300 +@@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port, + 16 * (4 + (clkdiv >> 6))); + + frame = efm32_uart_read32(efm_port, UARTn_FRAME); +- if (frame & UARTn_FRAME_PARITY_ODD) ++ switch (frame & UARTn_FRAME_PARITY__MASK) { ++ case UARTn_FRAME_PARITY_ODD: + *parity = 'o'; +- else if (frame & UARTn_FRAME_PARITY_EVEN) ++ break; ++ case UARTn_FRAME_PARITY_EVEN: + *parity = 'e'; +- else ++ break; ++ default: + *parity = 'n'; ++ } + + *bits = (frame & UARTn_FRAME_DATABITS__MASK) - + UARTn_FRAME_DATABITS(4) + 4; +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index c3f4f2ab7b33..b403596818db 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -2511,6 +2511,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, + hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), + GFP_KERNEL); + if (!hcd->bandwidth_mutex) { ++ kfree(hcd->address0_mutex); + kfree(hcd); + dev_dbg(dev, "hcd bandwidth mutex alloc failed\n"); + return NULL; +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index b627392ad52a..1d59d489a1ad 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -1318,7 +1318,13 @@ static int hub_configure(struct usb_hub *hub, + if (ret < 0) { + message = "can't read hub descriptor"; + goto fail; +- } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) { ++ } ++ ++ maxchild = USB_MAXCHILDREN; ++ if (hub_is_superspeed(hdev)) ++ maxchild = min_t(unsigned, maxchild, 
USB_SS_MAXPORTS); ++ ++ if (hub->descriptor->bNbrPorts > maxchild) { + message = "hub has too many ports!"; + ret = -ENODEV; + goto fail; +diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c +index 2f1fb7e7aa54..9eba51b92f72 100644 +--- a/drivers/usb/dwc3/dwc3-exynos.c ++++ b/drivers/usb/dwc3/dwc3-exynos.c +@@ -148,7 +148,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev) + exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk"); + if (IS_ERR(exynos->axius_clk)) { + dev_err(dev, "no AXI UpScaler clk specified\n"); +- return -ENODEV; ++ ret = -ENODEV; ++ goto axius_clk_err; + } + clk_prepare_enable(exynos->axius_clk); + } else { +@@ -206,6 +207,7 @@ err3: + regulator_disable(exynos->vdd33); + err2: + clk_disable_unprepare(exynos->axius_clk); ++axius_clk_err: + clk_disable_unprepare(exynos->susp_clk); + clk_disable_unprepare(exynos->clk); + return ret; +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c +index de014436fb22..43ce2cfcdb4d 100644 +--- a/drivers/usb/gadget/legacy/inode.c ++++ b/drivers/usb/gadget/legacy/inode.c +@@ -1676,9 +1676,10 @@ static void + gadgetfs_suspend (struct usb_gadget *gadget) + { + struct dev_data *dev = get_gadget_data (gadget); ++ unsigned long flags; + + INFO (dev, "suspended from state %d\n", dev->state); +- spin_lock (&dev->lock); ++ spin_lock_irqsave(&dev->lock, flags); + switch (dev->state) { + case STATE_DEV_SETUP: // VERY odd... host died?? + case STATE_DEV_CONNECTED: +@@ -1689,7 +1690,7 @@ gadgetfs_suspend (struct usb_gadget *gadget) + default: + break; + } +- spin_unlock (&dev->lock); ++ spin_unlock_irqrestore(&dev->lock, flags); + } + + static struct usb_gadget_driver gadgetfs_driver = { +diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c +index 6610f7a023d3..64f404a1a072 100644 +--- a/drivers/usb/gadget/udc/dummy_hcd.c ++++ b/drivers/usb/gadget/udc/dummy_hcd.c +@@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd) + /* Report reset and disconnect events to the driver */ + if (dum->driver && (disconnect || reset)) { + stop_activity(dum); +- spin_unlock(&dum->lock); + if (reset) + usb_gadget_udc_reset(&dum->gadget, dum->driver); + else + dum->driver->disconnect(&dum->gadget); +- spin_lock(&dum->lock); + } + } else if (dum_hcd->active != dum_hcd->old_active) { +- if (dum_hcd->old_active && dum->driver->suspend) { +- spin_unlock(&dum->lock); ++ if (dum_hcd->old_active && dum->driver->suspend) + dum->driver->suspend(&dum->gadget); +- spin_lock(&dum->lock); +- } else if (!dum_hcd->old_active && dum->driver->resume) { +- spin_unlock(&dum->lock); ++ else if (!dum_hcd->old_active && dum->driver->resume) + dum->driver->resume(&dum->gadget); +- spin_lock(&dum->lock); +- } + } + + dum_hcd->old_status = dum_hcd->port_status; +@@ -985,7 +978,9 @@ static int dummy_udc_stop(struct usb_gadget *g) + struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); + struct dummy *dum = dum_hcd->dum; + ++ spin_lock_irq(&dum->lock); + dum->driver = NULL; ++ spin_unlock_irq(&dum->lock); + + return 0; + } +@@ -2011,7 +2006,7 @@ ss_hub_descriptor(struct usb_hub_descriptor *desc) + HUB_CHAR_COMMON_OCPM); + desc->bNbrPorts = 1; + desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/ +- desc->u.ss.DeviceRemovable = 0xffff; ++ desc->u.ss.DeviceRemovable = 0; + } + + static inline void hub_descriptor(struct usb_hub_descriptor *desc) +@@ -2023,8 +2018,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc) + HUB_CHAR_INDV_PORT_LPSM | + 
HUB_CHAR_COMMON_OCPM); + desc->bNbrPorts = 1; +- desc->u.hs.DeviceRemovable[0] = 0xff; +- desc->u.hs.DeviceRemovable[1] = 0xff; ++ desc->u.hs.DeviceRemovable[0] = 0; ++ desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */ + } + + static int dummy_hub_control( +diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c +index 6706aef907f4..a47de8c31ce9 100644 +--- a/drivers/usb/gadget/udc/net2280.c ++++ b/drivers/usb/gadget/udc/net2280.c +@@ -2425,11 +2425,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) + nuke(&dev->ep[i]); + + /* report disconnect; the driver is already quiesced */ +- if (driver) { +- spin_unlock(&dev->lock); ++ if (driver) + driver->disconnect(&dev->gadget); +- spin_lock(&dev->lock); +- } + + usb_reinit(dev); + } +@@ -3275,8 +3272,6 @@ next_endpoints: + BIT(PCI_RETRY_ABORT_INTERRUPT)) + + static void handle_stat1_irqs(struct net2280 *dev, u32 stat) +-__releases(dev->lock) +-__acquires(dev->lock) + { + struct net2280_ep *ep; + u32 tmp, num, mask, scratch; +@@ -3317,14 +3312,12 @@ __acquires(dev->lock) + if (disconnect || reset) { + stop_activity(dev, dev->driver); + ep0_start(dev); +- spin_unlock(&dev->lock); + if (reset) + usb_gadget_udc_reset + (&dev->gadget, dev->driver); + else + (dev->driver->disconnect) + (&dev->gadget); +- spin_lock(&dev->lock); + return; + } + } +diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c +index 4cbd0633c5c2..a11c2c8bda53 100644 +--- a/drivers/usb/host/r8a66597-hcd.c ++++ b/drivers/usb/host/r8a66597-hcd.c +@@ -1269,7 +1269,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td) + time = 30; + break; + default: +- time = 300; ++ time = 50; + break; + } + +@@ -1785,6 +1785,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597) + pipe = td->pipe; + pipe_stop(r8a66597, pipe); + ++ /* Select a different address or endpoint */ + new_td = td; + do { + list_move_tail(&new_td->queue, +@@ -1794,7 +1795,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597) + new_td = td; + break; + } +- } while (td != new_td && td->address == new_td->address); ++ } while (td != new_td && td->address == new_td->address && ++ td->pipe->info.epnum == new_td->pipe->info.epnum); + + start_transfer(r8a66597, new_td); + +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 30c4ae80c8f9..e8f990642281 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -198,6 +198,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == 0x1042) + xhci->quirks |= XHCI_BROKEN_STREAMS; ++ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && ++ pdev->device == 0x1142) ++ xhci->quirks |= XHCI_TRUST_TX_LENGTH; + + if (xhci->quirks & XHCI_RESET_ON_RESUME) + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, +diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c +index ec5c8325b503..0525ebc3aea2 100644 +--- a/fs/configfs/symlink.c ++++ b/fs/configfs/symlink.c +@@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item, + ret = -ENOMEM; + sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL); + if (sl) { +- sl->sl_target = config_item_get(item); + spin_lock(&configfs_dirent_lock); + if (target_sd->s_type & CONFIGFS_USET_DROPPING) { + spin_unlock(&configfs_dirent_lock); +- config_item_put(item); + kfree(sl); + return -ENOENT; + } ++ sl->sl_target = config_item_get(item); + list_add(&sl->sl_list, &target_sd->s_links); + 
spin_unlock(&configfs_dirent_lock); + ret = configfs_create_link(sl, parent_item->ci_dentry, +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 595ebdb41846..a17da8b57fc6 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -191,7 +191,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index db1a1427c27a..07ef85e19fbc 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -295,11 +295,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) + + /* We don't show the stack guard page in /proc/maps */ + start = vma->vm_start; +- if (stack_guard_page_start(vma, start)) +- start += PAGE_SIZE; + end = vma->vm_end; +- if (stack_guard_page_end(vma, end)) +- end -= PAGE_SIZE; + + seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); + seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", +diff --git a/include/linux/mm.h b/include/linux/mm.h +index f0ffa01c90d9..55f950afb60d 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -1278,39 +1278,11 @@ int clear_page_dirty_for_io(struct page *page); + + int get_cmdline(struct task_struct *task, char *buffer, int buflen); + +-/* Is the vma a continuation of the stack vma above it? */ +-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); +-} +- + static inline bool vma_is_anonymous(struct vm_area_struct *vma) + { + return !vma->vm_ops; + } + +-static inline int stack_guard_page_start(struct vm_area_struct *vma, +- unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSDOWN) && +- (vma->vm_start == addr) && +- !vma_growsdown(vma->vm_prev, addr); +-} +- +-/* Is the vma a continuation of the stack vma below it? 
*/ +-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); +-} +- +-static inline int stack_guard_page_end(struct vm_area_struct *vma, +- unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSUP) && +- (vma->vm_end == addr) && +- !vma_growsup(vma->vm_next, addr); +-} +- + int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t); + + extern unsigned long move_page_tables(struct vm_area_struct *vma, +@@ -2012,6 +1984,7 @@ void page_cache_async_readahead(struct address_space *mapping, + pgoff_t offset, + unsigned long size); + ++extern unsigned long stack_guard_gap; + /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ + extern int expand_stack(struct vm_area_struct *vma, unsigned long address); + +@@ -2040,6 +2013,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m + return vma; + } + ++static inline unsigned long vm_start_gap(struct vm_area_struct *vma) ++{ ++ unsigned long vm_start = vma->vm_start; ++ ++ if (vma->vm_flags & VM_GROWSDOWN) { ++ vm_start -= stack_guard_gap; ++ if (vm_start > vma->vm_start) ++ vm_start = 0; ++ } ++ return vm_start; ++} ++ ++static inline unsigned long vm_end_gap(struct vm_area_struct *vma) ++{ ++ unsigned long vm_end = vma->vm_end; ++ ++ if (vma->vm_flags & VM_GROWSUP) { ++ vm_end += stack_guard_gap; ++ if (vm_end < vma->vm_end) ++ vm_end = -PAGE_SIZE; ++ } ++ return vm_end; ++} ++ + static inline unsigned long vma_pages(struct vm_area_struct *vma) + { + return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; +diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h +index 331499d597fa..9ce10d4a0245 100644 +--- a/include/uapi/linux/usb/ch11.h ++++ b/include/uapi/linux/usb/ch11.h +@@ -22,6 +22,9 @@ + */ + #define USB_MAXCHILDREN 31 + ++/* See USB 3.1 spec Table 10-5 */ ++#define USB_SS_MAXPORTS 15 ++ + /* + * Hub request types + */ +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 6ead200370da..a079ed14f230 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -1287,8 +1287,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) + ret = __irq_set_trigger(desc, + new->flags & IRQF_TRIGGER_MASK); + +- if (ret) ++ if (ret) { ++ irq_release_resources(desc); + goto out_mask; ++ } + } + + desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c +index 7fbba635a549..2c3a23d77704 100644 +--- a/kernel/time/alarmtimer.c ++++ b/kernel/time/alarmtimer.c +@@ -339,7 +339,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start) + { + struct alarm_base *base = &alarm_bases[alarm->type]; + +- start = ktime_add(start, base->gettime()); ++ start = ktime_add_safe(start, base->gettime()); + alarm_start(alarm, start); + } + EXPORT_SYMBOL_GPL(alarm_start_relative); +@@ -425,7 +425,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) + overrun++; + } + +- alarm->node.expires = ktime_add(alarm->node.expires, interval); ++ alarm->node.expires = ktime_add_safe(alarm->node.expires, interval); + return overrun; + } + EXPORT_SYMBOL_GPL(alarm_forward); +@@ -611,13 +611,21 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, + + /* start the timer */ + timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); ++ ++ /* ++ * Rate limit to the tick as a hot fix to prevent DOS. Will be ++ * mopped up later. 
++ */ ++ if (ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC) ++ timr->it.alarm.interval = ktime_set(0, TICK_NSEC); ++ + exp = timespec_to_ktime(new_setting->it_value); + /* Convert (if necessary) to absolute time */ + if (flags != TIMER_ABSTIME) { + ktime_t now; + + now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); +- exp = ktime_add(now, exp); ++ exp = ktime_add_safe(now, exp); + } + + alarm_start(&timr->it.alarm.alarmtimer, exp); +diff --git a/mm/gup.c b/mm/gup.c +index 4b0b7e7d1136..b599526db9f7 100644 +--- a/mm/gup.c ++++ b/mm/gup.c +@@ -312,11 +312,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, + /* mlock all present pages, but do not fault in new pages */ + if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) + return -ENOENT; +- /* For mm_populate(), just skip the stack guard page. */ +- if ((*flags & FOLL_POPULATE) && +- (stack_guard_page_start(vma, address) || +- stack_guard_page_end(vma, address + PAGE_SIZE))) +- return -ENOENT; + if (*flags & FOLL_WRITE) + fault_flags |= FAULT_FLAG_WRITE; + if (nonblocking) +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 43aee7ab143e..091fe9b06663 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1208,7 +1208,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) + * page_remove_rmap() in try_to_unmap_one(). So to determine page status + * correctly, we save a copy of the page flags at this time. + */ +- page_flags = p->flags; ++ if (PageHuge(p)) ++ page_flags = hpage->flags; ++ else ++ page_flags = p->flags; + + /* + * unpoison always clear PG_hwpoison inside page lock +diff --git a/mm/memory.c b/mm/memory.c +index 76dcee317714..e6fa13484447 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -2662,40 +2662,6 @@ out_release: + } + + /* +- * This is like a special single-page "expand_{down|up}wards()", +- * except we must first make sure that 'address{-|+}PAGE_SIZE' +- * doesn't hit another vma. +- */ +-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) +-{ +- address &= PAGE_MASK; +- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { +- struct vm_area_struct *prev = vma->vm_prev; +- +- /* +- * Is there a mapping abutting this one below? +- * +- * That's only ok if it's the same stack mapping +- * that has gotten split.. +- */ +- if (prev && prev->vm_end == address) +- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; +- +- return expand_downwards(vma, address - PAGE_SIZE); +- } +- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { +- struct vm_area_struct *next = vma->vm_next; +- +- /* As VM_GROWSDOWN but s/below/above/ */ +- if (next && next->vm_start == address + PAGE_SIZE) +- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; +- +- return expand_upwards(vma, address + PAGE_SIZE); +- } +- return 0; +-} +- +-/* + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. 
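Every arch_get_unmapped_area() hunk in this patch funnels through the two helpers added to include/linux/mm.h: vm_start_gap() and vm_end_gap() extend a VMA by stack_guard_gap on its growable side and clamp when the arithmetic wraps. A standalone userspace model; the struct, flag values, and the 4 KiB page assumption are stand-ins:

#include <stdio.h>

#define VM_GROWSDOWN 0x1
#define VM_GROWSUP   0x2

static unsigned long stack_guard_gap = 256UL << 12; /* 256 pages of 4 KiB */

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
};

/* Lowest address the VMA effectively occupies, including its guard gap. */
static unsigned long vm_start_gap(const struct vma *vma)
{
	unsigned long start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		start -= stack_guard_gap;
		if (start > vma->vm_start) /* underflow wrapped: clamp to 0 */
			start = 0;
	}
	return start;
}

/* Highest address the VMA effectively occupies, including its guard gap. */
static unsigned long vm_end_gap(const struct vma *vma)
{
	unsigned long end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		end += stack_guard_gap;
		if (end < vma->vm_end)     /* overflow wrapped: clamp high */
			end = -4096UL;     /* -PAGE_SIZE, as in the patch */
	}
	return end;
}

int main(void)
{
	struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, VM_GROWSDOWN };

	printf("start with gap: %#lx (raw %#lx)\n",
	       vm_start_gap(&stack), stack.vm_start);
	return 0;
}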
+@@ -2715,10 +2681,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + if (vma->vm_flags & VM_SHARED) + return VM_FAULT_SIGBUS; + +- /* Check if we need to add a guard page to the stack */ +- if (check_stack_guard_page(vma, address) < 0) +- return VM_FAULT_SIGSEGV; +- + /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), +diff --git a/mm/mmap.c b/mm/mmap.c +index 455772a05e54..0990f8bc0fbe 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -288,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) + unsigned long retval; + unsigned long newbrk, oldbrk; + struct mm_struct *mm = current->mm; ++ struct vm_area_struct *next; + unsigned long min_brk; + bool populate; + +@@ -332,7 +333,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) + } + + /* Check against existing mmap mappings. */ +- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) ++ next = find_vma(mm, oldbrk); ++ if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) + goto out; + + /* Ok, looks good - let it rip. */ +@@ -355,10 +357,22 @@ out: + + static long vma_compute_subtree_gap(struct vm_area_struct *vma) + { +- unsigned long max, subtree_gap; +- max = vma->vm_start; +- if (vma->vm_prev) +- max -= vma->vm_prev->vm_end; ++ unsigned long max, prev_end, subtree_gap; ++ ++ /* ++ * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we ++ * allow two stack_guard_gaps between them here, and when choosing ++ * an unmapped area; whereas when expanding we only require one. ++ * That's a little inconsistent, but keeps the code here simpler. ++ */ ++ max = vm_start_gap(vma); ++ if (vma->vm_prev) { ++ prev_end = vm_end_gap(vma->vm_prev); ++ if (max > prev_end) ++ max -= prev_end; ++ else ++ max = 0; ++ } + if (vma->vm_rb.rb_left) { + subtree_gap = rb_entry(vma->vm_rb.rb_left, + struct vm_area_struct, vm_rb)->rb_subtree_gap; +@@ -451,7 +465,7 @@ static void validate_mm(struct mm_struct *mm) + anon_vma_unlock_read(anon_vma); + } + +- highest_address = vma->vm_end; ++ highest_address = vm_end_gap(vma); + vma = vma->vm_next; + i++; + } +@@ -620,7 +634,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, + if (vma->vm_next) + vma_gap_update(vma->vm_next); + else +- mm->highest_vm_end = vma->vm_end; ++ mm->highest_vm_end = vm_end_gap(vma); + + /* + * vma->vm_prev wasn't known when we followed the rbtree to find the +@@ -866,7 +880,7 @@ again: remove_next = 1 + (end > next->vm_end); + vma_gap_update(vma); + if (end_changed) { + if (!next) +- mm->highest_vm_end = end; ++ mm->highest_vm_end = vm_end_gap(vma); + else if (!adjust_next) + vma_gap_update(next); + } +@@ -909,7 +923,7 @@ again: remove_next = 1 + (end > next->vm_end); + else if (next) + vma_gap_update(next); + else +- mm->highest_vm_end = end; ++ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); + } + if (insert && file) + uprobe_mmap(insert); +@@ -1741,7 +1755,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) + + while (true) { + /* Visit left subtree if it looks promising */ +- gap_end = vma->vm_start; ++ gap_end = vm_start_gap(vma); + if (gap_end >= low_limit && vma->vm_rb.rb_left) { + struct vm_area_struct *left = + rb_entry(vma->vm_rb.rb_left, +@@ -1752,12 +1766,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) + } + } + +- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; ++ gap_start = vma->vm_prev ? 
vm_end_gap(vma->vm_prev) : 0; + check_current: + /* Check if current node has a suitable gap */ + if (gap_start > high_limit) + return -ENOMEM; +- if (gap_end >= low_limit && gap_end - gap_start >= length) ++ if (gap_end >= low_limit && ++ gap_end > gap_start && gap_end - gap_start >= length) + goto found; + + /* Visit right subtree if it looks promising */ +@@ -1779,8 +1794,8 @@ check_current: + vma = rb_entry(rb_parent(prev), + struct vm_area_struct, vm_rb); + if (prev == vma->vm_rb.rb_left) { +- gap_start = vma->vm_prev->vm_end; +- gap_end = vma->vm_start; ++ gap_start = vm_end_gap(vma->vm_prev); ++ gap_end = vm_start_gap(vma); + goto check_current; + } + } +@@ -1844,7 +1859,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) + + while (true) { + /* Visit right subtree if it looks promising */ +- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; ++ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; + if (gap_start <= high_limit && vma->vm_rb.rb_right) { + struct vm_area_struct *right = + rb_entry(vma->vm_rb.rb_right, +@@ -1857,10 +1872,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) + + check_current: + /* Check if current node has a suitable gap */ +- gap_end = vma->vm_start; ++ gap_end = vm_start_gap(vma); + if (gap_end < low_limit) + return -ENOMEM; +- if (gap_start <= high_limit && gap_end - gap_start >= length) ++ if (gap_start <= high_limit && ++ gap_end > gap_start && gap_end - gap_start >= length) + goto found; + + /* Visit left subtree if it looks promising */ +@@ -1883,7 +1899,7 @@ check_current: + struct vm_area_struct, vm_rb); + if (prev == vma->vm_rb.rb_right) { + gap_start = vma->vm_prev ? +- vma->vm_prev->vm_end : 0; ++ vm_end_gap(vma->vm_prev) : 0; + goto check_current; + } + } +@@ -1921,7 +1937,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) + { + struct mm_struct *mm = current->mm; +- struct vm_area_struct *vma; ++ struct vm_area_struct *vma, *prev; + struct vm_unmapped_area_info info; + + if (len > TASK_SIZE - mmap_min_addr) +@@ -1932,9 +1948,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); ++ vma = find_vma_prev(mm, addr, &prev); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma)) && ++ (!prev || addr >= vm_end_gap(prev))) + return addr; + } + +@@ -1957,7 +1974,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) + { +- struct vm_area_struct *vma; ++ struct vm_area_struct *vma, *prev; + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + struct vm_unmapped_area_info info; +@@ -1972,9 +1989,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); ++ vma = find_vma_prev(mm, addr, &prev); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && +- (!vma || addr + len <= vma->vm_start)) ++ (!vma || addr + len <= vm_start_gap(vma)) && ++ (!prev || addr >= vm_end_gap(prev))) + return addr; + } + +@@ -2099,21 +2117,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, + * update accounting. This is shared with both the + * grow-up and grow-down cases. 
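/*
 * Sketch of why the gap-search hunks above grow an extra comparison
 * (standalone model, not kernel code): once gap boundaries are padded by
 * vm_start_gap()/vm_end_gap(), two neighbours can "overlap", so gap_end
 * may sit below gap_start. With unsigned arithmetic the old test of
 * gap_end - gap_start >= length would then wrap to a huge value and
 * report a usable hole where none exists; requiring gap_end > gap_start
 * first closes that off.
 */
#include <assert.h>

static int gap_fits(unsigned long gap_start, unsigned long gap_end,
		    unsigned long length)
{
	return gap_end > gap_start && gap_end - gap_start >= length;
}

int main(void)
{
	assert(!gap_fits(0x5000, 0x4000, 0x1000)); /* negative gap: reject */
	assert(gap_fits(0x4000, 0x8000, 0x1000));  /* real 16K hole: accept */
	return 0;
}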
+ */ +-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) ++static int acct_stack_growth(struct vm_area_struct *vma, ++ unsigned long size, unsigned long grow) + { + struct mm_struct *mm = vma->vm_mm; + struct rlimit *rlim = current->signal->rlim; +- unsigned long new_start, actual_size; ++ unsigned long new_start; + + /* address space limit tests */ + if (!may_expand_vm(mm, grow)) + return -ENOMEM; + + /* Stack limit test */ +- actual_size = size; +- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) +- actual_size -= PAGE_SIZE; +- if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) ++ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + return -ENOMEM; + + /* mlock limit tests */ +@@ -2151,16 +2167,32 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + int expand_upwards(struct vm_area_struct *vma, unsigned long address) + { + struct mm_struct *mm = vma->vm_mm; ++ struct vm_area_struct *next; ++ unsigned long gap_addr; + int error = 0; + + if (!(vma->vm_flags & VM_GROWSUP)) + return -EFAULT; + +- /* Guard against wrapping around to address 0. */ +- if (address < PAGE_ALIGN(address+4)) +- address = PAGE_ALIGN(address+4); +- else ++ /* Guard against exceeding limits of the address space. */ ++ address &= PAGE_MASK; ++ if (address >= TASK_SIZE) + return -ENOMEM; ++ address += PAGE_SIZE; ++ ++ /* Enforce stack_guard_gap */ ++ gap_addr = address + stack_guard_gap; ++ ++ /* Guard against overflow */ ++ if (gap_addr < address || gap_addr > TASK_SIZE) ++ gap_addr = TASK_SIZE; ++ ++ next = vma->vm_next; ++ if (next && next->vm_start < gap_addr) { ++ if (!(next->vm_flags & VM_GROWSUP)) ++ return -ENOMEM; ++ /* Check that both stack segments have the same anon_vma? */ ++ } + + /* We must make sure the anon_vma is allocated. */ + if (unlikely(anon_vma_prepare(vma))) +@@ -2206,7 +2238,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) + if (vma->vm_next) + vma_gap_update(vma->vm_next); + else +- mm->highest_vm_end = address; ++ mm->highest_vm_end = vm_end_gap(vma); + spin_unlock(&mm->page_table_lock); + + perf_event_mmap(vma); +@@ -2227,6 +2259,8 @@ int expand_downwards(struct vm_area_struct *vma, + unsigned long address) + { + struct mm_struct *mm = vma->vm_mm; ++ struct vm_area_struct *prev; ++ unsigned long gap_addr; + int error; + + address &= PAGE_MASK; +@@ -2234,6 +2268,17 @@ int expand_downwards(struct vm_area_struct *vma, + if (error) + return error; + ++ /* Enforce stack_guard_gap */ ++ gap_addr = address - stack_guard_gap; ++ if (gap_addr > address) ++ return -ENOMEM; ++ prev = vma->vm_prev; ++ if (prev && prev->vm_end > gap_addr) { ++ if (!(prev->vm_flags & VM_GROWSDOWN)) ++ return -ENOMEM; ++ /* Check that both stack segments have the same anon_vma? */ ++ } ++ + /* We must make sure the anon_vma is allocated. */ + if (unlikely(anon_vma_prepare(vma))) + return -ENOMEM; +@@ -2289,28 +2334,25 @@ int expand_downwards(struct vm_area_struct *vma, + return error; + } + +-/* +- * Note how expand_stack() refuses to expand the stack all the way to +- * abut the next virtual mapping, *unless* that mapping itself is also +- * a stack mapping. We want to leave room for a guard page, after all +- * (the guard page itself is not added here, that is done by the +- * actual page faulting logic) +- * +- * This matches the behavior of the guard page logic (see mm/memory.c: +- * check_stack_guard_page()), which only allows the guard page to be +- * removed under these circumstances. 
+- */
++/* enforced gap between the expanding stack and other mappings. */
++unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+ data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
+ key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
+ michael_mic(key, hdr, data, data_len, mic);
+- if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
++ if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
+ goto mic_fail;
+
+ /* remove Michael MIC from payload */
+@@ -1044,7 +1045,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
+ bip_aad(skb, aad);
+ ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
+ skb->data + 24, skb->len - 24, mic);
+- if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
++ if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
+ key->u.aes_cmac.icverrors++;
+ return RX_DROP_UNUSABLE;
+ }
+@@ -1094,7 +1095,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
+ bip_aad(skb, aad);
+ ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
+ skb->data + 24, skb->len - 24, mic);
+- if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
++ if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
+ key->u.aes_cmac.icverrors++;
+ return RX_DROP_UNUSABLE;
+ }
+@@ -1198,7 +1199,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
+ if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
+ skb->data + 24, skb->len - 24,
+ mic) < 0 ||
+- memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
++ crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
+ key->u.aes_gmac.icverrors++;
+ return RX_DROP_UNUSABLE;
+ }
diff --git a/patch/kernel/rk3328-default/patch-4.4.74-75.patch b/patch/kernel/rk3328-default/patch-4.4.74-75.patch
new file mode 100644
index 000000000..9a62ab113
--- /dev/null
+++ b/patch/kernel/rk3328-default/patch-4.4.74-75.patch
@@ -0,0 +1,996 @@
+diff --git a/Makefile b/Makefile
+index 1f75507acbf4..696d15d8ad5d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 74
++SUBLEVEL = 75
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index 7c053f281406..1138fec3dd65 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -514,6 +514,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+ #endif
+ #endif
+
++ /*
++ * jprobes use jprobe_return() which skips the normal return
++ * path of the function, and this messes up the accounting of the
++ * function graph tracer.
++ *
++ * Pause function graph tracing while performing the jprobe function.
++ */
++ pause_graph_tracing();
++
+ return 1;
+ }
+
+@@ -536,6 +545,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+ * saved regs...
+ */
+ memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
++ /* It's OK to start function graph tracing again */
++ unpause_graph_tracing();
+ preempt_enable_no_resched();
+ return 1;
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 3c3a367b6e59..396dc44e783b 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2693,6 +2693,27 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ return -EINVAL;
+ }
+
++ /*
++ * Don't allow entry with a suspended transaction, because
++ * the guest entry/exit code will lose it.
++ * If the guest has TM enabled, save away their TM-related SPRs
++ * (they will get restored by the TM unavailable interrupt).
++ */ ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM ++ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && ++ (current->thread.regs->msr & MSR_TM)) { ++ if (MSR_TM_ACTIVE(current->thread.regs->msr)) { ++ run->exit_reason = KVM_EXIT_FAIL_ENTRY; ++ run->fail_entry.hardware_entry_failure_reason = 0; ++ return -EINVAL; ++ } ++ current->thread.tm_tfhar = mfspr(SPRN_TFHAR); ++ current->thread.tm_tfiar = mfspr(SPRN_TFIAR); ++ current->thread.tm_texasr = mfspr(SPRN_TEXASR); ++ current->thread.regs->msr &= ~MSR_TM; ++ } ++#endif ++ + kvmppc_core_prepare_to_enter(vcpu); + + /* No need to go into the guest when all we'll do is come back out */ +diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S +index 4c48b487698c..0b48ce40d351 100644 +--- a/arch/powerpc/mm/slb_low.S ++++ b/arch/powerpc/mm/slb_low.S +@@ -179,6 +179,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) + b slb_finish_load + + 8: /* invalid EA */ ++ /* ++ * It's possible the bad EA is too large to fit in the SLB cache, which ++ * would mean we'd fail to invalidate it on context switch. So mark the ++ * SLB cache as full so we force a full flush. We also set cr7+eq to ++ * mark the address as a kernel address, so slb_finish_load() skips ++ * trying to insert it into the SLB cache. ++ */ ++ li r9,SLB_CACHE_ENTRIES + 1 ++ sth r9,PACASLBCACHEPTR(r13) ++ crset 4*cr7+eq + li r10,0 /* BAD_VSID */ + li r9,0 /* BAD_VSID */ + li r11,SLB_VSID_USER /* flags don't much matter */ +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +index 51a9942cdb40..f4cae5357e40 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +@@ -681,6 +681,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) + DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", + adev->clock.default_dispclk / 100); + adev->clock.default_dispclk = 60000; ++ } else if (adev->clock.default_dispclk <= 60000) { ++ DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", ++ adev->clock.default_dispclk / 100); ++ adev->clock.default_dispclk = 62500; + } + adev->clock.dp_extclk = + le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); +diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +index 49aa35016653..247b088990dc 100644 +--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c ++++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +@@ -164,7 +164,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); +- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; ++ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; + + memset(&args, 0, sizeof(args)); + +@@ -177,7 +177,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) + void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) + { + int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); +- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; ++ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; + + memset(&args, 0, sizeof(args)); + +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c +index a9b01bcf7d0a..fcecaf5b5526 100644 +--- a/drivers/gpu/drm/radeon/radeon_combios.c ++++ b/drivers/gpu/drm/radeon/radeon_combios.c +@@ -3394,6 +3394,13 @@ void radeon_combios_asic_init(struct drm_device *dev) + 
rdev->pdev->subsystem_vendor == 0x103c && + rdev->pdev->subsystem_device == 0x280a) + return; ++ /* quirk for rs4xx Toshiba Sattellite L20-183 latop to make it resume ++ * - it hangs on resume inside the dynclk 1 table. ++ */ ++ if (rdev->family == CHIP_RS400 && ++ rdev->pdev->subsystem_vendor == 0x1179 && ++ rdev->pdev->subsystem_device == 0xff31) ++ return; + + /* DYN CLK 1 */ + table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c +index 4aa2cbe4c85f..a77521695c9a 100644 +--- a/drivers/gpu/drm/radeon/radeon_device.c ++++ b/drivers/gpu/drm/radeon/radeon_device.c +@@ -127,6 +127,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { + * https://bugzilla.kernel.org/show_bug.cgi?id=51381 + */ + { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, ++ /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU ++ * https://bugs.freedesktop.org/show_bug.cgi?id=101491 ++ */ ++ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, + /* macbook pro 8.2 */ + { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, + { 0, 0, 0, 0, 0 }, +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index e37030624165..c7f8b70d15ee 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -285,6 +285,9 @@ + #define USB_VENDOR_ID_DEALEXTREAME 0x10c5 + #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a + ++#define USB_VENDOR_ID_DELL 0x413c ++#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a ++ + #define USB_VENDOR_ID_DELORME 0x1163 + #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 + #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c +index 6ca6ab00fa93..ce1543d69acb 100644 +--- a/drivers/hid/usbhid/hid-quirks.c ++++ b/drivers/hid/usbhid/hid-quirks.c +@@ -72,6 +72,7 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, ++ { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL }, +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index e7b96f1ac2c5..5be14ad29d46 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -788,6 +788,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), + }, + }, ++ { ++ /* Fujitsu UH554 laptop */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c +index 37e4135ab213..64d6f053c2a5 100644 +--- a/drivers/mtd/spi-nor/spi-nor.c ++++ b/drivers/mtd/spi-nor/spi-nor.c +@@ -1057,6 +1057,13 @@ static int spansion_quad_enable(struct spi_nor *nor) + return -EINVAL; + } + ++ ret = spi_nor_wait_till_ready(nor); ++ if (ret) { ++ dev_err(nor->dev, ++ "timeout while writing configuration register\n"); ++ return 
ret; ++ } ++ + /* read back and check it */ + ret = read_cr(nor); + if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c +index d2701c53ed68..ebec2dceff45 100644 +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -822,8 +822,6 @@ static int marvell_read_status(struct phy_device *phydev) + phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) | + mii_lpa_to_ethtool_lpa_t(lpa); + +- lpa &= adv; +- + if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) + phydev->duplex = DUPLEX_FULL; + else +diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c +index c0b4e65267af..46fe1ae919a3 100644 +--- a/drivers/net/phy/mdio-bcm-iproc.c ++++ b/drivers/net/phy/mdio-bcm-iproc.c +@@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg) + if (rc) + return rc; + +- iproc_mdio_config_clk(priv->base); +- + /* Prepare the read operation */ + cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | + (reg << MII_DATA_RA_SHIFT) | +@@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id, + if (rc) + return rc; + +- iproc_mdio_config_clk(priv->base); +- + /* Prepare the write operation */ + cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | + (reg << MII_DATA_RA_SHIFT) | +@@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev) + bus->read = iproc_mdio_read; + bus->write = iproc_mdio_write; + ++ iproc_mdio_config_clk(priv->base); ++ + rc = of_mdiobus_register(bus, pdev->dev.of_node); + if (rc) { + dev_err(&pdev->dev, "MDIO bus registration failed\n"); +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h +index 044253dca30a..b8a5a8e8f57d 100644 +--- a/drivers/nvme/host/nvme.h ++++ b/drivers/nvme/host/nvme.h +@@ -27,6 +27,13 @@ enum { + NVME_NS_LIGHTNVM = 1, + }; + ++/* The below value is the specific amount of delay needed before checking ++ * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the ++ * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was ++ * found empirically. ++ */ ++#define NVME_QUIRK_DELAY_AMOUNT 2000 ++ + /* + * Represents an NVM Express device. Each nvme_dev is a PCI function. 
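/*
 * Standalone sketch of the quirk the drivers/nvme/host/pci.c hunk below
 * adds (stand-in types; msleep is stubbed here): for the one controller
 * that needs NVME_QUIRK_DELAY_BEFORE_CHK_RDY, a fixed settle delay is
 * slipped in between clearing the controller enable bit and polling for
 * readiness, using the empirically found value from the comment above.
 */
#include <stdbool.h>

#define NVME_QUIRK_DELAY_AMOUNT 2000	/* ms, from the nvme.h comment */

struct pci_id { unsigned short vendor, device; };

static void msleep(unsigned int ms) { (void)ms; /* stub */ }

static bool needs_delay_before_chk_rdy(struct pci_id id)
{
	return id.vendor == 0x1c58 && id.device == 0x0003;
}

static void disable_ctrl_model(struct pci_id id)
{
	/* ... clear the enable bit in the controller config ... */
	if (needs_delay_before_chk_rdy(id))
		msleep(NVME_QUIRK_DELAY_AMOUNT);
	/* ... only now start polling the ready bit ... */
}

int main(void)
{
	disable_ctrl_model((struct pci_id){ 0x1c58, 0x0003 });
	return 0;
}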
+ */ +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index c851bc53831c..4c673d45f1bd 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -1633,10 +1633,15 @@ static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled) + */ + static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap) + { ++ struct pci_dev *pdev = to_pci_dev(dev->dev); ++ + dev->ctrl_config &= ~NVME_CC_SHN_MASK; + dev->ctrl_config &= ~NVME_CC_ENABLE; + writel(dev->ctrl_config, &dev->bar->cc); + ++ if (pdev->vendor == 0x1c58 && pdev->device == 0x0003) ++ msleep(NVME_QUIRK_DELAY_AMOUNT); ++ + return nvme_wait_ready(dev, cap, false); + } + +diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c +index 655f79db7899..58048dd5fcd0 100644 +--- a/drivers/of/fdt.c ++++ b/drivers/of/fdt.c +@@ -632,9 +632,12 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node, + const char *pathp; + int offset, rc = 0, depth = -1; + +- for (offset = fdt_next_node(blob, -1, &depth); +- offset >= 0 && depth >= 0 && !rc; +- offset = fdt_next_node(blob, offset, &depth)) { ++ if (!blob) ++ return 0; ++ ++ for (offset = fdt_next_node(blob, -1, &depth); ++ offset >= 0 && depth >= 0 && !rc; ++ offset = fdt_next_node(blob, offset, &depth)) { + + pathp = fdt_get_name(blob, offset, NULL); + if (*pathp == '/') +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 200d3de8bc1e..a180c000e246 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -1112,6 +1112,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, + */ + if (dump_payload) + goto after_immediate_data; ++ /* ++ * Check for underflow case where both EDTL and immediate data payload ++ * exceeds what is presented by CDB's TRANSFER LENGTH, and what has ++ * already been set in target_cmd_size_check() as se_cmd->data_length. ++ * ++ * For this special case, fail the command and dump the immediate data ++ * payload. 
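/*
 * Sketch of the immediate-data underflow guard added just below
 * (simplified; the field names are assumed to mirror the hunk): if the
 * initiator's first burst carries more bytes than the transfer length
 * derived from the CDB, the command is failed with an invalid-CDB status
 * and the payload is dumped rather than copied into the shorter buffer.
 */
#include <stdio.h>

struct cmd_model {
	unsigned int first_burst_len;	/* immediate data received */
	unsigned int data_length;	/* set by target_cmd_size_check() */
};

static int check_immediate_data(const struct cmd_model *cmd)
{
	if (cmd->first_burst_len > cmd->data_length)
		return -1;	/* fail the command, dump the payload */
	return 0;
}

int main(void)
{
	struct cmd_model bad = { .first_burst_len = 8192, .data_length = 512 };

	printf("%s\n", check_immediate_data(&bad) ? "reject" : "accept");
	return 0;
}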
++ */ ++ if (cmd->first_burst_len > cmd->se_cmd.data_length) { ++ cmd->sense_reason = TCM_INVALID_CDB_FIELD; ++ goto after_immediate_data; ++ } + + immed_ret = iscsit_handle_immediate_data(cmd, hdr, + cmd->first_burst_len); +diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h +index 253a91bff943..272e6f755322 100644 +--- a/drivers/target/target_core_internal.h ++++ b/drivers/target/target_core_internal.h +@@ -132,7 +132,7 @@ int init_se_kmem_caches(void); + void release_se_kmem_caches(void); + u32 scsi_get_new_index(scsi_index_t); + void transport_subsystem_check_init(void); +-void transport_cmd_finish_abort(struct se_cmd *, int); ++int transport_cmd_finish_abort(struct se_cmd *, int); + unsigned char *transport_dump_cmd_direction(struct se_cmd *); + void transport_dump_dev_state(struct se_device *, char *, int *); + void transport_dump_dev_info(struct se_device *, struct se_lun *, +diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c +index 46b1991fbb50..c9be953496ec 100644 +--- a/drivers/target/target_core_tmr.c ++++ b/drivers/target/target_core_tmr.c +@@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr) + kfree(tmr); + } + +-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) ++static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) + { + unsigned long flags; + bool remove = true, send_tas; +@@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) + transport_send_task_abort(cmd); + } + +- transport_cmd_finish_abort(cmd, remove); ++ return transport_cmd_finish_abort(cmd, remove); + } + + static int target_check_cdb_and_preempt(struct list_head *list, +@@ -185,8 +185,8 @@ void core_tmr_abort_task( + cancel_work_sync(&se_cmd->work); + transport_wait_for_tasks(se_cmd); + +- transport_cmd_finish_abort(se_cmd, true); +- target_put_sess_cmd(se_cmd); ++ if (!transport_cmd_finish_abort(se_cmd, true)) ++ target_put_sess_cmd(se_cmd); + + printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" + " ref_tag: %llu\n", ref_tag); +@@ -286,8 +286,8 @@ static void core_tmr_drain_tmr_list( + cancel_work_sync(&cmd->work); + transport_wait_for_tasks(cmd); + +- transport_cmd_finish_abort(cmd, 1); +- target_put_sess_cmd(cmd); ++ if (!transport_cmd_finish_abort(cmd, 1)) ++ target_put_sess_cmd(cmd); + } + } + +@@ -385,8 +385,8 @@ static void core_tmr_drain_state_list( + cancel_work_sync(&cmd->work); + transport_wait_for_tasks(cmd); + +- core_tmr_handle_tas_abort(cmd, tas); +- target_put_sess_cmd(cmd); ++ if (!core_tmr_handle_tas_abort(cmd, tas)) ++ target_put_sess_cmd(cmd); + } + } + +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 60743bf27f37..37c77db6e737 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -639,9 +639,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) + percpu_ref_put(&lun->lun_ref); + } + +-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) ++int transport_cmd_finish_abort(struct se_cmd *cmd, int remove) + { + bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); ++ int ret = 0; + + if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) + transport_lun_remove_cmd(cmd); +@@ -653,9 +654,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) + cmd->se_tfo->aborted_task(cmd); + + if (transport_cmd_check_stop_to_fabric(cmd)) +- return; ++ return 1; + if (remove && ack_kref) +- transport_put_cmd(cmd); ++ ret = transport_put_cmd(cmd); ++ ++ 
return ret; + } + + static void target_complete_failure_work(struct work_struct *work) +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c +index 7fbe19d5279e..81b2b9f808b5 100644 +--- a/drivers/usb/usbip/vhci_hcd.c ++++ b/drivers/usb/usbip/vhci_hcd.c +@@ -215,14 +215,19 @@ done: + + static inline void hub_descriptor(struct usb_hub_descriptor *desc) + { ++ int width; ++ + memset(desc, 0, sizeof(*desc)); + desc->bDescriptorType = USB_DT_HUB; +- desc->bDescLength = 9; + desc->wHubCharacteristics = cpu_to_le16( + HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); ++ + desc->bNbrPorts = VHCI_NPORTS; +- desc->u.hs.DeviceRemovable[0] = 0xff; +- desc->u.hs.DeviceRemovable[1] = 0xff; ++ BUILD_BUG_ON(VHCI_NPORTS > USB_MAXCHILDREN); ++ width = desc->bNbrPorts / 8 + 1; ++ desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width; ++ memset(&desc->u.hs.DeviceRemovable[0], 0, width); ++ memset(&desc->u.hs.DeviceRemovable[width], 0xff, width); + } + + static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, +diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c +index ac7d921ed984..257425511d10 100644 +--- a/fs/autofs4/dev-ioctl.c ++++ b/fs/autofs4/dev-ioctl.c +@@ -331,7 +331,7 @@ static int autofs_dev_ioctl_fail(struct file *fp, + int status; + + token = (autofs_wqt_t) param->fail.token; +- status = param->fail.status ? param->fail.status : -ENOENT; ++ status = param->fail.status < 0 ? param->fail.status : -ENOENT; + return autofs4_wait_release(sbi, token, status); + } + +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c +index 87b87e091e8e..efd72e1fae74 100644 +--- a/fs/cifs/smb1ops.c ++++ b/fs/cifs/smb1ops.c +@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, + struct cifs_fid *fid, __u16 search_flags, + struct cifs_search_info *srch_inf) + { +- return CIFSFindFirst(xid, tcon, path, cifs_sb, +- &fid->netfid, search_flags, srch_inf, true); ++ int rc; ++ ++ rc = CIFSFindFirst(xid, tcon, path, cifs_sb, ++ &fid->netfid, search_flags, srch_inf, true); ++ if (rc) ++ cifs_dbg(FYI, "find first failed=%d\n", rc); ++ return rc; + } + + static int +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 087918c4612a..1d125d3d0d89 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -909,7 +909,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, + rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL); + kfree(utf16_path); + if (rc) { +- cifs_dbg(VFS, "open dir failed\n"); ++ cifs_dbg(FYI, "open dir failed rc=%d\n", rc); + return rc; + } + +@@ -919,7 +919,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, + rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, + fid->volatile_fid, 0, srch_inf); + if (rc) { +- cifs_dbg(VFS, "query directory failed\n"); ++ cifs_dbg(FYI, "query directory failed rc=%d\n", rc); + SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); + } + return rc; +diff --git a/fs/exec.c b/fs/exec.c +index 3a6de10d3891..02153068a694 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -206,8 +206,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, + + if (write) { + unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; ++ unsigned long ptr_size; + struct rlimit *rlim; + ++ /* ++ * Since the stack will hold pointers to the strings, we ++ * must account for them as well. 
++ * ++ * The size calculation is the entire vma while each arg page is ++ * built, so each time we get here it's calculating how far it ++ * is currently (rather than each call being just the newly ++ * added size from the arg page). As a result, we need to ++ * always add the entire size of the pointers, so that on the ++ * last call to get_arg_page() we'll actually have the entire ++ * correct size. ++ */ ++ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); ++ if (ptr_size > ULONG_MAX - size) ++ goto fail; ++ size += ptr_size; ++ + acct_arg_size(bprm, size / PAGE_SIZE); + + /* +@@ -225,13 +243,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, + * to work from. + */ + rlim = current->signal->rlim; +- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { +- put_page(page); +- return NULL; +- } ++ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) ++ goto fail; + } + + return page; ++ ++fail: ++ put_page(page); ++ return NULL; + } + + static void put_arg_page(struct page *page) +diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h +index 25247220b4b7..f0f1793cfa49 100644 +--- a/include/linux/timekeeper_internal.h ++++ b/include/linux/timekeeper_internal.h +@@ -29,7 +29,6 @@ + */ + struct tk_read_base { + struct clocksource *clock; +- cycle_t (*read)(struct clocksource *cs); + cycle_t mask; + cycle_t cycle_last; + u32 mult; +diff --git a/kernel/signal.c b/kernel/signal.c +index f3f1f7a972fd..b92a047ddc82 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -503,7 +503,8 @@ int unhandled_signal(struct task_struct *tsk, int sig) + return !tsk->ptrace; + } + +-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) ++static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, ++ bool *resched_timer) + { + struct sigqueue *q, *first = NULL; + +@@ -525,6 +526,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) + still_pending: + list_del_init(&first->list); + copy_siginfo(info, &first->info); ++ ++ *resched_timer = ++ (first->flags & SIGQUEUE_PREALLOC) && ++ (info->si_code == SI_TIMER) && ++ (info->si_sys_private); ++ + __sigqueue_free(first); + } else { + /* +@@ -541,12 +548,12 @@ still_pending: + } + + static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, +- siginfo_t *info) ++ siginfo_t *info, bool *resched_timer) + { + int sig = next_signal(pending, mask); + + if (sig) +- collect_signal(sig, pending, info); ++ collect_signal(sig, pending, info, resched_timer); + return sig; + } + +@@ -558,15 +565,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, + */ + int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) + { ++ bool resched_timer = false; + int signr; + + /* We only dequeue private signals from ourselves, we don't let + * signalfd steal them + */ +- signr = __dequeue_signal(&tsk->pending, mask, info); ++ signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); + if (!signr) { + signr = __dequeue_signal(&tsk->signal->shared_pending, +- mask, info); ++ mask, info, &resched_timer); + /* + * itimer signal ? + * +@@ -611,7 +619,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) + */ + current->jobctl |= JOBCTL_STOP_DEQUEUED; + } +- if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { ++ if (resched_timer) { + /* + * Release the siglock to ensure proper locking order + * of timer locks outside of siglocks. 
Note, we leave +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c +index 738012d68117..6e4866834d26 100644 +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -116,6 +116,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) + tk->offs_boot = ktime_add(tk->offs_boot, delta); + } + ++/* ++ * tk_clock_read - atomic clocksource read() helper ++ * ++ * This helper is necessary to use in the read paths because, while the ++ * seqlock ensures we don't return a bad value while structures are updated, ++ * it doesn't protect from potential crashes. There is the possibility that ++ * the tkr's clocksource may change between the read reference, and the ++ * clock reference passed to the read function. This can cause crashes if ++ * the wrong clocksource is passed to the wrong read function. ++ * This isn't necessary to use when holding the timekeeper_lock or doing ++ * a read of the fast-timekeeper tkrs (which is protected by its own locking ++ * and update logic). ++ */ ++static inline u64 tk_clock_read(struct tk_read_base *tkr) ++{ ++ struct clocksource *clock = READ_ONCE(tkr->clock); ++ ++ return clock->read(clock); ++} ++ + #ifdef CONFIG_DEBUG_TIMEKEEPING + #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ + +@@ -173,7 +193,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) + */ + do { + seq = read_seqcount_begin(&tk_core.seq); +- now = tkr->read(tkr->clock); ++ now = tk_clock_read(tkr); + last = tkr->cycle_last; + mask = tkr->mask; + max = tkr->clock->max_cycles; +@@ -207,7 +227,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) + cycle_t cycle_now, delta; + + /* read clocksource */ +- cycle_now = tkr->read(tkr->clock); ++ cycle_now = tk_clock_read(tkr); + + /* calculate the delta since the last update_wall_time */ + delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); +@@ -235,12 +255,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) + + old_clock = tk->tkr_mono.clock; + tk->tkr_mono.clock = clock; +- tk->tkr_mono.read = clock->read; + tk->tkr_mono.mask = clock->mask; +- tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock); ++ tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono); + + tk->tkr_raw.clock = clock; +- tk->tkr_raw.read = clock->read; + tk->tkr_raw.mask = clock->mask; + tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; + +@@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) + + now += timekeeping_delta_to_ns(tkr, + clocksource_delta( +- tkr->read(tkr->clock), ++ tk_clock_read(tkr), + tkr->cycle_last, + tkr->mask)); + } while (read_seqcount_retry(&tkf->seq, seq)); +@@ -432,6 +450,10 @@ static cycle_t dummy_clock_read(struct clocksource *cs) + return cycles_at_suspend; + } + ++static struct clocksource dummy_clock = { ++ .read = dummy_clock_read, ++}; ++ + /** + * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. + * @tk: Timekeeper to snapshot. 
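/*
 * Standalone C11 model of the race the timekeeping hunks close
 * (simplified types, not kernel code): the old tkr->read(tkr->clock)
 * performed two loads, so an updater could swap the clocksource between
 * them and the stale read op would be handed the new clock, or vice
 * versa. The tk_clock_read() helper introduced above loads the pointer
 * once, keeping the operation and its argument consistent; the suspend
 * path below swaps in a dummy clocksource through the same single field.
 */
#include <stdatomic.h>

typedef unsigned long long cycle_t;

struct clocksource {
	cycle_t (*read)(struct clocksource *cs);
};

struct tk_read_base {
	struct clocksource *_Atomic clock;
};

static cycle_t tk_clock_read(struct tk_read_base *tkr)
{
	/* single load: ->read and its argument come from the same clock */
	struct clocksource *clock =
		atomic_load_explicit(&tkr->clock, memory_order_relaxed);

	return clock->read(clock);
}

static cycle_t dummy_read(struct clocksource *cs)
{
	(void)cs;
	return 42;	/* frozen cycle count, as at suspend */
}

int main(void)
{
	struct clocksource dummy = { .read = dummy_read };
	struct tk_read_base tkr;

	atomic_init(&tkr.clock, &dummy);
	return tk_clock_read(&tkr) == 42 ? 0 : 1;
}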
+@@ -448,13 +470,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk) + struct tk_read_base *tkr = &tk->tkr_mono; + + memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); +- cycles_at_suspend = tkr->read(tkr->clock); +- tkr_dummy.read = dummy_clock_read; ++ cycles_at_suspend = tk_clock_read(tkr); ++ tkr_dummy.clock = &dummy_clock; + update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); + + tkr = &tk->tkr_raw; + memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); +- tkr_dummy.read = dummy_clock_read; ++ tkr_dummy.clock = &dummy_clock; + update_fast_timekeeper(&tkr_dummy, &tk_fast_raw); + } + +@@ -618,11 +640,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) + */ + static void timekeeping_forward_now(struct timekeeper *tk) + { +- struct clocksource *clock = tk->tkr_mono.clock; + cycle_t cycle_now, delta; + s64 nsec; + +- cycle_now = tk->tkr_mono.read(clock); ++ cycle_now = tk_clock_read(&tk->tkr_mono); + delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); + tk->tkr_mono.cycle_last = cycle_now; + tk->tkr_raw.cycle_last = cycle_now; +@@ -1405,7 +1426,7 @@ void timekeeping_resume(void) + * The less preferred source will only be tried if there is no better + * usable source. The rtc part is handled separately in rtc core code. + */ +- cycle_now = tk->tkr_mono.read(clock); ++ cycle_now = tk_clock_read(&tk->tkr_mono); + if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && + cycle_now > tk->tkr_mono.cycle_last) { + u64 num, max = ULLONG_MAX; +@@ -1800,7 +1821,7 @@ void update_wall_time(void) + #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET + offset = real_tk->cycle_interval; + #else +- offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock), ++ offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), + tk->tkr_mono.cycle_last, tk->tkr_mono.mask); + #endif + +diff --git a/lib/cmdline.c b/lib/cmdline.c +index 8f13cf73c2ec..79069d7938ea 100644 +--- a/lib/cmdline.c ++++ b/lib/cmdline.c +@@ -22,14 +22,14 @@ + * the values[M, M+1, ..., N] into the ints array in get_options. 
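/*
 * Sketch of the bound the lib/cmdline.c hunk below adds (standalone
 * model with the string parsing stripped away): expanding a range such
 * as "1-1000000" used to store upper_range - *pint integers no matter
 * how many slots remained in the caller's array. Threading the remaining
 * capacity n through get_range() clamps the writes while the returned
 * count still reflects the full range.
 */
static int get_range_model(int first, int upper_range, int *out, int n)
{
	int x, i = 0;

	for (x = first; n && x < upper_range; x++, n--)
		out[i++] = x;
	return upper_range - first;	/* inc_counter, as in the hunk */
}

int main(void)
{
	int buf[4];

	/* "0-100" with four slots left: writes only 0..3, no overrun */
	get_range_model(0, 100, buf, 4);
	return buf[3] == 3 ? 0 : 1;
}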
+ */ + +-static int get_range(char **str, int *pint) ++static int get_range(char **str, int *pint, int n) + { + int x, inc_counter, upper_range; + + (*str)++; + upper_range = simple_strtol((*str), NULL, 0); + inc_counter = upper_range - *pint; +- for (x = *pint; x < upper_range; x++) ++ for (x = *pint; n && x < upper_range; x++, n--) + *pint++ = x; + return inc_counter; + } +@@ -96,7 +96,7 @@ char *get_options(const char *str, int nints, int *ints) + break; + if (res == 3) { + int range_nums; +- range_nums = get_range((char **)&str, ints + i); ++ range_nums = get_range((char **)&str, ints + i, nints - i); + if (range_nums < 0) + break; + /* +diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c +index da3cc09f683e..91d43ab3a961 100644 +--- a/net/rxrpc/ar-key.c ++++ b/net/rxrpc/ar-key.c +@@ -215,7 +215,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, + unsigned int *_toklen) + { + const __be32 *xdr = *_xdr; +- unsigned int toklen = *_toklen, n_parts, loop, tmp; ++ unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen; + + /* there must be at least one name, and at least #names+1 length + * words */ +@@ -245,16 +245,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, + toklen -= 4; + if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) + return -EINVAL; +- if (tmp > toklen) ++ paddedlen = (tmp + 3) & ~3; ++ if (paddedlen > toklen) + return -EINVAL; + princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); + if (!princ->name_parts[loop]) + return -ENOMEM; + memcpy(princ->name_parts[loop], xdr, tmp); + princ->name_parts[loop][tmp] = 0; +- tmp = (tmp + 3) & ~3; +- toklen -= tmp; +- xdr += tmp >> 2; ++ toklen -= paddedlen; ++ xdr += paddedlen >> 2; + } + + if (toklen < 4) +@@ -263,16 +263,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, + toklen -= 4; + if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) + return -EINVAL; +- if (tmp > toklen) ++ paddedlen = (tmp + 3) & ~3; ++ if (paddedlen > toklen) + return -EINVAL; + princ->realm = kmalloc(tmp + 1, GFP_KERNEL); + if (!princ->realm) + return -ENOMEM; + memcpy(princ->realm, xdr, tmp); + princ->realm[tmp] = 0; +- tmp = (tmp + 3) & ~3; +- toklen -= tmp; +- xdr += tmp >> 2; ++ toklen -= paddedlen; ++ xdr += paddedlen >> 2; + + _debug("%s/...@%s", princ->name_parts[0], princ->realm); + +@@ -291,7 +291,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, + unsigned int *_toklen) + { + const __be32 *xdr = *_xdr; +- unsigned int toklen = *_toklen, len; ++ unsigned int toklen = *_toklen, len, paddedlen; + + /* there must be at least one tag and one length word */ + if (toklen <= 8) +@@ -305,15 +305,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, + toklen -= 8; + if (len > max_data_size) + return -EINVAL; ++ paddedlen = (len + 3) & ~3; ++ if (paddedlen > toklen) ++ return -EINVAL; + td->data_len = len; + + if (len > 0) { + td->data = kmemdup(xdr, len, GFP_KERNEL); + if (!td->data) + return -ENOMEM; +- len = (len + 3) & ~3; +- toklen -= len; +- xdr += len >> 2; ++ toklen -= paddedlen; ++ xdr += paddedlen >> 2; + } + + _debug("tag %x len %x", td->tag, td->data_len); +@@ -385,7 +387,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, + const __be32 **_xdr, unsigned int *_toklen) + { + const __be32 *xdr = *_xdr; +- unsigned int toklen = *_toklen, len; ++ unsigned int toklen = *_toklen, len, paddedlen; + + /* there must be at least one length word */ + if (toklen <= 4) +@@ -397,6 +399,9 @@ static int rxrpc_krb5_decode_ticket(u8 
**_ticket, u16 *_tktlen, + toklen -= 4; + if (len > AFSTOKEN_K5_TIX_MAX) + return -EINVAL; ++ paddedlen = (len + 3) & ~3; ++ if (paddedlen > toklen) ++ return -EINVAL; + *_tktlen = len; + + _debug("ticket len %u", len); +@@ -405,9 +410,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, + *_ticket = kmemdup(xdr, len, GFP_KERNEL); + if (!*_ticket) + return -ENOMEM; +- len = (len + 3) & ~3; +- toklen -= len; +- xdr += len >> 2; ++ toklen -= paddedlen; ++ xdr += paddedlen >> 2; + } + + *_xdr = xdr; +@@ -550,7 +554,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) + { + const __be32 *xdr = prep->data, *token; + const char *cp; +- unsigned int len, tmp, loop, ntoken, toklen, sec_ix; ++ unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix; + size_t datalen = prep->datalen; + int ret; + +@@ -576,22 +580,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) + if (len < 1 || len > AFSTOKEN_CELL_MAX) + goto not_xdr; + datalen -= 4; +- tmp = (len + 3) & ~3; +- if (tmp > datalen) ++ paddedlen = (len + 3) & ~3; ++ if (paddedlen > datalen) + goto not_xdr; + + cp = (const char *) xdr; + for (loop = 0; loop < len; loop++) + if (!isprint(cp[loop])) + goto not_xdr; +- if (len < tmp) +- for (; loop < tmp; loop++) +- if (cp[loop]) +- goto not_xdr; ++ for (; loop < paddedlen; loop++) ++ if (cp[loop]) ++ goto not_xdr; + _debug("cellname: [%u/%u] '%*.*s'", +- len, tmp, len, len, (const char *) xdr); +- datalen -= tmp; +- xdr += tmp >> 2; ++ len, paddedlen, len, len, (const char *) xdr); ++ datalen -= paddedlen; ++ xdr += paddedlen >> 2; + + /* get the token count */ + if (datalen < 12) +@@ -612,10 +615,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) + sec_ix = ntohl(*xdr); + datalen -= 4; + _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); +- if (toklen < 20 || toklen > datalen) ++ paddedlen = (toklen + 3) & ~3; ++ if (toklen < 20 || toklen > datalen || paddedlen > datalen) + goto not_xdr; +- datalen -= (toklen + 3) & ~3; +- xdr += (toklen + 3) >> 2; ++ datalen -= paddedlen; ++ xdr += paddedlen >> 2; + + } while (--loop > 0); + diff --git a/patch/kernel/rk3328-default/patch-4.4.75-76.patch b/patch/kernel/rk3328-default/patch-4.4.75-76.patch new file mode 100644 index 000000000..3232cef62 --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.75-76.patch @@ -0,0 +1,2823 @@ +diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt +index af70d1541d3a..be61d53e997f 100644 +--- a/Documentation/sysctl/kernel.txt ++++ b/Documentation/sysctl/kernel.txt +@@ -810,14 +810,13 @@ via the /proc/sys interface: + Each write syscall must fully contain the sysctl value to be + written, and multiple writes on the same sysctl file descriptor + will rewrite the sysctl value, regardless of file position. +- 0 - (default) Same behavior as above, but warn about processes that +- perform writes to a sysctl file descriptor when the file position +- is not 0. +- 1 - Respect file position when writing sysctl strings. Multiple writes +- will append to the sysctl value buffer. Anything past the max length +- of the sysctl value buffer will be ignored. Writes to numeric sysctl +- entries must always be at file position 0 and the value must be +- fully contained in the buffer sent in the write syscall. ++ 0 - Same behavior as above, but warn about processes that perform writes ++ to a sysctl file descriptor when the file position is not 0. ++ 1 - (default) Respect file position when writing sysctl strings. 
Multiple ++ writes will append to the sysctl value buffer. Anything past the max ++ length of the sysctl value buffer will be ignored. Writes to numeric ++ sysctl entries must always be at file position 0 and the value must ++ be fully contained in the buffer sent in the write syscall. + + ============================================================== + +diff --git a/Makefile b/Makefile +index 696d15d8ad5d..902ab134446e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 75 ++SUBLEVEL = 76 + EXTRAVERSION = + NAME = Blurry Fish Butt + +@@ -633,6 +633,12 @@ endif + # Tell gcc to never replace conditional load with a non-conditional one + KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) + ++# check for 'asm goto' ++ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) ++ KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO ++ KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO ++endif ++ + ifdef CONFIG_READABLE_ASM + # Disable optimizations that make assembler listings hard to read. + # reorder blocks reorders the control in the function +@@ -805,17 +811,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time) + # use the deterministic mode of AR if available + KBUILD_ARFLAGS := $(call ar-option,D) + +-# check for 'asm goto' +-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) +- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO +- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO +-else ifneq ($(findstring aarch64-linux-android, $(CROSS_COMPILE)),) +-# It seems than android gcc can't pass gcc-goto.sh check, but asm goto work. +-# So let's active it. +- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO +- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO +-endif +- + include scripts/Makefile.kasan + include scripts/Makefile.extrawarn + +diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi +index 6f50f672efbd..de8ac998604d 100644 +--- a/arch/arm/boot/dts/bcm5301x.dtsi ++++ b/arch/arm/boot/dts/bcm5301x.dtsi +@@ -54,14 +54,14 @@ + timer@0200 { + compatible = "arm,cortex-a9-global-timer"; + reg = <0x0200 0x100>; +- interrupts = ; ++ interrupts = ; + clocks = <&clk_periph>; + }; + + local-timer@0600 { + compatible = "arm,cortex-a9-twd-timer"; + reg = <0x0600 0x100>; +- interrupts = ; ++ interrupts = ; + clocks = <&clk_periph>; + }; + +diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c +index 4867f5daf82c..e47cffd25c6c 100644 +--- a/arch/arm/mm/mmu.c ++++ b/arch/arm/mm/mmu.c +@@ -1184,15 +1184,15 @@ void __init sanity_check_meminfo(void) + + high_memory = __va(arm_lowmem_limit - 1) + 1; + ++ if (!memblock_limit) ++ memblock_limit = arm_lowmem_limit; ++ + /* + * Round the memblock limit down to a pmd size. This + * helps to ensure that we will allocate memory from the + * last full pmd, which should be mapped. + */ +- if (memblock_limit) +- memblock_limit = round_down(memblock_limit, PMD_SIZE); +- if (!memblock_limit) +- memblock_limit = arm_lowmem_limit; ++ memblock_limit = round_down(memblock_limit, PMD_SIZE); + + memblock_set_current_limit(memblock_limit); + } +diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h +index caafd63b8092..40d1351e7573 100644 +--- a/arch/arm64/include/asm/acpi.h ++++ b/arch/arm64/include/asm/acpi.h +@@ -22,9 +22,9 @@ + #define ACPI_MADT_GICC_LENGTH \ + (acpi_gbl_FADT.header.revision < 6 ? 
76 : 80) + +-#define BAD_MADT_GICC_ENTRY(entry, end) \ +- (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ +- (entry)->header.length != ACPI_MADT_GICC_LENGTH) ++#define BAD_MADT_GICC_ENTRY(entry, end) \ ++ (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \ ++ (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end)) + + /* Basic configuration for ACPI */ + #ifdef CONFIG_ACPI +diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c +index 3cedd1f95e0f..8ae4067a5eda 100644 +--- a/arch/mips/ath79/common.c ++++ b/arch/mips/ath79/common.c +@@ -76,14 +76,14 @@ void ath79_ddr_set_pci_windows(void) + { + BUG_ON(!ath79_ddr_pci_win_base); + +- __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0); +- __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 1); +- __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 2); +- __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 3); +- __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 4); +- __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 5); +- __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 6); +- __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 7); ++ __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0); ++ __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4); ++ __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8); ++ __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc); ++ __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10); ++ __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14); ++ __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18); ++ __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c); + } + EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows); + +diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S +index 7791840cf22c..db07793f7b43 100644 +--- a/arch/mips/kernel/entry.S ++++ b/arch/mips/kernel/entry.S +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -137,6 +138,7 @@ work_pending: + andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS + beqz t0, work_notifysig + work_resched: ++ TRACE_IRQS_OFF + jal schedule + + local_irq_disable # make sure need_resched and +@@ -173,6 +175,7 @@ syscall_exit_work: + beqz t0, work_pending # trace bit set? + local_irq_enable # could let syscall_trace_leave() + # call schedule() instead ++ TRACE_IRQS_ON + move a0, sp + jal syscall_trace_leave + b resume_userspace +diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c +index f63a289977cc..0b3e58a3189f 100644 +--- a/arch/mips/kernel/pm-cps.c ++++ b/arch/mips/kernel/pm-cps.c +@@ -55,7 +55,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); + * state. Actually per-core rather than per-CPU. 
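/*
 * Standalone model of the bug the ath79/common.c hunk above fixes
 * (plain buffer instead of MMIO): the PCI window base is a byte-addressed
 * pointer, so writing register i at base + i lands the 32-bit values one
 * byte apart and they clobber each other. Consecutive 32-bit registers
 * sit at byte offsets 0x0, 0x4, ... 0x1c, i.e. base + 4 * i.
 */
#include <stdint.h>
#include <string.h>

static void writel_model(uint32_t val, void *addr)
{
	memcpy(addr, &val, sizeof(val));	/* stand-in for __raw_writel */
}

int main(void)
{
	unsigned char regs[8 * sizeof(uint32_t)] = { 0 };
	unsigned int i;

	for (i = 0; i < 8; i++)			/* the corrected layout */
		writel_model(0x10000000u + i, regs + 4 * i);

	return 0;
}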
+ */ + static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); +-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); + + /* Indicates online CPUs coupled with the current CPU */ + static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); +@@ -625,7 +624,6 @@ static int __init cps_gen_core_entries(unsigned cpu) + { + enum cps_pm_state state; + unsigned core = cpu_data[cpu].core; +- unsigned dlinesz = cpu_data[cpu].dcache.linesz; + void *entry_fn, *core_rc; + + for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { +@@ -645,16 +643,11 @@ static int __init cps_gen_core_entries(unsigned cpu) + } + + if (!per_cpu(ready_count, core)) { +- core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); ++ core_rc = kmalloc(sizeof(u32), GFP_KERNEL); + if (!core_rc) { + pr_err("Failed allocate core %u ready_count\n", core); + return -ENOMEM; + } +- per_cpu(ready_count_alloc, core) = core_rc; +- +- /* Ensure ready_count is aligned to a cacheline boundary */ +- core_rc += dlinesz - 1; +- core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); + per_cpu(ready_count, core) = core_rc; + } + +diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c +index 99a402231f4d..31ca2edd7218 100644 +--- a/arch/mips/kernel/traps.c ++++ b/arch/mips/kernel/traps.c +@@ -194,6 +194,8 @@ void show_stack(struct task_struct *task, unsigned long *sp) + { + struct pt_regs regs; + mm_segment_t old_fs = get_fs(); ++ ++ regs.cp0_status = KSU_KERNEL; + if (sp) { + regs.regs[29] = (unsigned long)sp; + regs.regs[31] = 0; +diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c +index dfb04fcedb04..48d6349fd9d7 100644 +--- a/arch/mips/ralink/mt7620.c ++++ b/arch/mips/ralink/mt7620.c +@@ -107,31 +107,31 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = { + }; + + static struct rt2880_pmx_func pwm1_grp_mt7628[] = { +- FUNC("sdcx", 3, 19, 1), ++ FUNC("sdxc d6", 3, 19, 1), + FUNC("utif", 2, 19, 1), + FUNC("gpio", 1, 19, 1), +- FUNC("pwm", 0, 19, 1), ++ FUNC("pwm1", 0, 19, 1), + }; + + static struct rt2880_pmx_func pwm0_grp_mt7628[] = { +- FUNC("sdcx", 3, 18, 1), ++ FUNC("sdxc d7", 3, 18, 1), + FUNC("utif", 2, 18, 1), + FUNC("gpio", 1, 18, 1), +- FUNC("pwm", 0, 18, 1), ++ FUNC("pwm0", 0, 18, 1), + }; + + static struct rt2880_pmx_func uart2_grp_mt7628[] = { +- FUNC("sdcx", 3, 20, 2), ++ FUNC("sdxc d5 d4", 3, 20, 2), + FUNC("pwm", 2, 20, 2), + FUNC("gpio", 1, 20, 2), +- FUNC("uart", 0, 20, 2), ++ FUNC("uart2", 0, 20, 2), + }; + + static struct rt2880_pmx_func uart1_grp_mt7628[] = { +- FUNC("sdcx", 3, 45, 2), ++ FUNC("sw_r", 3, 45, 2), + FUNC("pwm", 2, 45, 2), + FUNC("gpio", 1, 45, 2), +- FUNC("uart", 0, 45, 2), ++ FUNC("uart1", 0, 45, 2), + }; + + static struct rt2880_pmx_func i2c_grp_mt7628[] = { +@@ -143,21 +143,21 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = { + + static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) }; + static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) }; +-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) }; ++static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; + static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; + + static struct rt2880_pmx_func sd_mode_grp_mt7628[] = { + FUNC("jtag", 3, 22, 8), + FUNC("utif", 2, 22, 8), + FUNC("gpio", 1, 22, 8), +- FUNC("sdcx", 0, 22, 8), ++ FUNC("sdxc", 0, 22, 8), + }; + + static struct rt2880_pmx_func uart0_grp_mt7628[] = { + FUNC("-", 3, 12, 2), + FUNC("-", 2, 12, 2), + FUNC("gpio", 1, 12, 2), +- FUNC("uart", 0, 12, 2), ++ 
FUNC("uart0", 0, 12, 2), + }; + + static struct rt2880_pmx_func i2s_grp_mt7628[] = { +@@ -171,7 +171,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = { + FUNC("-", 3, 6, 1), + FUNC("refclk", 2, 6, 1), + FUNC("gpio", 1, 6, 1), +- FUNC("spi", 0, 6, 1), ++ FUNC("spi cs1", 0, 6, 1), + }; + + static struct rt2880_pmx_func spis_grp_mt7628[] = { +@@ -188,28 +188,44 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = { + FUNC("gpio", 0, 11, 1), + }; + +-#define MT7628_GPIO_MODE_MASK 0x3 +- +-#define MT7628_GPIO_MODE_PWM1 30 +-#define MT7628_GPIO_MODE_PWM0 28 +-#define MT7628_GPIO_MODE_UART2 26 +-#define MT7628_GPIO_MODE_UART1 24 +-#define MT7628_GPIO_MODE_I2C 20 +-#define MT7628_GPIO_MODE_REFCLK 18 +-#define MT7628_GPIO_MODE_PERST 16 +-#define MT7628_GPIO_MODE_WDT 14 +-#define MT7628_GPIO_MODE_SPI 12 +-#define MT7628_GPIO_MODE_SDMODE 10 +-#define MT7628_GPIO_MODE_UART0 8 +-#define MT7628_GPIO_MODE_I2S 6 +-#define MT7628_GPIO_MODE_CS1 4 +-#define MT7628_GPIO_MODE_SPIS 2 +-#define MT7628_GPIO_MODE_GPIO 0 ++static struct rt2880_pmx_func wled_kn_grp_mt7628[] = { ++ FUNC("rsvd", 3, 35, 1), ++ FUNC("rsvd", 2, 35, 1), ++ FUNC("gpio", 1, 35, 1), ++ FUNC("wled_kn", 0, 35, 1), ++}; ++ ++static struct rt2880_pmx_func wled_an_grp_mt7628[] = { ++ FUNC("rsvd", 3, 44, 1), ++ FUNC("rsvd", 2, 44, 1), ++ FUNC("gpio", 1, 44, 1), ++ FUNC("wled_an", 0, 44, 1), ++}; ++ ++#define MT7628_GPIO_MODE_MASK 0x3 ++ ++#define MT7628_GPIO_MODE_WLED_KN 48 ++#define MT7628_GPIO_MODE_WLED_AN 32 ++#define MT7628_GPIO_MODE_PWM1 30 ++#define MT7628_GPIO_MODE_PWM0 28 ++#define MT7628_GPIO_MODE_UART2 26 ++#define MT7628_GPIO_MODE_UART1 24 ++#define MT7628_GPIO_MODE_I2C 20 ++#define MT7628_GPIO_MODE_REFCLK 18 ++#define MT7628_GPIO_MODE_PERST 16 ++#define MT7628_GPIO_MODE_WDT 14 ++#define MT7628_GPIO_MODE_SPI 12 ++#define MT7628_GPIO_MODE_SDMODE 10 ++#define MT7628_GPIO_MODE_UART0 8 ++#define MT7628_GPIO_MODE_I2S 6 ++#define MT7628_GPIO_MODE_CS1 4 ++#define MT7628_GPIO_MODE_SPIS 2 ++#define MT7628_GPIO_MODE_GPIO 0 + + static struct rt2880_pmx_group mt7628an_pinmux_data[] = { +- GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, ++ GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_PWM1), +- GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, ++ GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_PWM0), + GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_UART2), +@@ -233,6 +249,10 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = { + 1, MT7628_GPIO_MODE_SPIS), + GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_GPIO), ++ GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK, ++ 1, MT7628_GPIO_MODE_WLED_AN), ++ GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, ++ 1, MT7628_GPIO_MODE_WLED_KN), + { 0 } + }; + +@@ -439,7 +459,7 @@ void __init ralink_clk_init(void) + ralink_clk_add("10000c00.uartlite", periph_rate); + ralink_clk_add("10180000.wmac", xtal_rate); + +- if (IS_ENABLED(CONFIG_USB) && is_mt76x8()) { ++ if (IS_ENABLED(CONFIG_USB) && !is_mt76x8()) { + /* + * When the CPU goes into sleep mode, the BUS clock will be + * too low for USB to function properly. 
Adjust the busses +diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c +index 15506a1ff22a..9dd67749c592 100644 +--- a/arch/mips/ralink/rt288x.c ++++ b/arch/mips/ralink/rt288x.c +@@ -109,5 +109,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info) + soc_info->mem_size_max = RT2880_MEM_SIZE_MAX; + + rt2880_pinmux_data = rt2880_pinmux_data_act; +- ralink_soc == RT2880_SOC; ++ ralink_soc = RT2880_SOC; + } +diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c +index 98949b0df00a..6696c1986844 100644 +--- a/arch/powerpc/kernel/eeh.c ++++ b/arch/powerpc/kernel/eeh.c +@@ -304,9 +304,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) + * + * For pHyp, we have to enable IO for log retrieval. Otherwise, + * 0xFF's is always returned from PCI config space. ++ * ++ * When the @severity is EEH_LOG_PERM, the PE is going to be ++ * removed. Prior to that, the drivers for devices included in ++ * the PE will be closed. The drivers rely on working IO path ++ * to bring the devices to quiet state. Otherwise, PCI traffic ++ * from those devices after they are removed is like to cause ++ * another unexpected EEH error. + */ + if (!(pe->type & EEH_PE_PHB)) { +- if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) ++ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) || ++ severity == EEH_LOG_PERM) + eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); + + /* +diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h +index d7697ab802f6..8e136b88cdf4 100644 +--- a/arch/s390/include/asm/ctl_reg.h ++++ b/arch/s390/include/asm/ctl_reg.h +@@ -15,7 +15,9 @@ + BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ + asm volatile( \ + " lctlg %1,%2,%0\n" \ +- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ ++ : \ ++ : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \ ++ : "memory"); \ + } + + #define __ctl_store(array, low, high) { \ +diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h +index e9cd7befcb76..19d14ac23ef9 100644 +--- a/arch/x86/include/asm/kvm_emulate.h ++++ b/arch/x86/include/asm/kvm_emulate.h +@@ -221,6 +221,9 @@ struct x86_emulate_ops { + void (*get_cpuid)(struct x86_emulate_ctxt *ctxt, + u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); + void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); ++ ++ unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); ++ void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags); + }; + + typedef u32 __attribute__((vector_size(16))) sse128_t; +@@ -290,7 +293,6 @@ struct x86_emulate_ctxt { + + /* interruptibility state, as a result of execution of STI or MOV SS */ + int interruptibility; +- int emul_flags; + + bool perm_ok; /* do not check permissions if true */ + bool ud; /* inject an #UD if host doesn't support insn */ +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 1dcea225977d..04b2f3cad7ba 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -2531,7 +2531,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) + u64 smbase; + int ret; + +- if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0) ++ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) + return emulate_ud(ctxt); + + /* +@@ -2580,11 +2580,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) + return X86EMUL_UNHANDLEABLE; + } + +- if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) ++ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) + ctxt->ops->set_nmi_mask(ctxt, false); + +- ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK; +-
ctxt->emul_flags &= ~X86EMUL_SMM_MASK; ++ ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) & ++ ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK)); + return X86EMUL_CONTINUE; + } + +@@ -5296,6 +5296,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) + const struct x86_emulate_ops *ops = ctxt->ops; + int rc = X86EMUL_CONTINUE; + int saved_dst_type = ctxt->dst.type; ++ unsigned emul_flags; + + ctxt->mem_read.pos = 0; + +@@ -5310,6 +5311,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) + goto done; + } + ++ emul_flags = ctxt->ops->get_hflags(ctxt); + if (unlikely(ctxt->d & + (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { + if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || +@@ -5343,7 +5345,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) + fetch_possible_mmx_operand(ctxt, &ctxt->dst); + } + +- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { ++ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { + rc = emulator_check_intercept(ctxt, ctxt->intercept, + X86_ICPT_PRE_EXCEPT); + if (rc != X86EMUL_CONTINUE) +@@ -5372,7 +5374,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) + goto done; + } + +- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { ++ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + rc = emulator_check_intercept(ctxt, ctxt->intercept, + X86_ICPT_POST_EXCEPT); + if (rc != X86EMUL_CONTINUE) +@@ -5426,7 +5428,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) + + special_insn: + +- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { ++ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + rc = emulator_check_intercept(ctxt, ctxt->intercept, + X86_ICPT_POST_MEMACCESS); + if (rc != X86EMUL_CONTINUE) +diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c +index ab38af4f4947..23a7c7ba377a 100644 +--- a/arch/x86/kvm/pmu_intel.c ++++ b/arch/x86/kvm/pmu_intel.c +@@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) + ((u64)1 << edx.split.bit_width_fixed) - 1; + } + +- pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | ++ pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | + (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); + pmu->global_ctrl_mask = ~pmu->global_ctrl; + +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 50ca8f409a7c..bbaa11f4e74b 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -2264,7 +2264,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) + if (!(vmcs12->exception_bitmap & (1u << nr))) + return 0; + +- nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, ++ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, + vmcs_read32(VM_EXIT_INTR_INFO), + vmcs_readl(EXIT_QUALIFICATION)); + return 1; +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 6c82792487e9..8e526c6fd784 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -4844,6 +4844,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, + + if (var.unusable) { + memset(desc, 0, sizeof(*desc)); ++ if (base3) ++ *base3 = 0; + return false; + } + +@@ -4999,6 +5001,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) + kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); + } + ++static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) ++{ ++ return emul_to_vcpu(ctxt)->arch.hflags; ++} ++ ++static void
emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags) ++{ ++ kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags); ++} ++ + static const struct x86_emulate_ops emulate_ops = { + .read_gpr = emulator_read_gpr, + .write_gpr = emulator_write_gpr, +@@ -5038,6 +5050,8 @@ static const struct x86_emulate_ops emulate_ops = { + .intercept = emulator_intercept, + .get_cpuid = emulator_get_cpuid, + .set_nmi_mask = emulator_set_nmi_mask, ++ .get_hflags = emulator_get_hflags, ++ .set_hflags = emulator_set_hflags, + }; + + static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) +@@ -5090,7 +5104,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) + BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); + BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); + BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); +- ctxt->emul_flags = vcpu->arch.hflags; + + init_decode_cache(ctxt); + vcpu->arch.emulate_regs_need_sync_from_vcpu = false; +@@ -5486,8 +5499,6 @@ restart: + unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); + toggle_interruptibility(vcpu, ctxt->interruptibility); + vcpu->arch.emulate_regs_need_sync_to_vcpu = false; +- if (vcpu->arch.hflags != ctxt->emul_flags) +- kvm_set_hflags(vcpu, ctxt->emul_flags); + kvm_rip_write(vcpu, ctxt->eip); + if (r == EMULATE_DONE) + kvm_vcpu_check_singlestep(vcpu, rflags, &r); +@@ -5974,7 +5985,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) + + kvm_x86_ops->patch_hypercall(vcpu, instruction); + +- return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); ++ return emulator_write_emulated(ctxt, rip, instruction, 3, ++ &ctxt->exception); + } + + static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) +diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c +index ef05755a1900..7ed47b1e6f42 100644 +--- a/arch/x86/mm/mpx.c ++++ b/arch/x86/mm/mpx.c +@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) + * We were not able to extract an address from the instruction, + * probably because there was something invalid in it. + */ +- if (info->si_addr == (void *)-1) { ++ if (info->si_addr == (void __user *)-1) { + err = -EINVAL; + goto err_out; + } +@@ -525,15 +525,7 @@ int mpx_handle_bd_fault(void) + if (!kernel_managing_mpx_tables(current->mm)) + return -EINVAL; + +- if (do_mpx_bt_fault()) { +- force_sig(SIGSEGV, current); +- /* +- * The force_sig() is essentially "handling" this +- * exception, so we do not pass up the error +- * from do_mpx_bt_fault().
+- */ +- } +- return 0; ++ return do_mpx_bt_fault(); + } + + /* +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c +index 5fb6adaaa796..5a760fd66bec 100644 +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -134,8 +134,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask, + { + struct flush_tlb_info info; + +- if (end == 0) +- end = start + PAGE_SIZE; + info.flush_mm = mm; + info.flush_start = start; + info.flush_end = end; +@@ -264,7 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) + } + + if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) +- flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); ++ flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE); + + preempt_enable(); + } +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 31e8ae916ba0..be0b09a0fb44 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -1864,7 +1864,7 @@ static void config_work_handler(struct work_struct *work) + { + struct ports_device *portdev; + +- portdev = container_of(work, struct ports_device, control_work); ++ portdev = container_of(work, struct ports_device, config_work); + if (!use_multiport(portdev)) { + struct virtio_device *vdev; + struct port *port; +diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c +index d6d425773fa4..5b2db3c6568f 100644 +--- a/drivers/cpufreq/s3c2416-cpufreq.c ++++ b/drivers/cpufreq/s3c2416-cpufreq.c +@@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) + rate = clk_get_rate(s3c_freq->hclk); + if (rate < 133 * 1000 * 1000) { + pr_err("cpufreq: HCLK not at 133MHz\n"); +- clk_put(s3c_freq->hclk); + ret = -EINVAL; + goto err_armclk; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +index 25a3e2485cc2..2bc17a907ecf 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +@@ -124,6 +124,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, + } + break; + } ++ ++ if (!(*out_ring && (*out_ring)->adev)) { ++ DRM_ERROR("Ring %d is not initialized on IP %d\n", ++ ring, ip_type); ++ return -EINVAL; ++ } ++ + return 0; + } + +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h +index b92139e9b9d8..b5c64edeb668 100644 +--- a/drivers/gpu/drm/ast/ast_drv.h ++++ b/drivers/gpu/drm/ast/ast_drv.h +@@ -113,7 +113,11 @@ struct ast_private { + struct ttm_bo_kmap_obj cache_kmap; + int next_cursor; + bool support_wide_screen; +- bool DisableP2A; ++ enum { ++ ast_use_p2a, ++ ast_use_dt, ++ ast_use_defaults ++ } config_mode; + + enum ast_tx_chip tx_chip_type; + u8 dp501_maxclk; +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index 6c021165ca67..498a94069e6b 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast, + return ret; + } + ++static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) ++{ ++ struct device_node *np = dev->pdev->dev.of_node; ++ struct ast_private *ast = dev->dev_private; ++ uint32_t data, jregd0, jregd1; ++ ++ /* Defaults */ ++ ast->config_mode = ast_use_defaults; ++ *scu_rev = 0xffffffff; ++ ++ /* Check if we have device-tree properties */ ++ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", ++ scu_rev)) { ++ /* We do, disable P2A access */ ++ ast->config_mode = ast_use_dt; ++ DRM_INFO("Using
device-tree for configuration\n"); ++ return; ++ } ++ ++ /* Not all families have a P2A bridge */ ++ if (dev->pdev->device != PCI_CHIP_AST2000) ++ return; ++ ++ /* ++ * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge ++ * is disabled. We force using P2A if VGA only mode bit ++ * is set D[7] ++ */ ++ jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); ++ jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); ++ if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { ++ /* Double check it's actually working */ ++ data = ast_read32(ast, 0xf004); ++ if (data != 0xFFFFFFFF) { ++ /* P2A works, grab silicon revision */ ++ ast->config_mode = ast_use_p2a; ++ ++ DRM_INFO("Using P2A bridge for configuration\n"); ++ ++ /* Read SCU7c (silicon revision register) */ ++ ast_write32(ast, 0xf004, 0x1e6e0000); ++ ast_write32(ast, 0xf000, 0x1); ++ *scu_rev = ast_read32(ast, 0x1207c); ++ return; ++ } ++ } ++ ++ /* We have a P2A bridge but it's disabled */ ++ DRM_INFO("P2A bridge disabled, using default configuration\n"); ++} + + static int ast_detect_chip(struct drm_device *dev, bool *need_post) + { + struct ast_private *ast = dev->dev_private; +- uint32_t data, jreg; ++ uint32_t jreg, scu_rev; ++ ++ /* ++ * If VGA isn't enabled, we need to enable now or subsequent ++ * access to the scratch registers will fail. We also inform ++ * our caller that it needs to POST the chip ++ * (Assumption: VGA not enabled -> need to POST) ++ */ ++ if (!ast_is_vga_enabled(dev)) { ++ ast_enable_vga(dev); ++ DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); ++ *need_post = true; ++ } else ++ *need_post = false; ++ ++ ++ /* Enable extended register access */ ++ ast_enable_mmio(dev); + ast_open_key(ast); + ++ /* Find out whether P2A works or whether to use device-tree */ ++ ast_detect_config_mode(dev, &scu_rev); ++ ++ /* Identify chipset */ + if (dev->pdev->device == PCI_CHIP_AST1180) { + ast->chip = AST1100; + DRM_INFO("AST 1180 detected\n"); +@@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) + ast->chip = AST2300; + DRM_INFO("AST 2300 detected\n"); + } else if (dev->pdev->revision >= 0x10) { +- uint32_t data; +- ast_write32(ast, 0xf004, 0x1e6e0000); +- ast_write32(ast, 0xf000, 0x1); +- +- data = ast_read32(ast, 0x1207c); +- switch (data & 0x0300) { ++ switch (scu_rev & 0x0300) { + case 0x0200: + ast->chip = AST1100; + DRM_INFO("AST 1100 detected\n"); +@@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) + } + } + +- /* +- * If VGA isn't enabled, we need to enable now or subsequent +- * access to the scratch registers will fail.
We also inform +- * our caller that it needs to POST the chip +- * (Assumption: VGA not enabled -> need to POST) +- */ +- if (!ast_is_vga_enabled(dev)) { +- ast_enable_vga(dev); +- ast_enable_mmio(dev); +- DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); +- *need_post = true; +- } else +- *need_post = false; +- +- /* Check P2A Access */ +- ast->DisableP2A = true; +- data = ast_read32(ast, 0xf004); +- if (data != 0xFFFFFFFF) +- ast->DisableP2A = false; +- + /* Check if we support wide screen */ + switch (ast->chip) { + case AST1180: +@@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) + ast->support_wide_screen = true; + else { + ast->support_wide_screen = false; +- if (ast->DisableP2A == false) { +- /* Read SCU7c (silicon revision register) */ +- ast_write32(ast, 0xf004, 0x1e6e0000); +- ast_write32(ast, 0xf000, 0x1); +- data = ast_read32(ast, 0x1207c); +- data &= 0x300; +- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ +- ast->support_wide_screen = true; +- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ +- ast->support_wide_screen = true; +- } ++ if (ast->chip == AST2300 && ++ (scu_rev & 0x300) == 0x0) /* ast1300 */ ++ ast->support_wide_screen = true; ++ if (ast->chip == AST2400 && ++ (scu_rev & 0x300) == 0x100) /* ast1400 */ ++ ast->support_wide_screen = true; + } + break; + } +@@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) + + static int ast_get_dram_info(struct drm_device *dev) + { ++ struct device_node *np = dev->pdev->dev.of_node; + struct ast_private *ast = dev->dev_private; +- uint32_t data, data2; +- uint32_t denum, num, div, ref_pll; ++ uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; ++ uint32_t denum, num, div, ref_pll, dsel; + +- if (ast->DisableP2A) +- { ++ switch (ast->config_mode) { ++ case ast_use_dt: ++ /* ++ * If some properties are missing, use reasonable ++ * defaults for AST2400 ++ */ ++ if (of_property_read_u32(np, "aspeed,mcr-configuration", ++ &mcr_cfg)) ++ mcr_cfg = 0x00000577; ++ if (of_property_read_u32(np, "aspeed,mcr-scu-mpll", ++ &mcr_scu_mpll)) ++ mcr_scu_mpll = 0x000050C0; ++ if (of_property_read_u32(np, "aspeed,mcr-scu-strap", ++ &mcr_scu_strap)) ++ mcr_scu_strap = 0; ++ break; ++ case ast_use_p2a: ++ ast_write32(ast, 0xf004, 0x1e6e0000); ++ ast_write32(ast, 0xf000, 0x1); ++ mcr_cfg = ast_read32(ast, 0x10004); ++ mcr_scu_mpll = ast_read32(ast, 0x10120); ++ mcr_scu_strap = ast_read32(ast, 0x10170); ++ break; ++ case ast_use_defaults: ++ default: + ast->dram_bus_width = 16; + ast->dram_type = AST_DRAM_1Gx16; + ast->mclk = 396; ++ return 0; + } +- else +- { +- ast_write32(ast, 0xf004, 0x1e6e0000); +- ast_write32(ast, 0xf000, 0x1); +- data = ast_read32(ast, 0x10004); +- +- if (data & 0x40) +- ast->dram_bus_width = 16; +- else +- ast->dram_bus_width = 32; + +- if (ast->chip == AST2300 || ast->chip == AST2400) { +- switch (data & 0x03) { +- case 0: +- ast->dram_type = AST_DRAM_512Mx16; +- break; +- default: +- case 1: +- ast->dram_type = AST_DRAM_1Gx16; +- break; +- case 2: +- ast->dram_type = AST_DRAM_2Gx16; +- break; +- case 3: +- ast->dram_type = AST_DRAM_4Gx16; +- break; +- } +- } else { +- switch (data & 0x0c) { +- case 0: +- case 4: +- ast->dram_type = AST_DRAM_512Mx16; +- break; +- case 8: +- if (data & 0x40) +- ast->dram_type = AST_DRAM_1Gx16; +- else +- ast->dram_type = AST_DRAM_512Mx32; +- break; +- case 0xc: +- ast->dram_type = AST_DRAM_1Gx32; +- break; +- } +- } ++ if (mcr_cfg & 0x40) ++ ast->dram_bus_width = 16; ++ else ++
ast->dram_bus_width = 32; + +- data = ast_read32(ast, 0x10120); +- data2 = ast_read32(ast, 0x10170); +- if (data2 & 0x2000) +- ref_pll = 14318; +- else +- ref_pll = 12000; +- +- denum = data & 0x1f; +- num = (data & 0x3fe0) >> 5; +- data = (data & 0xc000) >> 14; +- switch (data) { +- case 3: +- div = 0x4; ++ if (ast->chip == AST2300 || ast->chip == AST2400) { ++ switch (mcr_cfg & 0x03) { ++ case 0: ++ ast->dram_type = AST_DRAM_512Mx16; + break; +- case 2: ++ default: + case 1: +- div = 0x2; ++ ast->dram_type = AST_DRAM_1Gx16; + break; +- default: +- div = 0x1; ++ case 2: ++ ast->dram_type = AST_DRAM_2Gx16; ++ break; ++ case 3: ++ ast->dram_type = AST_DRAM_4Gx16; ++ break; ++ } ++ } else { ++ switch (mcr_cfg & 0x0c) { ++ case 0: ++ case 4: ++ ast->dram_type = AST_DRAM_512Mx16; ++ break; ++ case 8: ++ if (mcr_cfg & 0x40) ++ ast->dram_type = AST_DRAM_1Gx16; ++ else ++ ast->dram_type = AST_DRAM_512Mx32; ++ break; ++ case 0xc: ++ ast->dram_type = AST_DRAM_1Gx32; + break; + } +- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); + } ++ ++ if (mcr_scu_strap & 0x2000) ++ ref_pll = 14318; ++ else ++ ref_pll = 12000; ++ ++ denum = mcr_scu_mpll & 0x1f; ++ num = (mcr_scu_mpll & 0x3fe0) >> 5; ++ dsel = (mcr_scu_mpll & 0xc000) >> 14; ++ switch (dsel) { ++ case 3: ++ div = 0x4; ++ break; ++ case 2: ++ case 1: ++ div = 0x2; ++ break; ++ default: ++ div = 0x1; ++ break; ++ } ++ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); + return 0; + } + +diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c +index 270e8fb2803f..c7c58becb25d 100644 +--- a/drivers/gpu/drm/ast/ast_post.c ++++ b/drivers/gpu/drm/ast/ast_post.c +@@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev) + ast_enable_mmio(dev); + ast_set_def_ext_reg(dev); + +- if (ast->DisableP2A == false) +- { ++ if (ast->config_mode == ast_use_p2a) { + if (ast->chip == AST2300 || ast->chip == AST2400) + ast_init_dram_2300(dev); + else + ast_init_dram_reg(dev); + + ast_init_3rdtx(dev); +- } +- else +- { ++ } else { + if (ast->tx_chip_type != AST_TX_NONE) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +index 13db8a2851ed..1f013d45c9e9 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) + list_for_each_entry_safe(entry, next, &man->list, head) + vmw_cmdbuf_res_free(man, entry); + ++ drm_ht_remove(&man->resources); + kfree(man); + } + +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c +index 0b80633bae91..d4d655a10df1 100644 +--- a/drivers/hid/i2c-hid/i2c-hid.c ++++ b/drivers/hid/i2c-hid/i2c-hid.c +@@ -364,6 +364,15 @@ static int i2c_hid_hwreset(struct i2c_client *client) + if (ret) + return ret; + ++ /* ++ * The HID over I2C specification states that if a DEVICE needs time ++ * after the PWR_ON request, it should utilise CLOCK stretching. ++ * However, it has been observered that the Windows driver provides a ++ * 1ms sleep between the PWR_ON and RESET requests and that some devices ++ * rely on this.
++ */ ++ usleep_range(1000, 5000); ++ + i2c_hid_dbg(ihid, "resetting...\n"); + + ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); +diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c +index 4831eb910fc7..22160e481794 100644 +--- a/drivers/iommu/amd_iommu_v2.c ++++ b/drivers/iommu/amd_iommu_v2.c +@@ -699,9 +699,9 @@ out_clear_state: + + out_unregister: + mmu_notifier_unregister(&pasid_state->mn, mm); ++ mmput(mm); + + out_free: +- mmput(mm); + free_pasid_state(pasid_state); + + out: +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index b92b8a724efb..f9711aceef54 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -1137,7 +1137,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, + if (!dma_pte_present(pte) || dma_pte_superpage(pte)) + goto next; + +- level_pfn = pfn & level_mask(level - 1); ++ level_pfn = pfn & level_mask(level); + level_pte = phys_to_virt(dma_pte_addr(pte)); + + if (level > 2) +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c +index 515bb8b80952..a070fa39521a 100644 +--- a/drivers/iommu/iommu.c ++++ b/drivers/iommu/iommu.c +@@ -391,36 +391,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) + device->dev = dev; + + ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); +- if (ret) { +- kfree(device); +- return ret; +- } ++ if (ret) ++ goto err_free_device; + + device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); + rename: + if (!device->name) { +- sysfs_remove_link(&dev->kobj, "iommu_group"); +- kfree(device); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_remove_link; + } + + ret = sysfs_create_link_nowarn(group->devices_kobj, + &dev->kobj, device->name); + if (ret) { +- kfree(device->name); + if (ret == -EEXIST && i >= 0) { + /* + * Account for the slim chance of collision + * and append an instance to the name. + */ ++ kfree(device->name); + device->name = kasprintf(GFP_KERNEL, "%s.%d", + kobject_name(&dev->kobj), i++); + goto rename; + } +- +- sysfs_remove_link(&dev->kobj, "iommu_group"); +- kfree(device); +- return ret; ++ goto err_free_name; + } + + kobject_get(group->devices_kobj); +@@ -432,8 +426,10 @@ rename: + mutex_lock(&group->mutex); + list_add_tail(&device->list, &group->devices); + if (group->domain) +- __iommu_attach_device(group->domain, dev); ++ ret = __iommu_attach_device(group->domain, dev); + mutex_unlock(&group->mutex); ++ if (ret) ++ goto err_put_group; + + /* Notify any listeners about change to group.
*/ + blocking_notifier_call_chain(&group->notifier, +@@ -444,6 +440,21 @@ rename: + pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); + + return 0; ++ ++err_put_group: ++ mutex_lock(&group->mutex); ++ list_del(&device->list); ++ mutex_unlock(&group->mutex); ++ dev->iommu_group = NULL; ++ kobject_put(group->devices_kobj); ++err_free_name: ++ kfree(device->name); ++err_remove_link: ++ sysfs_remove_link(&dev->kobj, "iommu_group"); ++err_free_device: ++ kfree(device); ++ pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret); ++ return ret; + } + EXPORT_SYMBOL_GPL(iommu_group_add_device); + +diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c +index 5abab8800891..9190057535e6 100644 +--- a/drivers/mtd/bcm47xxpart.c ++++ b/drivers/mtd/bcm47xxpart.c +@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master, + { + uint32_t buf; + size_t bytes_read; ++ int err; + +- if (mtd_read(master, offset, sizeof(buf), &bytes_read, +- (uint8_t *)&buf) < 0) { +- pr_err("mtd_read error while parsing (offset: 0x%X)!\n", +- offset); ++ err = mtd_read(master, offset, sizeof(buf), &bytes_read, ++ (uint8_t *)&buf); ++ if (err && !mtd_is_bitflip(err)) { ++ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", ++ offset, err); + goto out_default; + } + +@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master, + int trx_part = -1; + int last_trx_part = -1; + int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; ++ int err; + + /* + * Some really old flashes (like AT45DB*) had smaller erasesize-s, but +@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master, + /* Parse block by block looking for magics */ + for (offset = 0; offset <= master->size - blocksize; + offset += blocksize) { +- /* Nothing more in higher memory */ +- if (offset >= 0x2000000) ++ /* Nothing more in higher memory on BCM47XX (MIPS) */ ++ if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000) + break; + + if (curr_part >= BCM47XXPART_MAX_PARTS) { +@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, + } + + /* Read beginning of the block */ +- if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, +- &bytes_read, (uint8_t *)buf) < 0) { +- pr_err("mtd_read error while parsing (offset: 0x%X)!\n", +- offset); ++ err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, ++ &bytes_read, (uint8_t *)buf); ++ if (err && !mtd_is_bitflip(err)) { ++ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", ++ offset, err); + continue; + } + +@@ -252,10 +256,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, + } + + /* Read middle of the block */ +- if (mtd_read(master, offset + 0x8000, 0x4, +- &bytes_read, (uint8_t *)buf) < 0) { +- pr_err("mtd_read error while parsing (offset: 0x%X)!\n", +- offset); ++ err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read, ++ (uint8_t *)buf); ++ if (err && !mtd_is_bitflip(err)) { ++ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", ++ offset, err); + continue; + } + +@@ -275,10 +280,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, + } + + offset = master->size - possible_nvram_sizes[i]; +- if (mtd_read(master, offset, 0x4, &bytes_read, +- (uint8_t *)buf) < 0) { +- pr_err("mtd_read error while reading at offset 0x%X!\n", +- offset); ++ err = mtd_read(master, offset, 0x4, &bytes_read, ++ (uint8_t *)buf); ++ if (err && !mtd_is_bitflip(err)) { ++ pr_err("mtd_read error while reading (offset 0x%X): %d\n", ++ offset, err); +
continue; + } + +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +index 5e6238e0b2bd..75e6e7e6baed 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +@@ -2732,8 +2732,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata) + + /* Flush Tx queues */ + ret = xgbe_flush_tx_queues(pdata); +- if (ret) ++ if (ret) { ++ netdev_err(pdata->netdev, "error flushing TX queues\n"); + return ret; ++ } + + /* + * Initialize DMA related features +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +index 865b7e0b133b..64034ff081a0 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +@@ -877,7 +877,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata) + + DBGPR("-->xgbe_start\n"); + +- hw_if->init(pdata); ++ ret = hw_if->init(pdata); ++ if (ret) ++ return ret; + + ret = phy_if->phy_start(pdata); + if (ret) +diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c +index b56c9c581359..70da30095b89 100644 +--- a/drivers/net/ethernet/broadcom/bgmac.c ++++ b/drivers/net/ethernet/broadcom/bgmac.c +@@ -255,15 +255,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) + while (ring->start != ring->end) { + int slot_idx = ring->start % BGMAC_TX_RING_SLOTS; + struct bgmac_slot_info *slot = &ring->slots[slot_idx]; +- u32 ctl1; ++ u32 ctl0, ctl1; + int len; + + if (slot_idx == empty_slot) + break; + ++ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0); + ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1); + len = ctl1 & BGMAC_DESC_CTL1_LEN; +- if (ctl1 & BGMAC_DESC_CTL0_SOF) ++ if (ctl0 & BGMAC_DESC_CTL0_SOF) + /* Unmap no longer used buffer */ + dma_unmap_single(dma_dev, slot->dma_addr, len, + DMA_TO_DEVICE); +@@ -469,6 +470,11 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, + len -= ETH_FCS_LEN; + + skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); ++ if (unlikely(!skb)) { ++ bgmac_err(bgmac, "build_skb failed\n"); ++ put_page(virt_to_head_page(buf)); ++ break; ++ } + skb_put(skb, BGMAC_RX_FRAME_OFFSET + + BGMAC_RX_BUF_OFFSET + len); + skb_pull(skb, BGMAC_RX_FRAME_OFFSET + +@@ -1302,7 +1308,8 @@ static int bgmac_open(struct net_device *net_dev) + + phy_start(bgmac->phy_dev); + +- netif_carrier_on(net_dev); ++ netif_start_queue(net_dev); ++ + return 0; + } + +diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c +index 1795c935ff02..7b8638ddb673 100644 +--- a/drivers/net/ethernet/emulex/benet/be_cmds.c ++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c +@@ -1052,7 +1052,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, + err: + spin_unlock_bh(&adapter->mcc_lock); + +- if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) ++ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) + status = -EPERM; + + return status; +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c +index 6a061f17a44f..4cd2a7d0124f 100644 +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -2939,7 +2939,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, + size, GFAR_RXB_TRUESIZE); + + /* try reuse page */ +- if (unlikely(page_count(page) != 1)) ++ if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) + return false; + + /* change offset to the other half */ +diff --git
a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c +index f9e4988ea30e..2f9b12cf9ee5 100644 +--- a/drivers/net/ethernet/ibm/ibmveth.c ++++ b/drivers/net/ethernet/ibm/ibmveth.c +@@ -1602,8 +1602,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) + netdev->netdev_ops = &ibmveth_netdev_ops; + netdev->ethtool_ops = &netdev_ethtool_ops; + SET_NETDEV_DEV(netdev, &dev->dev); +- netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | +- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; ++ netdev->hw_features = NETIF_F_SG; ++ if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { ++ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | ++ NETIF_F_RXCSUM; ++ } + + netdev->features |= netdev->hw_features; + +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c +index d74f5f4e5782..07eabf72c480 100644 +--- a/drivers/net/ethernet/korina.c ++++ b/drivers/net/ethernet/korina.c +@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work) + DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR, + &lp->rx_dma_regs->dmasm); + +- korina_free_ring(dev); +- + napi_disable(&lp->napi); + ++ korina_free_ring(dev); ++ + if (korina_init(dev) < 0) { + printk(KERN_ERR "%s: cannot restart device\n", dev->name); + return; +@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev) + tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR; + writel(tmp, &lp->rx_dma_regs->dmasm); + +- korina_free_ring(dev); +- + napi_disable(&lp->napi); + + cancel_work_sync(&lp->restart_task); + ++ korina_free_ring(dev); ++ + free_irq(lp->rx_irq, dev); + free_irq(lp->tx_irq, dev); + free_irq(lp->ovr_irq, dev); +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c +index 71ec9cb08e06..15056f06754a 100644 +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -2446,7 +2446,7 @@ static void mvneta_start_dev(struct mvneta_port *pp) + mvneta_port_enable(pp); + + /* Enable polling on the port */ +- for_each_present_cpu(cpu) { ++ for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); + + napi_enable(&port->napi); +@@ -2472,7 +2472,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp) + + phy_stop(pp->phy_dev); + +- for_each_present_cpu(cpu) { ++ for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); + + napi_disable(&port->napi); +@@ -2902,13 +2902,11 @@ err_cleanup_rxqs: + static int mvneta_stop(struct net_device *dev) + { + struct mvneta_port *pp = netdev_priv(dev); +- int cpu; + + mvneta_stop_dev(pp); + mvneta_mdio_remove(pp); + unregister_cpu_notifier(&pp->cpu_notifier); +- for_each_present_cpu(cpu) +- smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); ++ on_each_cpu(mvneta_percpu_disable, pp, true); + free_percpu_irq(dev->irq, pp->ports); + mvneta_cleanup_rxqs(pp); + mvneta_cleanup_txqs(pp); +diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c +index 603d1c3d3b2e..ff77b8b608bd 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/eq.c ++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c +@@ -542,8 +542,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) + break; + + case MLX4_EVENT_TYPE_SRQ_LIMIT: +- mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", +- __func__); ++ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT.
srq_no=0x%x, eq 0x%x\n", ++ __func__, be32_to_cpu(eqe->event.srq.srqn), ++ eq->eqn); + case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: + if (mlx4_is_master(dev)) { + /* forward only to slave owning the SRQ */ +@@ -558,15 +559,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) + eq->eqn, eq->cons_index, ret); + break; + } +- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", +- __func__, slave, +- be32_to_cpu(eqe->event.srq.srqn), +- eqe->type, eqe->subtype); ++ if (eqe->type == ++ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) ++ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", ++ __func__, slave, ++ be32_to_cpu(eqe->event.srq.srqn), ++ eqe->type, eqe->subtype); + + if (!ret && slave != dev->caps.function) { +- mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", +- __func__, eqe->type, +- eqe->subtype, slave); ++ if (eqe->type == ++ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) ++ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", ++ __func__, eqe->type, ++ eqe->subtype, slave); + mlx4_slave_event(dev, slave, eqe); + break; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 1e611980cf99..f5c1f4acc57b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -153,8 +153,9 @@ static struct mlx5_profile profile[] = { + }, + }; + +-#define FW_INIT_TIMEOUT_MILI 2000 +-#define FW_INIT_WAIT_MS 2 ++#define FW_INIT_TIMEOUT_MILI 2000 ++#define FW_INIT_WAIT_MS 2 ++#define FW_PRE_INIT_TIMEOUT_MILI 10000 + + static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) + { +@@ -934,6 +935,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) + */ + dev->state = MLX5_DEVICE_STATE_UP; + ++ /* wait for firmware to accept initialization segments configurations ++ */ ++ err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); ++ if (err) { ++ dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", ++ FW_PRE_INIT_TIMEOUT_MILI); ++ goto out; ++ } ++ + err = mlx5_cmd_init(dev); + if (err) { + dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c +index 1e61d4da72db..585e90f8341d 100644 +--- a/drivers/net/ethernet/renesas/ravb_main.c ++++ b/drivers/net/ethernet/renesas/ravb_main.c +@@ -221,18 +221,6 @@ static void ravb_ring_free(struct net_device *ndev, int q) + int ring_size; + int i; + +- /* Free RX skb ringbuffer */ +- if (priv->rx_skb[q]) { +- for (i = 0; i < priv->num_rx_ring[q]; i++) +- dev_kfree_skb(priv->rx_skb[q][i]); +- } +- kfree(priv->rx_skb[q]); +- priv->rx_skb[q] = NULL; +- +- /* Free aligned TX buffers */ +- kfree(priv->tx_align[q]); +- priv->tx_align[q] = NULL; +- + if (priv->rx_ring[q]) { + for (i = 0; i < priv->num_rx_ring[q]; i++) { + struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; +@@ -261,6 +249,18 @@ static void ravb_ring_free(struct net_device *ndev, int q) + priv->tx_ring[q] = NULL; + } + ++ /* Free RX skb ringbuffer */ ++ if (priv->rx_skb[q]) { ++ for (i = 0; i < priv->num_rx_ring[q]; i++) ++ dev_kfree_skb(priv->rx_skb[q][i]); ++ } ++ kfree(priv->rx_skb[q]); ++ priv->rx_skb[q] = NULL; ++ ++ /* Free aligned TX buffers */ ++ kfree(priv->tx_align[q]); ++ priv->tx_align[q] = NULL; ++ + /* Free TX skb ringbuffer. + * SKBs are freed by ravb_tx_free() call above.
+ */ +diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c +index d790cb8d9db3..8e832ba8ab24 100644 +--- a/drivers/net/ethernet/sfc/falcon.c ++++ b/drivers/net/ethernet/sfc/falcon.c +@@ -2796,6 +2796,11 @@ const struct efx_nic_type falcon_a1_nic_type = { + .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, + .offload_features = NETIF_F_IP_CSUM, + .mcdi_max_ver = -1, ++#ifdef CONFIG_SFC_SRIOV ++ .vswitching_probe = efx_port_dummy_op_int, ++ .vswitching_restore = efx_port_dummy_op_int, ++ .vswitching_remove = efx_port_dummy_op_void, ++#endif + }; + + const struct efx_nic_type falcon_b0_nic_type = { +@@ -2897,4 +2902,9 @@ const struct efx_nic_type falcon_b0_nic_type = { + .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, + .mcdi_max_ver = -1, + .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, ++#ifdef CONFIG_SFC_SRIOV ++ .vswitching_probe = efx_port_dummy_op_int, ++ .vswitching_restore = efx_port_dummy_op_int, ++ .vswitching_remove = efx_port_dummy_op_void, ++#endif + }; +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 7f7c87762bc6..8dfc75250583 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -47,8 +47,16 @@ module_param(gso, bool, 0444); + */ + DECLARE_EWMA(pkt_len, 1, 64) + ++/* With mergeable buffers we align buffer address and use the low bits to ++ * encode its true size. Buffer size is up to 1 page so we need to align to ++ * square root of page size to ensure we reserve enough bits to encode the true ++ * size. ++ */ ++#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2) ++ + /* Minimum alignment for mergeable packet buffers. */ +-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) ++#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \ ++ 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT) + + #define VIRTNET_DRIVER_VERSION "1.0.0" + +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 9a986ccd42e5..dab3bf6649e6 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -2240,7 +2240,7 @@ static void vxlan_cleanup(unsigned long arg) + = container_of(p, struct vxlan_fdb, hlist); + unsigned long timeout; + +- if (f->state & NUD_PERMANENT) ++ if (f->state & (NUD_PERMANENT | NUD_NOARP)) + continue; + + timeout = f->used + vxlan->cfg.age_interval * HZ; +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index 888e9cfef51a..34a062ccb11d 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) + queue->rx.req_prod_pvt = req_prod; + + /* Not enough requests? Try again later.
*/ +- if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { ++ if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) { + mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); + return; + } +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c +index be3bc2f4edd4..09cc64b3b695 100644 +--- a/drivers/platform/x86/ideapad-laptop.c ++++ b/drivers/platform/x86/ideapad-laptop.c +@@ -807,6 +807,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) + case 11: + case 7: + case 6: ++ case 1: + ideapad_input_report(priv, vpc_bit); + break; + case 5: +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c +index 59ced8864b2f..0e6aaef9a038 100644 +--- a/drivers/scsi/lpfc/lpfc_els.c ++++ b/drivers/scsi/lpfc/lpfc_els.c +@@ -3563,12 +3563,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) + } else { + buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; + lpfc_els_free_data(phba, buf_ptr1); ++ elsiocb->context2 = NULL; + } + } + + if (elsiocb->context3) { + buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; + lpfc_els_free_bpl(phba, buf_ptr); ++ elsiocb->context3 = NULL; + } + lpfc_sli_release_iocbq(phba, elsiocb); + return 0; +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c +index f5aeda8f014f..38e90d9c2ced 100644 +--- a/drivers/scsi/lpfc/lpfc_sli.c ++++ b/drivers/scsi/lpfc/lpfc_sli.c +@@ -5887,18 +5887,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) + + free_vfi_bmask: + kfree(phba->sli4_hba.vfi_bmask); ++ phba->sli4_hba.vfi_bmask = NULL; + free_xri_ids: + kfree(phba->sli4_hba.xri_ids); ++ phba->sli4_hba.xri_ids = NULL; + free_xri_bmask: + kfree(phba->sli4_hba.xri_bmask); ++ phba->sli4_hba.xri_bmask = NULL; + free_vpi_ids: + kfree(phba->vpi_ids); ++ phba->vpi_ids = NULL; + free_vpi_bmask: + kfree(phba->vpi_bmask); ++ phba->vpi_bmask = NULL; + free_rpi_ids: + kfree(phba->sli4_hba.rpi_ids); ++ phba->sli4_hba.rpi_ids = NULL; + free_rpi_bmask: + kfree(phba->sli4_hba.rpi_bmask); ++ phba->sli4_hba.rpi_bmask = NULL; + err_exit: + return rc; + } +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c +index 0e59731f95ad..1f6a3b86965f 100644 +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -2466,6 +2466,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) + if (pkt->entry_status & RF_BUSY) + res = DID_BUS_BUSY << 16; + ++ if (pkt->entry_type == NOTIFY_ACK_TYPE && ++ pkt->handle == QLA_TGT_SKIP_HANDLE) ++ return; ++ + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (sp) { + sp->done(ha, sp, res); +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c +index f57d96984ae4..e6faa0b050d1 100644 +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -2865,7 +2865,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, + + pkt->entry_type = NOTIFY_ACK_TYPE; + pkt->entry_count = 1; +- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; ++ pkt->handle = QLA_TGT_SKIP_HANDLE; + + nack = (struct nack_to_isp *)pkt; + nack->ox_id = ntfy->ox_id; +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index 4d5207dff960..8750c86f95f9 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -2566,7 +2566,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) + if (sdp->broken_fua) { + sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); + sdkp->DPOFUA = 0; +- } else if
(sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { ++ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && ++ !sdkp->device->use_16_for_rw) { + sd_first_printk(KERN_NOTICE, sdkp, + "Uses READ/WRITE(6), disabling FUA\n"); + sdkp->DPOFUA = 0; +diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c +index 7dbbb29d24c6..03a2aadf0d3c 100644 +--- a/drivers/scsi/virtio_scsi.c ++++ b/drivers/scsi/virtio_scsi.c +@@ -533,7 +533,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, + { + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); ++ unsigned long flags; + int req_size; ++ int ret; + + BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); + +@@ -561,8 +563,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, + req_size = sizeof(cmd->req.cmd); + } + +- if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) ++ ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)); ++ if (ret == -EIO) { ++ cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; ++ spin_lock_irqsave(&req_vq->vq_lock, flags); ++ virtscsi_complete_cmd(vscsi, cmd); ++ spin_unlock_irqrestore(&req_vq->vq_lock, flags); ++ } else if (ret != 0) { + return SCSI_MLQUEUE_HOST_BUSY; ++ } + return 0; + } + +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c +index 7d3af3eacf57..1ddba9ae8c0f 100644 +--- a/drivers/spi/spi-davinci.c ++++ b/drivers/spi/spi-davinci.c +@@ -651,7 +651,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) + buf = t->rx_buf; + t->rx_dma = dma_map_single(&spi->dev, buf, + t->len, DMA_FROM_DEVICE); +- if (!t->rx_dma) { ++ if (dma_mapping_error(&spi->dev, !t->rx_dma)) { + ret = -EFAULT; + goto err_rx_map; + } +@@ -665,7 +665,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) + buf = (void *)t->tx_buf; + t->tx_dma = dma_map_single(&spi->dev, buf, + t->len, DMA_TO_DEVICE); +- if (!t->tx_dma) { ++ if (dma_mapping_error(&spi->dev, t->tx_dma)) { + ret = -EFAULT; + goto err_tx_map; + } +diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c +index 1a9f18b40be6..34e4b3ad8b92 100644 +--- a/drivers/vfio/vfio_iommu_spapr_tce.c ++++ b/drivers/vfio/vfio_iommu_spapr_tce.c +@@ -1163,6 +1163,10 @@ static int tce_iommu_attach_group(void *iommu_data, + /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", + iommu_group_id(iommu_group), iommu_group); */ + table_group = iommu_group_get_iommudata(iommu_group); ++ if (!table_group) { ++ ret = -ENODEV; ++ goto unlock_exit; ++ } + + if (tce_groups_attached(container) && (!table_group->ops || + !table_group->ops->take_ownership || +diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c +index e0c98423f2c9..11a72bc2c71b 100644 +--- a/drivers/watchdog/bcm_kona_wdt.c ++++ b/drivers/watchdog/bcm_kona_wdt.c +@@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) + if (!wdt) + return -ENOMEM; + ++ spin_lock_init(&wdt->lock); ++ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + wdt->base = devm_ioremap_resource(dev, res); + if (IS_ERR(wdt->base)) +@@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) + return ret; + } + +- spin_lock_init(&wdt->lock); + platform_set_drvdata(pdev, wdt); + watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); + bcm_kona_wdt_wdd.parent = &pdev->dev; +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c +index 7399782c0998..8a58bbc14de2 100644 +---
a/drivers/xen/swiotlb-xen.c ++++ b/drivers/xen/swiotlb-xen.c +@@ -409,9 +409,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, + if (map == SWIOTLB_MAP_ERROR) + return DMA_ERROR_CODE; + ++ dev_addr = xen_phys_to_bus(map); + xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), + dev_addr, map & ~PAGE_MASK, size, dir, attrs); +- dev_addr = xen_phys_to_bus(map); + + /* + * Ensure that the address returned is DMA'ble +@@ -567,13 +567,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, + sg_dma_len(sgl) = 0; + return 0; + } ++ dev_addr = xen_phys_to_bus(map); + xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), + dev_addr, + map & ~PAGE_MASK, + sg->length, + dir, + attrs); +- sg->dma_address = xen_phys_to_bus(map); ++ sg->dma_address = dev_addr; + } else { + /* we are not interested in the dma_addr returned by + * xen_dma_map_page, only in the potential cache flushes executed +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index 3a93755e880f..29ef427c0652 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -2295,6 +2295,7 @@ static int elf_core_dump(struct coredump_params *cprm) + goto end_coredump; + } + } ++ dump_truncate(cprm); + + if (!elf_core_write_extra_data(cprm)) + goto end_coredump; +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 863fa0f1972b..a61926cb01c0 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -4397,8 +4397,19 @@ search_again: + if (found_type > min_type) { + del_item = 1; + } else { +- if (item_end < new_size) ++ if (item_end < new_size) { ++ /* ++ * With NO_HOLES mode, for the following mapping ++ * ++ * [0-4k][hole][8k-12k] ++ * ++ * if truncating isize down to 6k, it ends up ++ * isize being 8k. ++ */ ++ if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) ++ last_size = new_size; + break; ++ } + if (found_key.offset >= new_size) + del_item = 1; + else +diff --git a/fs/coredump.c b/fs/coredump.c +index 5d15c4975ba1..a8852293038a 100644 +--- a/fs/coredump.c ++++ b/fs/coredump.c +@@ -810,3 +810,21 @@ int dump_align(struct coredump_params *cprm, int align) + return mod ? dump_skip(cprm, align - mod) : 1; + } + EXPORT_SYMBOL(dump_align); ++ ++/* ++ * Ensures that file size is big enough to contain the current file ++ * postion. This prevents gdb from complaining about a truncated file ++ * if the last "write" to the file was dump_skip. ++ */ ++void dump_truncate(struct coredump_params *cprm) ++{ ++ struct file *file = cprm->file; ++ loff_t offset; ++ ++ if (file->f_op->llseek && file->f_op->llseek != no_llseek) { ++ offset = file->f_op->llseek(file, 0, SEEK_CUR); ++ if (i_size_read(file->f_mapping->host) < offset) ++ do_truncate(file->f_path.dentry, offset, 0, file); ++ } ++} ++EXPORT_SYMBOL(dump_truncate); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 4e3679b25b9b..8e425f2c5ddd 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -2188,8 +2188,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred, + if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) + return 0; + +- /* even though OPEN succeeded, access is denied.
Close the file */ +- nfs4_close_state(state, fmode); + return -EACCES; + } + +diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c +index 709fbbd44c65..acebc350e98d 100644 +--- a/fs/ocfs2/cluster/heartbeat.c ++++ b/fs/ocfs2/cluster/heartbeat.c +@@ -2070,13 +2070,13 @@ unlock: + spin_unlock(&o2hb_live_lock); + } + +-static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item, ++static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item, + char *page) + { + return sprintf(page, "%u\n", o2hb_dead_threshold); + } + +-static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item, ++static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item, + const char *page, size_t count) + { + unsigned long tmp; +@@ -2125,11 +2125,11 @@ static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item, + + } + +-CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold); ++CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold); + CONFIGFS_ATTR(o2hb_heartbeat_group_, mode); + + static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = { +- &o2hb_heartbeat_group_attr_threshold, ++ &o2hb_heartbeat_group_attr_dead_threshold, + &o2hb_heartbeat_group_attr_mode, + NULL, + }; +diff --git a/include/linux/coredump.h b/include/linux/coredump.h +index d016a121a8c4..28ffa94aed6b 100644 +--- a/include/linux/coredump.h ++++ b/include/linux/coredump.h +@@ -14,6 +14,7 @@ struct coredump_params; + extern int dump_skip(struct coredump_params *cprm, size_t nr); + extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); + extern int dump_align(struct coredump_params *cprm, int align); ++extern void dump_truncate(struct coredump_params *cprm); + #ifdef CONFIG_COREDUMP + extern void do_coredump(const siginfo_t *siginfo); + #else +diff --git a/include/net/xfrm.h b/include/net/xfrm.h +index d6f6e5006ee9..185fb037b332 100644 +--- a/include/net/xfrm.h ++++ b/include/net/xfrm.h +@@ -948,10 +948,6 @@ struct xfrm_dst { + struct flow_cache_object flo; + struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; + int num_pols, num_xfrms; +-#ifdef CONFIG_XFRM_SUB_POLICY +- struct flowi *origin; +- struct xfrm_selector *partner; +-#endif + u32 xfrm_genid; + u32 policy_genid; + u32 route_mtu_cached; +@@ -967,12 +963,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) + dst_release(xdst->route); + if (likely(xdst->u.dst.xfrm)) + xfrm_state_put(xdst->u.dst.xfrm); +-#ifdef CONFIG_XFRM_SUB_POLICY +- kfree(xdst->origin); +- xdst->origin = NULL; +- kfree(xdst->partner); +- xdst->partner = NULL; +-#endif + } + #endif + +diff --git a/kernel/panic.c b/kernel/panic.c +index 41e2b54f36b5..1d07cf9af849 100644 +--- a/kernel/panic.c ++++ b/kernel/panic.c +@@ -167,7 +167,7 @@ void panic(const char *fmt, ...) + * Delay timeout seconds before rebooting the machine. + * We can't use the "normal" timers since we just panicked. + */ +- pr_emerg("Rebooting in %d seconds..", panic_timeout); ++ pr_emerg("Rebooting in %d seconds..\n", panic_timeout); + + for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { + touch_nmi_watchdog(); +diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c +index b0b93fd33af9..f8e8d68ed3fd 100644 +--- a/kernel/sched/loadavg.c ++++ b/kernel/sched/loadavg.c +@@ -201,8 +201,9 @@ void calc_load_exit_idle(void) + struct rq *this_rq = this_rq(); + + /* +- * If we're still before the sample window, we're done. ++ * If we're still before the pending sample window, we're done.
*/ ++ this_rq->calc_load_update = calc_load_update; + if (time_before(jiffies, this_rq->calc_load_update)) + return; + +@@ -211,7 +212,6 @@ void calc_load_exit_idle(void) + * accounted through the nohz accounting, so skip the entire deal and + * sync up for the next window. + */ +- this_rq->calc_load_update = calc_load_update; + if (time_before(jiffies, this_rq->calc_load_update + 10)) + this_rq->calc_load_update += LOAD_FREQ; + } +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 300d64162aff..464a7864e4c5 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -174,7 +174,7 @@ extern int no_unaligned_warning; + #define SYSCTL_WRITES_WARN 0 + #define SYSCTL_WRITES_STRICT 1 + +-static int sysctl_writes_strict = SYSCTL_WRITES_WARN; ++static int sysctl_writes_strict = SYSCTL_WRITES_STRICT; + + static int proc_do_cad_pid(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +diff --git a/lib/swiotlb.c b/lib/swiotlb.c +index 76f29ecba8f4..771234d050c7 100644 +--- a/lib/swiotlb.c ++++ b/lib/swiotlb.c +@@ -452,11 +452,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, + : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); + + /* +- * For mappings greater than a page, we limit the stride (and +- * hence alignment) to a page size. ++ * For mappings greater than or equal to a page, we limit the stride ++ * (and hence alignment) to a page size. + */ + nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; +- if (size > PAGE_SIZE) ++ if (size >= PAGE_SIZE) + stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); + else + stride = 1; +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 47b469663822..6c6f5ccfcda1 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -1363,8 +1363,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, + */ + if (unlikely(pmd_trans_migrating(*pmdp))) { + page = pmd_page(*pmdp); ++ if (!get_page_unless_zero(page)) ++ goto out_unlock; + spin_unlock(ptl); + wait_on_page_locked(page); ++ put_page(page); + goto out; + } + +@@ -1396,8 +1399,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, + + /* Migration could have started since the pmd_trans_migrating check */ + if (!page_locked) { ++ if (!get_page_unless_zero(page)) ++ goto out_unlock; + spin_unlock(ptl); + wait_on_page_locked(page); ++ put_page(page); + page_nid = -1; + goto out; + } +diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c +index 40dd0f9b00d6..09f733b0424a 100644 +--- a/mm/swap_cgroup.c ++++ b/mm/swap_cgroup.c +@@ -205,6 +205,8 @@ void swap_cgroup_swapoff(int type) + struct page *page = map[i]; + if (page) + __free_page(page); ++ if (!(i % SWAP_CLUSTER_MAX)) ++ cond_resched(); + } + vfree(map); + } +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c +index ad8d6e6b87ca..e20ae2d3c498 100644 +--- a/net/8021q/vlan.c ++++ b/net/8021q/vlan.c +@@ -278,7 +278,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) + return 0; + + out_free_newdev: +- free_netdev(new_dev); ++ if (new_dev->reg_state == NETREG_UNINITIALIZED) ++ free_netdev(new_dev); + return err; + } + +diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c +index 59ce1fcc220c..71b6ab240dea 100644 +--- a/net/caif/cfpkt_skbuff.c ++++ b/net/caif/cfpkt_skbuff.c +@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) + { + struct sk_buff *skb; + +- if (likely(in_interrupt())) +- skb = alloc_skb(len + pfx, GFP_ATOMIC); +- else +- skb = alloc_skb(len + pfx, GFP_KERNEL); +- ++ skb = alloc_skb(len + pfx, GFP_ATOMIC);
+ if (unlikely(skb == NULL)) + return NULL; + +diff --git a/net/core/dev.c b/net/core/dev.c +index 87b8754f34ac..524d8b28e690 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1246,8 +1246,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) + if (!new_ifalias) + return -ENOMEM; + dev->ifalias = new_ifalias; ++ memcpy(dev->ifalias, alias, len); ++ dev->ifalias[len] = 0; + +- strlcpy(dev->ifalias, alias, len+1); + return len; + } + +diff --git a/net/core/dst.c b/net/core/dst.c +index d7ad628bf64e..e72d706f8d0c 100644 +--- a/net/core/dst.c ++++ b/net/core/dst.c +@@ -462,6 +462,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, + spin_lock_bh(&dst_garbage.lock); + dst = dst_garbage.list; + dst_garbage.list = NULL; ++ /* The code in dst_ifdown places a hold on the loopback device. ++ * If the gc entry processing is set to expire after a lengthy ++ * interval, this hold can cause netdev_wait_allrefs() to hang ++ * out and wait for a long time -- until the the loopback ++ * interface is released. If we're really unlucky, it'll emit ++ * pr_emerg messages to console too. Reset the interval here, ++ * so dst cleanups occur in a more timely fashion. ++ */ ++ if (dst_garbage.timer_inc > DST_GC_INC) { ++ dst_garbage.timer_inc = DST_GC_INC; ++ dst_garbage.timer_expires = DST_GC_MIN; ++ mod_delayed_work(system_wq, &dst_gc_work, ++ dst_garbage.timer_expires); ++ } + spin_unlock_bh(&dst_garbage.lock); + + if (last) +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index d43544ce7550..2ec5324a7ff7 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -897,6 +897,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + + nla_total_size(1) /* IFLA_LINKMODE */ + + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ + + nla_total_size(4) /* IFLA_LINK_NETNSID */ ++ + nla_total_size(4) /* IFLA_GROUP */ + + nla_total_size(ext_filter_mask + & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ + + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ +@@ -1089,6 +1090,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, + struct ifla_vf_mac vf_mac; + struct ifla_vf_info ivi; + ++ memset(&ivi, 0, sizeof(ivi)); ++ + /* Not all SR-IOV capable drivers support the + * spoofcheck and "RSS query enable" query. 
Preset to + * -1 so the user space tool can detect that the driver +@@ -1097,7 +1100,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, + ivi.spoofchk = -1; + ivi.rss_query_en = -1; + ivi.trusted = -1; +- memset(ivi.mac, 0, sizeof(ivi.mac)); + /* The default value for VF link state is "auto" + * IFLA_VF_LINK_STATE_AUTO which equals zero + */ +@@ -1370,6 +1372,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { + [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, + [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, + [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, ++ [IFLA_GROUP] = { .type = NLA_U32 }, + }; + + static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { +diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c +index b1dc096d22f8..403593bd2b83 100644 +--- a/net/decnet/dn_route.c ++++ b/net/decnet/dn_route.c +@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt) + call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); + } + +-static inline void dnrt_drop(struct dn_route *rt) +-{ +- dst_release(&rt->dst); +- call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); +-} +- + static void dn_dst_check_expire(unsigned long dummy) + { + int i; +@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops) + } + *rtp = rt->dst.dn_next; + rt->dst.dn_next = NULL; +- dnrt_drop(rt); ++ dnrt_free(rt); + break; + } + spin_unlock_bh(&dn_rt_hash_table[i].lock); +@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou + dst_use(&rth->dst, now); + spin_unlock_bh(&dn_rt_hash_table[hash].lock); + +- dnrt_drop(rt); ++ dst_free(&rt->dst); + *rp = rth; + return 0; + } +@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy) + for(; rt; rt = next) { + next = rcu_dereference_raw(rt->dst.dn_next); + RCU_INIT_POINTER(rt->dst.dn_next, NULL); +- dst_free((struct dst_entry *)rt); ++ dnrt_free(rt); + } + + nothing_to_declare: +@@ -1187,7 +1181,7 @@ make_route: + if (dev_out->flags & IFF_LOOPBACK) + flags |= RTCF_LOCAL; + +- rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST); ++ rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST); + if (rt == NULL) + goto e_nobufs; + +diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c +index 85f2fdc360c2..29246bc9a7b4 100644 +--- a/net/decnet/netfilter/dn_rtmsg.c ++++ b/net/decnet/netfilter/dn_rtmsg.c +@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) + { + struct nlmsghdr *nlh = nlmsg_hdr(skb); + +- if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) ++ if (skb->len < sizeof(*nlh) || ++ nlh->nlmsg_len < sizeof(*nlh) || ++ skb->len < nlh->nlmsg_len) + return; + + if (!netlink_capable(skb, CAP_NET_ADMIN)) +diff --git a/net/dsa/slave.c b/net/dsa/slave.c +index 8dfe9fb7ad36..554c2a961ad5 100644 +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -1006,10 +1006,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, + /* Use already configured phy mode */ + if (p->phy_interface == PHY_INTERFACE_MODE_NA) + p->phy_interface = p->phy->interface; +- phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, +- p->phy_interface); +- +- return 0; ++ return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, ++ p->phy_interface); + } + + static int dsa_slave_phy_setup(struct dsa_slave_priv *p, +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c +index 17adfdaf5795..3809d523d012 100644 +--- a/net/ipv4/igmp.c ++++ b/net/ipv4/igmp.c +@@ -1102,6 +1102,7 @@ static 
void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) + pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); + if (!pmc) + return; ++ spin_lock_init(&pmc->lock); + spin_lock_bh(&im->lock); + pmc->interface = im->interface; + in_dev_hold(in_dev); +@@ -2026,21 +2027,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, + + static void ip_mc_clear_src(struct ip_mc_list *pmc) + { +- struct ip_sf_list *psf, *nextpsf; ++ struct ip_sf_list *psf, *nextpsf, *tomb, *sources; + +- for (psf = pmc->tomb; psf; psf = nextpsf) { ++ spin_lock_bh(&pmc->lock); ++ tomb = pmc->tomb; ++ pmc->tomb = NULL; ++ sources = pmc->sources; ++ pmc->sources = NULL; ++ pmc->sfmode = MCAST_EXCLUDE; ++ pmc->sfcount[MCAST_INCLUDE] = 0; ++ pmc->sfcount[MCAST_EXCLUDE] = 1; ++ spin_unlock_bh(&pmc->lock); ++ ++ for (psf = tomb; psf; psf = nextpsf) { + nextpsf = psf->sf_next; + kfree(psf); + } +- pmc->tomb = NULL; +- for (psf = pmc->sources; psf; psf = nextpsf) { ++ for (psf = sources; psf; psf = nextpsf) { + nextpsf = psf->sf_next; + kfree(psf); + } +- pmc->sources = NULL; +- pmc->sfmode = MCAST_EXCLUDE; +- pmc->sfcount[MCAST_INCLUDE] = 0; +- pmc->sfcount[MCAST_EXCLUDE] = 1; + } + + /* Join a multicast group +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 8cf3fc7c2932..03dadbf6cc5e 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -291,9 +291,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev, + static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, + unsigned long delay) + { +- if (!delayed_work_pending(&ifp->dad_work)) +- in6_ifa_hold(ifp); +- mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); ++ in6_ifa_hold(ifp); ++ if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay)) ++ in6_ifa_put(ifp); + } + + static int snmp6_alloc_dev(struct inet6_dev *idev) +diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c +index ed33abf57abd..9ac4f0cef27d 100644 +--- a/net/ipv6/fib6_rules.c ++++ b/net/ipv6/fib6_rules.c +@@ -32,7 +32,6 @@ struct fib6_rule { + struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, + int flags, pol_lookup_t lookup) + { +- struct rt6_info *rt; + struct fib_lookup_arg arg = { + .lookup_ptr = lookup, + .flags = FIB_LOOKUP_NOREF, +@@ -41,21 +40,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, + fib_rules_lookup(net->ipv6.fib6_rules_ops, + flowi6_to_flowi(fl6), flags, &arg); + +- rt = arg.result; ++ if (arg.result) ++ return arg.result; + +- if (!rt) { +- dst_hold(&net->ipv6.ip6_null_entry->dst); +- return &net->ipv6.ip6_null_entry->dst; +- } +- +- if (rt->rt6i_flags & RTF_REJECT && +- rt->dst.error == -EAGAIN) { +- ip6_rt_put(rt); +- rt = net->ipv6.ip6_null_entry; +- dst_hold(&rt->dst); +- } +- +- return &rt->dst; ++ dst_hold(&net->ipv6.ip6_null_entry->dst); ++ return &net->ipv6.ip6_null_entry->dst; + } + + static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, +@@ -116,7 +105,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, + flp6->saddr = saddr; + } + err = rt->dst.error; +- goto out; ++ if (err != -EAGAIN) ++ goto out; + } + again: + ip6_rt_put(rt); +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index 85bf86458706..1ac06723f0d7 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -290,8 +290,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, + struct rt6_info *rt; + + rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); +- if (rt->rt6i_flags & RTF_REJECT && +- rt->dst.error == -EAGAIN) { ++ if 
(rt->dst.error == -EAGAIN) { + ip6_rt_put(rt); + rt = net->ipv6.ip6_null_entry; + dst_hold(&rt->dst); +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 19c0d67ce8c4..7d339fc1057f 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1005,8 +1005,10 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, + } + #endif + if (ipv6_addr_v4mapped(&fl6->saddr) && +- !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) +- return -EAFNOSUPPORT; ++ !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) { ++ err = -EAFNOSUPPORT; ++ goto out_err_release; ++ } + + return 0; + +diff --git a/net/key/af_key.c b/net/key/af_key.c +index f9c9ecb0cdd3..e67c28e614b9 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, + goto out; + } + ++ err = -ENOBUFS; + key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; + if (sa->sadb_sa_auth) { + int keysize = 0; +@@ -1146,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, + if (key) + keysize = (key->sadb_key_bits + 7) / 8; + x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL); +- if (!x->aalg) ++ if (!x->aalg) { ++ err = -ENOMEM; + goto out; ++ } + strcpy(x->aalg->alg_name, a->name); + x->aalg->alg_key_len = 0; + if (key) { +@@ -1166,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, + goto out; + } + x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL); +- if (!x->calg) ++ if (!x->calg) { ++ err = -ENOMEM; + goto out; ++ } + strcpy(x->calg->alg_name, a->name); + x->props.calgo = sa->sadb_sa_encrypt; + } else { +@@ -1181,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, + if (key) + keysize = (key->sadb_key_bits + 7) / 8; + x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL); +- if (!x->ealg) ++ if (!x->ealg) { ++ err = -ENOMEM; + goto out; ++ } + strcpy(x->ealg->alg_name, a->name); + x->ealg->alg_key_len = 0; + if (key) { +@@ -1227,8 +1234,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, + struct xfrm_encap_tmpl *natt; + + x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL); +- if (!x->encap) ++ if (!x->encap) { ++ err = -ENOMEM; + goto out; ++ } + + natt = x->encap; + n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]; +diff --git a/net/mac80211/main.c b/net/mac80211/main.c +index 175ffcf7fb06..2ee53dc1ddf7 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -891,12 +891,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) + supp_ht = supp_ht || sband->ht_cap.ht_supported; + supp_vht = supp_vht || sband->vht_cap.vht_supported; + +- if (sband->ht_cap.ht_supported) +- local->rx_chains = +- max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), +- local->rx_chains); ++ if (!sband->ht_cap.ht_supported) ++ continue; + + /* TODO: consider VHT for RX chains, hopefully it's the same */ ++ local->rx_chains = ++ max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), ++ local->rx_chains); ++ ++ /* no need to mask, SM_PS_DISABLED has all bits set */ ++ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << ++ IEEE80211_HT_CAP_SM_PS_SHIFT; + } + + /* if low-level driver supports AP, we also support VLAN */ +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c +index 9f5272968abb..e565b2becb14 100644 +--- a/net/netfilter/nf_conntrack_netlink.c ++++ b/net/netfilter/nf_conntrack_netlink.c +@@ -45,6 +45,8 @@ + #include + #include + #include ++#include ++#include + #ifdef CONFIG_NF_NAT_NEEDED + 
#include + #include +@@ -1798,6 +1800,8 @@ ctnetlink_create_conntrack(struct net *net, + nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); + nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); + nf_ct_labels_ext_add(ct); ++ nfct_seqadj_ext_add(ct); ++ nfct_synproxy_ext_add(ct); + + /* we must add conntrack extensions before confirmation. */ + ct->status |= IPS_CONFIRMED; +diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c +index b7c43def0dc6..00f798b20b20 100644 +--- a/net/netfilter/xt_TCPMSS.c ++++ b/net/netfilter/xt_TCPMSS.c +@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb, + tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); + tcp_hdrlen = tcph->doff * 4; + +- if (len < tcp_hdrlen) ++ if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr)) + return -1; + + if (info->mss == XT_TCPMSS_CLAMP_PMTU) { +@@ -156,6 +156,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, + if (len > tcp_hdrlen) + return 0; + ++ /* tcph->doff has 4 bits, do not wrap it to 0 */ ++ if (tcp_hdrlen >= 15 * 4) ++ return 0; ++ + /* + * MSS Option not found ?! add it.. + */ +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index 956141b71619..3ebf3b652d60 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, + union sctp_addr *laddr = (union sctp_addr *)addr; + struct sctp_transport *transport; + +- if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) ++ if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len)) + return NULL; + + addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index b2e934ff2448..e05ec54ac53f 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -997,7 +997,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + struct path path = { NULL, NULL }; + + err = -EINVAL; +- if (sunaddr->sun_family != AF_UNIX) ++ if (addr_len < offsetofend(struct sockaddr_un, sun_family) || ++ sunaddr->sun_family != AF_UNIX) + goto out; + + if (addr_len == sizeof(short)) { +@@ -1108,6 +1109,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, + unsigned int hash; + int err; + ++ err = -EINVAL; ++ if (alen < offsetofend(struct sockaddr, sa_family)) ++ goto out; ++ + if (addr->sa_family != AF_UNSPEC) { + err = unix_mkname(sunaddr, alen, &hash); + if (err < 0) +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 36a50ef9295d..8a0fdd870395 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -1776,43 +1776,6 @@ free_dst: + goto out; + } + +-#ifdef CONFIG_XFRM_SUB_POLICY +-static int xfrm_dst_alloc_copy(void **target, const void *src, int size) +-{ +- if (!*target) { +- *target = kmalloc(size, GFP_ATOMIC); +- if (!*target) +- return -ENOMEM; +- } +- +- memcpy(*target, src, size); +- return 0; +-} +-#endif +- +-static int xfrm_dst_update_parent(struct dst_entry *dst, +- const struct xfrm_selector *sel) +-{ +-#ifdef CONFIG_XFRM_SUB_POLICY +- struct xfrm_dst *xdst = (struct xfrm_dst *)dst; +- return xfrm_dst_alloc_copy((void **)&(xdst->partner), +- sel, sizeof(*sel)); +-#else +- return 0; +-#endif +-} +- +-static int xfrm_dst_update_origin(struct dst_entry *dst, +- const struct flowi *fl) +-{ +-#ifdef CONFIG_XFRM_SUB_POLICY +- struct xfrm_dst *xdst = (struct xfrm_dst *)dst; +- return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); +-#else +- return 0; +-#endif +-} +- + static int xfrm_expand_policies(const struct flowi *fl, u16 family, + struct 
xfrm_policy **pols, + int *num_pols, int *num_xfrms) +@@ -1884,16 +1847,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, + + xdst = (struct xfrm_dst *)dst; + xdst->num_xfrms = err; +- if (num_pols > 1) +- err = xfrm_dst_update_parent(dst, &pols[1]->selector); +- else +- err = xfrm_dst_update_origin(dst, fl); +- if (unlikely(err)) { +- dst_free(dst); +- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); +- return ERR_PTR(err); +- } +- + xdst->num_pols = num_pols; + memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); + xdst->policy_genid = atomic_read(&pols[0]->genid); +diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h +index 373fcad840ea..776dffa88aee 100644 +--- a/sound/pci/hda/hda_codec.h ++++ b/sound/pci/hda/hda_codec.h +@@ -294,6 +294,8 @@ struct hda_codec { + + #define list_for_each_codec(c, bus) \ + list_for_each_entry(c, &(bus)->core.codec_list, core.list) ++#define list_for_each_codec_safe(c, n, bus) \ ++ list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list) + + /* snd_hda_codec_read/write optional flags */ + #define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0) +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c +index 5baf8b56b6e7..9c6e10fb479f 100644 +--- a/sound/pci/hda/hda_controller.c ++++ b/sound/pci/hda/hda_controller.c +@@ -1128,8 +1128,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs); + /* configure each codec instance */ + int azx_codec_configure(struct azx *chip) + { +- struct hda_codec *codec; +- list_for_each_codec(codec, &chip->bus) { ++ struct hda_codec *codec, *next; ++ ++ /* use _safe version here since snd_hda_codec_configure() deregisters ++ * the device upon error and deletes itself from the bus list. ++ */ ++ list_for_each_codec_safe(codec, next, &chip->bus) { + snd_hda_codec_configure(codec); + } + return 0; +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c +index dc2fa576d60d..689df78f640a 100644 +--- a/sound/pci/hda/hda_generic.c ++++ b/sound/pci/hda/hda_generic.c +@@ -3190,6 +3190,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec) + spec->input_paths[i][nums]); + spec->input_paths[i][nums] = + spec->input_paths[i][n]; ++ spec->input_paths[i][n] = 0; + } + } + nums++; +diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c +index 05012bb178d7..fdd87c7e3e91 100644 +--- a/tools/perf/util/probe-finder.c ++++ b/tools/perf/util/probe-finder.c +@@ -1460,16 +1460,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, + Dwarf_Addr _addr = 0, baseaddr = 0; + const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; + int baseline = 0, lineno = 0, ret = 0; +- bool reloc = false; + +-retry: ++ /* We always need to relocate the address for aranges */ ++ if (debuginfo__get_text_offset(dbg, &baseaddr) == 0) ++ addr += baseaddr; + /* Find cu die */ + if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { +- if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) { +- addr += baseaddr; +- reloc = true; +- goto retry; +- } + pr_warning("Failed to find debug information for address %lx\n", + addr); + ret = -EINVAL; diff --git a/patch/kernel/rk3328-default/patch-4.4.76-77.patch b/patch/kernel/rk3328-default/patch-4.4.76-77.patch new file mode 100644 index 000000000..49a9caee4 --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.76-77.patch @@ -0,0 +1,1584 @@ +diff --git a/Makefile b/Makefile +index 902ab134446e..bf49a61d02e2 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION 
= 4 + PATCHLEVEL = 4 +-SUBLEVEL = 76 ++SUBLEVEL = 77 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h +index 0b1ff4c1c14e..fffb2794dd89 100644 +--- a/arch/x86/include/asm/pat.h ++++ b/arch/x86/include/asm/pat.h +@@ -7,6 +7,7 @@ + bool pat_enabled(void); + void pat_disable(const char *reason); + extern void pat_init(void); ++extern void init_cache_modes(void); + + extern int reserve_memtype(u64 start, u64 end, + enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm); +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c +index d2bbe343fda7..e67b834279b2 100644 +--- a/arch/x86/kernel/setup.c ++++ b/arch/x86/kernel/setup.c +@@ -1048,6 +1048,13 @@ void __init setup_arch(char **cmdline_p) + if (mtrr_trim_uncached_memory(max_pfn)) + max_pfn = e820_end_of_ram_pfn(); + ++ /* ++ * This call is required when the CPU does not support PAT. If ++ * mtrr_bp_init() invoked it already via pat_init() the call has no ++ * effect. ++ */ ++ init_cache_modes(); ++ + #ifdef CONFIG_X86_32 + /* max_low_pfn get updated here */ + find_low_pfn_range(); +diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S +index 27f89c79a44b..423644c230e7 100644 +--- a/arch/x86/lib/copy_user_64.S ++++ b/arch/x86/lib/copy_user_64.S +@@ -80,7 +80,7 @@ ENTRY(copy_user_generic_unrolled) + movl %edx,%ecx + andl $63,%edx + shrl $6,%ecx +- jz 17f ++ jz .L_copy_short_string + 1: movq (%rsi),%r8 + 2: movq 1*8(%rsi),%r9 + 3: movq 2*8(%rsi),%r10 +@@ -101,7 +101,8 @@ ENTRY(copy_user_generic_unrolled) + leaq 64(%rdi),%rdi + decl %ecx + jnz 1b +-17: movl %edx,%ecx ++.L_copy_short_string: ++ movl %edx,%ecx + andl $7,%edx + shrl $3,%ecx + jz 20f +@@ -215,6 +216,8 @@ ENDPROC(copy_user_generic_string) + */ + ENTRY(copy_user_enhanced_fast_string) + ASM_STAC ++ cmpl $64,%edx ++ jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */ + movl %edx,%ecx + 1: rep + movsb +diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c +index 6ad687d104ca..3f1bb4f93a5a 100644 +--- a/arch/x86/mm/pat.c ++++ b/arch/x86/mm/pat.c +@@ -36,14 +36,14 @@ + #undef pr_fmt + #define pr_fmt(fmt) "" fmt + +-static bool boot_cpu_done; +- +-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT); +-static void init_cache_modes(void); ++static bool __read_mostly boot_cpu_done; ++static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT); ++static bool __read_mostly pat_initialized; ++static bool __read_mostly init_cm_done; + + void pat_disable(const char *reason) + { +- if (!__pat_enabled) ++ if (pat_disabled) + return; + + if (boot_cpu_done) { +@@ -51,10 +51,8 @@ void pat_disable(const char *reason) + return; + } + +- __pat_enabled = 0; ++ pat_disabled = true; + pr_info("x86/PAT: %s\n", reason); +- +- init_cache_modes(); + } + + static int __init nopat(char *str) +@@ -66,7 +64,7 @@ early_param("nopat", nopat); + + bool pat_enabled(void) + { +- return !!__pat_enabled; ++ return pat_initialized; + } + EXPORT_SYMBOL_GPL(pat_enabled); + +@@ -204,6 +202,8 @@ static void __init_cache_modes(u64 pat) + update_cache_mode_entry(i, cache); + } + pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg); ++ ++ init_cm_done = true; + } + + #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) +@@ -224,6 +224,7 @@ static void pat_bsp_init(u64 pat) + } + + wrmsrl(MSR_IA32_CR_PAT, pat); ++ pat_initialized = true; + + __init_cache_modes(pat); + } +@@ -241,10 +242,9 @@ static void pat_ap_init(u64 pat) + wrmsrl(MSR_IA32_CR_PAT, pat); + } + +-static void init_cache_modes(void) 
++void init_cache_modes(void) + { + u64 pat = 0; +- static int init_cm_done; + + if (init_cm_done) + return; +@@ -286,8 +286,6 @@ static void init_cache_modes(void) + } + + __init_cache_modes(pat); +- +- init_cm_done = 1; + } + + /** +@@ -305,10 +303,8 @@ void pat_init(void) + u64 pat; + struct cpuinfo_x86 *c = &boot_cpu_data; + +- if (!pat_enabled()) { +- init_cache_modes(); ++ if (pat_disabled) + return; +- } + + if ((c->x86_vendor == X86_VENDOR_INTEL) && + (((c->x86 == 0x6) && (c->x86_model <= 0xd)) || +diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c +index 0c2fae8d929d..73eb7fd4aec4 100644 +--- a/arch/x86/tools/relocs.c ++++ b/arch/x86/tools/relocs.c +@@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode) + die("Segment relocations found but --realmode not specified\n"); + + /* Order the relocations for more efficient processing */ +- sort_relocs(&relocs16); + sort_relocs(&relocs32); + #if ELF_BITS == 64 + sort_relocs(&relocs32neg); + sort_relocs(&relocs64); ++#else ++ sort_relocs(&relocs16); + #endif + + /* Print the relocations */ +diff --git a/drivers/base/platform.c b/drivers/base/platform.c +index ba66330cea67..cb4ad6e98b28 100644 +--- a/drivers/base/platform.c ++++ b/drivers/base/platform.c +@@ -807,7 +807,7 @@ static ssize_t driver_override_store(struct device *dev, + const char *buf, size_t count) + { + struct platform_device *pdev = to_platform_device(dev); +- char *driver_override, *old = pdev->driver_override, *cp; ++ char *driver_override, *old, *cp; + + if (count > PATH_MAX) + return -EINVAL; +@@ -820,12 +820,15 @@ static ssize_t driver_override_store(struct device *dev, + if (cp) + *cp = '\0'; + ++ device_lock(dev); ++ old = pdev->driver_override; + if (strlen(driver_override)) { + pdev->driver_override = driver_override; + } else { + kfree(driver_override); + pdev->driver_override = NULL; + } ++ device_unlock(dev); + + kfree(old); + +@@ -836,8 +839,12 @@ static ssize_t driver_override_show(struct device *dev, + struct device_attribute *attr, char *buf) + { + struct platform_device *pdev = to_platform_device(dev); ++ ssize_t len; + +- return sprintf(buf, "%s\n", pdev->driver_override); ++ device_lock(dev); ++ len = sprintf(buf, "%s\n", pdev->driver_override); ++ device_unlock(dev); ++ return len; + } + static DEVICE_ATTR_RW(driver_override); + +diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c +index f300eba95bb1..1244cdf52859 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_object.c ++++ b/drivers/gpu/drm/virtio/virtgpu_object.c +@@ -81,8 +81,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, + return -ENOMEM; + size = roundup(size, PAGE_SIZE); + ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size); +- if (ret != 0) ++ if (ret != 0) { ++ kfree(bo); + return ret; ++ } + bo->dumb = false; + virtio_gpu_init_ttm_placement(bo, pinned); + +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c +index 1c02deab068f..9eca4b41fa0a 100644 +--- a/drivers/infiniband/core/uverbs_cmd.c ++++ b/drivers/infiniband/core/uverbs_cmd.c +@@ -2287,6 +2287,10 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, + if (copy_from_user(&cmd, buf, sizeof cmd)) + return -EFAULT; + ++ if (cmd.port_num < rdma_start_port(ib_dev) || ++ cmd.port_num > rdma_end_port(ib_dev)) ++ return -EINVAL; ++ + INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, + out_len); + +@@ -2827,6 +2831,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, + if 
(copy_from_user(&cmd, buf, sizeof cmd)) + return -EFAULT; + ++ if (cmd.attr.port_num < rdma_start_port(ib_dev) || ++ cmd.attr.port_num > rdma_end_port(ib_dev)) ++ return -EINVAL; ++ + uobj = kmalloc(sizeof *uobj, GFP_KERNEL); + if (!uobj) + return -ENOMEM; +diff --git a/drivers/md/md.c b/drivers/md/md.c +index eff554a12fb4..0a856cb181e9 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -1866,7 +1866,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) + } + sb = page_address(rdev->sb_page); + sb->data_size = cpu_to_le64(num_sectors); +- sb->super_offset = rdev->sb_start; ++ sb->super_offset = cpu_to_le64(rdev->sb_start); + sb->sb_csum = calc_sb_1_csum(sb); + md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, + rdev->sb_page); +@@ -2273,7 +2273,7 @@ static bool does_sb_need_changing(struct mddev *mddev) + /* Check if any mddev parameters have changed */ + if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || + (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || +- (mddev->layout != le64_to_cpu(sb->layout)) || ++ (mddev->layout != le32_to_cpu(sb->layout)) || + (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || + (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) + return true; +diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c +index 8ef6399d794f..bc957528f69f 100644 +--- a/drivers/media/pci/saa7134/saa7134-i2c.c ++++ b/drivers/media/pci/saa7134/saa7134-i2c.c +@@ -355,12 +355,43 @@ static struct i2c_client saa7134_client_template = { + + /* ----------------------------------------------------------- */ + ++/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */ ++static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev) ++{ ++ u8 subaddr = 0x7, dmdregval; ++ u8 data[2]; ++ int ret; ++ struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0, ++ .buf = &subaddr, .len = 1}, ++ {.addr = 0x08, ++ .flags = I2C_M_RD, ++ .buf = &dmdregval, .len = 1} ++ }; ++ struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0, ++ .buf = data, .len = 2} }; ++ ++ ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2); ++ if ((ret == 2) && (dmdregval & 0x2)) { ++ pr_debug("%s: DVB-T demod i2c gate was left closed\n", ++ dev->name); ++ ++ data[0] = subaddr; ++ data[1] = (dmdregval & ~0x2); ++ if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1) ++ pr_err("%s: EEPROM i2c gate open failure\n", ++ dev->name); ++ } ++} ++ + static int + saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len) + { + unsigned char buf; + int i,err; + ++ if (dev->board == SAA7134_BOARD_MD7134) ++ saa7134_i2c_eeprom_md7134_gate(dev); ++ + dev->i2c_client.addr = 0xa0 >> 1; + buf = 0; + if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) { +diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c +index 70da30095b89..a5e4b4b93d1b 100644 +--- a/drivers/net/ethernet/broadcom/bgmac.c ++++ b/drivers/net/ethernet/broadcom/bgmac.c +@@ -1583,6 +1583,11 @@ static int bgmac_probe(struct bcma_device *core) + dev_warn(&core->dev, "Using random MAC: %pM\n", mac); + } + ++ /* This (reset &) enable is not preset in specs or reference driver but ++ * Broadcom does it in arch PCI code when enabling fake PCI device. 
++ */ ++ bcma_core_enable(core, 0); ++ + /* Allocation and references */ + net_dev = alloc_etherdev(sizeof(*bgmac)); + if (!net_dev) +diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c +index 930785a724e1..907fd60c4241 100644 +--- a/drivers/net/wireless/ath/ath10k/pci.c ++++ b/drivers/net/wireless/ath/ath10k/pci.c +@@ -3050,7 +3050,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, + goto err_core_destroy; + } + +- if (QCA_REV_6174(ar)) ++ if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) + ath10k_pci_override_ce_config(ar); + + ret = ath10k_pci_alloc_pipes(ar); +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c +index d59769e858f4..019d7165a045 100644 +--- a/drivers/net/wireless/mac80211_hwsim.c ++++ b/drivers/net/wireless/mac80211_hwsim.c +@@ -2539,7 +2539,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, + + tasklet_hrtimer_init(&data->beacon_timer, + mac80211_hwsim_beacon, +- CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS); ++ CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + + spin_lock_bh(&hwsim_radio_lock); + list_add_tail(&data->list, &hwsim_radios); +diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c +index 6bbda6b4ab50..5da9c95dccb7 100644 +--- a/drivers/pinctrl/freescale/pinctrl-mxs.c ++++ b/drivers/pinctrl/freescale/pinctrl-mxs.c +@@ -195,6 +195,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, + return 0; + } + ++static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg) ++{ ++ u32 tmp; ++ ++ tmp = readl(reg); ++ tmp &= ~(mask << shift); ++ tmp |= value << shift; ++ writel(tmp, reg); ++} ++ + static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, + unsigned group) + { +@@ -212,8 +222,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, + reg += bank * 0x20 + pin / 16 * 0x10; + shift = pin % 16 * 2; + +- writel(0x3 << shift, reg + CLR); +- writel(g->muxsel[i] << shift, reg + SET); ++ mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg); + } + + return 0; +@@ -280,8 +289,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev, + /* mA */ + if (config & MA_PRESENT) { + shift = pin % 8 * 4; +- writel(0x3 << shift, reg + CLR); +- writel(ma << shift, reg + SET); ++ mxs_pinctrl_rmwl(ma, 0x3, shift, reg); + } + + /* vol */ +diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c +index 9677807db364..b505b87661f8 100644 +--- a/drivers/pinctrl/meson/pinctrl-meson8b.c ++++ b/drivers/pinctrl/meson/pinctrl-meson8b.c +@@ -732,8 +732,8 @@ static const char * const sdxc_c_groups[] = { + static const char * const nand_groups[] = { + "nand_io", "nand_io_ce0", "nand_io_ce1", + "nand_io_rb0", "nand_ale", "nand_cle", +- "nand_wen_clk", "nand_ren_clk", "nand_dqs0", +- "nand_dqs1" ++ "nand_wen_clk", "nand_ren_clk", "nand_dqs_0", ++ "nand_dqs_1" + }; + + static const char * const nor_groups[] = { +diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c +index 2b0d70217bbd..699efb1a8c45 100644 +--- a/drivers/pinctrl/sh-pfc/core.c ++++ b/drivers/pinctrl/sh-pfc/core.c +@@ -543,6 +543,9 @@ static int sh_pfc_probe(struct platform_device *pdev) + ret = info->ops->init(pfc); + if (ret < 0) + return ret; ++ ++ /* .init() may have overridden pfc->info */ ++ info = pfc->info; + } + + /* Enable dummy states for those platforms without pinctrl support */ +diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c 
b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +index 87a4f44147c1..42ffa8708abc 100644 +--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c ++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +@@ -1102,7 +1102,7 @@ static const u16 pinmux_data[] = { + PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4), + PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT), + PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1), +- PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0), ++ PINMUX_IPSR_MSEL(IP6_7_6, TX2, SEL_SCIF2_0), + PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0), + PINMUX_IPSR_DATA(IP6_9_8, IRQ0), + PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3), +diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +index 90b973e15982..a7c81e988656 100644 +--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +@@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = { + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), +- SUNXI_FUNCTION(0x3, "owa")), /* DOUT */ ++ SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out")), +diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c +index 7b4af519e17e..8fed55342b0f 100644 +--- a/drivers/staging/comedi/comedi_fops.c ++++ b/drivers/staging/comedi/comedi_fops.c +@@ -2911,6 +2911,7 @@ static int __init comedi_init(void) + dev = comedi_alloc_board_minor(NULL); + if (IS_ERR(dev)) { + comedi_cleanup_board_minors(); ++ class_destroy(comedi_class); + cdev_del(&comedi_cdev); + unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), + COMEDI_NUM_MINORS); +diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c +index 01e642db311e..f35ee85f61b5 100644 +--- a/drivers/staging/vt6656/main_usb.c ++++ b/drivers/staging/vt6656/main_usb.c +@@ -529,6 +529,9 @@ static int vnt_start(struct ieee80211_hw *hw) + goto free_all; + } + ++ if (vnt_key_init_table(priv)) ++ goto free_all; ++ + priv->int_interval = 1; /* bInterval is set to 1 */ + + vnt_int_start_interrupt(priv); +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 96b21b0dac1e..3116edfcdc18 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -223,6 +223,10 @@ static const struct usb_device_id usb_quirk_list[] = { + /* Blackmagic Design UltraStudio SDI */ + { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, + ++ /* Hauppauge HVR-950q */ ++ { USB_DEVICE(0x2040, 0x7200), .driver_info = ++ USB_QUIRK_CONFIG_INTF_STRINGS }, ++ + /* INTEL VALUE SSD */ + { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, + +diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c +index 5c0adb9c6fb2..81db2fa08cad 100644 +--- a/drivers/usb/dwc3/dwc3-st.c ++++ b/drivers/usb/dwc3/dwc3-st.c +@@ -224,7 +224,7 @@ static int st_dwc3_probe(struct platform_device *pdev) + + dwc3_data->syscfg_reg_off = res->start; + +- dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n", ++ dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n", + dwc3_data->glue_base, dwc3_data->syscfg_reg_off); + + dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown"); +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index ec7a50f98f57..d3bd1afd6302 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -1095,14 +1095,14 @@ static int 
__dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) + + if (!dep->endpoint.desc) { + dwc3_trace(trace_dwc3_gadget, +- "trying to queue request %p to disabled %s", ++ "trying to queue request %pK to disabled %s", + &req->request, dep->endpoint.name); + return -ESHUTDOWN; + } + +- if (WARN(req->dep != dep, "request %p belongs to '%s'\n", ++ if (WARN(req->dep != dep, "request %pK belongs to '%s'\n", + &req->request, req->dep->name)) { +- dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'", ++ dwc3_trace(trace_dwc3_gadget, "request %pK belongs to '%s'", + &req->request, req->dep->name); + return -EINVAL; + } +@@ -1310,7 +1310,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, + dwc3_stop_active_transfer(dwc, dep->number, true); + goto out1; + } +- dev_err(dwc->dev, "request %p was not queued to %s\n", ++ dev_err(dwc->dev, "request %pK was not queued to %s\n", + request, ep->name); + ret = -EINVAL; + goto out0; +@@ -1992,7 +1992,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, + * would help. Lets hope that if this occurs, someone + * fixes the root cause instead of looking away :) + */ +- dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", ++ dev_err(dwc->dev, "%s's TRB (%pK) still owned by HW\n", + dep->name, trb); + count = trb->size & DWC3_TRB_SIZE_MASK; + +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 33cec50978b8..b0dc6da3d970 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -134,6 +134,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ ++ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 3bf61acfc26b..ebe51f11105d 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1877,6 +1877,10 @@ static const struct usb_device_id option_ids[] = { + .driver_info = (kernel_ulong_t)&four_g_w100_blacklist + }, + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, ++ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, ++ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff), ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, + { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, + { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index fd509ed6cf70..652b4334b26d 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -158,6 +158,7 @@ static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ ++ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */ + {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ + {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ + 
{DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ +diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c +index 44ab43fc4fcc..af10f7b131a4 100644 +--- a/drivers/usb/usbip/stub_main.c ++++ b/drivers/usb/usbip/stub_main.c +@@ -262,7 +262,11 @@ void stub_device_cleanup_urbs(struct stub_device *sdev) + kmem_cache_free(stub_priv_cache, priv); + + kfree(urb->transfer_buffer); ++ urb->transfer_buffer = NULL; ++ + kfree(urb->setup_packet); ++ urb->setup_packet = NULL; ++ + usb_free_urb(urb); + } + } +diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c +index dbcabc9dbe0d..021003c4de53 100644 +--- a/drivers/usb/usbip/stub_tx.c ++++ b/drivers/usb/usbip/stub_tx.c +@@ -28,7 +28,11 @@ static void stub_free_priv_and_urb(struct stub_priv *priv) + struct urb *urb = priv->urb; + + kfree(urb->setup_packet); ++ urb->setup_packet = NULL; ++ + kfree(urb->transfer_buffer); ++ urb->transfer_buffer = NULL; ++ + list_del(&priv->list); + kmem_cache_free(stub_priv_cache, priv); + usb_free_urb(urb); +diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c +index 5d09ea585840..c2ee23acf359 100644 +--- a/fs/ext4/sysfs.c ++++ b/fs/ext4/sysfs.c +@@ -100,7 +100,7 @@ static ssize_t reserved_clusters_store(struct ext4_attr *a, + int ret; + + ret = kstrtoull(skip_spaces(buf), 0, &val); +- if (!ret || val >= clusters) ++ if (ret || val >= clusters) + return -EINVAL; + + atomic64_set(&sbi->s_resv_clusters, val); +diff --git a/fs/fcntl.c b/fs/fcntl.c +index ee85cd4e136a..62376451bbce 100644 +--- a/fs/fcntl.c ++++ b/fs/fcntl.c +@@ -740,16 +740,10 @@ static int __init fcntl_init(void) + * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY + * is defined as O_NONBLOCK on some platforms and not on others. + */ +- BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( +- O_RDONLY | O_WRONLY | O_RDWR | +- O_CREAT | O_EXCL | O_NOCTTY | +- O_TRUNC | O_APPEND | /* O_NONBLOCK | */ +- __O_SYNC | O_DSYNC | FASYNC | +- O_DIRECT | O_LARGEFILE | O_DIRECTORY | +- O_NOFOLLOW | O_NOATIME | O_CLOEXEC | +- __FMODE_EXEC | O_PATH | __O_TMPFILE | +- __FMODE_NONOTIFY +- )); ++ BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != ++ HWEIGHT32( ++ (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) | ++ __FMODE_EXEC | __FMODE_NONOTIFY)); + + fasync_cache = kmem_cache_create("fasync_cache", + sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL); +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index 9cd8c92b953d..070901e76653 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -80,9 +80,9 @@ static struct rhashtable_params ht_parms = { + + static struct rhashtable gl_hash_table; + +-void gfs2_glock_free(struct gfs2_glock *gl) ++static void gfs2_glock_dealloc(struct rcu_head *rcu) + { +- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; ++ struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); + + if (gl->gl_ops->go_flags & GLOF_ASPACE) { + kmem_cache_free(gfs2_glock_aspace_cachep, gl); +@@ -90,6 +90,13 @@ void gfs2_glock_free(struct gfs2_glock *gl) + kfree(gl->gl_lksb.sb_lvbptr); + kmem_cache_free(gfs2_glock_cachep, gl); + } ++} ++ ++void gfs2_glock_free(struct gfs2_glock *gl) ++{ ++ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; ++ ++ call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + wake_up(&sdp->sd_glock_wait); + } +diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h +index be519416c112..4a9077ec9313 100644 +--- a/fs/gfs2/incore.h ++++ b/fs/gfs2/incore.h +@@ -367,6 +367,7 @@ struct gfs2_glock { + loff_t end; + } gl_vm; + }; ++ struct rcu_head gl_rcu; + 
struct rhash_head gl_node; + }; + +diff --git a/fs/open.c b/fs/open.c +index 157b9940dd73..fbc5c7b230b3 100644 +--- a/fs/open.c ++++ b/fs/open.c +@@ -885,6 +885,12 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o + int lookup_flags = 0; + int acc_mode; + ++ /* ++ * Clear out all open flags we don't know about so that we don't report ++ * them in fcntl(F_GETFD) or similar interfaces. ++ */ ++ flags &= VALID_OPEN_FLAGS; ++ + if (flags & (O_CREAT | __O_TMPFILE)) + op->mode = (mode & S_IALLUGO) | S_IFREG; + else +diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h +index 76ce329e656d..1b48d9c9a561 100644 +--- a/include/linux/fcntl.h ++++ b/include/linux/fcntl.h +@@ -3,6 +3,12 @@ + + #include + ++/* list of all valid flags for the open/openat flags argument: */ ++#define VALID_OPEN_FLAGS \ ++ (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \ ++ O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \ ++ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \ ++ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) + + #ifndef force_o_largefile + #define force_o_largefile() (BITS_PER_LONG != 32) +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h +index 3993b21f3d11..757c554408ce 100644 +--- a/include/linux/usb/hcd.h ++++ b/include/linux/usb/hcd.h +@@ -560,9 +560,9 @@ extern void usb_ep0_reinit(struct usb_device *); + ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) + + #define EndpointRequest \ +- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) ++ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) + #define EndpointOutRequest \ +- ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) ++ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) + + /* class requests from the USB 2.0 hub spec, table 11-15 */ + /* GetBusState and SetHubDescriptor are optional, omitted */ +diff --git a/ipc/mqueue.c b/ipc/mqueue.c +index 161a1807e6ef..5e24eb0ab5dd 100644 +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -1251,8 +1251,10 @@ retry: + + timeo = MAX_SCHEDULE_TIMEOUT; + ret = netlink_attachskb(sock, nc, &timeo, NULL); +- if (ret == 1) ++ if (ret == 1) { ++ sock = NULL; + goto retry; ++ } + if (ret) { + sock = NULL; + nc = NULL; +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 464a7864e4c5..002ec084124b 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -2067,9 +2067,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp, + if (write) { + if (*negp) + return -EINVAL; ++ if (*lvalp > UINT_MAX) ++ return -EINVAL; + *valp = *lvalp; + } else { + unsigned int val = *valp; ++ *negp = false; + *lvalp = (unsigned long)val; + } + return 0; +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index 12ea4ea619ee..e9092a0247bf 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -659,30 +659,25 @@ static int create_trace_kprobe(int argc, char **argv) + pr_info("Probe point is not specified.\n"); + return -EINVAL; + } +- if (isdigit(argv[1][0])) { +- if (is_return) { +- pr_info("Return probe point must be a symbol.\n"); +- return -EINVAL; +- } +- /* an address specified */ +- ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr); +- if (ret) { +- pr_info("Failed to parse address.\n"); +- return ret; +- } +- } else { ++ ++ /* try to parse an address. if that fails, try to read the ++ * input as a symbol. 
*/ ++ if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) { + /* a symbol specified */ + symbol = argv[1]; + /* TODO: support .init module functions */ + ret = traceprobe_split_symbol_offset(symbol, &offset); + if (ret) { +- pr_info("Failed to parse symbol.\n"); ++ pr_info("Failed to parse either an address or a symbol.\n"); + return ret; + } + if (offset && is_return) { + pr_info("Return probe must be used without offset.\n"); + return -EINVAL; + } ++ } else if (is_return) { ++ pr_info("Return probe point must be a symbol.\n"); ++ return -EINVAL; + } + argc -= 2; argv += 2; + +diff --git a/mm/vmscan.c b/mm/vmscan.c +index bfc5050cbd01..440c2df9be82 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -2529,7 +2529,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) + if (!populated_zone(zone)) + continue; + +- classzone_idx = requested_highidx; ++ classzone_idx = gfp_zone(sc->gfp_mask); + while (!populated_zone(zone->zone_pgdat->node_zones + + classzone_idx)) + classzone_idx--; +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 87791f803627..8f13b2eaabf8 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -2165,8 +2165,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) + { + struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb; +- int cnt, oldcnt; +- int err; ++ int cnt, oldcnt, lost; + unsigned int mss; + /* Use SACK to deduce losses of new sequences sent during recovery */ + const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq; +@@ -2206,9 +2205,10 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) + break; + + mss = tcp_skb_mss(skb); +- err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, +- mss, GFP_ATOMIC); +- if (err < 0) ++ /* If needed, chop off the prefix to mark as lost. 
*/ ++ lost = (packets - oldcnt) * mss; ++ if (lost < skb->len && ++ tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0) + break; + cnt = packets; + } +diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c +index 696ccfa08d10..31898856682e 100644 +--- a/security/keys/encrypted-keys/encrypted.c ++++ b/security/keys/encrypted-keys/encrypted.c +@@ -428,7 +428,7 @@ static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key, + static struct key *request_master_key(struct encrypted_key_payload *epayload, + const u8 **master_key, size_t *master_keylen) + { +- struct key *mkey = NULL; ++ struct key *mkey = ERR_PTR(-EINVAL); + + if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX, + KEY_TRUSTED_PREFIX_LEN)) { +diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h +index fa7208a32d76..8a679b21f0c4 100644 +--- a/tools/include/linux/compiler.h ++++ b/tools/include/linux/compiler.h +@@ -115,4 +115,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s + #define WRITE_ONCE(x, val) \ + ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) + ++ ++#ifndef __fallthrough ++# if defined(__GNUC__) && __GNUC__ >= 7 ++# define __fallthrough __attribute__ ((fallthrough)) ++# else ++# define __fallthrough ++# endif ++#endif ++ + #endif /* _TOOLS_LINUX_COMPILER_H */ +diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c +index d28c1b6a3b54..fa5d17af88b7 100644 +--- a/tools/perf/arch/x86/tests/intel-cqm.c ++++ b/tools/perf/arch/x86/tests/intel-cqm.c +@@ -17,7 +17,7 @@ static pid_t spawn(void) + if (pid) + return pid; + +- while(1); ++ while(1) + sleep(5); + return 0; + } +diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c +index 9223c164e545..1f86ee8fb831 100644 +--- a/tools/perf/arch/x86/util/dwarf-regs.c ++++ b/tools/perf/arch/x86/util/dwarf-regs.c +@@ -63,6 +63,8 @@ struct pt_regs_offset { + # define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)} + #endif + ++/* TODO: switching by dwarf address size */ ++#ifndef __x86_64__ + static const struct pt_regs_offset x86_32_regoffset_table[] = { + REG_OFFSET_NAME_32("%ax", eax), + REG_OFFSET_NAME_32("%cx", ecx), +@@ -75,6 +77,8 @@ static const struct pt_regs_offset x86_32_regoffset_table[] = { + REG_OFFSET_END, + }; + ++#define regoffset_table x86_32_regoffset_table ++#else + static const struct pt_regs_offset x86_64_regoffset_table[] = { + REG_OFFSET_NAME_64("%ax", rax), + REG_OFFSET_NAME_64("%dx", rdx), +@@ -95,11 +99,7 @@ static const struct pt_regs_offset x86_64_regoffset_table[] = { + REG_OFFSET_END, + }; + +-/* TODO: switching by dwarf address size */ +-#ifdef __x86_64__ + #define regoffset_table x86_64_regoffset_table +-#else +-#define regoffset_table x86_32_regoffset_table + #endif + + /* Minus 1 for the ending REG_OFFSET_END */ +diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c +index 492df2752a2d..b4eb5b679081 100644 +--- a/tools/perf/bench/numa.c ++++ b/tools/perf/bench/numa.c +@@ -1570,13 +1570,13 @@ static int __bench_numa(const char *name) + "GB/sec,", "total-speed", "GB/sec total speed"); + + if (g->p.show_details >= 2) { +- char tname[32]; ++ char tname[14 + 2 * 10 + 1]; + struct thread_data *td; + for (p = 0; p < g->p.nr_proc; p++) { + for (t = 0; t < g->p.nr_threads; t++) { +- memset(tname, 0, 32); ++ memset(tname, 0, sizeof(tname)); + td = g->threads + 
p*g->p.nr_threads + t; +- snprintf(tname, 32, "process%d:thread%d", p, t); ++ snprintf(tname, sizeof(tname), "process%d:thread%d", p, t); + print_res(tname, td->speed_gbs, + "GB/sec", "thread-speed", "GB/sec/thread speed"); + print_res(tname, td->system_time_ns / 1e9,
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 72b5deb4bd79..20f0e27918dd 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -1252,21 +1252,19 @@ static int is_directory(const char *base_path, const struct dirent *dent) + return S_ISDIR(st.st_mode); + } + +-#define for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next)\ +- while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) && \ +- lang_next) \ +- if ((lang_dirent.d_type == DT_DIR || \ +- (lang_dirent.d_type == DT_UNKNOWN && \ +- is_directory(scripts_path, &lang_dirent))) && \ +- (strcmp(lang_dirent.d_name, ".")) && \ +- (strcmp(lang_dirent.d_name, ".."))) +- +-#define for_each_script(lang_path, lang_dir, script_dirent, script_next)\ +- while (!readdir_r(lang_dir, &script_dirent, &script_next) && \ +- script_next) \ +- if (script_dirent.d_type != DT_DIR && \ +- (script_dirent.d_type != DT_UNKNOWN || \ +- !is_directory(lang_path, &script_dirent))) ++#define for_each_lang(scripts_path, scripts_dir, lang_dirent) \ ++ while ((lang_dirent = readdir(scripts_dir)) != NULL) \ ++ if ((lang_dirent->d_type == DT_DIR || \ ++ (lang_dirent->d_type == DT_UNKNOWN && \ ++ is_directory(scripts_path, lang_dirent))) && \ ++ (strcmp(lang_dirent->d_name, ".")) && \ ++ (strcmp(lang_dirent->d_name, ".."))) ++ ++#define for_each_script(lang_path, lang_dir, script_dirent) \ ++ while ((script_dirent = readdir(lang_dir)) != NULL) \ ++ if (script_dirent->d_type != DT_DIR && \ ++ (script_dirent->d_type != DT_UNKNOWN || \ ++ !is_directory(lang_path, script_dirent))) + + + #define RECORD_SUFFIX "-record"
+@@ -1412,7 +1410,7 @@ static int list_available_scripts(const struct option *opt __maybe_unused, + const char *s __maybe_unused, + int unset __maybe_unused) + { +- struct dirent *script_next, *lang_next, script_dirent, lang_dirent; ++ struct dirent *script_dirent, *lang_dirent; + char scripts_path[MAXPATHLEN]; + DIR *scripts_dir, *lang_dir; + char script_path[MAXPATHLEN];
+@@ -1427,19 +1425,19 @@ static int list_available_scripts(const struct option *opt __maybe_unused, + if (!scripts_dir) + return -1; + +- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) { ++ for_each_lang(scripts_path, scripts_dir, lang_dirent) { + snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, +- lang_dirent.d_name); ++ lang_dirent->d_name); + lang_dir = opendir(lang_path); + if (!lang_dir) + continue; + +- for_each_script(lang_path, lang_dir, script_dirent, script_next) { +- script_root = get_script_root(&script_dirent, REPORT_SUFFIX); ++ for_each_script(lang_path, lang_dir, script_dirent) { ++ script_root = get_script_root(script_dirent, REPORT_SUFFIX); + if (script_root) { + desc = script_desc__findnew(script_root); + snprintf(script_path, MAXPATHLEN, "%s/%s", +- lang_path, script_dirent.d_name); ++ lang_path, script_dirent->d_name); + read_script_info(desc, script_path); + free(script_root); + }
+@@ -1527,7 +1525,7 @@ static int check_ev_match(char *dir_name, char *scriptname, + */ + int find_scripts(char **scripts_array, char **scripts_path_array) + { +- struct dirent *script_next, *lang_next, script_dirent, lang_dirent; ++ struct dirent *script_dirent, *lang_dirent; + char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN]; + DIR *scripts_dir, *lang_dir; + struct perf_session *session;
+@@ -1550,9 +1548,9 @@ int find_scripts(char **scripts_array, char **scripts_path_array) + return -1; + } + +- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) { ++ for_each_lang(scripts_path, scripts_dir, lang_dirent) { + snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, +- lang_dirent.d_name); ++ lang_dirent->d_name); + #ifdef NO_LIBPERL + if (strstr(lang_path, "perl")) + continue;
+@@ -1566,16 +1564,16 @@ int find_scripts(char **scripts_array, char **scripts_path_array) + if (!lang_dir) + continue; + +- for_each_script(lang_path, lang_dir, script_dirent, script_next) { ++ for_each_script(lang_path, lang_dir, script_dirent) { + /* Skip those real time scripts: xxxtop.p[yl] */ +- if (strstr(script_dirent.d_name, "top.")) ++ if (strstr(script_dirent->d_name, "top.")) + continue; + sprintf(scripts_path_array[i], "%s/%s", lang_path, +- script_dirent.d_name); +- temp = strchr(script_dirent.d_name, '.'); ++ script_dirent->d_name); ++ temp = strchr(script_dirent->d_name, '.'); + snprintf(scripts_array[i], +- (temp - script_dirent.d_name) + 1, +- "%s", script_dirent.d_name); ++ (temp - script_dirent->d_name) + 1, ++ "%s", script_dirent->d_name); + + if (check_ev_match(lang_path, + scripts_array[i], session))
+@@ -1593,7 +1591,7 @@ int find_scripts(char **scripts_array, char **scripts_path_array) + + static char *get_script_path(const char *script_root, const char *suffix) + { +- struct dirent *script_next, *lang_next, script_dirent, lang_dirent; ++ struct dirent *script_dirent, *lang_dirent; + char scripts_path[MAXPATHLEN]; + char script_path[MAXPATHLEN]; + DIR *scripts_dir, *lang_dir;
+@@ -1606,21 +1604,21 @@ static char *get_script_path(const char *script_root, const char *suffix) + if (!scripts_dir) + return NULL; + +- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) { ++ for_each_lang(scripts_path, scripts_dir, lang_dirent) { + snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, +- lang_dirent.d_name); ++ lang_dirent->d_name); + lang_dir = opendir(lang_path); + if (!lang_dir) + continue; + +- for_each_script(lang_path, lang_dir, script_dirent, script_next) { +- __script_root = get_script_root(&script_dirent, suffix); ++ for_each_script(lang_path, lang_dir, script_dirent) { ++ __script_root = get_script_root(script_dirent, suffix); + if (__script_root && !strcmp(script_root, __script_root)) { + free(__script_root); + closedir(lang_dir); + closedir(scripts_dir); + snprintf(script_path, MAXPATHLEN, "%s/%s", +- lang_path, script_dirent.d_name); ++ lang_path, script_dirent->d_name); + return strdup(script_path); + } + free(__script_root);
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index 7e2e72e6d9d1..4a8a02c302d2 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -636,7 +636,7 @@ repeat: + case -1: + if (errno == EINTR) + continue; +- /* Fall trhu */ ++ __fallthrough; + default: + c = getc(stdin); + tcsetattr(0, TCSAFLUSH, &save);
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index c783d8fd3a80..ebe7115c751a 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -1617,6 +1617,7 @@ static int trace__process_event(struct trace *trace, struct machine *machine, + color_fprintf(trace->output, PERF_COLOR_RED, + "LOST %" PRIu64 " events!\n", event->lost.lost); + ret = machine__process_lost_event(machine, event, sample); ++ break; + default: + ret = machine__process_event(machine, event, sample); + break;
+diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
+index 636d7b42d844..54af2f2e2ee4 100644
+--- a/tools/perf/tests/parse-events.c
++++ b/tools/perf/tests/parse-events.c
+@@ -1727,15 +1727,14 @@ static int test_pmu_events(void) + } + + while (!ret && (ent = readdir(dir))) { +-#define MAX_NAME 100 + struct evlist_test e; +- char name[MAX_NAME]; ++ char name[2 * NAME_MAX + 1 + 12 + 3]; + + if (!strcmp(ent->d_name, ".") || + !strcmp(ent->d_name, "..")) + continue; + +- snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name); ++ snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name); + + e.name = name; + e.check = test__checkevent_pmu_events;
+@@ -1743,11 +1742,10 @@ static int test_pmu_events(void) + ret = test_event(&e); + if (ret) + break; +- snprintf(name, MAX_NAME, "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name); ++ snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name); + e.name = name; + e.check = test__checkevent_pmu_events_mix; + ret = test_event(&e); +-#undef MAX_NAME + } + + closedir(dir);
+diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
+index d4d7cc27252f..718bd46d47fa 100644
+--- a/tools/perf/ui/browsers/annotate.c
++++ b/tools/perf/ui/browsers/annotate.c
+@@ -755,11 +755,11 @@ static int annotate_browser__run(struct annotate_browser *browser, + nd = browser->curr_hot; + break; + case K_UNTAB: +- if (nd != NULL) ++ if (nd != NULL) { + nd = rb_next(nd); + if (nd == NULL) + nd = rb_first(&browser->entries); +- else ++ } else + nd = browser->curr_hot; + break; + case K_F1:
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index 956187bf1a85..26cba64345e3 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -416,7 +416,7 @@ static int __event__synthesize_thread(union perf_event *comm_event, + { + char filename[PATH_MAX]; + DIR *tasks; +- struct dirent dirent, *next; ++ struct dirent *dirent; + pid_t tgid, ppid; + int rc = 0; +
+@@ -445,11 +445,11 @@ static int __event__synthesize_thread(union perf_event *comm_event, + return 0; + } + +- while (!readdir_r(tasks, &dirent, &next) && next) { ++ while ((dirent = readdir(tasks)) != NULL) { + char *end; + pid_t _pid; + +- _pid = strtol(dirent.d_name, &end, 10); ++ _pid = strtol(dirent->d_name, &end, 10); + if (*end) + continue;
+@@ -558,7 +558,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool, + { + DIR *proc; + char proc_path[PATH_MAX]; +- struct dirent dirent, *next; ++ struct dirent *dirent; + union perf_event *comm_event, *mmap_event, *fork_event; + int err = -1; +
+@@ -583,9 +583,9 @@ int perf_event__synthesize_threads(struct perf_tool *tool, + if (proc == NULL) + goto out_free_fork; + +- while (!readdir_r(proc, &dirent, &next) && next) { ++ while ((dirent = readdir(proc)) != NULL) { + char *end; +- pid_t pid = strtol(dirent.d_name, &end, 10); ++ pid_t pid = strtol(dirent->d_name, &end, 10); + + if (*end) /* only interested in proper numerical dirents */ + continue;
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index 71df7acf8643..933a509a90f8 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + #include "../cache.h" + #include "../util.h"
+@@ -1708,6 +1709,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) + switch (decoder->packet.type) { + case INTEL_PT_TIP_PGD: + decoder->continuous_period = false; ++ __fallthrough; + case INTEL_PT_TIP_PGE: + case INTEL_PT_TIP: + intel_pt_log("ERROR: Unexpected packet\n");
+@@ -1762,6 +1764,8 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) + decoder->pge = false; + decoder->continuous_period = false; + intel_pt_clear_tx_flags(decoder); ++ __fallthrough; ++ + case INTEL_PT_TNT: + decoder->have_tma = false; + intel_pt_log("ERROR: Unexpected packet\n");
+@@ -1802,6 +1806,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) + switch (decoder->packet.type) { + case INTEL_PT_TIP_PGD: + decoder->continuous_period = false; ++ __fallthrough; + case INTEL_PT_TIP_PGE: + case INTEL_PT_TIP: + decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+index b1257c816310..9b2fce25162b 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + + #include "intel-pt-pkt-decoder.h" +
+@@ -488,6 +489,7 @@ int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf, + case INTEL_PT_FUP: + if (!(packet->count)) + return snprintf(buf, buf_len, "%s no ip", name); ++ __fallthrough; + case INTEL_PT_CYC: + case INTEL_PT_VMCS: + case INTEL_PT_MTC:
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index a35db828bd0d..38304b7e4f81 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -138,11 +138,11 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { + #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) + #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) + +-#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \ +- while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ +- if (sys_dirent.d_type == DT_DIR && \ +- (strcmp(sys_dirent.d_name, ".")) && \ +- (strcmp(sys_dirent.d_name, ".."))) ++#define for_each_subsystem(sys_dir, sys_dirent) \ ++ while ((sys_dirent = readdir(sys_dir)) != NULL) \ ++ if (sys_dirent->d_type == DT_DIR && \ ++ (strcmp(sys_dirent->d_name, ".")) && \ ++ (strcmp(sys_dirent->d_name, ".."))) + + static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) + {
+@@ -159,12 +159,12 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) + return 0; + } + +-#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \ +- while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ +- if (evt_dirent.d_type == DT_DIR && \ +- (strcmp(evt_dirent.d_name, ".")) && \ +- (strcmp(evt_dirent.d_name, "..")) && \ +- (!tp_event_has_id(&sys_dirent, &evt_dirent))) ++#define for_each_event(sys_dirent, evt_dir, evt_dirent) \ ++ while ((evt_dirent = readdir(evt_dir)) != NULL) \ ++ if (evt_dirent->d_type == DT_DIR && \ ++ (strcmp(evt_dirent->d_name, ".")) && \ ++ (strcmp(evt_dirent->d_name, "..")) && \ ++ (!tp_event_has_id(sys_dirent, evt_dirent))) + + #define MAX_EVENT_LENGTH 512 +
+@@ -173,7 +173,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) + { + struct tracepoint_path *path = NULL; + DIR *sys_dir, *evt_dir; +- struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; ++ struct dirent *sys_dirent, *evt_dirent; + char id_buf[24]; + int fd; + u64 id;
+@@ -184,18 +184,18 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) + if (!sys_dir) + return NULL; + +-
for_each_subsystem(sys_dir, sys_dirent, sys_next) { ++ for_each_subsystem(sys_dir, sys_dirent) { + + snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, +- sys_dirent.d_name); ++ sys_dirent->d_name); + evt_dir = opendir(dir_path); + if (!evt_dir) + continue; + +- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { ++ for_each_event(sys_dirent, evt_dir, evt_dirent) { + + snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, +- evt_dirent.d_name); ++ evt_dirent->d_name); + fd = open(evt_path, O_RDONLY); + if (fd < 0) + continue;
+@@ -220,9 +220,9 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) + free(path); + return NULL; + } +- strncpy(path->system, sys_dirent.d_name, ++ strncpy(path->system, sys_dirent->d_name, + MAX_EVENT_LENGTH); +- strncpy(path->name, evt_dirent.d_name, ++ strncpy(path->name, evt_dirent->d_name, + MAX_EVENT_LENGTH); + return path; + }
+@@ -1629,7 +1629,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob, + bool name_only) + { + DIR *sys_dir, *evt_dir; +- struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; ++ struct dirent *sys_dirent, *evt_dirent; + char evt_path[MAXPATHLEN]; + char dir_path[MAXPATHLEN]; + char **evt_list = NULL;
+@@ -1647,20 +1647,20 @@ restart: + goto out_close_sys_dir; + } + +- for_each_subsystem(sys_dir, sys_dirent, sys_next) { ++ for_each_subsystem(sys_dir, sys_dirent) { + if (subsys_glob != NULL && +- !strglobmatch(sys_dirent.d_name, subsys_glob)) ++ !strglobmatch(sys_dirent->d_name, subsys_glob)) + continue; + + snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, +- sys_dirent.d_name); ++ sys_dirent->d_name); + evt_dir = opendir(dir_path); + if (!evt_dir) + continue; + +- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { ++ for_each_event(sys_dirent, evt_dir, evt_dirent) { + if (event_glob != NULL && +- !strglobmatch(evt_dirent.d_name, event_glob)) ++ !strglobmatch(evt_dirent->d_name, event_glob)) + continue; + + if (!evt_num_known) {
+@@ -1669,7 +1669,7 @@ restart: + } + + snprintf(evt_path, MAXPATHLEN, "%s:%s", +- sys_dirent.d_name, evt_dirent.d_name); ++ sys_dirent->d_name, evt_dirent->d_name); + + evt_list[evt_i] = strdup(evt_path); + if (evt_list[evt_i] == NULL)
+@@ -1722,7 +1722,7 @@ out_close_sys_dir: + int is_valid_tracepoint(const char *event_string) + { + DIR *sys_dir, *evt_dir; +- struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; ++ struct dirent *sys_dirent, *evt_dirent; + char evt_path[MAXPATHLEN]; + char dir_path[MAXPATHLEN]; +
+@@ -1730,17 +1730,17 @@ int is_valid_tracepoint(const char *event_string) + if (!sys_dir) + return 0; + +- for_each_subsystem(sys_dir, sys_dirent, sys_next) { ++ for_each_subsystem(sys_dir, sys_dirent) { + + snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, +- sys_dirent.d_name); ++ sys_dirent->d_name); + evt_dir = opendir(dir_path); + if (!evt_dir) + continue; + +- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { ++ for_each_event(sys_dirent, evt_dir, evt_dirent) { + snprintf(evt_path, MAXPATHLEN, "%s:%s", +- sys_dirent.d_name, evt_dirent.d_name); ++ sys_dirent->d_name, evt_dirent->d_name); + if (!strcmp(evt_path, event_string)) { + closedir(evt_dir); + closedir(sys_dir);
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 6f2a0279476c..593066c68e3d 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -153,7 +153,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n + if (fd == -1) + return -1; + +- sret = read(fd, alias->unit, UNIT_MAX_LEN); ++ sret = read(fd, alias->unit, UNIT_MAX_LEN); + if (sret < 0) + goto error; +
+diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
+index 6516e220c247..82d28c67e0f3 100644
+--- a/tools/perf/util/scripting-engines/Build
++++ b/tools/perf/util/scripting-engines/Build
+@@ -1,6 +1,6 @@ + libperf-$(CONFIG_LIBPERL) += trace-event-perl.o + libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o + +-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default ++CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default + + CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
+diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c
+index bcae659b6546..efb53772e0ec 100644
+--- a/tools/perf/util/strfilter.c
++++ b/tools/perf/util/strfilter.c
+@@ -269,6 +269,7 @@ static int strfilter_node__sprint(struct strfilter_node *node, char *buf) + len = strfilter_node__sprint_pt(node->l, buf); + if (len < 0) + return len; ++ __fallthrough; + case '!': + if (buf) { + *(buf + len++) = *node->p;
+diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
+index fc8781de62db..accb7ece1d3c 100644
+--- a/tools/perf/util/string.c
++++ b/tools/perf/util/string.c
+@@ -21,6 +21,8 @@ s64 perf_atoll(const char *str) + case 'b': case 'B': + if (*p) + goto out_err; ++ ++ __fallthrough; + case '\0': + return length; + default:
+diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
+index 0a9ae8014729..829508a21448 100644
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -227,7 +227,7 @@ void thread__find_cpumode_addr_location(struct thread *thread, + struct addr_location *al) + { + size_t i; +- const u8 const cpumodes[] = { ++ const u8 cpumodes[] = { + PERF_RECORD_MISC_USER, + PERF_RECORD_MISC_KERNEL, + PERF_RECORD_MISC_GUEST_USER,
+diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
+index 6ec3c5ca438f..4e666b95b87e 100644
+--- a/tools/perf/util/thread_map.c
++++ b/tools/perf/util/thread_map.c
+@@ -92,8 +92,8 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) + { + DIR *proc; + int max_threads = 32, items, i; +- char path[256]; +- struct dirent dirent, *next, **namelist = NULL; ++ char path[NAME_MAX + 1 + 6]; ++ struct dirent *dirent, **namelist = NULL; + struct thread_map *threads = thread_map__alloc(max_threads); + + if (threads == NULL)
+@@ -106,16 +106,16 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) + threads->nr = 0; + atomic_set(&threads->refcnt, 1); + +- while (!readdir_r(proc, &dirent, &next) && next) { ++ while ((dirent = readdir(proc)) != NULL) { + char *end; + bool grow = false; + struct stat st; +- pid_t pid = strtol(dirent.d_name, &end, 10); ++ pid_t pid = strtol(dirent->d_name, &end, 10); + + if (*end) /* only interested in proper numerical dirents */ + continue; + +- snprintf(path, sizeof(path), "/proc/%s", dirent.d_name); ++ snprintf(path, sizeof(path), "/proc/%s", dirent->d_name); + + if (stat(path, &st) != 0) + continue;
diff --git a/patch/kernel/rk3328-default/patch-4.4.77-78.patch b/patch/kernel/rk3328-default/patch-4.4.77-78.patch
new file mode 100644
index 000000000..e79df63d6
--- /dev/null
+++ b/patch/kernel/rk3328-default/patch-4.4.77-78.patch
@@ -0,0 +1,3019 @@
+diff
--git a/Makefile b/Makefile
+index bf49a61d02e2..ac77ae8ee0b1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 77 ++SUBLEVEL = 78 + EXTRAVERSION = + NAME = Blurry Fish Butt +
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index d2315ffd8f12..f13ae153fb24 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); + #define CORE_DUMP_USE_REGSET + #define ELF_EXEC_PAGESIZE 4096 + +-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical +- use of this is to invoke "./ld.so someprog" to test out a new version of +- the loader. We need to make sure that it is out of the way of the program +- that it will "exec", and that there is sufficient room for the brk. */ +- +-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) ++/* This is the base location for PIE (ET_DYN with INTERP) loads. */ ++#define ELF_ET_DYN_BASE 0x400000UL + + /* When the program starts, a1 contains a pointer to a function to be + registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
+index 44dd892a4bbe..9e11dbe1cec3 100644
+--- a/arch/arm64/include/asm/elf.h
++++ b/arch/arm64/include/asm/elf.h
+@@ -120,12 +120,11 @@ typedef struct user_fpsimd_state elf_fpregset_t; + #define ELF_EXEC_PAGESIZE PAGE_SIZE + + /* +- * This is the location that an ET_DYN program is loaded if exec'ed. Typical +- * use of this is to invoke "./ld.so someprog" to test out a new version of +- * the loader. We need to make sure that it is out of the way of the program +- * that it will "exec", and that there is sufficient room for the brk. ++ * This is the base location for PIE (ET_DYN with INTERP) loads. On ++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address ++ * space open for things that want to use the area for 32-bit pointers. + */ +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) ++#define ELF_ET_DYN_BASE 0x100000000UL + + /* + * When the program starts, a1 contains a pointer to a function to be
+@@ -165,7 +164,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, + + #ifdef CONFIG_COMPAT + +-#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) ++/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */ ++#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL + + /* AArch32 registers. */ + #define COMPAT_ELF_NGREG 18
+diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
+index d8d60a57183f..f53725202955 100644
+--- a/arch/parisc/include/asm/dma-mapping.h
++++ b/arch/parisc/include/asm/dma-mapping.h
+@@ -39,6 +39,8 @@ struct hppa_dma_ops { + ** flush/purge and allocate "regular" cacheable pages for everything. + */ + ++#define DMA_ERROR_CODE (~(dma_addr_t)0) ++ + #ifdef CONFIG_PA11 + extern struct hppa_dma_ops pcxl_dma_ops; + extern struct hppa_dma_ops pcx_dma_ops;
+@@ -209,12 +211,13 @@ parisc_walk_tree(struct device *dev) + break; + } + } +- BUG_ON(!dev->platform_data); + return dev->platform_data; + } +- +-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu) +- ++ ++#define GET_IOC(dev) ({ \ ++ void *__pdata = parisc_walk_tree(dev); \ ++ __pdata ? HBA_DATA(__pdata)->iommu : NULL; \ ++}) + + #ifdef CONFIG_IOMMU_CCIO + struct parisc_device;
+diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
+index 59be25764433..a81226257878 100644
+--- a/arch/parisc/include/asm/mmu_context.h
++++ b/arch/parisc/include/asm/mmu_context.h
+@@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context) + mtctl(__space_to_prot(context), 8); + } + +-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) ++static inline void switch_mm_irqs_off(struct mm_struct *prev, ++ struct mm_struct *next, struct task_struct *tsk) + { +- + if (prev != next) { + mtctl(__pa(next->pgd), 25); + load_context(next->context); + } + } + ++static inline void switch_mm(struct mm_struct *prev, ++ struct mm_struct *next, struct task_struct *tsk) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ switch_mm_irqs_off(prev, next, tsk); ++ local_irq_restore(flags); ++} ++#define switch_mm_irqs_off switch_mm_irqs_off ++ + #define deactivate_mm(tsk,mm) do { } while (0) + + static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
+index d4ffcfbc9885..041e1f9ec129 100644
+--- a/arch/parisc/kernel/syscall_table.S
++++ b/arch/parisc/kernel/syscall_table.S
+@@ -361,7 +361,7 @@ + ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ + ENTRY_SAME(add_key) + ENTRY_SAME(request_key) /* 265 */ +- ENTRY_SAME(keyctl) ++ ENTRY_COMP(keyctl) + ENTRY_SAME(ioprio_set) + ENTRY_SAME(ioprio_get) + ENTRY_SAME(inotify_init)
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index f9064449908a..d8c2f3bcfc18 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -303,7 +303,7 @@ bad_area: + case 15: /* Data TLB miss fault/Data page fault */ + /* send SIGSEGV when outside of vma */ + if (!vma || +- address < vma->vm_start || address > vma->vm_end) { ++ address < vma->vm_start || address >= vma->vm_end) { + si.si_signo = SIGSEGV; + si.si_code = SEGV_MAPERR; + break;
+diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
+index ee46ffef608e..743ad7a400d6 100644
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -23,12 +23,13 @@ + #define CORE_DUMP_USE_REGSET + #define ELF_EXEC_PAGESIZE PAGE_SIZE + +-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical +- use of this is to invoke "./ld.so someprog" to test out a new version of +- the loader. We need to make sure that it is out of the way of the program +- that it will "exec", and that there is sufficient room for the brk. */ +- +-#define ELF_ET_DYN_BASE 0x20000000 ++/* ++ * This is the base location for PIE (ET_DYN with INTERP) loads. On ++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address ++ * space open for things that want to use the area for 32-bit pointers. ++ */ ++#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \ ++ 0x100000000UL) + + #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0) +
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index bab6739a1154..b9eb7b1a49d2 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -154,14 +154,13 @@ extern unsigned int vdso_enabled; + #define CORE_DUMP_USE_REGSET + #define ELF_EXEC_PAGESIZE 4096 + +-/* This is the location that an ET_DYN program is loaded if exec'ed.
Typical +- use of this is to invoke "./ld.so someprog" to test out a new version of +- the loader. We need to make sure that it is out of the way of the program +- that it will "exec", and that there is sufficient room for the brk. 64-bit +- tasks are aligned to 4GB. */ +-#define ELF_ET_DYN_BASE (is_32bit_task() ? \ +- (STACK_TOP / 3 * 2) : \ +- (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) ++/* ++ * This is the base location for PIE (ET_DYN with INTERP) loads. On ++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address ++ * space open for things that want to use the area for 32-bit pointers. ++ */ ++#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \ ++ 0x100000000UL) + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. */
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index dd14616b7739..7de207a11014 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data, + + static bool avx2_usable(void) + { +- if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) ++ if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) + && boot_cpu_has(X86_FEATURE_BMI1) + && boot_cpu_has(X86_FEATURE_BMI2)) + return true;
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index d262f985bbc8..07cf288b692e 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -245,12 +245,13 @@ extern int force_personality32; + #define CORE_DUMP_USE_REGSET + #define ELF_EXEC_PAGESIZE 4096 + +-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical +- use of this is to invoke "./ld.so someprog" to test out a new version of +- the loader. We need to make sure that it is out of the way of the program +- that it will "exec", and that there is sufficient room for the brk. */ +- +-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) ++/* ++ * This is the base location for PIE (ET_DYN with INTERP) loads. On ++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address ++ * space open for things that want to use the area for 32-bit pointers. ++ */ ++#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \ ++ 0x100000000UL) + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This could be done in user space,
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 690b4027e17c..37db36fddc88 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -405,6 +405,8 @@ + #define MSR_IA32_TSC_ADJUST 0x0000003b + #define MSR_IA32_BNDCFGS 0x00000d90 + ++#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc ++ + #define MSR_IA32_XSS 0x00000da0 + + #define FEATURE_CONTROL_LOCKED (1<<0)
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 9357b29de9bc..83d6369c45f5 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -46,11 +46,18 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted) + return ret; + } + ++bool kvm_mpx_supported(void) ++{ ++ return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)) ++ && kvm_x86_ops->mpx_supported()); ++} ++EXPORT_SYMBOL_GPL(kvm_mpx_supported); ++ + u64 kvm_supported_xcr0(void) + { + u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0; + +- if (!kvm_x86_ops->mpx_supported()) ++ if (!kvm_mpx_supported()) + xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR); + + return xcr0;
+@@ -97,7 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu) + if (best && (best->eax & (F(XSAVES) | F(XSAVEC)))) + best->ebx = xstate_required_size(vcpu->arch.xcr0, true); + +- vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu); ++ vcpu->arch.eager_fpu = use_eager_fpu(); + if (vcpu->arch.eager_fpu) + kvm_x86_ops->fpu_activate(vcpu);
+@@ -295,7 +302,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, + #endif + unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0; + unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0; +- unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0; ++ unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0; + unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ?
F(XSAVES) : 0; + + /* cpuid 1.edx */
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 3f5c48ddba45..d1534feefcfe 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -4,6 +4,7 @@ + #include "x86.h" + + int kvm_update_cpuid(struct kvm_vcpu *vcpu); ++bool kvm_mpx_supported(void); + struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, + u32 function, u32 index); + int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
+@@ -134,20 +135,20 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu) + return best && (best->ebx & bit(X86_FEATURE_RTM)); + } + +-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu) ++static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu) + { + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 7, 0); +- return best && (best->ebx & bit(X86_FEATURE_MPX)); ++ return best && (best->ebx & bit(X86_FEATURE_PCOMMIT)); + } + +-static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu) ++static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu) + { + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 7, 0); +- return best && (best->ebx & bit(X86_FEATURE_PCOMMIT)); ++ return best && (best->ebx & bit(X86_FEATURE_MPX)); + } + + static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index bbaa11f4e74b..b12391119ce8 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -863,7 +863,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); + static u64 construct_eptp(unsigned long root_hpa); + static void kvm_cpu_vmxon(u64 addr); + static void kvm_cpu_vmxoff(void); +-static bool vmx_mpx_supported(void); + static bool vmx_xsaves_supported(void); + static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu); + static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
+@@ -2541,7 +2540,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) + VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | + VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; + +- if (vmx_mpx_supported()) ++ if (kvm_mpx_supported()) + vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; + + /* We support free control of debug control saving. */
+@@ -2562,7 +2561,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) + VM_ENTRY_LOAD_IA32_PAT; + vmx->nested.nested_vmx_entry_ctls_high |= + (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); +- if (vmx_mpx_supported()) ++ if (kvm_mpx_supported()) + vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; + + /* We support free control of debug control loading. */
+@@ -2813,7 +2812,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); + break; + case MSR_IA32_BNDCFGS: +- if (!vmx_mpx_supported()) ++ if (!kvm_mpx_supported() || ++ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu))) + return 1; + msr_info->data = vmcs_read64(GUEST_BNDCFGS); + break;
+@@ -2890,7 +2890,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + vmcs_writel(GUEST_SYSENTER_ESP, data); + break; + case MSR_IA32_BNDCFGS: +- if (!vmx_mpx_supported()) ++ if (!kvm_mpx_supported() || ++ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu))) ++ return 1; ++ if (is_noncanonical_address(data & PAGE_MASK) || ++ (data & MSR_IA32_BNDCFGS_RSVD)) + return 1; + vmcs_write64(GUEST_BNDCFGS, data); + break;
+@@ -3363,7 +3367,7 @@ static void init_vmcs_shadow_fields(void) + for (i = j = 0; i < max_shadow_read_write_fields; i++) { + switch (shadow_read_write_fields[i]) { + case GUEST_BNDCFGS: +- if (!vmx_mpx_supported()) ++ if (!kvm_mpx_supported()) + continue; + break; + default:
+@@ -6253,7 +6257,6 @@ static __init int hardware_setup(void) + vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); + vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); + vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); +- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true); + + memcpy(vmx_msr_bitmap_legacy_x2apic, + vmx_msr_bitmap_legacy, PAGE_SIZE);
+@@ -10265,7 +10268,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, + vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); + vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); + vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); +- if (vmx_mpx_supported()) ++ if (kvm_mpx_supported()) + vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); + if (nested_cpu_has_xsaves(vmcs12)) + vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index f18856f5954b..afe045792796 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -2094,7 +2094,11 @@ void device_shutdown(void) + pm_runtime_get_noresume(dev); + pm_runtime_barrier(dev); + +- if (dev->bus && dev->bus->shutdown) { ++ if (dev->class && dev->class->shutdown) { ++ if (initcall_debug) ++ dev_info(dev, "shutdown\n"); ++ dev->class->shutdown(dev); ++ } else if (dev->bus && dev->bus->shutdown) { + if (initcall_debug) + dev_info(dev, "shutdown\n"); + dev->bus->shutdown(dev);
+diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
+index a7b46798c81d..39efa7e6c0c0 100644
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev, + value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) + value = PM_QOS_LATENCY_ANY; ++ else ++ return -EINVAL; + } + ret = dev_pm_qos_update_user_latency_tolerance(dev, value); + return ret < 0 ?
ret : n;
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index a1e0b9ab847a..e613633ffe9c 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -60,6 +60,8 @@ static LIST_HEAD(wakeup_sources); + + static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); + ++DEFINE_STATIC_SRCU(wakeup_srcu); ++ + static struct wakeup_source deleted_ws = { + .name = "deleted", + .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
+@@ -198,7 +200,7 @@ void wakeup_source_remove(struct wakeup_source *ws) + spin_lock_irqsave(&events_lock, flags); + list_del_rcu(&ws->entry); + spin_unlock_irqrestore(&events_lock, flags); +- synchronize_rcu(); ++ synchronize_srcu(&wakeup_srcu); + } + EXPORT_SYMBOL_GPL(wakeup_source_remove);
+@@ -330,13 +332,14 @@ void device_wakeup_detach_irq(struct device *dev) + void device_wakeup_arm_wake_irqs(void) + { + struct wakeup_source *ws; ++ int srcuidx; + +- rcu_read_lock(); ++ srcuidx = srcu_read_lock(&wakeup_srcu); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + if (ws->wakeirq) + dev_pm_arm_wake_irq(ws->wakeirq); + } +- rcu_read_unlock(); ++ srcu_read_unlock(&wakeup_srcu, srcuidx); + } + + /**
+@@ -347,13 +350,14 @@ void device_wakeup_arm_wake_irqs(void) + void device_wakeup_disarm_wake_irqs(void) + { + struct wakeup_source *ws; ++ int srcuidx; + +- rcu_read_lock(); ++ srcuidx = srcu_read_lock(&wakeup_srcu); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + if (ws->wakeirq) + dev_pm_disarm_wake_irq(ws->wakeirq); + } +- rcu_read_unlock(); ++ srcu_read_unlock(&wakeup_srcu, srcuidx); + } + + /**
+@@ -807,10 +811,10 @@ EXPORT_SYMBOL_GPL(pm_wakeup_event); + void pm_print_active_wakeup_sources(void) + { + struct wakeup_source *ws; +- int active = 0; ++ int srcuidx, active = 0; + struct wakeup_source *last_activity_ws = NULL; + +- rcu_read_lock(); ++ srcuidx = srcu_read_lock(&wakeup_srcu); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + if (ws->active) { + pr_info("active wakeup source: %s\n", ws->name);
+@@ -826,7 +830,7 @@ void pm_print_active_wakeup_sources(void) + if (!active && last_activity_ws) + pr_info("last active wakeup source: %s\n", + last_activity_ws->name); +- rcu_read_unlock(); ++ srcu_read_unlock(&wakeup_srcu, srcuidx); + } + EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
+@@ -953,8 +957,9 @@ void pm_wakep_autosleep_enabled(bool set) + { + struct wakeup_source *ws; + ktime_t now = ktime_get(); ++ int srcuidx; + +- rcu_read_lock(); ++ srcuidx = srcu_read_lock(&wakeup_srcu); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + spin_lock_irq(&ws->lock); + if (ws->autosleep_enabled != set) {
+@@ -968,7 +973,7 @@ void pm_wakep_autosleep_enabled(bool set) + } + spin_unlock_irq(&ws->lock); + } +- rcu_read_unlock(); ++ srcu_read_unlock(&wakeup_srcu, srcuidx); + } + #endif /* CONFIG_PM_AUTOSLEEP */
+@@ -1061,15 +1066,16 @@ static int print_wakeup_source_stats(struct seq_file *m, + static int wakeup_sources_stats_show(struct seq_file *m, void *unused) + { + struct wakeup_source *ws; ++ int srcuidx; + + seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t" + "expire_count\tactive_since\ttotal_time\tmax_time\t" + "last_change\tprevent_suspend_time\n"); + +- rcu_read_lock(); ++ srcuidx = srcu_read_lock(&wakeup_srcu); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) + print_wakeup_source_stats(m, ws); +- rcu_read_unlock(); ++ srcu_read_unlock(&wakeup_srcu, srcuidx); + + print_wakeup_source_stats(m, &deleted_ws);
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 252142524ff2..6d56877b2e0a 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -36,10 +36,60 @@ static DEFINE_SPINLOCK(driver_lock); + struct class *tpm_class; + dev_t tpm_devt; + +-/* +- * tpm_chip_find_get - return tpm_chip for a given chip number +- * @chip_num the device number for the chip ++/** ++ * tpm_try_get_ops() - Get a ref to the tpm_chip ++ * @chip: Chip to ref ++ * ++ * The caller must already have some kind of locking to ensure that chip is ++ * valid. This function will lock the chip so that the ops member can be ++ * accessed safely. The locking prevents tpm_chip_unregister from ++ * completing, so it should not be held for long periods. ++ * ++ * Returns -ERRNO if the chip could not be got. ++ */ ++int tpm_try_get_ops(struct tpm_chip *chip) ++{ ++ int rc = -EIO; ++ ++ get_device(&chip->dev); ++ ++ down_read(&chip->ops_sem); ++ if (!chip->ops) ++ goto out_lock; ++ ++ if (!try_module_get(chip->dev.parent->driver->owner)) ++ goto out_lock; ++ ++ return 0; ++out_lock: ++ up_read(&chip->ops_sem); ++ put_device(&chip->dev); ++ return rc; ++} ++EXPORT_SYMBOL_GPL(tpm_try_get_ops); ++ ++/** ++ * tpm_put_ops() - Release a ref to the tpm_chip ++ * @chip: Chip to put ++ * ++ * This is the opposite pair to tpm_try_get_ops(). After this returns chip may ++ * be kfree'd. + */ ++void tpm_put_ops(struct tpm_chip *chip) ++{ ++ module_put(chip->dev.parent->driver->owner); ++ up_read(&chip->ops_sem); ++ put_device(&chip->dev); ++} ++EXPORT_SYMBOL_GPL(tpm_put_ops); ++ ++/** ++ * tpm_chip_find_get() - return tpm_chip for a given chip number ++ * @chip_num: id to find ++ * ++ * The return'd chip has been tpm_try_get_ops'd and must be released via ++ * tpm_put_ops ++ */ + struct tpm_chip *tpm_chip_find_get(int chip_num) + { + struct tpm_chip *pos, *chip = NULL;
+@@ -49,10 +99,10 @@ struct tpm_chip *tpm_chip_find_get(int chip_num) + if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num) + continue; + +- if (try_module_get(pos->pdev->driver->owner)) { ++ /* rcu prevents chip from being free'd */ ++ if (!tpm_try_get_ops(pos)) + chip = pos; +- break; +- } ++ break; + } + rcu_read_unlock(); + return chip;
+@@ -74,6 +124,41 @@ static void tpm_dev_release(struct device *dev) + kfree(chip); + } + ++ ++/** ++ * tpm_class_shutdown() - prepare the TPM device for loss of power. ++ * @dev: device to which the chip is associated. ++ * ++ * Issues a TPM2_Shutdown command prior to loss of power, as required by the ++ * TPM 2.0 spec. ++ * Then, calls bus- and device- specific shutdown code. ++ * ++ * XXX: This codepath relies on the fact that sysfs is not enabled for ++ * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2 ++ * has sysfs support enabled before TPM sysfs's implicit locking is fixed. ++ */ ++static int tpm_class_shutdown(struct device *dev) ++{ ++ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); ++ ++ if (chip->flags & TPM_CHIP_FLAG_TPM2) { ++ down_write(&chip->ops_sem); ++ tpm2_shutdown(chip, TPM2_SU_CLEAR); ++ chip->ops = NULL; ++ up_write(&chip->ops_sem); ++ } ++ /* Allow bus- and device-specific code to run. Note: since chip->ops ++ * is NULL, more-specific shutdown code will not be able to issue TPM ++ * commands.
++ */ ++ if (dev->bus && dev->bus->shutdown) ++ dev->bus->shutdown(dev); ++ else if (dev->driver && dev->driver->shutdown) ++ dev->driver->shutdown(dev); ++ return 0; ++} ++ + /** + * tpmm_chip_alloc() - allocate a new struct tpm_chip instance + * @dev: device to which the chip is associated
+@@ -94,6 +179,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, + return ERR_PTR(-ENOMEM); + + mutex_init(&chip->tpm_mutex); ++ init_rwsem(&chip->ops_sem); + INIT_LIST_HEAD(&chip->list); + + chip->ops = ops;
+@@ -112,13 +198,12 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, + + scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num); + +- chip->pdev = dev; +- + dev_set_drvdata(dev, chip); + + chip->dev.class = tpm_class; ++ chip->dev.class->shutdown = tpm_class_shutdown; + chip->dev.release = tpm_dev_release; +- chip->dev.parent = chip->pdev; ++ chip->dev.parent = dev; + #ifdef CONFIG_ACPI + chip->dev.groups = chip->groups; + #endif
+@@ -133,7 +218,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, + device_initialize(&chip->dev); + + cdev_init(&chip->cdev, &tpm_fops); +- chip->cdev.owner = chip->pdev->driver->owner; ++ chip->cdev.owner = dev->driver->owner; + chip->cdev.kobj.parent = &chip->dev.kobj; + + devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
+@@ -173,6 +258,12 @@ static int tpm_add_char_device(struct tpm_chip *chip) + static void tpm_del_char_device(struct tpm_chip *chip) + { + cdev_del(&chip->cdev); ++ ++ /* Make the driver uncallable. */ ++ down_write(&chip->ops_sem); ++ chip->ops = NULL; ++ up_write(&chip->ops_sem); ++ + device_del(&chip->dev); + }
+@@ -236,9 +327,8 @@ int tpm_chip_register(struct tpm_chip *chip) + chip->flags |= TPM_CHIP_FLAG_REGISTERED; + + if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { +- rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj, +- &chip->dev.kobj, +- "ppi"); ++ rc = __compat_only_sysfs_link_entry_to_kobj( ++ &chip->dev.parent->kobj, &chip->dev.kobj, "ppi"); + if (rc && rc != -ENOENT) { + tpm_chip_unregister(chip); + return rc;
+@@ -259,6 +349,9 @@ EXPORT_SYMBOL_GPL(tpm_chip_register); + * Takes the chip first away from the list of available TPM chips and then + * cleans up all the resources reserved by tpm_chip_register(). + * ++ * Once this function returns the driver call backs in 'op's will not be ++ * running and will no longer start. ++ * + * NOTE: This function should be only called before deinitializing chip + * resources. + */
+@@ -273,7 +366,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) + synchronize_rcu(); + + if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) +- sysfs_remove_link(&chip->pdev->kobj, "ppi"); ++ sysfs_remove_link(&chip->dev.parent->kobj, "ppi"); + + tpm1_chip_unregister(chip); + tpm_del_char_device(chip);
+diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
+index 4f3137d9a35e..912ad30be585 100644
+--- a/drivers/char/tpm/tpm-dev.c
++++ b/drivers/char/tpm/tpm-dev.c
+@@ -61,7 +61,7 @@ static int tpm_open(struct inode *inode, struct file *file) + * by the check of is_open variable, which is protected + * by driver_lock. */ + if (test_and_set_bit(0, &chip->is_open)) { +- dev_dbg(chip->pdev, "Another process owns this TPM\n"); ++ dev_dbg(&chip->dev, "Another process owns this TPM\n"); + return -EBUSY; + }
+@@ -79,7 +79,6 @@ static int tpm_open(struct inode *inode, struct file *file) + INIT_WORK(&priv->work, timeout_work); + + file->private_data = priv; +- get_device(chip->pdev); + return 0; + }
+@@ -137,9 +136,18 @@ static ssize_t tpm_write(struct file *file, const char __user *buf, + return -EFAULT; + } + +- /* atomic tpm command send and result receive */ ++ /* atomic tpm command send and result receive. We only hold the ops ++ * lock during this period so that the tpm can be unregistered even if ++ * the char dev is held open. ++ */ ++ if (tpm_try_get_ops(priv->chip)) { ++ mutex_unlock(&priv->buffer_mutex); ++ return -EPIPE; ++ } + out_size = tpm_transmit(priv->chip, priv->data_buffer, + sizeof(priv->data_buffer), 0); ++ ++ tpm_put_ops(priv->chip); + if (out_size < 0) { + mutex_unlock(&priv->buffer_mutex); + return out_size;
+@@ -166,7 +174,6 @@ static int tpm_release(struct inode *inode, struct file *file) + file->private_data = NULL; + atomic_set(&priv->data_pending, 0); + clear_bit(0, &priv->chip->is_open); +- put_device(priv->chip->pdev); + kfree(priv); + return 0; + }
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 17abe52e6365..8588f2e4b9af 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -343,7 +343,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, + if (count == 0) + return -ENODATA; + if (count > bufsiz) { +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "invalid count value %x %zx\n", count, bufsiz); + return -E2BIG; + }
+@@ -353,7 +353,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, + + rc = chip->ops->send(chip, (u8 *) buf, count); + if (rc < 0) { +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "tpm_transmit: tpm_send: error %zd\n", rc); + goto out; + }
+@@ -372,7 +372,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, + goto out_recv; + + if (chip->ops->req_canceled(chip, status)) { +- dev_err(chip->pdev, "Operation Canceled\n"); ++ dev_err(&chip->dev, "Operation Canceled\n"); + rc = -ECANCELED; + goto out; + }
+@@ -382,14 +382,14 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, + } while (time_before(jiffies, stop)); + + chip->ops->cancel(chip); +- dev_err(chip->pdev, "Operation Timed out\n"); ++ dev_err(&chip->dev, "Operation Timed out\n"); + rc = -ETIME; + goto out; + + out_recv: + rc = chip->ops->recv(chip, (u8 *) buf, bufsiz); + if (rc < 0) +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "tpm_transmit: tpm_recv: error %zd\n", rc); + out: + if (!(flags & TPM_TRANSMIT_UNLOCKED))
+@@ -416,7 +416,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, + + err = be32_to_cpu(header->return_code); + if (err != 0 && desc) +- dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err, ++ dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err, + desc); + + return err;
+@@ -514,7 +514,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) + if (rc == TPM_ERR_INVALID_POSTINIT) { + /* The TPM is not started, we are the first to talk to it. + Execute a startup command.
*/ +- dev_info(chip->pdev, "Issuing TPM_STARTUP"); ++ dev_info(&chip->dev, "Issuing TPM_STARTUP"); + if (tpm_startup(chip, TPM_ST_CLEAR)) + return rc; +
+@@ -526,7 +526,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) + 0, NULL); + } + if (rc) { +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "A TPM error (%zd) occurred attempting to determine the timeouts\n", + rc); + goto duration;
+@@ -565,7 +565,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) + + /* Report adjusted timeouts */ + if (chip->vendor.timeout_adjusted) { +- dev_info(chip->pdev, ++ dev_info(&chip->dev, + HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", + old_timeout[0], new_timeout[0], + old_timeout[1], new_timeout[1],
+@@ -612,7 +612,7 @@ duration: + chip->vendor.duration[TPM_MEDIUM] *= 1000; + chip->vendor.duration[TPM_LONG] *= 1000; + chip->vendor.duration_adjusted = true; +- dev_info(chip->pdev, "Adjusting TPM timeout parameters."); ++ dev_info(&chip->dev, "Adjusting TPM timeout parameters."); + } + return 0; + }
+@@ -687,7 +687,7 @@ int tpm_is_tpm2(u32 chip_num) + + rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0; + +- tpm_chip_put(chip); ++ tpm_put_ops(chip); + + return rc; + }
+@@ -716,7 +716,7 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) + rc = tpm2_pcr_read(chip, pcr_idx, res_buf); + else + rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf); +- tpm_chip_put(chip); ++ tpm_put_ops(chip); + return rc; + } + EXPORT_SYMBOL_GPL(tpm_pcr_read);
+@@ -751,7 +751,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + rc = tpm2_pcr_extend(chip, pcr_idx, hash); +- tpm_chip_put(chip); ++ tpm_put_ops(chip); + return rc; + }
+@@ -761,7 +761,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) + rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0, + "attempting extend a PCR value"); + +- tpm_chip_put(chip); ++ tpm_put_ops(chip); + return rc; + } + EXPORT_SYMBOL_GPL(tpm_pcr_extend);
+@@ -802,7 +802,9 @@ int tpm_do_selftest(struct tpm_chip *chip) + * around 300ms while the self test is ongoing, keep trying + * until the self test duration expires. */ + if (rc == -ETIME) { +- dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test"); ++ dev_info( ++ &chip->dev, HW_ERR ++ "TPM command timed out during continue self test"); + msleep(delay_msec); + continue; + }
+@@ -812,7 +814,7 @@ int tpm_do_selftest(struct tpm_chip *chip) + + rc = be32_to_cpu(cmd.header.out.return_code); + if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { +- dev_info(chip->pdev, ++ dev_info(&chip->dev, + "TPM is disabled/deactivated (0x%X)\n", rc); + /* TPM is disabled and/or deactivated; driver can + * proceed and TPM does handle commands for
+@@ -840,7 +842,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen) + + rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd"); + +- tpm_chip_put(chip); ++ tpm_put_ops(chip); + return rc; + } + EXPORT_SYMBOL_GPL(tpm_send);
+@@ -966,10 +968,10 @@ int tpm_pm_suspend(struct device *dev) + } + + if (rc) +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "Error (%d) sending savestate before suspend\n", rc); + else if (try > 0) +- dev_warn(chip->pdev, "TPM savestate took %dms\n", ++ dev_warn(&chip->dev, "TPM savestate took %dms\n", + try * TPM_TIMEOUT_RETRY); + + return rc;
+@@ -1023,7 +1025,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + err = tpm2_get_random(chip, out, max); +- tpm_chip_put(chip); ++ tpm_put_ops(chip); + return err; + }
+@@ -1045,7 +1047,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) + num_bytes -= recd; + } while (retries-- && total < max); + +- tpm_chip_put(chip); ++ tpm_put_ops(chip); + return total ? total : -EIO; + } + EXPORT_SYMBOL_GPL(tpm_get_random);
+@@ -1071,7 +1073,7 @@ int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload, + + rc = tpm2_seal_trusted(chip, payload, options); + +- tpm_chip_put(chip); ++ tpm_put_ops(chip); + return rc; + } + EXPORT_SYMBOL_GPL(tpm_seal_trusted);
+@@ -1097,7 +1099,8 @@ int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload, + + rc = tpm2_unseal_trusted(chip, payload, options); + +- tpm_chip_put(chip); ++ tpm_put_ops(chip); ++ + return rc; + } + EXPORT_SYMBOL_GPL(tpm_unseal_trusted);
+diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
+index f880856aa75e..6a4056a3f7ee 100644
+--- a/drivers/char/tpm/tpm-sysfs.c
++++ b/drivers/char/tpm/tpm-sysfs.c
+@@ -284,16 +284,28 @@ static const struct attribute_group tpm_dev_group = { + int tpm_sysfs_add_device(struct tpm_chip *chip) + { + int err; +- err = sysfs_create_group(&chip->pdev->kobj, ++ ++ /* XXX: If you wish to remove this restriction, you must first update ++ * tpm_sysfs to explicitly lock chip->ops.
++ */ ++ if (chip->flags & TPM_CHIP_FLAG_TPM2) ++ return 0; ++ ++ err = sysfs_create_group(&chip->dev.parent->kobj, + &tpm_dev_group); + + if (err) +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "failed to create sysfs attributes, %d\n", err); + return err; + } + + void tpm_sysfs_del_device(struct tpm_chip *chip) + { +- sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group); ++ /* The sysfs routines rely on an implicit tpm_try_get_ops, this ++ * function is called before ops is null'd and the sysfs core ++ * synchronizes this removal so that no callbacks are running or can ++ * run again ++ */ ++ sysfs_remove_group(&chip->dev.parent->kobj, &tpm_dev_group); + }
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index 2216861f89f1..e21e2c599e66 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -171,11 +171,16 @@ enum tpm_chip_flags { + }; + + struct tpm_chip { +- struct device *pdev; /* Device stuff */ + struct device dev; + struct cdev cdev; + ++ /* A driver callback under ops cannot be run unless ops_sem is held ++ * (sometimes implicitly, eg for the sysfs code). ops becomes null ++ * when the driver is unregistered, see tpm_try_get_ops. ++ */ ++ struct rw_semaphore ops_sem; + const struct tpm_class_ops *ops; ++ + unsigned int flags; + + int dev_num; /* /dev/tpm# */
+@@ -201,11 +206,6 @@ struct tpm_chip { + + #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) + +-static inline void tpm_chip_put(struct tpm_chip *chip) +-{ +- module_put(chip->pdev->driver->owner); +-} +- + static inline int tpm_read_index(int base, int index) + { + outb(index, base);
+@@ -517,6 +517,9 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long, + wait_queue_head_t *, bool); + + struct tpm_chip *tpm_chip_find_get(int chip_num); ++__must_check int tpm_try_get_ops(struct tpm_chip *chip); ++void tpm_put_ops(struct tpm_chip *chip); ++ + extern struct tpm_chip *tpmm_chip_alloc(struct device *dev, + const struct tpm_class_ops *ops); + extern int tpm_chip_register(struct tpm_chip *chip);
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index cb7e4f6b70ba..286bd090a488 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -570,7 +570,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, + + rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT); + if (rc) { +- dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n", ++ dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n", + handle); + return; + }
+@@ -580,7 +580,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, + rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, + "flushing context"); + if (rc) +- dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle, ++ dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle, + rc); + + tpm_buf_destroy(&buf);
+@@ -753,7 +753,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) + * except print the error code on a system failure. + */ + if (rc < 0) +- dev_warn(chip->pdev, "transmit returned %d while stopping the TPM", ++ dev_warn(&chip->dev, "transmit returned %d while stopping the TPM", + rc); + } + EXPORT_SYMBOL_GPL(tpm2_shutdown);
+@@ -820,7 +820,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full) + * immediately. This is a workaround for that. + */ + if (rc == TPM2_RC_TESTING) { +- dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n"); ++ dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n"); + rc = 0; + }
+diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
+index dfadad0916a1..a48a878f791d 100644
+--- a/drivers/char/tpm/tpm_atmel.c
++++ b/drivers/char/tpm/tpm_atmel.c
+@@ -49,7 +49,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) + for (i = 0; i < 6; i++) { + status = ioread8(chip->vendor.iobase + 1); + if ((status & ATML_STATUS_DATA_AVAIL) == 0) { +- dev_err(chip->pdev, "error reading header\n"); ++ dev_err(&chip->dev, "error reading header\n"); + return -EIO; + } + *buf++ = ioread8(chip->vendor.iobase);
+@@ -60,12 +60,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) + size = be32_to_cpu(*native_size); + + if (count < size) { +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "Recv size(%d) less than available space\n", size); + for (; i < size; i++) { /* clear the waiting data anyway */ + status = ioread8(chip->vendor.iobase + 1); + if ((status & ATML_STATUS_DATA_AVAIL) == 0) { +- dev_err(chip->pdev, "error reading data\n"); ++ dev_err(&chip->dev, "error reading data\n"); + return -EIO; + } + }
+@@ -76,7 +76,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) + for (; i < size; i++) { + status = ioread8(chip->vendor.iobase + 1); + if ((status & ATML_STATUS_DATA_AVAIL) == 0) { +- dev_err(chip->pdev, "error reading data\n"); ++ dev_err(&chip->dev, "error reading data\n"); + return -EIO; + } + *buf++ = ioread8(chip->vendor.iobase);
+@@ -86,7 +86,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) + status = ioread8(chip->vendor.iobase + 1); + + if (status & ATML_STATUS_DATA_AVAIL) { +- dev_err(chip->pdev, "data available is stuck\n"); ++ dev_err(&chip->dev, "data available is stuck\n"); + return -EIO; + }
+@@ -97,9 +97,9 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) + { + int i; + +- dev_dbg(chip->pdev, "tpm_atml_send:\n"); ++ dev_dbg(&chip->dev, "tpm_atml_send:\n"); + for (i = 0; i < count; i++) { +- dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); ++ dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); + iowrite8(buf[i], chip->vendor.iobase); + }
+diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
+index 8dfb88b9739c..dd8f0eb3170a 100644
+--- a/drivers/char/tpm/tpm_i2c_atmel.c
++++ b/drivers/char/tpm/tpm_i2c_atmel.c
+@@ -52,7 +52,7 @@ struct priv_data { + static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) + { + struct priv_data *priv = chip->vendor.priv; +- struct i2c_client *client = to_i2c_client(chip->pdev); ++ struct i2c_client *client = to_i2c_client(chip->dev.parent); + s32 status; + + priv->len = 0;
+@@ -62,7 +62,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) + + status = i2c_master_send(client, buf, len); + +- dev_dbg(chip->pdev, ++ dev_dbg(&chip->dev, + "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, + (int)min_t(size_t, 64, len), buf, len, status); + return status;
+@@ -71,7 +71,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) + static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) + { + struct priv_data *priv = chip->vendor.priv; +- struct i2c_client *client = to_i2c_client(chip->pdev); ++ struct i2c_client *client = to_i2c_client(chip->dev.parent); + struct tpm_output_header *hdr = + (struct tpm_output_header *)priv->buffer; + u32
expected_len; +@@ -88,7 +88,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) + return -ENOMEM; + + if (priv->len >= expected_len) { +- dev_dbg(chip->pdev, ++ dev_dbg(&chip->dev, + "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, + (int)min_t(size_t, 64, expected_len), buf, count, + expected_len); +@@ -97,7 +97,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) + } + + rc = i2c_master_recv(client, buf, expected_len); +- dev_dbg(chip->pdev, ++ dev_dbg(&chip->dev, + "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, + (int)min_t(size_t, 64, expected_len), buf, count, + expected_len); +@@ -106,13 +106,13 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) + + static void i2c_atmel_cancel(struct tpm_chip *chip) + { +- dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported"); ++ dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported"); + } + + static u8 i2c_atmel_read_status(struct tpm_chip *chip) + { + struct priv_data *priv = chip->vendor.priv; +- struct i2c_client *client = to_i2c_client(chip->pdev); ++ struct i2c_client *client = to_i2c_client(chip->dev.parent); + int rc; + + /* The TPM fails the I2C read until it is ready, so we do the entire +@@ -125,7 +125,7 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip) + /* Once the TPM has completed the command the command remains readable + * until another command is issued. */ + rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); +- dev_dbg(chip->pdev, ++ dev_dbg(&chip->dev, + "%s: sts=%d", __func__, rc); + if (rc <= 0) + return 0; +diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c +index 63d5d22e9e60..f2aa99e34b4b 100644 +--- a/drivers/char/tpm/tpm_i2c_infineon.c ++++ b/drivers/char/tpm/tpm_i2c_infineon.c +@@ -446,7 +446,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) + /* read first 10 bytes, including tag, paramsize, and result */ + size = recv_data(chip, buf, TPM_HEADER_SIZE); + if (size < TPM_HEADER_SIZE) { +- dev_err(chip->pdev, "Unable to read header\n"); ++ dev_err(&chip->dev, "Unable to read header\n"); + goto out; + } + +@@ -459,14 +459,14 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) + size += recv_data(chip, &buf[TPM_HEADER_SIZE], + expected - TPM_HEADER_SIZE); + if (size < expected) { +- dev_err(chip->pdev, "Unable to read remainder of result\n"); ++ dev_err(&chip->dev, "Unable to read remainder of result\n"); + size = -ETIME; + goto out; + } + + wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status); + if (status & TPM_STS_DATA_AVAIL) { /* retry? 
*/ +- dev_err(chip->pdev, "Error left over data\n"); ++ dev_err(&chip->dev, "Error left over data\n"); + size = -EIO; + goto out; + } +diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c +index 847f1597fe9b..a1e1474dda30 100644 +--- a/drivers/char/tpm/tpm_i2c_nuvoton.c ++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c +@@ -96,13 +96,13 @@ static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size, + /* read TPM_STS register */ + static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) + { +- struct i2c_client *client = to_i2c_client(chip->pdev); ++ struct i2c_client *client = to_i2c_client(chip->dev.parent); + s32 status; + u8 data; + + status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); + if (status <= 0) { +- dev_err(chip->pdev, "%s() error return %d\n", __func__, ++ dev_err(&chip->dev, "%s() error return %d\n", __func__, + status); + data = TPM_STS_ERR_VAL; + } +@@ -127,13 +127,13 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) + /* write commandReady to TPM_STS register */ + static void i2c_nuvoton_ready(struct tpm_chip *chip) + { +- struct i2c_client *client = to_i2c_client(chip->pdev); ++ struct i2c_client *client = to_i2c_client(chip->dev.parent); + s32 status; + + /* this causes the current command to be aborted */ + status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY); + if (status < 0) +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "%s() fail to write TPM_STS.commandReady\n", __func__); + } + +@@ -212,7 +212,7 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, + return 0; + } while (time_before(jiffies, stop)); + } +- dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask, ++ dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask, + value); + return -ETIMEDOUT; + } +@@ -240,7 +240,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, + &chip->vendor.read_queue) == 0) { + burst_count = i2c_nuvoton_get_burstcount(client, chip); + if (burst_count < 0) { +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "%s() fail to read burstCount=%d\n", __func__, + burst_count); + return -EIO; +@@ -249,12 +249,12 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, + rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, + bytes2read, &buf[size]); + if (rc < 0) { +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "%s() fail on i2c_nuvoton_read_buf()=%d\n", + __func__, rc); + return -EIO; + } +- dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read); ++ dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read); + size += bytes2read; + } + +@@ -264,7 +264,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, + /* Read TPM command results */ + static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) + { +- struct device *dev = chip->pdev; ++ struct device *dev = chip->dev.parent; + struct i2c_client *client = to_i2c_client(dev); + s32 rc; + int expected, status, burst_count, retries, size = 0; +@@ -334,7 +334,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) + break; + } + i2c_nuvoton_ready(chip); +- dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size); ++ dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size); + return size; + } + +@@ -347,7 +347,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) + */ + static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) + { +- struct device *dev = chip->pdev; ++ struct device *dev = chip->dev.parent; + struct i2c_client 
*client = to_i2c_client(dev); + u32 ordinal; + size_t count = 0; +diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c +index 6c488e635fdd..e3cf9f3545c5 100644 +--- a/drivers/char/tpm/tpm_infineon.c ++++ b/drivers/char/tpm/tpm_infineon.c +@@ -195,9 +195,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit) + } + if (i == TPM_MAX_TRIES) { /* timeout occurs */ + if (wait_for_bit == STAT_XFE) +- dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n"); ++ dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n"); + if (wait_for_bit == STAT_RDA) +- dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n"); ++ dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n"); + return -EIO; + } + return 0; +@@ -220,7 +220,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte) + static void tpm_wtx(struct tpm_chip *chip) + { + number_of_wtx++; +- dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n", ++ dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n", + number_of_wtx, TPM_MAX_WTX_PACKAGES); + wait_and_send(chip, TPM_VL_VER); + wait_and_send(chip, TPM_CTRL_WTX); +@@ -231,7 +231,7 @@ static void tpm_wtx(struct tpm_chip *chip) + + static void tpm_wtx_abort(struct tpm_chip *chip) + { +- dev_info(chip->pdev, "Aborting WTX\n"); ++ dev_info(&chip->dev, "Aborting WTX\n"); + wait_and_send(chip, TPM_VL_VER); + wait_and_send(chip, TPM_CTRL_WTX_ABORT); + wait_and_send(chip, 0x00); +@@ -257,7 +257,7 @@ recv_begin: + } + + if (buf[0] != TPM_VL_VER) { +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "Wrong transport protocol implementation!\n"); + return -EIO; + } +@@ -272,7 +272,7 @@ recv_begin: + } + + if ((size == 0x6D00) && (buf[1] == 0x80)) { +- dev_err(chip->pdev, "Error handling on vendor layer!\n"); ++ dev_err(&chip->dev, "Error handling on vendor layer!\n"); + return -EIO; + } + +@@ -284,7 +284,7 @@ recv_begin: + } + + if (buf[1] == TPM_CTRL_WTX) { +- dev_info(chip->pdev, "WTX-package received\n"); ++ dev_info(&chip->dev, "WTX-package received\n"); + if (number_of_wtx < TPM_MAX_WTX_PACKAGES) { + tpm_wtx(chip); + goto recv_begin; +@@ -295,14 +295,14 @@ recv_begin: + } + + if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) { +- dev_info(chip->pdev, "WTX-abort acknowledged\n"); ++ dev_info(&chip->dev, "WTX-abort acknowledged\n"); + return size; + } + + if (buf[1] == TPM_CTRL_ERROR) { +- dev_err(chip->pdev, "ERROR-package received:\n"); ++ dev_err(&chip->dev, "ERROR-package received:\n"); + if (buf[4] == TPM_INF_NAK) +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "-> Negative acknowledgement" + " - retransmit command!\n"); + return -EIO; +@@ -321,7 +321,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) + + ret = empty_fifo(chip, 1); + if (ret) { +- dev_err(chip->pdev, "Timeout while clearing FIFO\n"); ++ dev_err(&chip->dev, "Timeout while clearing FIFO\n"); + return -EIO; + } + +diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c +index 289389ecef84..766370bed60c 100644 +--- a/drivers/char/tpm/tpm_nsc.c ++++ b/drivers/char/tpm/tpm_nsc.c +@@ -113,7 +113,7 @@ static int nsc_wait_for_ready(struct tpm_chip *chip) + } + while (time_before(jiffies, stop)); + +- dev_info(chip->pdev, "wait for ready failed\n"); ++ dev_info(&chip->dev, "wait for ready failed\n"); + return -EBUSY; + } + +@@ -129,12 +129,12 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) + return -EIO; + + if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) { +- dev_err(chip->pdev, "F0 timeout\n"); ++ dev_err(&chip->dev, "F0 timeout\n"); + return -EIO; + } + if ((data 
= + inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) { +- dev_err(chip->pdev, "not in normal mode (0x%x)\n", ++ dev_err(&chip->dev, "not in normal mode (0x%x)\n", + data); + return -EIO; + } +@@ -143,7 +143,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) + for (p = buffer; p < &buffer[count]; p++) { + if (wait_for_stat + (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) { +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "OBF timeout (while reading data)\n"); + return -EIO; + } +@@ -154,11 +154,11 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) + + if ((data & NSC_STATUS_F0) == 0 && + (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) { +- dev_err(chip->pdev, "F0 not set\n"); ++ dev_err(&chip->dev, "F0 not set\n"); + return -EIO; + } + if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) { +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "expected end of command(0x%x)\n", data); + return -EIO; + } +@@ -189,19 +189,19 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) + return -EIO; + + if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { +- dev_err(chip->pdev, "IBF timeout\n"); ++ dev_err(&chip->dev, "IBF timeout\n"); + return -EIO; + } + + outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND); + if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { +- dev_err(chip->pdev, "IBR timeout\n"); ++ dev_err(&chip->dev, "IBR timeout\n"); + return -EIO; + } + + for (i = 0; i < count; i++) { + if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + "IBF timeout (while writing data)\n"); + return -EIO; + } +@@ -209,7 +209,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) + } + + if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { +- dev_err(chip->pdev, "IBF timeout\n"); ++ dev_err(&chip->dev, "IBF timeout\n"); + return -EIO; + } + outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND); +diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c +index f10a107614b4..7f13221aeb30 100644 +--- a/drivers/char/tpm/tpm_tis.c ++++ b/drivers/char/tpm/tpm_tis.c +@@ -293,7 +293,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) + /* read first 10 bytes, including tag, paramsize, and result */ + if ((size = + recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) { +- dev_err(chip->pdev, "Unable to read header\n"); ++ dev_err(&chip->dev, "Unable to read header\n"); + goto out; + } + +@@ -306,7 +306,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) + if ((size += + recv_data(chip, &buf[TPM_HEADER_SIZE], + expected - TPM_HEADER_SIZE)) < expected) { +- dev_err(chip->pdev, "Unable to read remainder of result\n"); ++ dev_err(&chip->dev, "Unable to read remainder of result\n"); + size = -ETIME; + goto out; + } +@@ -315,7 +315,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) + &chip->vendor.int_queue, false); + status = tpm_tis_status(chip); + if (status & TPM_STS_DATA_AVAIL) { /* retry? 
*/ +- dev_err(chip->pdev, "Error left over data\n"); ++ dev_err(&chip->dev, "Error left over data\n"); + size = -EIO; + goto out; + } +@@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip) + iowrite32(intmask, + chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); +- devm_free_irq(chip->pdev, chip->vendor.irq, chip); ++ devm_free_irq(&chip->dev, chip->vendor.irq, chip); + chip->vendor.irq = 0; + } + +@@ -463,7 +463,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) + msleep(1); + if (!priv->irq_tested) { + disable_interrupts(chip); +- dev_err(chip->pdev, ++ dev_err(&chip->dev, + FW_BUG "TPM interrupt not working, polling instead\n"); + } + priv->irq_tested = true; +@@ -533,7 +533,7 @@ static int probe_itpm(struct tpm_chip *chip) + + rc = tpm_tis_send_data(chip, cmd_getticks, len); + if (rc == 0) { +- dev_info(chip->pdev, "Detected an iTPM.\n"); ++ dev_info(&chip->dev, "Detected an iTPM.\n"); + rc = 1; + } else + rc = -EFAULT; +@@ -766,7 +766,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, + if (devm_request_irq + (dev, i, tis_int_probe, IRQF_SHARED, + chip->devname, chip) != 0) { +- dev_info(chip->pdev, ++ dev_info(&chip->dev, + "Unable to request irq: %d for probe\n", + i); + continue; +@@ -818,7 +818,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, + if (devm_request_irq + (dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED, + chip->devname, chip) != 0) { +- dev_info(chip->pdev, ++ dev_info(&chip->dev, + "Unable to request irq: %d for use\n", + chip->vendor.irq); + chip->vendor.irq = 0; +diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c +index 0dadb6332f0e..7abe908427df 100644 +--- a/drivers/crypto/atmel-sha.c ++++ b/drivers/crypto/atmel-sha.c +@@ -963,7 +963,9 @@ static int atmel_sha_finup(struct ahash_request *req) + ctx->flags |= SHA_FLAGS_FINUP; + + err1 = atmel_sha_update(req); +- if (err1 == -EINPROGRESS || err1 == -EBUSY) ++ if (err1 == -EINPROGRESS || ++ (err1 == -EBUSY && (ahash_request_flags(req) & ++ CRYPTO_TFM_REQ_MAY_BACKLOG))) + return err1; + + /* +diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c +index 99d5e11db194..e06cc5df30be 100644 +--- a/drivers/crypto/caam/caamhash.c ++++ b/drivers/crypto/caam/caamhash.c +@@ -498,7 +498,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, + ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); + if (!ret) { + /* in progress */ +- wait_for_completion_interruptible(&result.completion); ++ wait_for_completion(&result.completion); + ret = result.err; + #ifdef DEBUG + print_hex_dump(KERN_ERR, +diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c +index e1eaf4ff9762..3ce1d5cdcbd2 100644 +--- a/drivers/crypto/caam/key_gen.c ++++ b/drivers/crypto/caam/key_gen.c +@@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, + ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); + if (!ret) { + /* in progress */ +- wait_for_completion_interruptible(&result.completion); ++ wait_for_completion(&result.completion); + ret = result.err; + #ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c +index 9a8a18aafd5c..6a60936b46e0 100644 +--- a/drivers/crypto/talitos.c ++++ b/drivers/crypto/talitos.c +@@ -804,7 +804,7 @@ static void talitos_unregister_rng(struct device *dev) + * crypto alg + */ + #define 
TALITOS_CRA_PRIORITY 3000 +-#define TALITOS_MAX_KEY_SIZE 96 ++#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) + #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ + + struct talitos_ctx { +@@ -1388,6 +1388,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, + { + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + ++ if (keylen > TALITOS_MAX_KEY_SIZE) { ++ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ++ return -EINVAL; ++ } ++ + memcpy(&ctx->key, key, keylen); + ctx->keylen = keylen; + +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c +index e33c729b9f48..5a1490b046ac 100644 +--- a/drivers/irqchip/irq-gic-v3.c ++++ b/drivers/irqchip/irq-gic-v3.c +@@ -632,6 +632,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + int enabled; + u64 val; + ++ if (cpu >= nr_cpu_ids) ++ return -EINVAL; ++ + if (gic_irq_in_rdist(d)) + return -EINVAL; + +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c +index 84b9cca152eb..e83acc608678 100644 +--- a/drivers/net/phy/dp83640.c ++++ b/drivers/net/phy/dp83640.c +@@ -907,7 +907,7 @@ static void decode_txts(struct dp83640_private *dp83640, + if (overflow) { + pr_debug("tx timestamp queue overflow, count %d\n", overflow); + while (skb) { +- skb_complete_tx_timestamp(skb, NULL); ++ kfree_skb(skb); + skb = skb_dequeue(&dp83640->tx_queue); + } + return; +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c +index e13ad6cdcc22..c8b85f1069ff 100644 +--- a/drivers/net/phy/micrel.c ++++ b/drivers/net/phy/micrel.c +@@ -539,6 +539,8 @@ static int ksz9031_read_status(struct phy_device *phydev) + if ((regval & 0xFF) == 0xFF) { + phy_init_hw(phydev); + phydev->link = 0; ++ if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) ++ phydev->drv->config_intr(phydev); + } + + return 0; +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c +index 349aecbc210a..ac945f8781ac 100644 +--- a/drivers/net/vrf.c ++++ b/drivers/net/vrf.c +@@ -733,15 +733,15 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) + static void vrf_dev_uninit(struct net_device *dev) + { + struct net_vrf *vrf = netdev_priv(dev); +- struct slave_queue *queue = &vrf->queue; +- struct list_head *head = &queue->all_slaves; +- struct slave *slave, *next; ++// struct slave_queue *queue = &vrf->queue; ++// struct list_head *head = &queue->all_slaves; ++// struct slave *slave, *next; + + vrf_rtable_destroy(vrf); + vrf_rt6_destroy(vrf); + +- list_for_each_entry_safe(slave, next, head, list) +- vrf_del_slave(dev, slave->dev); ++// list_for_each_entry_safe(slave, next, head, list) ++// vrf_del_slave(dev, slave->dev); + + free_percpu(dev->dstats); + dev->dstats = NULL; +@@ -914,6 +914,14 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[]) + + static void vrf_dellink(struct net_device *dev, struct list_head *head) + { ++ struct net_vrf *vrf = netdev_priv(dev); ++ struct slave_queue *queue = &vrf->queue; ++ struct list_head *all_slaves = &queue->all_slaves; ++ struct slave *slave, *next; ++ ++ list_for_each_entry_safe(slave, next, all_slaves, list) ++ vrf_del_slave(dev, slave->dev); ++ + unregister_netdevice_queue(dev, head); + } + +diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +index 70a6985334d5..da5826d788d6 100644 +--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c ++++ 
b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +@@ -4472,6 +4472,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, + GFP_KERNEL); + } else if (ieee80211_is_action(mgmt->frame_control)) { ++ if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) { ++ brcmf_err("invalid action frame length\n"); ++ err = -EINVAL; ++ goto exit; ++ } + af_params = kzalloc(sizeof(*af_params), GFP_KERNEL); + if (af_params == NULL) { + brcmf_err("unable to allocate frame\n"); +diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c +index 8e11fb2831cd..34f1d6b41fb9 100644 +--- a/drivers/parisc/ccio-dma.c ++++ b/drivers/parisc/ccio-dma.c +@@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size, + + BUG_ON(!dev); + ioc = GET_IOC(dev); ++ if (!ioc) ++ return DMA_ERROR_CODE; + + BUG_ON(size <= 0); + +@@ -805,6 +807,10 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size, + + BUG_ON(!dev); + ioc = GET_IOC(dev); ++ if (!ioc) { ++ WARN_ON(!ioc); ++ return; ++ } + + DBG_RUN("%s() iovp 0x%lx/%x\n", + __func__, (long)iova, size); +@@ -908,6 +914,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, + + BUG_ON(!dev); + ioc = GET_IOC(dev); ++ if (!ioc) ++ return 0; + + DBG_RUN_SG("%s() START %d entries\n", __func__, nents); + +@@ -980,6 +988,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, + + BUG_ON(!dev); + ioc = GET_IOC(dev); ++ if (!ioc) { ++ WARN_ON(!ioc); ++ return; ++ } + + DBG_RUN_SG("%s() START %d entries, %p,%x\n", + __func__, nents, sg_virt(sglist), sglist->length); +diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c +index a0580afe1713..7b0ca1551d7b 100644 +--- a/drivers/parisc/dino.c ++++ b/drivers/parisc/dino.c +@@ -154,7 +154,10 @@ struct dino_device + }; + + /* Looks nice and keeps the compiler happy */ +-#define DINO_DEV(d) ((struct dino_device *) d) ++#define DINO_DEV(d) ({ \ ++ void *__pdata = d; \ ++ BUG_ON(!__pdata); \ ++ (struct dino_device *)__pdata; }) + + + /* +diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c +index 42844c2bc065..d0c2759076a2 100644 +--- a/drivers/parisc/lba_pci.c ++++ b/drivers/parisc/lba_pci.c +@@ -111,8 +111,10 @@ static u32 lba_t32; + + + /* Looks nice and keeps the compiler happy */ +-#define LBA_DEV(d) ((struct lba_device *) (d)) +- ++#define LBA_DEV(d) ({ \ ++ void *__pdata = d; \ ++ BUG_ON(!__pdata); \ ++ (struct lba_device *)__pdata; }) + + /* + ** Only allow 8 subsidiary busses per LBA +diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c +index 225049b492e5..d6326144ce01 100644 +--- a/drivers/parisc/sba_iommu.c ++++ b/drivers/parisc/sba_iommu.c +@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask) + return 0; + + ioc = GET_IOC(dev); ++ if (!ioc) ++ return 0; + + /* + * check if mask is >= than the current max IO Virt Address +@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, + int pide; + + ioc = GET_IOC(dev); ++ if (!ioc) ++ return DMA_ERROR_CODE; + + /* save offset bits */ + offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK; +@@ -803,6 +807,10 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, + DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size); + + ioc = GET_IOC(dev); ++ if (!ioc) { ++ WARN_ON(!ioc); ++ return; ++ } + offset = iova & ~IOVP_MASK; + iova ^= offset; /* clear offset bits */ + size += offset; +@@ -942,6 +950,8 @@ sba_map_sg(struct device 
*dev, struct scatterlist *sglist, int nents, + DBG_RUN_SG("%s() START %d entries\n", __func__, nents); + + ioc = GET_IOC(dev); ++ if (!ioc) ++ return 0; + + /* Fast path single entry scatterlists. */ + if (nents == 1) { +@@ -1027,6 +1037,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, + __func__, nents, sg_virt(sglist), sglist->length); + + ioc = GET_IOC(dev); ++ if (!ioc) { ++ WARN_ON(!ioc); ++ return; ++ } + + #ifdef SBA_COLLECT_STATS + ioc->usg_calls++; +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 5ab54ef4f304..e4f69bddcfb1 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -2708,13 +2708,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) + * related to the kernel should not use this. + */ + data = vt_get_shift_state(); +- ret = __put_user(data, p); ++ ret = put_user(data, p); + break; + case TIOCL_GETMOUSEREPORTING: + console_lock(); /* May be overkill */ + data = mouse_reporting(); + console_unlock(); +- ret = __put_user(data, p); ++ ret = put_user(data, p); + break; + case TIOCL_SETVESABLANK: + console_lock(); +@@ -2723,7 +2723,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) + break; + case TIOCL_GETKMSGREDIRECT: + data = vt_get_kmsg_redirect(); +- ret = __put_user(data, p); ++ ret = put_user(data, p); + break; + case TIOCL_SETKMSGREDIRECT: + if (!capable(CAP_SYS_ADMIN)) { +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index 29ef427c0652..f44e93d2650d 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -905,17 +905,60 @@ static int load_elf_binary(struct linux_binprm *bprm) + elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE; + + vaddr = elf_ppnt->p_vaddr; ++ /* ++ * If we are loading ET_EXEC or we have already performed ++ * the ET_DYN load_addr calculations, proceed normally. ++ */ + if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) { + elf_flags |= MAP_FIXED; + } else if (loc->elf_ex.e_type == ET_DYN) { +- /* Try and get dynamic programs out of the way of the +- * default mmap base, as well as whatever program they +- * might try to exec. This is because the brk will +- * follow the loader, and is not movable. */ +- load_bias = ELF_ET_DYN_BASE - vaddr; +- if (current->flags & PF_RANDOMIZE) +- load_bias += arch_mmap_rnd(); +- load_bias = ELF_PAGESTART(load_bias); ++ /* ++ * This logic is run once for the first LOAD Program ++ * Header for ET_DYN binaries to calculate the ++ * randomization (load_bias) for all the LOAD ++ * Program Headers, and to calculate the entire ++ * size of the ELF mapping (total_size). (Note that ++ * load_addr_set is set to true later once the ++ * initial mapping is performed.) ++ * ++ * There are effectively two types of ET_DYN ++ * binaries: programs (i.e. PIE: ET_DYN with INTERP) ++ * and loaders (ET_DYN without INTERP, since they ++ * _are_ the ELF interpreter). The loaders must ++ * be loaded away from programs since the program ++ * may otherwise collide with the loader (especially ++ * for ET_EXEC which does not have a randomized ++ * position). For example to handle invocations of ++ * "./ld.so someprog" to test out a new version of ++ * the loader, the subsequent program that the ++ * loader loads must avoid the loader itself, so ++ * they cannot share the same load range. Sufficient ++ * room for the brk must be allocated with the ++ * loader as well, since brk must be available with ++ * the loader. 
++ *
++ * Therefore, programs are loaded offset from
++ * ELF_ET_DYN_BASE and loaders are loaded into the
++ * independently randomized mmap region (0 load_bias
++ * without MAP_FIXED).
++ */
++ if (elf_interpreter) {
++ load_bias = ELF_ET_DYN_BASE;
++ if (current->flags & PF_RANDOMIZE)
++ load_bias += arch_mmap_rnd();
++ elf_flags |= MAP_FIXED;
++ } else
++ load_bias = 0;
++
++ /*
++ * Since load_bias is used for all subsequent loading
++ * calculations, we must lower it by the first vaddr
++ * so that the remaining calculations based on the
++ * ELF vaddrs will be correctly offset. The result
++ * is then page aligned.
++ */
++ load_bias = ELF_PAGESTART(load_bias - vaddr);
++
 total_size = total_mapping_size(elf_phdata,
 loc->elf_ex.e_phnum);
 if (!total_size) {
diff --git a/fs/dcache.c b/fs/dcache.c
index 849c1c1e787b..3000cbb54949 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1128,11 +1128,12 @@ void shrink_dcache_sb(struct super_block *sb)
 LIST_HEAD(dispose);
 
 freed = list_lru_walk(&sb->s_dentry_lru,
- dentry_lru_isolate_shrink, &dispose, UINT_MAX);
+ dentry_lru_isolate_shrink, &dispose, 1024);
 
 this_cpu_sub(nr_dentry_unused, freed);
 shrink_dentry_list(&dispose);
- } while (freed > 0);
+ cond_resched();
+ } while (list_lru_count(&sb->s_dentry_lru) > 0);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
diff --git a/fs/exec.c b/fs/exec.c
index 02153068a694..9c5ee2a880aa 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -206,8 +206,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 if (write) {
 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
- unsigned long ptr_size;
- struct rlimit *rlim;
+ unsigned long ptr_size, limit;
 
 /*
 * Since the stack will hold pointers to the strings, we
@@ -236,14 +235,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 return page;
 
 /*
- * Limit to 1/4-th the stack size for the argv+env strings.
+ * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
+ * (whichever is smaller) for the argv+env strings.
 * This ensures that:
 * - the remaining binfmt code will not run out of stack space,
 * - the program will have a reasonable amount of stack left
 * to work from.
+ */
+- rlim = current->signal->rlim;
+- if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
++ limit = _STK_LIM / 4 * 3;
++ limit = min(limit, rlimit(RLIMIT_STACK) / 4);
++ if (size > limit)
 goto fail;
 }
 
diff --git a/fs/mount.h b/fs/mount.h
index 13a4ebbbaa74..37c64bbe840c 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -57,6 +57,7 @@ struct mount {
 struct mnt_namespace *mnt_ns; /* containing namespace */
 struct mountpoint *mnt_mp; /* where is it mounted */
 struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
+ struct list_head mnt_umounting; /* list entry for umount propagation */
 #ifdef CONFIG_FSNOTIFY
 struct hlist_head mnt_fsnotify_marks;
 __u32 mnt_fsnotify_mask;
diff --git a/fs/namespace.c b/fs/namespace.c
index f26d18d69712..ec4078d16eb7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name)
 INIT_LIST_HEAD(&mnt->mnt_slave_list);
 INIT_LIST_HEAD(&mnt->mnt_slave);
 INIT_HLIST_NODE(&mnt->mnt_mp_list);
+ INIT_LIST_HEAD(&mnt->mnt_umounting);
 #ifdef CONFIG_FSNOTIFY
 INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
 #endif
diff --git a/fs/pnode.c b/fs/pnode.c
index b394ca5307ec..d15c63e97ef1 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p)
 return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
 }
 
+static inline struct mount *last_slave(struct mount *p)
+{
+ return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
+}
+
 static inline struct mount *next_slave(struct mount *p)
 {
 return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
@@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m,
 }
 }
 
+static struct mount *skip_propagation_subtree(struct mount *m,
+ struct mount *origin)
+{
+ /*
+ * Advance m such that propagation_next will not return
+ * the slaves of m.
+ */
+ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+ m = last_slave(m);
+
+ return m;
+}
+
 static struct mount *next_group(struct mount *m, struct mount *origin)
 {
 while (1) {
@@ -415,65 +433,104 @@ void propagate_mount_unlock(struct mount *mnt)
 }
 }
 
-/*
- * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
- */
-static void mark_umount_candidates(struct mount *mnt)
+static void umount_one(struct mount *mnt, struct list_head *to_umount)
 {
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
-
- BUG_ON(parent == mnt);
-
- for (m = propagation_next(parent, parent); m;
- m = propagation_next(m, parent)) {
- struct mount *child = __lookup_mnt(&m->mnt,
- mnt->mnt_mountpoint);
- if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
- continue;
- if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
- SET_MNT_MARK(child);
- }
- }
+ CLEAR_MNT_MARK(mnt);
+ mnt->mnt.mnt_flags |= MNT_UMOUNT;
+ list_del_init(&mnt->mnt_child);
+ list_del_init(&mnt->mnt_umounting);
+ list_move_tail(&mnt->mnt_list, to_umount);
 }
 
 /*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
+ */ +-static void __propagate_umount(struct mount *mnt) ++static bool __propagate_umount(struct mount *mnt, ++ struct list_head *to_umount, ++ struct list_head *to_restore) + { +- struct mount *parent = mnt->mnt_parent; +- struct mount *m; ++ bool progress = false; ++ struct mount *child; + +- BUG_ON(parent == mnt); ++ /* ++ * The state of the parent won't change if this mount is ++ * already unmounted or marked as without children. ++ */ ++ if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED)) ++ goto out; + +- for (m = propagation_next(parent, parent); m; +- m = propagation_next(m, parent)) { +- struct mount *topper; +- struct mount *child = __lookup_mnt(&m->mnt, +- mnt->mnt_mountpoint); +- /* +- * umount the child only if the child has no children +- * and the child is marked safe to unmount. +- */ +- if (!child || !IS_MNT_MARKED(child)) ++ /* Verify topper is the only grandchild that has not been ++ * speculatively unmounted. ++ */ ++ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { ++ if (child->mnt_mountpoint == mnt->mnt.mnt_root) + continue; +- CLEAR_MNT_MARK(child); ++ if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child)) ++ continue; ++ /* Found a mounted child */ ++ goto children; ++ } + +- /* If there is exactly one mount covering all of child +- * replace child with that mount. +- */ +- topper = find_topper(child); +- if (topper) +- mnt_change_mountpoint(child->mnt_parent, child->mnt_mp, +- topper); ++ /* Mark mounts that can be unmounted if not locked */ ++ SET_MNT_MARK(mnt); ++ progress = true; ++ ++ /* If a mount is without children and not locked umount it. */ ++ if (!IS_MNT_LOCKED(mnt)) { ++ umount_one(mnt, to_umount); ++ } else { ++children: ++ list_move_tail(&mnt->mnt_umounting, to_restore); ++ } ++out: ++ return progress; ++} ++ ++static void umount_list(struct list_head *to_umount, ++ struct list_head *to_restore) ++{ ++ struct mount *mnt, *child, *tmp; ++ list_for_each_entry(mnt, to_umount, mnt_list) { ++ list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) { ++ /* topper? */ ++ if (child->mnt_mountpoint == mnt->mnt.mnt_root) ++ list_move_tail(&child->mnt_umounting, to_restore); ++ else ++ umount_one(child, to_umount); ++ } ++ } ++} + +- if (list_empty(&child->mnt_mounts)) { +- list_del_init(&child->mnt_child); +- child->mnt.mnt_flags |= MNT_UMOUNT; +- list_move_tail(&child->mnt_list, &mnt->mnt_list); ++static void restore_mounts(struct list_head *to_restore) ++{ ++ /* Restore mounts to a clean working state */ ++ while (!list_empty(to_restore)) { ++ struct mount *mnt, *parent; ++ struct mountpoint *mp; ++ ++ mnt = list_first_entry(to_restore, struct mount, mnt_umounting); ++ CLEAR_MNT_MARK(mnt); ++ list_del_init(&mnt->mnt_umounting); ++ ++ /* Should this mount be reparented? 
*/ ++ mp = mnt->mnt_mp; ++ parent = mnt->mnt_parent; ++ while (parent->mnt.mnt_flags & MNT_UMOUNT) { ++ mp = parent->mnt_mp; ++ parent = parent->mnt_parent; + } ++ if (parent != mnt->mnt_parent) ++ mnt_change_mountpoint(parent, mp, mnt); ++ } ++} ++ ++static void cleanup_umount_visitations(struct list_head *visited) ++{ ++ while (!list_empty(visited)) { ++ struct mount *mnt = ++ list_first_entry(visited, struct mount, mnt_umounting); ++ list_del_init(&mnt->mnt_umounting); + } + } + +@@ -487,11 +544,68 @@ static void __propagate_umount(struct mount *mnt) + int propagate_umount(struct list_head *list) + { + struct mount *mnt; ++ LIST_HEAD(to_restore); ++ LIST_HEAD(to_umount); ++ LIST_HEAD(visited); ++ ++ /* Find candidates for unmounting */ ++ list_for_each_entry_reverse(mnt, list, mnt_list) { ++ struct mount *parent = mnt->mnt_parent; ++ struct mount *m; ++ ++ /* ++ * If this mount has already been visited it is known that it's ++ * entire peer group and all of their slaves in the propagation ++ * tree for the mountpoint has already been visited and there is ++ * no need to visit them again. ++ */ ++ if (!list_empty(&mnt->mnt_umounting)) ++ continue; ++ ++ list_add_tail(&mnt->mnt_umounting, &visited); ++ for (m = propagation_next(parent, parent); m; ++ m = propagation_next(m, parent)) { ++ struct mount *child = __lookup_mnt(&m->mnt, ++ mnt->mnt_mountpoint); ++ if (!child) ++ continue; ++ ++ if (!list_empty(&child->mnt_umounting)) { ++ /* ++ * If the child has already been visited it is ++ * know that it's entire peer group and all of ++ * their slaves in the propgation tree for the ++ * mountpoint has already been visited and there ++ * is no need to visit this subtree again. ++ */ ++ m = skip_propagation_subtree(m, parent); ++ continue; ++ } else if (child->mnt.mnt_flags & MNT_UMOUNT) { ++ /* ++ * We have come accross an partially unmounted ++ * mount in list that has not been visited yet. ++ * Remember it has been visited and continue ++ * about our merry way. ++ */ ++ list_add_tail(&child->mnt_umounting, &visited); ++ continue; ++ } ++ ++ /* Check the child and parents while progress is made */ ++ while (__propagate_umount(child, ++ &to_umount, &to_restore)) { ++ /* Is the parent a umount candidate? */ ++ child = child->mnt_parent; ++ if (list_empty(&child->mnt_umounting)) ++ break; ++ } ++ } ++ } + +- list_for_each_entry_reverse(mnt, list, mnt_list) +- mark_umount_candidates(mnt); ++ umount_list(&to_umount, &to_restore); ++ restore_mounts(&to_restore); ++ cleanup_umount_visitations(&visited); ++ list_splice_tail(&to_umount, list); + +- list_for_each_entry(mnt, list, mnt_list) +- __propagate_umount(mnt); + return 0; + } +diff --git a/include/linux/device.h b/include/linux/device.h +index b8f411b57dcb..7075a2485ed3 100644 +--- a/include/linux/device.h ++++ b/include/linux/device.h +@@ -368,6 +368,7 @@ int subsys_virtual_register(struct bus_type *subsys, + * @suspend: Used to put the device to sleep mode, usually to a low power + * state. + * @resume: Used to bring the device from the sleep mode. ++ * @shutdown: Called at shut-down time to quiesce the device. + * @ns_type: Callbacks so sysfs can detemine namespaces. + * @namespace: Namespace of the device belongs to this class. + * @pm: The default device power management operations of this class. 
+@@ -396,6 +397,7 @@ struct class {
 
 int (*suspend)(struct device *dev, pm_message_t state);
 int (*resume)(struct device *dev);
+ int (*shutdown)(struct device *dev);
 
 const struct kobj_ns_type_operations *ns_type;
 const void *(*namespace)(struct device *dev);
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 2a6b9947aaa3..743b34f56f2b 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -44,6 +44,7 @@ struct list_lru_node {
 /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
 struct list_lru_memcg *memcg_lrus;
 #endif
+ long nr_items;
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 6275d651f76e..b8a8d4239e85 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -21,6 +21,7 @@ struct route_info {
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -208,4 +209,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
 return daddr;
 }
 
+static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
+{
+ return a->dst.dev == b->dst.dev &&
+ a->rt6i_idev == b->rt6i_idev &&
+ ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
+ !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
+}
 #endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2cbfba78d3db..863e24f1e62e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -754,6 +754,11 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
 if (err)
 return err;
 
+ if (is_pointer_value(env, insn->src_reg)) {
+ verbose("R%d leaks addr into mem\n", insn->src_reg);
+ return -EACCES;
+ }
+
 /* check whether atomic_add can read the memory */
 err = check_mem_access(env, insn->dst_reg, insn->off,
 BPF_SIZE(insn->code), BPF_READ, -1);
diff --git a/kernel/extable.c b/kernel/extable.c
index e820ccee9846..4f06fc34313f 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr)
 return 0;
 }
 
-int core_kernel_text(unsigned long addr)
+int notrace core_kernel_text(unsigned long addr)
 {
 if (addr >= (unsigned long)_stext &&
 addr < (unsigned long)_etext)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 20253dbc8610..c436426a80dd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6123,6 +6123,9 @@ enum s_alloc {
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
+ * Only CPUs that can arrive at this group should be considered to continue
+ * balancing.
+ *
 * Asymmetric node setups can result in situations where the domain tree is of
 * unequal depth, make sure to skip domains that already cover the entire
 * range.
@@ -6134,18 +6137,31 @@ enum s_alloc {
 */
 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
 {
- const struct cpumask *span = sched_domain_span(sd);
+ const struct cpumask *sg_span = sched_group_cpus(sg);
 struct sd_data *sdd = sd->private;
 struct sched_domain *sibling;
 int i;
 
- for_each_cpu(i, span) {
+ for_each_cpu(i, sg_span) {
 sibling = *per_cpu_ptr(sdd->sd, i);
- if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+ /*
+ * Can happen in the asymmetric case, where these siblings are
+ * unused. The mask will not be empty because those CPUs that
+ * do have the top domain _should_ span the domain.
++ */ ++ if (!sibling->child) ++ continue; ++ ++ /* If we would not end up here, we can't continue from here */ ++ if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) + continue; + + cpumask_set_cpu(i, sched_group_mask(sg)); + } ++ ++ /* We must not have empty masks here */ ++ WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg))); + } + + /* +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 4c21c0b7dc91..c83d59913d78 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1660,7 +1660,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, + TRACE_FLAG_IRQS_NOSUPPORT | + #endif + ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | +- ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | ++ ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | + (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | + (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); + } +diff --git a/mm/list_lru.c b/mm/list_lru.c +index 5d8dffd5b57c..786176b1a0ee 100644 +--- a/mm/list_lru.c ++++ b/mm/list_lru.c +@@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item) + l = list_lru_from_kmem(nlru, item); + list_add_tail(item, &l->list); + l->nr_items++; ++ nlru->nr_items++; + spin_unlock(&nlru->lock); + return true; + } +@@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item) + l = list_lru_from_kmem(nlru, item); + list_del_init(item); + l->nr_items--; ++ nlru->nr_items--; + spin_unlock(&nlru->lock); + return true; + } +@@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one); + + unsigned long list_lru_count_node(struct list_lru *lru, int nid) + { +- long count = 0; +- int memcg_idx; ++ struct list_lru_node *nlru; + +- count += __list_lru_count_one(lru, nid, -1); +- if (list_lru_memcg_aware(lru)) { +- for_each_memcg_cache_index(memcg_idx) +- count += __list_lru_count_one(lru, nid, memcg_idx); +- } +- return count; ++ nlru = &lru->node[nid]; ++ return nlru->nr_items; + } + EXPORT_SYMBOL_GPL(list_lru_count_node); + +@@ -226,6 +223,7 @@ restart: + assert_spin_locked(&nlru->lock); + case LRU_REMOVED: + isolated++; ++ nlru->nr_items--; + /* + * If the lru lock has been dropped, our list + * traversal is now invalid and so we have to +diff --git a/mm/mmap.c b/mm/mmap.c +index 0990f8bc0fbe..eaa460ddcaf9 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -2176,7 +2176,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) + + /* Guard against exceeding limits of the address space. 
*/ + address &= PAGE_MASK; +- if (address >= TASK_SIZE) ++ if (address >= (TASK_SIZE & PAGE_MASK)) + return -ENOMEM; + address += PAGE_SIZE; + +diff --git a/net/core/dev.c b/net/core/dev.c +index 524d8b28e690..dc5d3d546150 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -4375,6 +4375,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type) + } + EXPORT_SYMBOL(gro_find_complete_by_type); + ++static void napi_skb_free_stolen_head(struct sk_buff *skb) ++{ ++ skb_dst_drop(skb); ++ kmem_cache_free(skbuff_head_cache, skb); ++} ++ + static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) + { + switch (ret) { +@@ -4388,12 +4394,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) + break; + + case GRO_MERGED_FREE: +- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) { +- skb_dst_drop(skb); +- kmem_cache_free(skbuff_head_cache, skb); +- } else { ++ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) ++ napi_skb_free_stolen_head(skb); ++ else + __kfree_skb(skb); +- } + break; + + case GRO_HELD: +@@ -4459,10 +4463,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, + break; + + case GRO_DROP: +- case GRO_MERGED_FREE: + napi_reuse_skb(napi, skb); + break; + ++ case GRO_MERGED_FREE: ++ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) ++ napi_skb_free_stolen_head(skb); ++ else ++ napi_reuse_skb(napi, skb); ++ break; ++ + case GRO_MERGED: + break; + } +@@ -7052,8 +7062,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, + } else { + netdev_stats_to_stats64(storage, &dev->stats); + } +- storage->rx_dropped += atomic_long_read(&dev->rx_dropped); +- storage->tx_dropped += atomic_long_read(&dev->tx_dropped); ++ storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); ++ storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); + return storage; + } + EXPORT_SYMBOL(dev_get_stats); +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 4bd8678329d6..0870a86e9d96 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -2263,6 +2263,8 @@ int tcp_disconnect(struct sock *sk, int flags) + tcp_init_send_head(sk); + memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); + __sk_dst_reset(sk); ++ dst_release(sk->sk_rx_dst); ++ sk->sk_rx_dst = NULL; + tcp_saved_syn_free(tp); + + WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 03dadbf6cc5e..735b22b1b4ea 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -1772,17 +1772,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add + + static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) + { +- if (ifp->flags&IFA_F_PERMANENT) { +- spin_lock_bh(&ifp->lock); +- addrconf_del_dad_work(ifp); +- ifp->flags |= IFA_F_TENTATIVE; +- if (dad_failed) +- ifp->flags |= IFA_F_DADFAILED; +- spin_unlock_bh(&ifp->lock); +- if (dad_failed) +- ipv6_ifa_notify(0, ifp); +- in6_ifa_put(ifp); +- } else if (ifp->flags&IFA_F_TEMPORARY) { ++ if (ifp->flags&IFA_F_TEMPORARY) { + struct inet6_ifaddr *ifpub; + spin_lock_bh(&ifp->lock); + ifpub = ifp->ifpub; +@@ -1795,6 +1785,16 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) + spin_unlock_bh(&ifp->lock); + } + ipv6_del_addr(ifp); ++ } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) { ++ spin_lock_bh(&ifp->lock); ++ addrconf_del_dad_work(ifp); ++ ifp->flags |= IFA_F_TENTATIVE; ++ if (dad_failed) ++ ifp->flags |= IFA_F_DADFAILED; ++ spin_unlock_bh(&ifp->lock); ++ if 
(dad_failed) ++ ipv6_ifa_notify(0, ifp); ++ in6_ifa_put(ifp); + } else { + ipv6_del_addr(ifp); + } +@@ -3143,6 +3143,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, + { + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct inet6_dev *idev = __in6_dev_get(dev); ++ struct net *net = dev_net(dev); + int run_pending = 0; + int err; + +@@ -3158,7 +3159,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, + case NETDEV_CHANGEMTU: + /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */ + if (dev->mtu < IPV6_MIN_MTU) { +- addrconf_ifdown(dev, 1); ++ addrconf_ifdown(dev, dev != net->loopback_dev); + break; + } + +@@ -3271,7 +3272,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, + * IPV6_MIN_MTU stop IPv6 on this interface. + */ + if (dev->mtu < IPV6_MIN_MTU) +- addrconf_ifdown(dev, 1); ++ addrconf_ifdown(dev, dev != net->loopback_dev); + } + break; + +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index 1ac06723f0d7..f60e8caea767 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -767,10 +767,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, + goto next_iter; + } + +- if (iter->dst.dev == rt->dst.dev && +- iter->rt6i_idev == rt->rt6i_idev && +- ipv6_addr_equal(&iter->rt6i_gateway, +- &rt->rt6i_gateway)) { ++ if (rt6_duplicate_nexthop(iter, rt)) { + if (rt->rt6i_nsiblings) + rt->rt6i_nsiblings = 0; + if (!(iter->rt6i_flags & RTF_EXPIRES)) +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 8f4177a1d4f5..ef335070e98a 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -2833,17 +2833,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list, + struct rt6_info *rt, struct fib6_config *r_cfg) + { + struct rt6_nh *nh; +- struct rt6_info *rtnh; + int err = -EEXIST; + + list_for_each_entry(nh, rt6_nh_list, next) { + /* check if rt6_info already exists */ +- rtnh = nh->rt6_info; +- +- if (rtnh->dst.dev == rt->dst.dev && +- rtnh->rt6i_idev == rt->rt6i_idev && +- ipv6_addr_equal(&rtnh->rt6i_gateway, +- &rt->rt6i_gateway)) ++ if (rt6_duplicate_nexthop(nh->rt6_info, rt)) + return err; + } + +diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c +index 0936a4a32b47..e353e3255206 100644 +--- a/net/rds/tcp_listen.c ++++ b/net/rds/tcp_listen.c +@@ -78,7 +78,7 @@ int rds_tcp_accept_one(struct socket *sock) + struct inet_sock *inet; + struct rds_tcp_connection *rs_tcp; + +- ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, ++ ret = sock_create_lite(sock->sk->sk_family, + sock->sk->sk_type, sock->sk->sk_protocol, + &new_sock); + if (ret) +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index 95b560f0b253..6d340cd6e2a7 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -1004,6 +1004,9 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, + + return sch; + } ++ /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */ ++ if (ops->destroy) ++ ops->destroy(sch); + err_out3: + dev_put(dev); + kfree((char *) sch - sch->padded); +diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c +index 13d6f83ec491..45d4b2f22f62 100644 +--- a/net/sched/sch_hhf.c ++++ b/net/sched/sch_hhf.c +@@ -636,7 +636,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) + q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN * + sizeof(u32)); + if (!q->hhf_arrays[i]) { +- hhf_destroy(sch); ++ /* Note: hhf_destroy() will be called ++ * by our caller. 
++ */ + return -ENOMEM; + } + } +@@ -647,7 +649,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) + q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN / + BITS_PER_BYTE); + if (!q->hhf_valid_bits[i]) { +- hhf_destroy(sch); ++ /* Note: hhf_destroy() will be called ++ * by our caller. ++ */ + return -ENOMEM; + } + } +diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c +index 3e82f047caaf..d9c84328e7eb 100644 +--- a/net/sched/sch_mq.c ++++ b/net/sched/sch_mq.c +@@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) + /* pre-allocate qdiscs, attachment can't fail */ + priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), + GFP_KERNEL); +- if (priv->qdiscs == NULL) ++ if (!priv->qdiscs) + return -ENOMEM; + + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { +@@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) + qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, + TC_H_MAKE(TC_H_MAJ(sch->handle), + TC_H_MIN(ntx + 1))); +- if (qdisc == NULL) +- goto err; ++ if (!qdisc) ++ return -ENOMEM; + priv->qdiscs[ntx] = qdisc; + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; + } + + sch->flags |= TCQ_F_MQROOT; + return 0; +- +-err: +- mq_destroy(sch); +- return -ENOMEM; + } + + static void mq_attach(struct Qdisc *sch) +diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c +index ad70ecf57ce7..66bccc5ff4ea 100644 +--- a/net/sched/sch_mqprio.c ++++ b/net/sched/sch_mqprio.c +@@ -117,20 +117,17 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) + /* pre-allocate qdisc, attachment can't fail */ + priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), + GFP_KERNEL); +- if (priv->qdiscs == NULL) { +- err = -ENOMEM; +- goto err; +- } ++ if (!priv->qdiscs) ++ return -ENOMEM; + + for (i = 0; i < dev->num_tx_queues; i++) { + dev_queue = netdev_get_tx_queue(dev, i); + qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, + TC_H_MAKE(TC_H_MAJ(sch->handle), + TC_H_MIN(i + 1))); +- if (qdisc == NULL) { +- err = -ENOMEM; +- goto err; +- } ++ if (!qdisc) ++ return -ENOMEM; ++ + priv->qdiscs[i] = qdisc; + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; + } +@@ -143,7 +140,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) + priv->hw_owned = 1; + err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc); + if (err) +- goto err; ++ return err; + } else { + netdev_set_num_tc(dev, qopt->num_tc); + for (i = 0; i < qopt->num_tc; i++) +@@ -157,10 +154,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) + + sch->flags |= TCQ_F_MQROOT; + return 0; +- +-err: +- mqprio_destroy(sch); +- return err; + } + + static void mqprio_attach(struct Qdisc *sch) +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c +index 498f0a2cb47f..4431e2833e45 100644 +--- a/net/sched/sch_sfq.c ++++ b/net/sched/sch_sfq.c +@@ -742,9 +742,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) + q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor); + q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows); + if (!q->ht || !q->slots) { +- sfq_destroy(sch); ++ /* Note: sfq_destroy() will be called by our caller */ + return -ENOMEM; + } ++ + for (i = 0; i < q->divisor; i++) + q->ht[i] = SFQ_EMPTY_SLOT; + +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 9d0953e5734f..de10e3c0e2a4 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -302,8 +302,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { + [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, + 
[NL80211_ATTR_PID] = { .type = NLA_U32 }, + [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, +- [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, +- .len = WLAN_PMKID_LEN }, ++ [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN }, + [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, + [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, + [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, +@@ -359,6 +358,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { + [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, + [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 }, + [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 }, ++ [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 }, + [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 }, + [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, + [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, +@@ -5705,6 +5705,10 @@ static int validate_scan_freqs(struct nlattr *freqs) + struct nlattr *attr1, *attr2; + int n_channels = 0, tmp1, tmp2; + ++ nla_for_each_nested(attr1, freqs, tmp1) ++ if (nla_len(attr1) != sizeof(u32)) ++ return 0; ++ + nla_for_each_nested(attr1, freqs, tmp1) { + n_channels++; + /* +diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl +index 2b3c22808c3b..6ac6550d751c 100755 +--- a/scripts/checkpatch.pl ++++ b/scripts/checkpatch.pl +@@ -3252,7 +3252,7 @@ sub process { + $fixedline =~ s/\s*=\s*$/ = {/; + fix_insert_line($fixlinenr, $fixedline); + $fixedline = $line; +- $fixedline =~ s/^(.\s*){\s*/$1/; ++ $fixedline =~ s/^(.\s*)\{\s*/$1/; + fix_insert_line($fixlinenr, $fixedline); + } + } +@@ -3602,7 +3602,7 @@ sub process { + my $fixedline = rtrim($prevrawline) . " {"; + fix_insert_line($fixlinenr, $fixedline); + $fixedline = $rawline; +- $fixedline =~ s/^(.\s*){\s*/$1\t/; ++ $fixedline =~ s/^(.\s*)\{\s*/$1\t/; + if ($fixedline !~ /^\+\s*$/) { + fix_insert_line($fixlinenr, $fixedline); + } +@@ -4091,7 +4091,7 @@ sub process { + if (ERROR("SPACING", + "space required before the open brace '{'\n" . 
$herecurr) && + $fix) { +- $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/; ++ $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/; + } + } + +diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h +index c808c7d02d21..e69118b2077e 100644 +--- a/tools/lib/lockdep/uinclude/linux/lockdep.h ++++ b/tools/lib/lockdep/uinclude/linux/lockdep.h +@@ -8,7 +8,7 @@ + #include + #include + +-#define MAX_LOCK_DEPTH 2000UL ++#define MAX_LOCK_DEPTH 255UL + + #define asmlinkage + #define __visible +diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c +index 10a21a958aaf..763f37fecfb8 100644 +--- a/tools/testing/selftests/capabilities/test_execve.c ++++ b/tools/testing/selftests/capabilities/test_execve.c +@@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void) + + if (chdir(cwd) != 0) + err(1, "chdir to private tmpfs"); +- +- if (umount2(".", MNT_DETACH) != 0) +- err(1, "detach private tmpfs"); + } + + static void copy_fromat_to(int fromfd, const char *fromname, const char *toname) +@@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path) + err(1, "chown"); + if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0) + err(1, "chmod"); +-} ++ } + + capng_get_caps_process(); + +@@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path) + } else { + printf("[RUN]\tNon-root +ia, sgidnonroot => i\n"); + exec_other_validate_cap("./validate_cap_sgidnonroot", +- false, false, true, false); ++ false, false, true, false); + + if (fork_wait()) { + printf("[RUN]\tNon-root +ia, sgidroot => i\n"); diff --git a/patch/kernel/rk3328-default/patch-4.4.78-79.patch b/patch/kernel/rk3328-default/patch-4.4.78-79.patch new file mode 100644 index 000000000..bf34060d4 --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.78-79.patch @@ -0,0 +1,2607 @@ +diff --git a/Makefile b/Makefile +index ac77ae8ee0b1..1440a94b2474 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 78 ++SUBLEVEL = 79 + EXTRAVERSION = + NAME = Blurry Fish Butt + +@@ -619,6 +619,9 @@ include arch/$(SRCARCH)/Makefile + KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) + KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) + KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) ++KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) ++KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) ++KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) + + ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE + KBUILD_CFLAGS += -Os +diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h +index de781cf54bc7..da80878f2c0d 100644 +--- a/arch/mips/include/asm/branch.h ++++ b/arch/mips/include/asm/branch.h +@@ -74,10 +74,7 @@ static inline int compute_return_epc(struct pt_regs *regs) + return __microMIPS_compute_return_epc(regs); + if (cpu_has_mips16) + return __MIPS16e_compute_return_epc(regs); +- return regs->cp0_epc; +- } +- +- if (!delay_slot(regs)) { ++ } else if (!delay_slot(regs)) { + regs->cp0_epc += 4; + return 0; + } +diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c +index e9fed8ca9b42..71e8f4c0b8da 100644 +--- a/arch/mips/kernel/branch.c ++++ b/arch/mips/kernel/branch.c +@@ -399,7 +399,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs) + * + * @regs: Pointer to pt_regs + * @insn: branch instruction to decode +- * @returns: -EFAULT on error and forces SIGBUS, and on success ++ * @returns: -EFAULT on 
error and forces SIGILL, and on success + * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after + * evaluating the branch. + * +@@ -431,7 +431,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + /* Fall through */ + case jr_op: + if (NO_R6EMU && insn.r_format.func == jr_op) +- goto sigill_r6; ++ goto sigill_r2r6; + regs->cp0_epc = regs->regs[insn.r_format.rs]; + break; + } +@@ -446,7 +446,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + switch (insn.i_format.rt) { + case bltzl_op: + if (NO_R6EMU) +- goto sigill_r6; ++ goto sigill_r2r6; + case bltz_op: + if ((long)regs->regs[insn.i_format.rs] < 0) { + epc = epc + 4 + (insn.i_format.simmediate << 2); +@@ -459,7 +459,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + + case bgezl_op: + if (NO_R6EMU) +- goto sigill_r6; ++ goto sigill_r2r6; + case bgez_op: + if ((long)regs->regs[insn.i_format.rs] >= 0) { + epc = epc + 4 + (insn.i_format.simmediate << 2); +@@ -473,10 +473,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + case bltzal_op: + case bltzall_op: + if (NO_R6EMU && (insn.i_format.rs || +- insn.i_format.rt == bltzall_op)) { +- ret = -SIGILL; +- break; +- } ++ insn.i_format.rt == bltzall_op)) ++ goto sigill_r2r6; + regs->regs[31] = epc + 8; + /* + * OK we are here either because we hit a NAL +@@ -507,10 +505,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + case bgezal_op: + case bgezall_op: + if (NO_R6EMU && (insn.i_format.rs || +- insn.i_format.rt == bgezall_op)) { +- ret = -SIGILL; +- break; +- } ++ insn.i_format.rt == bgezall_op)) ++ goto sigill_r2r6; + regs->regs[31] = epc + 8; + /* + * OK we are here either because we hit a BAL +@@ -556,6 +552,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + /* + * These are unconditional and in j_format. 
+ */ ++ case jalx_op: + case jal_op: + regs->regs[31] = regs->cp0_epc + 8; + case j_op: +@@ -573,7 +570,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + */ + case beql_op: + if (NO_R6EMU) +- goto sigill_r6; ++ goto sigill_r2r6; + case beq_op: + if (regs->regs[insn.i_format.rs] == + regs->regs[insn.i_format.rt]) { +@@ -587,7 +584,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + + case bnel_op: + if (NO_R6EMU) +- goto sigill_r6; ++ goto sigill_r2r6; + case bne_op: + if (regs->regs[insn.i_format.rs] != + regs->regs[insn.i_format.rt]) { +@@ -601,7 +598,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + + case blezl_op: /* not really i_format */ + if (!insn.i_format.rt && NO_R6EMU) +- goto sigill_r6; ++ goto sigill_r2r6; + case blez_op: + /* + * Compact branches for R6 for the +@@ -636,7 +633,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + + case bgtzl_op: + if (!insn.i_format.rt && NO_R6EMU) +- goto sigill_r6; ++ goto sigill_r2r6; + case bgtz_op: + /* + * Compact branches for R6 for the +@@ -843,11 +840,12 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, + return ret; + + sigill_dsp: +- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); +- force_sig(SIGBUS, current); ++ pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n", ++ current->comm); ++ force_sig(SIGILL, current); + return -EFAULT; +-sigill_r6: +- pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n", ++sigill_r2r6: ++ pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n", + current->comm); + force_sig(SIGILL, current); + return -EFAULT; +diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c +index 298b2b773d12..f1fab6ff53e6 100644 +--- a/arch/mips/kernel/proc.c ++++ b/arch/mips/kernel/proc.c +@@ -83,7 +83,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) + } + + seq_printf(m, "isa\t\t\t:"); +- if (cpu_has_mips_r1) ++ if (cpu_has_mips_1) + seq_printf(m, " mips1"); + if (cpu_has_mips_2) + seq_printf(m, "%s", " mips2"); +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c +index c95bf18260f8..24c115a0721a 100644 +--- a/arch/mips/kernel/ptrace.c ++++ b/arch/mips/kernel/ptrace.c +@@ -927,7 +927,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) +- trace_sys_exit(regs, regs->regs[2]); ++ trace_sys_exit(regs, regs_return_value(regs)); + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, 0); +diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S +index 2d23c834ba96..29b0c5f978e4 100644 +--- a/arch/mips/kernel/scall32-o32.S ++++ b/arch/mips/kernel/scall32-o32.S +@@ -372,7 +372,7 @@ EXPORT(sys_call_table) + PTR sys_writev + PTR sys_cacheflush + PTR sys_cachectl +- PTR sys_sysmips ++ PTR __sys_sysmips + PTR sys_ni_syscall /* 4150 */ + PTR sys_getsid + PTR sys_fdatasync +diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S +index deac63315d0e..a6323a969919 100644 +--- a/arch/mips/kernel/scall64-64.S ++++ b/arch/mips/kernel/scall64-64.S +@@ -312,7 +312,7 @@ EXPORT(sys_call_table) + PTR sys_sched_getaffinity + PTR sys_cacheflush + PTR sys_cachectl +- PTR sys_sysmips ++ PTR __sys_sysmips + PTR sys_io_setup /* 5200 */ + PTR sys_io_destroy + PTR sys_io_getevents +diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S +index ee93d5fe61d7..e0fdca8d3abe 100644 +--- 
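The ptrace.c hunk above routes trace_sys_exit() through regs_return_value() instead of raw $v0, because MIPS reports syscall failure out of band: $a3 (regs[7]) is non-zero on error, and $v0 (regs[2]) then holds a positive errno rather than the usual negative value. A sketch of that convention with a hypothetical register struct, mirroring what the 4.4 regs_return_value() computes:

struct mips_regs_sketch { unsigned long regs[32]; };

/* regs[2] = $v0 (result, or positive errno); regs[7] = $a3 (error flag) */
static long regs_return_value_sketch(const struct mips_regs_sketch *r)
{
    return r->regs[7] ? -(long)r->regs[2] : (long)r->regs[2];
}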
a/arch/mips/kernel/scall64-n32.S ++++ b/arch/mips/kernel/scall64-n32.S +@@ -298,7 +298,7 @@ EXPORT(sysn32_call_table) + PTR compat_sys_sched_getaffinity + PTR sys_cacheflush + PTR sys_cachectl +- PTR sys_sysmips ++ PTR __sys_sysmips + PTR compat_sys_io_setup /* 6200 */ + PTR sys_io_destroy + PTR compat_sys_io_getevents +diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S +index b77052ec6fb2..87c697181d25 100644 +--- a/arch/mips/kernel/scall64-o32.S ++++ b/arch/mips/kernel/scall64-o32.S +@@ -367,7 +367,7 @@ EXPORT(sys32_call_table) + PTR compat_sys_writev + PTR sys_cacheflush + PTR sys_cachectl +- PTR sys_sysmips ++ PTR __sys_sysmips + PTR sys_ni_syscall /* 4150 */ + PTR sys_getsid + PTR sys_fdatasync +diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c +index 53a7ef9a8f32..4234b2d726c5 100644 +--- a/arch/mips/kernel/syscall.c ++++ b/arch/mips/kernel/syscall.c +@@ -28,6 +28,7 @@ + #include + + #include ++#include + #include + #include + #include +@@ -138,10 +139,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) + __asm__ __volatile__ ( + " .set "MIPS_ISA_ARCH_LEVEL" \n" + " li %[err], 0 \n" +- "1: ll %[old], (%[addr]) \n" ++ "1: \n" ++ user_ll("%[old]", "(%[addr])") + " move %[tmp], %[new] \n" +- "2: sc %[tmp], (%[addr]) \n" +- " bnez %[tmp], 4f \n" ++ "2: \n" ++ user_sc("%[tmp]", "(%[addr])") ++ " beqz %[tmp], 4f \n" + "3: \n" + " .insn \n" + " .subsection 2 \n" +@@ -199,6 +202,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) + unreachable(); + } + ++/* ++ * mips_atomic_set() normally returns directly via syscall_exit potentially ++ * clobbering static registers, so be sure to preserve them. ++ */ ++save_static_function(sys_sysmips); ++ + SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2) + { + switch (cmd) { +diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c +index 734a2c7665ec..6da2e4a6ba39 100644 +--- a/arch/mips/math-emu/cp1emu.c ++++ b/arch/mips/math-emu/cp1emu.c +@@ -2496,6 +2496,35 @@ dcopuop: + return 0; + } + ++/* ++ * Emulate FPU instructions. ++ * ++ * If we use FPU hardware, then we have been typically called to handle ++ * an unimplemented operation, such as where an operand is a NaN or ++ * denormalized. In that case exit the emulation loop after a single ++ * iteration so as to let hardware execute any subsequent instructions. ++ * ++ * If we have no FPU hardware or it has been disabled, then continue ++ * emulating floating-point instructions until one of these conditions ++ * has occurred: ++ * ++ * - a non-FPU instruction has been encountered, ++ * ++ * - an attempt to emulate has ended with a signal, ++ * ++ * - the ISA mode has been switched. ++ * ++ * We need to terminate the emulation loop if we got switched to the ++ * MIPS16 mode, whether supported or not, so that we do not attempt ++ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction. ++ * Similarly if we got switched to the microMIPS mode and only the ++ * regular MIPS mode is supported, so that we do not attempt to emulate ++ * a microMIPS instruction as a regular MIPS FPU instruction. Or if ++ * we got switched to the regular MIPS mode and only the microMIPS mode ++ * is supported, so that we do not attempt to emulate a regular MIPS ++ * instruction that should cause an Address Error exception instead. ++ * For simplicity we always terminate upon an ISA mode switch. 
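Two fixes meet in the mips_atomic_set() hunk above: ll/sc become user_ll()/user_sc() so EVA kernels access the user address space through the right instructions, and the retry branch flips from bnez to beqz, since sc leaves 0 in the source register when the store fails and the old condition therefore retried on success. For readers without MIPS at hand, a portable C11 sketch of the same retry-until-the-store-sticks loop (an illustration, not the kernel's uaccess-checked variant):

#include <stdatomic.h>

static unsigned long atomic_swap_sketch(_Atomic unsigned long *addr,
                                        unsigned long new)
{
    unsigned long old = atomic_load_explicit(addr, memory_order_relaxed);

    /* A failed compare-exchange plays the role of a failed sc: the
     * observed value is refreshed into 'old' and we simply retry. */
    while (!atomic_compare_exchange_weak_explicit(addr, &old, new,
                                                  memory_order_seq_cst,
                                                  memory_order_relaxed))
        ;
    return old;
}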
++ */ + int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, + int has_fpu, void *__user *fault_addr) + { +@@ -2581,6 +2610,15 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, + break; + if (sig) + break; ++ /* ++ * We have to check for the ISA bit explicitly here, ++ * because `get_isa16_mode' may return 0 if support ++ * for code compression has been globally disabled, ++ * or otherwise we may produce the wrong signal or ++ * even proceed successfully where we must not. ++ */ ++ if ((xcp->cp0_epc ^ prevepc) & 0x1) ++ break; + + cond_resched(); + } while (xcp->cp0_epc > prevepc); +diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h +index 55f106ed12bf..039c4b910615 100644 +--- a/arch/powerpc/include/asm/atomic.h ++++ b/arch/powerpc/include/asm/atomic.h +@@ -460,7 +460,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + * Atomically increments @v by 1, so long as @v is non-zero. + * Returns non-zero if @v was non-zero, and zero otherwise. + */ +-static __inline__ long atomic64_inc_not_zero(atomic64_t *v) ++static __inline__ int atomic64_inc_not_zero(atomic64_t *v) + { + long t1, t2; + +@@ -479,7 +479,7 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v) + : "r" (&v->counter) + : "cc", "xer", "memory"); + +- return t1; ++ return t1 != 0; + } + + #endif /* __powerpc64__ */ +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h +index 627d129d7fcb..ca372bbc0ffe 100644 +--- a/arch/powerpc/include/asm/reg.h ++++ b/arch/powerpc/include/asm/reg.h +@@ -1236,7 +1236,7 @@ static inline unsigned long mfvtb (void) + " .llong 0\n" \ + ".previous" \ + : "=r" (rval) \ +- : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \ ++ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \ + rval;}) + #else + #define mftb() ({unsigned long rval; \ +diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c +index 4014881e9843..e37162d356d8 100644 +--- a/arch/powerpc/lib/sstep.c ++++ b/arch/powerpc/lib/sstep.c +@@ -687,8 +687,10 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs, + case 19: + switch ((instr >> 1) & 0x3ff) { + case 0: /* mcrf */ +- rd = (instr >> 21) & 0x1c; +- ra = (instr >> 16) & 0x1c; ++ rd = 7 - ((instr >> 23) & 0x7); ++ ra = 7 - ((instr >> 18) & 0x7); ++ rd *= 4; ++ ra *= 4; + val = (regs->ccr >> ra) & 0xf; + regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd); + goto instr_done; +@@ -967,6 +969,19 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs, + #endif + + case 19: /* mfcr */ ++ if ((instr >> 20) & 1) { ++ imm = 0xf0000000UL; ++ for (sh = 0; sh < 8; ++sh) { ++ if (instr & (0x80000 >> sh)) { ++ regs->gpr[rd] = regs->ccr & imm; ++ break; ++ } ++ imm >>= 4; ++ } ++ ++ goto instr_done; ++ } ++ + regs->gpr[rd] = regs->ccr; + regs->gpr[rd] &= 0xffffffffUL; + goto instr_done; +diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h +index 6ba0bf928909..6bc941be6921 100644 +--- a/arch/s390/include/asm/syscall.h ++++ b/arch/s390/include/asm/syscall.h +@@ -64,6 +64,12 @@ static inline void syscall_get_arguments(struct task_struct *task, + { + unsigned long mask = -1UL; + ++ /* ++ * No arguments for this syscall, there's nothing to do. 
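The powerpc atomic64_inc_not_zero() fix above is about implicit truncation: callers treat the result as a boolean int, but the old code returned the full 64-bit counter through a long. A counter that increments to a multiple of 2^32 has all-zero low 32 bits, so after truncation it reads as "was zero". A short demonstration of the failure mode:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int64_t t1 = 0x100000000LL;   /* counter value after the increment */
    int as_int = (int)t1;         /* old return path, truncated by callers */

    printf("truncated: %d   correct: %d\n", as_int, t1 != 0);
    return 0;
}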
++ */ ++ if (!n) ++ return; ++ + BUG_ON(i + n > 6); + #ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) +diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h +index 4c20dd333412..85133b2b8e99 100644 +--- a/arch/x86/include/asm/xen/hypercall.h ++++ b/arch/x86/include/asm/xen/hypercall.h +@@ -43,6 +43,7 @@ + + #include + #include ++#include + + #include + #include +@@ -213,10 +214,12 @@ privcmd_call(unsigned call, + __HYPERCALL_DECLS; + __HYPERCALL_5ARG(a1, a2, a3, a4, a5); + ++ stac(); + asm volatile("call *%[call]" + : __HYPERCALL_5PARAM + : [call] "a" (&hypercall_page[call]) + : __HYPERCALL_CLOBBER5); ++ clac(); + + return (long)__res; + } +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c +index e75907601a41..1e5eb9f2ff5f 100644 +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -329,6 +329,14 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, + struct mpc_intsrc mp_irq; + + /* ++ * Check bus_irq boundary. ++ */ ++ if (bus_irq >= NR_IRQS_LEGACY) { ++ pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq); ++ return; ++ } ++ ++ /* + * Convert 'gsi' to 'ioapic.pin'. + */ + ioapic = mp_find_ioapic(gsi); +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index 1e5d2f07416b..fc91c98bee01 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -2115,7 +2115,7 @@ static inline void __init check_timer(void) + int idx; + idx = find_irq_entry(apic1, pin1, mp_INT); + if (idx != -1 && irq_trigger(idx)) +- unmask_ioapic_irq(irq_get_chip_data(0)); ++ unmask_ioapic_irq(irq_get_irq_data(0)); + } + irq_domain_deactivate_irq(irq_data); + irq_domain_activate_irq(irq_data); +diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c +index a48824deabc5..3f94e5fc110a 100644 +--- a/drivers/base/power/domain.c ++++ b/drivers/base/power/domain.c +@@ -1373,7 +1373,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); + int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *subdomain) + { +- struct gpd_link *link; ++ struct gpd_link *l, *link; + int ret = -EINVAL; + + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) +@@ -1388,7 +1388,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + goto out; + } + +- list_for_each_entry(link, &genpd->master_links, master_node) { ++ list_for_each_entry_safe(link, l, &genpd->master_links, master_node) { + if (link->slave != subdomain) + continue; + +@@ -1642,10 +1642,10 @@ EXPORT_SYMBOL_GPL(__of_genpd_add_provider); + */ + void of_genpd_del_provider(struct device_node *np) + { +- struct of_genpd_provider *cp; ++ struct of_genpd_provider *cp, *tmp; + + mutex_lock(&of_genpd_mutex); +- list_for_each_entry(cp, &of_genpd_providers, link) { ++ list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) { + if (cp->node == np) { + list_del(&cp->link); + of_node_put(cp->node); +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index a084a4751fa9..25372dc381d4 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -3877,6 +3877,9 @@ static void smi_recv_tasklet(unsigned long val) + * because the lower layer is allowed to hold locks while calling + * message delivery. 
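Both genpd hunks above switch to the _safe list iterator because the loop body unlinks and frees the node it is standing on; advancing through a just-freed element is a use-after-free. One safe shape of delete-during-iteration, reduced to a plain singly linked list:

#include <stdlib.h>

struct node_sketch { struct node_sketch *next; int key; };

static void remove_key_sketch(struct node_sketch **head, int key)
{
    struct node_sketch **pp = head;

    while (*pp) {
        struct node_sketch *cur = *pp;

        if (cur->key == key) {
            *pp = cur->next;   /* unlink before freeing */
            free(cur);         /* cur is dead; never advance through it */
        } else {
            pp = &cur->next;
        }
    }
}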
+ */ ++ ++ rcu_read_lock(); ++ + if (!run_to_completion) + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + if (intf->curr_msg == NULL && !intf->in_shutdown) { +@@ -3899,6 +3902,8 @@ static void smi_recv_tasklet(unsigned long val) + if (newmsg) + intf->handlers->sender(intf->send_info, newmsg); + ++ rcu_read_unlock(); ++ + handle_new_recv_msgs(intf); + } + +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c +index 0d83cfb9708f..f53e8ba2c718 100644 +--- a/drivers/char/ipmi/ipmi_ssif.c ++++ b/drivers/char/ipmi/ipmi_ssif.c +@@ -758,6 +758,11 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, + result, len, data[2]); + } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 + || data[1] != IPMI_GET_MSG_FLAGS_CMD) { ++ /* ++ * Don't abort here, maybe it was a queued ++ * response to a previous command. ++ */ ++ ipmi_ssif_unlock_cond(ssif_info, flags); + pr_warn(PFX "Invalid response getting flags: %x %x\n", + data[0], data[1]); + } else { +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +index 475c38fe9245..e40a6d8b0b92 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +@@ -1126,6 +1126,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + ++ if (*pos >= adev->mc.mc_vram_size) ++ return -ENXIO; ++ + while (size) { + unsigned long flags; + uint32_t value; +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c +index a3b96d691ac9..58bf94b69186 100644 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c +@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, + return false; + } + ++ /* ++ * ignore out-of-order messages or messages that are part of a ++ * failed transaction ++ */ ++ if (!recv_hdr.somt && !msg->have_somt) ++ return false; ++ + /* get length contained in this portion */ + msg->curchunk_len = recv_hdr.msg_len; + msg->curchunk_hdrlen = hdrlen; +@@ -2163,7 +2170,7 @@ out_unlock: + } + EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); + +-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) ++static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) + { + int len; + u8 replyblock[32]; +@@ -2178,12 +2185,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) + replyblock, len); + if (ret != len) { + DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); +- return; ++ return false; + } + ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); + if (!ret) { + DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); +- return; ++ return false; + } + replylen = msg->curchunk_len + msg->curchunk_hdrlen; + +@@ -2195,21 +2202,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) + ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, + replyblock, len); + if (ret != len) { +- DRM_DEBUG_KMS("failed to read a chunk\n"); ++ DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n", ++ len, ret); ++ return false; + } ++ + ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); +- if (ret == false) ++ if (!ret) { + DRM_DEBUG_KMS("failed to build sideband msg\n"); ++ return false; ++ } ++ + curreply += len; + replylen -= len; + } ++ return true; + } + + static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) + { + int ret 
= 0; + +- drm_dp_get_one_sb_msg(mgr, false); ++ if (!drm_dp_get_one_sb_msg(mgr, false)) { ++ memset(&mgr->down_rep_recv, 0, ++ sizeof(struct drm_dp_sideband_msg_rx)); ++ return 0; ++ } + + if (mgr->down_rep_recv.have_eomt) { + struct drm_dp_sideband_msg_tx *txmsg; +@@ -2265,7 +2283,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) + static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) + { + int ret = 0; +- drm_dp_get_one_sb_msg(mgr, true); ++ ++ if (!drm_dp_get_one_sb_msg(mgr, true)) { ++ memset(&mgr->up_req_recv, 0, ++ sizeof(struct drm_dp_sideband_msg_rx)); ++ return 0; ++ } + + if (mgr->up_req_recv.have_eomt) { + struct drm_dp_sideband_msg_req_body msg; +@@ -2317,7 +2340,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) + DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); + } + +- drm_dp_put_mst_branch_device(mstb); ++ if (mstb) ++ drm_dp_put_mst_branch_device(mstb); ++ + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + } + return ret; +diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c +index d4ac8c837314..8e86cf7da614 100644 +--- a/drivers/gpu/drm/radeon/atombios_encoders.c ++++ b/drivers/gpu/drm/radeon/atombios_encoders.c +@@ -30,6 +30,7 @@ + #include "radeon_audio.h" + #include "atom.h" + #include ++#include + + extern int atom_debug; + +@@ -2183,9 +2184,17 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx) + goto assigned; + } + +- /* on DCE32 and encoder can driver any block so just crtc id */ ++ /* ++ * On DCE32 any encoder can drive any block so usually just use crtc id, ++ * but Apple thinks different at least on iMac10,1, so there use linkb, ++ * otherwise the internal eDP panel will stay dark. ++ */ + if (ASIC_IS_DCE32(rdev)) { +- enc_idx = radeon_crtc->crtc_id; ++ if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1")) ++ enc_idx = (dig->linkb) ? 
1 : 0; ++ else ++ enc_idx = radeon_crtc->crtc_id; ++ + goto assigned; + } + +diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c +index 3c32f095a873..2ccf81168d1e 100644 +--- a/drivers/gpu/drm/radeon/ci_dpm.c ++++ b/drivers/gpu/drm/radeon/ci_dpm.c +@@ -782,6 +782,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev) + if (r600_dpm_get_vrefresh(rdev) > 120) + return true; + ++ /* disable mclk switching if the refresh is >120Hz, even if the ++ * blanking period would allow it ++ */ ++ if (r600_dpm_get_vrefresh(rdev) > 120) ++ return true; ++ + if (vblank_time < switch_limit) + return true; + else +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index 3f5741a3e728..43d5166db4c6 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -857,6 +857,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, + } else + ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, + qp_attr_mask); ++ qp_attr->port_num = id_priv->id.port_num; ++ *qp_attr_mask |= IB_QP_PORT; + } else + ret = -ENOSYS; + +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c +index 89abfdb539ac..c84c685056b9 100644 +--- a/drivers/input/serio/i8042.c ++++ b/drivers/input/serio/i8042.c +@@ -434,8 +434,10 @@ static int i8042_start(struct serio *serio) + { + struct i8042_port *port = serio->port_data; + ++ spin_lock_irq(&i8042_lock); + port->exists = true; +- mb(); ++ spin_unlock_irq(&i8042_lock); ++ + return 0; + } + +@@ -448,16 +450,20 @@ static void i8042_stop(struct serio *serio) + { + struct i8042_port *port = serio->port_data; + ++ spin_lock_irq(&i8042_lock); + port->exists = false; ++ port->serio = NULL; ++ spin_unlock_irq(&i8042_lock); + + /* ++ * We need to make sure that interrupt handler finishes using ++ * our serio port before we return from this function. + * We synchronize with both AUX and KBD IRQs because there is + * a (very unlikely) chance that AUX IRQ is raised for KBD port + * and vice versa. + */ + synchronize_irq(I8042_AUX_IRQ); + synchronize_irq(I8042_KBD_IRQ); +- port->serio = NULL; + } + + /* +@@ -574,7 +580,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id) + + spin_unlock_irqrestore(&i8042_lock, flags); + +- if (likely(port->exists && !filtered)) ++ if (likely(serio && !filtered)) + serio_interrupt(serio, data, dfl); + + out: +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index d81be5e471d0..f24a9e14021d 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -1088,7 +1088,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) + */ + DEFINE_WAIT(w); + for (;;) { +- flush_signals(current); ++ sigset_t full, old; + prepare_to_wait(&conf->wait_barrier, + &w, TASK_INTERRUPTIBLE); + if (bio_end_sector(bio) <= mddev->suspend_lo || +@@ -1097,7 +1097,10 @@ static void make_request(struct mddev *mddev, struct bio * bio) + !md_cluster_ops->area_resyncing(mddev, WRITE, + bio->bi_iter.bi_sector, bio_end_sector(bio)))) + break; ++ sigfillset(&full); ++ sigprocmask(SIG_BLOCK, &full, &old); + schedule(); ++ sigprocmask(SIG_SETMASK, &old, NULL); + } + finish_wait(&conf->wait_barrier, &w); + } +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 4384b46cee1a..fc182c4f2619 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -5279,12 +5279,15 @@ static void make_request(struct mddev *mddev, struct bio * bi) + * userspace, we want an interruptible + * wait. 
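The raid1 hunk above, and the raid5 one just below, replace flush_signals(), which silently discards the task's pending signals, with blocking every signal around schedule() and restoring the old mask afterwards, so pending signals survive the wait intact. The userspace analogue of that save/block/restore bracket:

#include <signal.h>

static void wait_without_signal_wakeups(void (*wait_fn)(void))
{
    sigset_t full, old;

    sigfillset(&full);
    sigprocmask(SIG_BLOCK, &full, &old);   /* nothing can interrupt the wait */
    wait_fn();
    sigprocmask(SIG_SETMASK, &old, NULL);  /* pending signals delivered now */
}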
+ */ +- flush_signals(current); + prepare_to_wait(&conf->wait_for_overlap, + &w, TASK_INTERRUPTIBLE); + if (logical_sector >= mddev->suspend_lo && + logical_sector < mddev->suspend_hi) { ++ sigset_t full, old; ++ sigfillset(&full); ++ sigprocmask(SIG_BLOCK, &full, &old); + schedule(); ++ sigprocmask(SIG_SETMASK, &old, NULL); + do_prepare = true; + } + goto retry; +@@ -7528,12 +7531,10 @@ static void end_reshape(struct r5conf *conf) + { + + if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { +- struct md_rdev *rdev; + + spin_lock_irq(&conf->device_lock); + conf->previous_raid_disks = conf->raid_disks; +- rdev_for_each(rdev, conf->mddev) +- rdev->data_offset = rdev->new_data_offset; ++ md_finish_reshape(conf->mddev); + smp_wmb(); + conf->reshape_progress = MaxSector; + conf->mddev->reshape_position = MaxSector; +diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c +index 8f2556ec3971..61611d1682d1 100644 +--- a/drivers/media/pci/cx88/cx88-cards.c ++++ b/drivers/media/pci/cx88/cx88-cards.c +@@ -3691,7 +3691,14 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr) + core->nr = nr; + sprintf(core->name, "cx88[%d]", core->nr); + +- core->tvnorm = V4L2_STD_NTSC_M; ++ /* ++ * Note: Setting initial standard here would cause first call to ++ * cx88_set_tvnorm() to return without programming any registers. Leave ++ * it blank for at this point and it will get set later in ++ * cx8800_initdev() ++ */ ++ core->tvnorm = 0; ++ + core->width = 320; + core->height = 240; + core->field = V4L2_FIELD_INTERLACED; +diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c +index aef9acf351f6..abbf5b05b6f5 100644 +--- a/drivers/media/pci/cx88/cx88-video.c ++++ b/drivers/media/pci/cx88/cx88-video.c +@@ -1429,7 +1429,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, + + /* initial device configuration */ + mutex_lock(&core->lock); +- cx88_set_tvnorm(core, core->tvnorm); ++ cx88_set_tvnorm(core, V4L2_STD_NTSC_M); + v4l2_ctrl_handler_setup(&core->video_hdl); + v4l2_ctrl_handler_setup(&core->audio_hdl); + cx88_video_mux(core, 0); +diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c +index 4a608cbe0fdb..9c6fc09b88e0 100644 +--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c ++++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c +@@ -1098,10 +1098,10 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, + struct s5p_jpeg_ctx *ctx) + { + int c, components = 0, notfound, n_dht = 0, n_dqt = 0; +- unsigned int height, width, word, subsampling = 0, sos = 0, sof = 0, +- sof_len = 0; +- unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER], +- dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER]; ++ unsigned int height = 0, width = 0, word, subsampling = 0; ++ unsigned int sos = 0, sof = 0, sof_len = 0; ++ unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER]; ++ unsigned int dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER]; + long length; + struct s5p_jpeg_buffer jpeg_buffer; + +diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c +index 65f80b8b9f7a..eb9e7feb9b13 100644 +--- a/drivers/media/rc/imon.c ++++ b/drivers/media/rc/imon.c +@@ -1629,7 +1629,7 @@ static void imon_incoming_packet(struct imon_context *ictx, + if (kc == KEY_KEYBOARD && !ictx->release_code) { + ictx->last_keycode = kc; + if (!nomouse) { +- ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1; ++ ictx->pad_mouse = !ictx->pad_mouse; + dev_dbg(dev, "toggling to %s mode\n", + 
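The two cx88 hunks cooperate: cx88_set_tvnorm() returns early when asked for the standard already recorded in core->tvnorm, so seeding that field with the real default at create time meant the first explicit call programmed no registers. Leaving it 0 guarantees the first set actually reaches the hardware. The guard in isolation, with hypothetical names:

struct cx88_core_sketch { unsigned long tvnorm; };

static int set_tvnorm_sketch(struct cx88_core_sketch *core, unsigned long norm)
{
    if (core->tvnorm == norm)
        return 0;        /* already set: why the initial value must be 0 */
    core->tvnorm = norm;
    /* ... program the video decoder registers here ... */
    return 1;
}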
ictx->pad_mouse ? "mouse" : "keyboard"); + spin_unlock_irqrestore(&ictx->kc_lock, flags); +diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c +index 65fed7146e9b..cc91f7b3d90c 100644 +--- a/drivers/misc/enclosure.c ++++ b/drivers/misc/enclosure.c +@@ -375,6 +375,7 @@ int enclosure_add_device(struct enclosure_device *edev, int component, + struct device *dev) + { + struct enclosure_component *cdev; ++ int err; + + if (!edev || component >= edev->components) + return -EINVAL; +@@ -384,12 +385,17 @@ int enclosure_add_device(struct enclosure_device *edev, int component, + if (cdev->dev == dev) + return -EEXIST; + +- if (cdev->dev) ++ if (cdev->dev) { + enclosure_remove_links(cdev); +- +- put_device(cdev->dev); ++ put_device(cdev->dev); ++ } + cdev->dev = get_device(dev); +- return enclosure_add_links(cdev); ++ err = enclosure_add_links(cdev); ++ if (err) { ++ put_device(cdev->dev); ++ cdev->dev = NULL; ++ } ++ return err; + } + EXPORT_SYMBOL_GPL(enclosure_add_device); + +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c +index 201425e7f9cb..fbc8c9a9014b 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c ++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c +@@ -1815,8 +1815,6 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah) + static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum) + { + REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR); +- REG_SET_BIT(ah, 0x9864, 0x7f000); +- REG_SET_BIT(ah, 0x9924, 0x7f00fe); + REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS); + REG_WRITE(ah, AR_CR, AR_CR_RXD); + REG_WRITE(ah, AR_DLCL_IFS(qnum), 0); +diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c +index ac4781f37e78..b4e6304afd40 100644 +--- a/drivers/net/wireless/ath/ath9k/tx99.c ++++ b/drivers/net/wireless/ath/ath9k/tx99.c +@@ -190,22 +190,27 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, + if (strtobool(buf, &start)) + return -EINVAL; + ++ mutex_lock(&sc->mutex); ++ + if (start == sc->tx99_state) { + if (!start) +- return count; ++ goto out; + ath_dbg(common, XMIT, "Resetting TX99\n"); + ath9k_tx99_deinit(sc); + } + + if (!start) { + ath9k_tx99_deinit(sc); +- return count; ++ goto out; + } + + r = ath9k_tx99_init(sc); +- if (r) ++ if (r) { ++ mutex_unlock(&sc->mutex); + return r; +- ++ } ++out: ++ mutex_unlock(&sc->mutex); + return count; + } + +diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c +index 44f059f7f34e..9ebe00ea8f81 100644 +--- a/drivers/net/wireless/ti/wlcore/spi.c ++++ b/drivers/net/wireless/ti/wlcore/spi.c +@@ -71,7 +71,7 @@ + * only support SPI for 12xx - this code should be reworked when 18xx + * support is introduced + */ +-#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) ++#define SPI_AGGR_BUFFER_SIZE (4 * SZ_4K) + + /* Maximum number of SPI write chunks */ + #define WSPI_MAX_NUM_OF_CHUNKS \ +diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c +index f8dcdf4b24f6..af62c4c854f3 100644 +--- a/drivers/nfc/nfcmrvl/fw_dnld.c ++++ b/drivers/nfc/nfcmrvl/fw_dnld.c +@@ -459,7 +459,7 @@ int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv) + + INIT_WORK(&priv->fw_dnld.rx_work, fw_dnld_rx_work); + snprintf(name, sizeof(name), "%s_nfcmrvl_fw_dnld_rx_wq", +- dev_name(priv->dev)); ++ dev_name(&priv->ndev->nfc_dev->dev)); + priv->fw_dnld.rx_wq = create_singlethread_workqueue(name); + if (!priv->fw_dnld.rx_wq) + return -ENOMEM; +@@ -496,6 +496,7 @@ int nfcmrvl_fw_dnld_start(struct nci_dev 
*ndev, const char *firmware_name) + { + struct nfcmrvl_private *priv = nci_get_drvdata(ndev); + struct nfcmrvl_fw_dnld *fw_dnld = &priv->fw_dnld; ++ int res; + + if (!priv->support_fw_dnld) + return -ENOTSUPP; +@@ -511,7 +512,9 @@ int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name) + */ + + /* Retrieve FW binary */ +- if (request_firmware(&fw_dnld->fw, firmware_name, priv->dev) < 0) { ++ res = request_firmware(&fw_dnld->fw, firmware_name, ++ &ndev->nfc_dev->dev); ++ if (res < 0) { + nfc_err(priv->dev, "failed to retrieve FW %s", firmware_name); + return -ENOENT; + } +diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c +index 51c8240a1672..a446590a71ca 100644 +--- a/drivers/nfc/nfcmrvl/main.c ++++ b/drivers/nfc/nfcmrvl/main.c +@@ -124,12 +124,13 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy, + memcpy(&priv->config, pdata, sizeof(*pdata)); + + if (priv->config.reset_n_io) { +- rc = devm_gpio_request_one(dev, +- priv->config.reset_n_io, +- GPIOF_OUT_INIT_LOW, +- "nfcmrvl_reset_n"); +- if (rc < 0) ++ rc = gpio_request_one(priv->config.reset_n_io, ++ GPIOF_OUT_INIT_LOW, ++ "nfcmrvl_reset_n"); ++ if (rc < 0) { ++ priv->config.reset_n_io = 0; + nfc_err(dev, "failed to request reset_n io\n"); ++ } + } + + if (phy == NFCMRVL_PHY_SPI) { +@@ -154,7 +155,13 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy, + if (!priv->ndev) { + nfc_err(dev, "nci_allocate_device failed\n"); + rc = -ENOMEM; +- goto error; ++ goto error_free_gpio; ++ } ++ ++ rc = nfcmrvl_fw_dnld_init(priv); ++ if (rc) { ++ nfc_err(dev, "failed to initialize FW download %d\n", rc); ++ goto error_free_dev; + } + + nci_set_drvdata(priv->ndev, priv); +@@ -162,24 +169,22 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy, + rc = nci_register_device(priv->ndev); + if (rc) { + nfc_err(dev, "nci_register_device failed %d\n", rc); +- goto error_free_dev; ++ goto error_fw_dnld_deinit; + } + + /* Ensure that controller is powered off */ + nfcmrvl_chip_halt(priv); + +- rc = nfcmrvl_fw_dnld_init(priv); +- if (rc) { +- nfc_err(dev, "failed to initialize FW download %d\n", rc); +- goto error_free_dev; +- } +- + nfc_info(dev, "registered with nci successfully\n"); + return priv; + ++error_fw_dnld_deinit: ++ nfcmrvl_fw_dnld_deinit(priv); + error_free_dev: + nci_free_device(priv->ndev); +-error: ++error_free_gpio: ++ if (priv->config.reset_n_io) ++ gpio_free(priv->config.reset_n_io); + kfree(priv); + return ERR_PTR(rc); + } +@@ -195,7 +200,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) + nfcmrvl_fw_dnld_deinit(priv); + + if (priv->config.reset_n_io) +- devm_gpio_free(priv->dev, priv->config.reset_n_io); ++ gpio_free(priv->config.reset_n_io); + + nci_unregister_device(ndev); + nci_free_device(ndev); +diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c +index 83a99e38e7bd..6c0c301611c4 100644 +--- a/drivers/nfc/nfcmrvl/uart.c ++++ b/drivers/nfc/nfcmrvl/uart.c +@@ -109,6 +109,7 @@ static int nfcmrvl_nci_uart_open(struct nci_uart *nu) + struct nfcmrvl_private *priv; + struct nfcmrvl_platform_data *pdata = NULL; + struct nfcmrvl_platform_data config; ++ struct device *dev = nu->tty->dev; + + /* + * Platform data cannot be used here since usually it is already used +@@ -116,9 +117,8 @@ static int nfcmrvl_nci_uart_open(struct nci_uart *nu) + * and check if DT entries were added. 
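The nfcmrvl_nci_register_dev() rework above is the canonical goto-unwind refactor: firmware-download init moves ahead of nci_register_device() so the error labels can tear down in exact reverse order of construction, and the gpio now has a matching free on every failure path. A skeleton of the idiom with hypothetical steps (the real driver is more forgiving, e.g. it tolerates a failed gpio request):

/* hypothetical stubs standing in for the driver's real setup steps */
static int  acquire_gpio(void)       { return 0; }
static int  alloc_device(void)       { return 0; }
static int  init_fw_download(void)   { return 0; }
static int  register_device(void)    { return 0; }
static void deinit_fw_download(void) { }
static void free_device(void)        { }
static void release_gpio(void)       { }

static int register_dev_sketch(void)
{
    if (acquire_gpio() < 0)
        goto err;
    if (alloc_device() < 0)
        goto err_free_gpio;
    if (init_fw_download() < 0)
        goto err_free_dev;
    if (register_device() < 0)
        goto err_fw_deinit;
    return 0;

err_fw_deinit:          /* unwind strictly mirrors construction order */
    deinit_fw_download();
err_free_dev:
    free_device();
err_free_gpio:
    release_gpio();
err:
    return -1;
}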
+ */ + +- if (nu->tty->dev->parent && nu->tty->dev->parent->of_node) +- if (nfcmrvl_uart_parse_dt(nu->tty->dev->parent->of_node, +- &config) == 0) ++ if (dev && dev->parent && dev->parent->of_node) ++ if (nfcmrvl_uart_parse_dt(dev->parent->of_node, &config) == 0) + pdata = &config; + + if (!pdata) { +@@ -131,7 +131,7 @@ static int nfcmrvl_nci_uart_open(struct nci_uart *nu) + } + + priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_UART, nu, &uart_ops, +- nu->tty->dev, pdata); ++ dev, pdata); + if (IS_ERR(priv)) + return PTR_ERR(priv); + +diff --git a/drivers/of/device.c b/drivers/of/device.c +index e5f47cec75f3..97a280d50d6d 100644 +--- a/drivers/of/device.c ++++ b/drivers/of/device.c +@@ -225,6 +225,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len) + + return tsize; + } ++EXPORT_SYMBOL_GPL(of_device_get_modalias); + + /** + * of_device_uevent - Display OF related uevent information +@@ -287,3 +288,4 @@ int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) + + return 0; + } ++EXPORT_SYMBOL_GPL(of_device_uevent_modalias); +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c +index d7ffd66814bb..fca925543fae 100644 +--- a/drivers/pci/pci-driver.c ++++ b/drivers/pci/pci-driver.c +@@ -945,6 +945,7 @@ static int pci_pm_thaw_noirq(struct device *dev) + return pci_legacy_resume_early(dev); + + pci_update_current_state(pci_dev, PCI_D0); ++ pci_restore_state(pci_dev); + + if (drv && drv->pm && drv->pm->thaw_noirq) + error = drv->pm->thaw_noirq(dev); +diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c +index 6b3da1bb0d63..2db681722d2c 100644 +--- a/drivers/spmi/spmi.c ++++ b/drivers/spmi/spmi.c +@@ -364,11 +364,23 @@ static int spmi_drv_remove(struct device *dev) + return 0; + } + ++static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env) ++{ ++ int ret; ++ ++ ret = of_device_uevent_modalias(dev, env); ++ if (ret != -ENODEV) ++ return ret; ++ ++ return 0; ++} ++ + static struct bus_type spmi_bus_type = { + .name = "spmi", + .match = spmi_device_match, + .probe = spmi_drv_probe, + .remove = spmi_drv_remove, ++ .uevent = spmi_drv_uevent, + }; + + /** +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +index 82a7c27c517f..02c3feef4e36 100644 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +@@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { + {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ + {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ ++ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ + {} /* Terminating entry */ + }; +diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c +index dc1bd1f1bdfe..634ad3662ed6 100644 +--- a/drivers/target/iscsi/iscsi_target_configfs.c ++++ b/drivers/target/iscsi/iscsi_target_configfs.c +@@ -868,6 +868,7 @@ DEF_TPG_ATTRIB(default_erl); + DEF_TPG_ATTRIB(t10_pi); + DEF_TPG_ATTRIB(fabric_prot_type); + DEF_TPG_ATTRIB(tpg_enabled_sendtargets); ++DEF_TPG_ATTRIB(login_keys_workaround); + + static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { + &iscsi_tpg_attrib_attr_authentication, +@@ -883,6 +884,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { + &iscsi_tpg_attrib_attr_t10_pi, + &iscsi_tpg_attrib_attr_fabric_prot_type, + 
&iscsi_tpg_attrib_attr_tpg_enabled_sendtargets, ++ &iscsi_tpg_attrib_attr_login_keys_workaround, + NULL, + }; + +diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c +index 9fc9117d0f22..549a2bbbf4df 100644 +--- a/drivers/target/iscsi/iscsi_target_nego.c ++++ b/drivers/target/iscsi/iscsi_target_nego.c +@@ -818,7 +818,8 @@ static int iscsi_target_handle_csg_zero( + SENDER_TARGET, + login->rsp_buf, + &login->rsp_length, +- conn->param_list); ++ conn->param_list, ++ conn->tpg->tpg_attrib.login_keys_workaround); + if (ret < 0) + return -1; + +@@ -888,7 +889,8 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log + SENDER_TARGET, + login->rsp_buf, + &login->rsp_length, +- conn->param_list); ++ conn->param_list, ++ conn->tpg->tpg_attrib.login_keys_workaround); + if (ret < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); +diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c +index 6d1b0acbc5b3..76bde76edad1 100644 +--- a/drivers/target/iscsi/iscsi_target_parameters.c ++++ b/drivers/target/iscsi/iscsi_target_parameters.c +@@ -764,7 +764,8 @@ static int iscsi_check_for_auth_key(char *key) + return 0; + } + +-static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) ++static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param, ++ bool keys_workaround) + { + if (IS_TYPE_BOOL_AND(param)) { + if (!strcmp(param->value, NO)) +@@ -772,19 +773,31 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) + } else if (IS_TYPE_BOOL_OR(param)) { + if (!strcmp(param->value, YES)) + SET_PSTATE_REPLY_OPTIONAL(param); +- /* +- * Required for gPXE iSCSI boot client +- */ +- if (!strcmp(param->name, IMMEDIATEDATA)) +- SET_PSTATE_REPLY_OPTIONAL(param); ++ ++ if (keys_workaround) { ++ /* ++ * Required for gPXE iSCSI boot client ++ */ ++ if (!strcmp(param->name, IMMEDIATEDATA)) ++ SET_PSTATE_REPLY_OPTIONAL(param); ++ } + } else if (IS_TYPE_NUMBER(param)) { + if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) + SET_PSTATE_REPLY_OPTIONAL(param); +- /* +- * Required for gPXE iSCSI boot client +- */ +- if (!strcmp(param->name, MAXCONNECTIONS)) +- SET_PSTATE_REPLY_OPTIONAL(param); ++ ++ if (keys_workaround) { ++ /* ++ * Required for Mellanox Flexboot PXE boot ROM ++ */ ++ if (!strcmp(param->name, FIRSTBURSTLENGTH)) ++ SET_PSTATE_REPLY_OPTIONAL(param); ++ ++ /* ++ * Required for gPXE iSCSI boot client ++ */ ++ if (!strcmp(param->name, MAXCONNECTIONS)) ++ SET_PSTATE_REPLY_OPTIONAL(param); ++ } + } else if (IS_PHASE_DECLARATIVE(param)) + SET_PSTATE_REPLY_OPTIONAL(param); + } +@@ -1421,7 +1434,8 @@ int iscsi_encode_text_output( + u8 sender, + char *textbuf, + u32 *length, +- struct iscsi_param_list *param_list) ++ struct iscsi_param_list *param_list, ++ bool keys_workaround) + { + char *output_buf = NULL; + struct iscsi_extra_response *er; +@@ -1457,7 +1471,8 @@ int iscsi_encode_text_output( + *length += 1; + output_buf = textbuf + *length; + SET_PSTATE_PROPOSER(param); +- iscsi_check_proposer_for_optional_reply(param); ++ iscsi_check_proposer_for_optional_reply(param, ++ keys_workaround); + pr_debug("Sending key: %s=%s\n", + param->name, param->value); + } +diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h +index a0751e3f0813..17a58c2913f2 100644 +--- a/drivers/target/iscsi/iscsi_target_parameters.h ++++ 
b/drivers/target/iscsi/iscsi_target_parameters.h +@@ -40,7 +40,7 @@ extern int iscsi_extract_key_value(char *, char **, char **); + extern int iscsi_update_param_value(struct iscsi_param *, char *); + extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *); + extern int iscsi_encode_text_output(u8, u8, char *, u32 *, +- struct iscsi_param_list *); ++ struct iscsi_param_list *, bool); + extern int iscsi_check_negotiated_keys(struct iscsi_param_list *); + extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *, + struct iscsi_param_list *); +diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c +index 205a509b0dfb..63e1dcc5914d 100644 +--- a/drivers/target/iscsi/iscsi_target_tpg.c ++++ b/drivers/target/iscsi/iscsi_target_tpg.c +@@ -227,6 +227,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) + a->t10_pi = TA_DEFAULT_T10_PI; + a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE; + a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS; ++ a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND; + } + + int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) +@@ -899,3 +900,21 @@ int iscsit_ta_tpg_enabled_sendtargets( + + return 0; + } ++ ++int iscsit_ta_login_keys_workaround( ++ struct iscsi_portal_group *tpg, ++ u32 flag) ++{ ++ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; ++ ++ if ((flag != 0) && (flag != 1)) { ++ pr_err("Illegal value %d\n", flag); ++ return -EINVAL; ++ } ++ ++ a->login_keys_workaround = flag; ++ pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ", ++ tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF"); ++ ++ return 0; ++} +diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h +index 2da211920c18..901a712180f0 100644 +--- a/drivers/target/iscsi/iscsi_target_tpg.h ++++ b/drivers/target/iscsi/iscsi_target_tpg.h +@@ -39,5 +39,6 @@ extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); + extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); + extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); + extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32); ++extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32); + + #endif /* ISCSI_TARGET_TPG_H */ +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 37c77db6e737..f71bedea973a 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -728,6 +728,15 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) + if (cmd->transport_state & CMD_T_ABORTED || + cmd->transport_state & CMD_T_STOP) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); ++ /* ++ * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(), ++ * release se_device->caw_sem obtained by sbc_compare_and_write() ++ * since target_complete_ok_work() or target_complete_failure_work() ++ * won't be called to invoke the normal CAW completion callbacks. 
++ */ ++ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { ++ up(&dev->caw_sem); ++ } + complete_all(&cmd->t_transport_stop_comp); + return; + } else if (!success) { +diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c +index 5b4b47ed948b..87d87ac1c8a0 100644 +--- a/drivers/thermal/cpu_cooling.c ++++ b/drivers/thermal/cpu_cooling.c +@@ -191,8 +191,10 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) + mutex_lock(&cooling_list_lock); + list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { + if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) { ++ unsigned long level = get_level(cpufreq_dev, freq); ++ + mutex_unlock(&cooling_list_lock); +- return get_level(cpufreq_dev, freq); ++ return level; + } + } + mutex_unlock(&cooling_list_lock); +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 0b7194086c5a..df96f5f88c15 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1759,6 +1759,9 @@ static const struct usb_device_id acm_ids[] = { + { USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */ + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ + }, ++ { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */ ++ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ ++ }, + + { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ + .driver_info = CLEAR_HALT_CONDITIONS, +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index e9675e8f0e54..a8a2d5005e6e 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -768,6 +768,9 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, + clear_bit(wIndex, &bus_state->resuming_ports); + + set_bit(wIndex, &bus_state->rexit_ports); ++ ++ xhci_test_and_clear_bit(xhci, port_array, wIndex, ++ PORT_PLC); + xhci_set_link_state(xhci, port_array, wIndex, + XDEV_U0); + +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index de7dce6eb474..ece0787d62bf 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -856,13 +856,16 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, + (ep->ep_state & EP_GETTING_NO_STREAMS)) { + int stream_id; + +- for (stream_id = 0; stream_id < ep->stream_info->num_streams; ++ for (stream_id = 1; stream_id < ep->stream_info->num_streams; + stream_id++) { ++ ring = ep->stream_info->stream_rings[stream_id]; ++ if (!ring) ++ continue; ++ + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Killing URBs for slot ID %u, ep index %u, stream %u", +- slot_id, ep_index, stream_id + 1); +- xhci_kill_ring_urbs(xhci, +- ep->stream_info->stream_rings[stream_id]); ++ slot_id, ep_index, stream_id); ++ xhci_kill_ring_urbs(xhci, ring); + } + } else { + ring = ep->ring; +diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c +index d82fa36c3465..005da0866836 100644 +--- a/drivers/usb/renesas_usbhs/common.c ++++ b/drivers/usb/renesas_usbhs/common.c +@@ -733,8 +733,10 @@ static int usbhsc_resume(struct device *dev) + struct usbhs_priv *priv = dev_get_drvdata(dev); + struct platform_device *pdev = usbhs_priv_to_pdev(priv); + +- if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) ++ if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) { + usbhsc_power_ctrl(priv, 1); ++ usbhs_mod_autonomy_mode(priv); ++ } + + usbhs_platform_call(priv, phy_reset, pdev); + +diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c +index efc4fae123a4..8647d2c2a8c4 100644 +--- 
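The cpu_cooling fix above is a lock-scope bug: the old code dropped cooling_list_lock and only then called get_level() on an entry that the lock no longer protects. The fix computes the value while still holding the lock and returns a plain scalar afterwards. The general shape, with a pthread mutex standing in for the kernel mutex and hypothetical types:

#include <pthread.h>

struct cooling_entry_sketch { int cpu; long level_for_freq; };

static pthread_mutex_t list_lock_sketch = PTHREAD_MUTEX_INITIALIZER;

static long lookup_level_sketch(struct cooling_entry_sketch *e)
{
    long level;

    pthread_mutex_lock(&list_lock_sketch);
    level = e->level_for_freq;   /* read while 'e' cannot go away */
    pthread_mutex_unlock(&list_lock_sketch);

    return level;                /* a value, not a pointer into the list */
}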
a/drivers/usb/renesas_usbhs/mod_gadget.c ++++ b/drivers/usb/renesas_usbhs/mod_gadget.c +@@ -37,6 +37,7 @@ struct usbhsg_gpriv; + struct usbhsg_uep { + struct usb_ep ep; + struct usbhs_pipe *pipe; ++ spinlock_t lock; /* protect the pipe */ + + char ep_name[EP_NAME_SIZE]; + +@@ -638,10 +639,16 @@ usbhsg_ep_enable_end: + static int usbhsg_ep_disable(struct usb_ep *ep) + { + struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); +- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); ++ struct usbhs_pipe *pipe; ++ unsigned long flags; ++ int ret = 0; + +- if (!pipe) +- return -EINVAL; ++ spin_lock_irqsave(&uep->lock, flags); ++ pipe = usbhsg_uep_to_pipe(uep); ++ if (!pipe) { ++ ret = -EINVAL; ++ goto out; ++ } + + usbhsg_pipe_disable(uep); + usbhs_pipe_free(pipe); +@@ -649,6 +656,9 @@ static int usbhsg_ep_disable(struct usb_ep *ep) + uep->pipe->mod_private = NULL; + uep->pipe = NULL; + ++out: ++ spin_unlock_irqrestore(&uep->lock, flags); ++ + return 0; + } + +@@ -698,8 +708,11 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) + { + struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); + struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); +- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); ++ struct usbhs_pipe *pipe; ++ unsigned long flags; + ++ spin_lock_irqsave(&uep->lock, flags); ++ pipe = usbhsg_uep_to_pipe(uep); + if (pipe) + usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq)); + +@@ -708,6 +721,7 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) + * even if the pipe is NULL. + */ + usbhsg_queue_pop(uep, ureq, -ECONNRESET); ++ spin_unlock_irqrestore(&uep->lock, flags); + + return 0; + } +@@ -854,10 +868,10 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status) + { + struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); + struct usbhs_mod *mod = usbhs_mod_get_current(priv); +- struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); ++ struct usbhsg_uep *uep; + struct device *dev = usbhs_priv_to_dev(priv); + unsigned long flags; +- int ret = 0; ++ int ret = 0, i; + + /******************** spin lock ********************/ + usbhs_lock(priv, flags); +@@ -889,7 +903,9 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status) + usbhs_sys_set_test_mode(priv, 0); + usbhs_sys_function_ctrl(priv, 0); + +- usbhsg_ep_disable(&dcp->ep); ++ /* disable all eps */ ++ usbhsg_for_each_uep_with_dcp(uep, gpriv, i) ++ usbhsg_ep_disable(&uep->ep); + + dev_dbg(dev, "stop gadget\n"); + +@@ -1072,6 +1088,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) + ret = -ENOMEM; + goto usbhs_mod_gadget_probe_err_gpriv; + } ++ spin_lock_init(&uep->lock); + + gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); + dev_info(dev, "%stransceiver found\n", +diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c +index 39afd7045c43..7bb5f8da5357 100644 +--- a/drivers/usb/storage/isd200.c ++++ b/drivers/usb/storage/isd200.c +@@ -1520,8 +1520,11 @@ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us) + + /* Make sure driver was initialized */ + +- if (us->extra == NULL) ++ if (us->extra == NULL) { + usb_stor_dbg(us, "ERROR Driver not initialized\n"); ++ srb->result = DID_ERROR << 16; ++ return; ++ } + + scsi_set_resid(srb, 0); + /* scsi_bufflen might change in protocol translation to ata */ +diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c +index 6070b793cbcb..1e01e28f40f3 100644 +--- a/drivers/vfio/vfio.c ++++ b/drivers/vfio/vfio.c +@@ -296,6 +296,34 @@ static void vfio_group_put(struct vfio_group *group) + 
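usbhsg_ep_disable() above used to test uep->pipe with no lock held, racing against a concurrent path freeing the pipe between the check and the use; the fix adds a per-endpoint spinlock and re-reads the pointer under it. The check-under-lock rule in miniature, again with pthreads as a stand-in:

#include <pthread.h>
#include <stdlib.h>

struct pipe_sketch { int id; };

struct uep_sketch {
    pthread_mutex_t lock;
    struct pipe_sketch *pipe;
};

static int ep_disable_sketch(struct uep_sketch *uep)
{
    int ret = 0;

    pthread_mutex_lock(&uep->lock);
    if (!uep->pipe) {            /* valid only while the lock pins it */
        ret = -1;
        goto out;
    }
    free(uep->pipe);
    uep->pipe = NULL;
out:
    pthread_mutex_unlock(&uep->lock);
    return ret;
}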
kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); + } + ++struct vfio_group_put_work { ++ struct work_struct work; ++ struct vfio_group *group; ++}; ++ ++static void vfio_group_put_bg(struct work_struct *work) ++{ ++ struct vfio_group_put_work *do_work; ++ ++ do_work = container_of(work, struct vfio_group_put_work, work); ++ ++ vfio_group_put(do_work->group); ++ kfree(do_work); ++} ++ ++static void vfio_group_schedule_put(struct vfio_group *group) ++{ ++ struct vfio_group_put_work *do_work; ++ ++ do_work = kmalloc(sizeof(*do_work), GFP_KERNEL); ++ if (WARN_ON(!do_work)) ++ return; ++ ++ INIT_WORK(&do_work->work, vfio_group_put_bg); ++ do_work->group = group; ++ schedule_work(&do_work->work); ++} ++ + /* Assume group_lock or group reference is held */ + static void vfio_group_get(struct vfio_group *group) + { +@@ -620,7 +648,14 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, + break; + } + +- vfio_group_put(group); ++ /* ++ * If we're the last reference to the group, the group will be ++ * released, which includes unregistering the iommu group notifier. ++ * We hold a read-lock on that notifier list, unregistering needs ++ * a write-lock... deadlock. Release our reference asynchronously ++ * to avoid that situation. ++ */ ++ vfio_group_schedule_put(group); + return NOTIFY_OK; + } + +@@ -1552,6 +1587,15 @@ void vfio_group_put_external_user(struct vfio_group *group) + } + EXPORT_SYMBOL_GPL(vfio_group_put_external_user); + ++bool vfio_external_group_match_file(struct vfio_group *test_group, ++ struct file *filep) ++{ ++ struct vfio_group *group = filep->private_data; ++ ++ return (filep->f_op == &vfio_group_fops) && (group == test_group); ++} ++EXPORT_SYMBOL_GPL(vfio_external_group_match_file); ++ + int vfio_external_user_iommu_id(struct vfio_group *group) + { + return iommu_group_id(group->iommu_group); +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c +index 9314b4ea2375..be7d187d53fd 100644 +--- a/fs/ceph/dir.c ++++ b/fs/ceph/dir.c +@@ -247,6 +247,11 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx, + if (ret < 0) + err = ret; + dput(last); ++ /* last_name no longer match cache index */ ++ if (fi->readdir_cache_idx >= 0) { ++ fi->readdir_cache_idx = -1; ++ fi->dir_release_count = 0; ++ } + } + return err; + } +diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c +index e9a8d676c6bc..83dcf7bfd7b8 100644 +--- a/fs/f2fs/acl.c ++++ b/fs/f2fs/acl.c +@@ -213,7 +213,7 @@ static int __f2fs_set_acl(struct inode *inode, int type, + switch (type) { + case ACL_TYPE_ACCESS: + name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; +- if (acl) { ++ if (acl && !ipage) { + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) + return error; +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index 5b21b1ca2341..348e0a05bd18 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -1135,11 +1135,13 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) + /* Force a full look up iff the parent directory has changed */ + if (!nfs_is_exclusive_create(dir, flags) && + nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) { +- +- if (nfs_lookup_verify_inode(inode, flags)) { ++ error = nfs_lookup_verify_inode(inode, flags); ++ if (error) { + if (flags & LOOKUP_RCU) + return -ECHILD; +- goto out_zap_parent; ++ if (error == -ESTALE) ++ goto out_zap_parent; ++ goto out_error; + } + goto out_valid; + } +@@ -1163,8 +1165,10 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) + trace_nfs_lookup_revalidate_enter(dir, dentry, 
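The nfs/dir.c hunks above stop treating every lookup failure as proof the dentry is bad: -ESTALE and -ENOENT mean the object is really gone and the dentry should be dropped, while transient errors such as -EIO from an unreachable server must be propagated so the VFS can retry later. Reduced decision logic, with hypothetical constants for the two dcache outcomes:

#include <errno.h>

enum { REVALIDATE_OK_SK = 1, REVALIDATE_DROP_SK = 0 };

static int triage_lookup_error_sketch(int err)
{
    if (err == 0)
        return REVALIDATE_OK_SK;
    if (err == -ESTALE || err == -ENOENT)
        return REVALIDATE_DROP_SK;  /* object gone: invalidate the dentry */
    return err;                     /* transient: report it, keep the dentry */
}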
flags); + error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label); + trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error); +- if (error) ++ if (error == -ESTALE || error == -ENOENT) + goto out_bad; ++ if (error) ++ goto out_error; + if (nfs_compare_fh(NFS_FH(inode), fhandle)) + goto out_bad; + if ((error = nfs_refresh_inode(inode, fattr)) != 0) +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index f714b98cfd74..668ac19af58f 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -1241,9 +1241,9 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat + return 0; + /* Has the inode gone and changed behind our back? */ + if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) +- return -EIO; ++ return -ESTALE; + if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) +- return -EIO; ++ return -ESTALE; + + if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && + inode->i_version != fattr->change_attr) +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index 7be3166ba553..0e659d9c69a1 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -1235,8 +1235,8 @@ int udf_setsize(struct inode *inode, loff_t newsize) + return err; + } + set_size: +- truncate_setsize(inode, newsize); + up_write(&iinfo->i_data_sem); ++ truncate_setsize(inode, newsize); + } else { + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { + down_write(&iinfo->i_data_sem); +@@ -1253,9 +1253,9 @@ set_size: + udf_get_block); + if (err) + return err; ++ truncate_setsize(inode, newsize); + down_write(&iinfo->i_data_sem); + udf_clear_extent_cache(inode); +- truncate_setsize(inode, newsize); + udf_truncate_extents(inode); + up_write(&iinfo->i_data_sem); + } +diff --git a/include/linux/vfio.h b/include/linux/vfio.h +index ddb440975382..34851bf2e2c8 100644 +--- a/include/linux/vfio.h ++++ b/include/linux/vfio.h +@@ -85,6 +85,8 @@ extern void vfio_unregister_iommu_driver( + */ + extern struct vfio_group *vfio_group_get_external_user(struct file *filep); + extern void vfio_group_put_external_user(struct vfio_group *group); ++extern bool vfio_external_group_match_file(struct vfio_group *group, ++ struct file *filep); + extern int vfio_external_user_iommu_id(struct vfio_group *group); + extern long vfio_external_check_extension(struct vfio_group *group, + unsigned long arg); +diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h +index 373d3342002b..e0efe3fcf739 100644 +--- a/include/target/iscsi/iscsi_target_core.h ++++ b/include/target/iscsi/iscsi_target_core.h +@@ -64,6 +64,14 @@ + #define TA_DEFAULT_FABRIC_PROT_TYPE 0 + /* TPG status needs to be enabled to return sendtargets discovery endpoint info */ + #define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1 ++/* ++ * Used to control the sending of keys with optional to respond state bit, ++ * as a workaround for non RFC compliant initiators,that do not propose, ++ * nor respond to specific keys required for login to complete. ++ * ++ * See iscsi_check_proposer_for_optional_reply() for more details. 
++ */ ++#define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1 + + #define ISCSI_IOV_DATA_BUFFER 5 + +@@ -765,6 +773,7 @@ struct iscsi_tpg_attrib { + u8 t10_pi; + u32 fabric_prot_type; + u32 tpg_enabled_sendtargets; ++ u32 login_keys_workaround; + struct iscsi_portal_group *tpg; + }; + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 22350b15b4e7..784ab8fe8714 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -6410,21 +6410,6 @@ static void perf_log_itrace_start(struct perf_event *event) + perf_output_end(&handle); + } + +-static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) +-{ +- /* +- * Due to interrupt latency (AKA "skid"), we may enter the +- * kernel before taking an overflow, even if the PMU is only +- * counting user events. +- * To avoid leaking information to userspace, we must always +- * reject kernel samples when exclude_kernel is set. +- */ +- if (event->attr.exclude_kernel && !user_mode(regs)) +- return false; +- +- return true; +-} +- + /* + * Generic event overflow handling, sampling. + */ +@@ -6472,12 +6457,6 @@ static int __perf_event_overflow(struct perf_event *event, + } + + /* +- * For security, drop the skid kernel samples if necessary. +- */ +- if (!sample_is_allowed(event, regs)) +- return ret; +- +- /* + * XXX event_limit might not quite work as expected on inherited + * events + */ +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c +index 2c3a23d77704..6fcc367ad531 100644 +--- a/kernel/time/alarmtimer.c ++++ b/kernel/time/alarmtimer.c +@@ -616,7 +616,8 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, + * Rate limit to the tick as a hot fix to prevent DOS. Will be + * mopped up later. + */ +- if (ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC) ++ if (timr->it.alarm.interval.tv64 && ++ ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC) + timr->it.alarm.interval = ktime_set(0, TICK_NSEC); + + exp = timespec_to_ktime(new_setting->it_value); +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 34b2a0d5cf1a..eba904bae48c 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -3535,7 +3535,7 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod) + int exclude_mod = 0; + int found = 0; + int ret; +- int clear_filter; ++ int clear_filter = 0; + + if (func) { + func_g.type = filter_parse_regex(func, len, &func_g.search, +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index c83d59913d78..d59ebd9d21df 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -6737,6 +6737,7 @@ static int instance_rmdir(const char *name) + } + kfree(tr->topts); + ++ free_cpumask_var(tr->tracing_cpumask); + kfree(tr->name); + kfree(tr); + +diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c +index 4b175df35184..906f88550cd8 100644 +--- a/net/bluetooth/smp.c ++++ b/net/bluetooth/smp.c +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -524,7 +525,7 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], + if (err) + return false; + +- return !memcmp(bdaddr->b, hash, 3); ++ return !crypto_memneq(bdaddr->b, hash, 3); + } + + int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa) +@@ -577,7 +578,7 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) + /* This is unlikely, but we need to check that + * we didn't accidentially generate a debug key. 
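/*
 * Illustrative aside on the alarmtimer hunk above: the tick-rate limit
 * is meant for periodic timers only, so the fix first checks that the
 * interval is non-zero. Clamping a one-shot timer (interval == 0)
 * would silently turn it into a periodic timer firing every tick.
 * A minimal sketch of the check, with an invented helper name and the
 * tick length passed in explicitly:
 */
static unsigned long long example_clamp_alarm_interval(unsigned long long interval_ns,
						       unsigned long long tick_ns)
{
	/* leave one-shot timers (interval == 0) untouched */
	if (interval_ns && interval_ns < tick_ns)
		return tick_ns;

	return interval_ns;
}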
+ */ +- if (memcmp(smp->local_sk, debug_sk, 32)) ++ if (crypto_memneq(smp->local_sk, debug_sk, 32)) + break; + } + smp->debug_key = false; +@@ -991,7 +992,7 @@ static u8 smp_random(struct smp_chan *smp) + if (ret) + return SMP_UNSPECIFIED; + +- if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) { ++ if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) { + BT_ERR("Pairing failed (confirmation values mismatch)"); + return SMP_CONFIRM_FAILED; + } +@@ -1491,7 +1492,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op) + smp->rrnd, r, cfm)) + return SMP_UNSPECIFIED; + +- if (memcmp(smp->pcnf, cfm, 16)) ++ if (crypto_memneq(smp->pcnf, cfm, 16)) + return SMP_CONFIRM_FAILED; + + smp->passkey_round++; +@@ -1875,7 +1876,7 @@ static u8 sc_send_public_key(struct smp_chan *smp) + /* This is unlikely, but we need to check that + * we didn't accidentially generate a debug key. + */ +- if (memcmp(smp->local_sk, debug_sk, 32)) ++ if (crypto_memneq(smp->local_sk, debug_sk, 32)) + break; + } + } +@@ -2140,7 +2141,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) + if (err) + return SMP_UNSPECIFIED; + +- if (memcmp(smp->pcnf, cfm, 16)) ++ if (crypto_memneq(smp->pcnf, cfm, 16)) + return SMP_CONFIRM_FAILED; + } else { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), +@@ -2621,7 +2622,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) + if (err) + return SMP_UNSPECIFIED; + +- if (memcmp(cfm.confirm_val, smp->pcnf, 16)) ++ if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16)) + return SMP_CONFIRM_FAILED; + } + +@@ -2654,7 +2655,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) + else + hcon->pending_sec_level = BT_SECURITY_FIPS; + +- if (!memcmp(debug_pk, smp->remote_pk, 64)) ++ if (!crypto_memneq(debug_pk, smp->remote_pk, 64)) + set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); + + if (smp->method == DSP_PASSKEY) { +@@ -2753,7 +2754,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb) + if (err) + return SMP_UNSPECIFIED; + +- if (memcmp(check->e, e, 16)) ++ if (crypto_memneq(check->e, e, 16)) + return SMP_DHKEY_CHECK_FAILED; + + if (!hcon->out) { +@@ -3463,7 +3464,7 @@ static int __init test_ah(struct crypto_blkcipher *tfm_aes) + if (err) + return err; + +- if (memcmp(res, exp, 3)) ++ if (crypto_memneq(res, exp, 3)) + return -EINVAL; + + return 0; +@@ -3493,7 +3494,7 @@ static int __init test_c1(struct crypto_blkcipher *tfm_aes) + if (err) + return err; + +- if (memcmp(res, exp, 16)) ++ if (crypto_memneq(res, exp, 16)) + return -EINVAL; + + return 0; +@@ -3518,7 +3519,7 @@ static int __init test_s1(struct crypto_blkcipher *tfm_aes) + if (err) + return err; + +- if (memcmp(res, exp, 16)) ++ if (crypto_memneq(res, exp, 16)) + return -EINVAL; + + return 0; +@@ -3550,7 +3551,7 @@ static int __init test_f4(struct crypto_hash *tfm_cmac) + if (err) + return err; + +- if (memcmp(res, exp, 16)) ++ if (crypto_memneq(res, exp, 16)) + return -EINVAL; + + return 0; +@@ -3584,10 +3585,10 @@ static int __init test_f5(struct crypto_hash *tfm_cmac) + if (err) + return err; + +- if (memcmp(mackey, exp_mackey, 16)) ++ if (crypto_memneq(mackey, exp_mackey, 16)) + return -EINVAL; + +- if (memcmp(ltk, exp_ltk, 16)) ++ if (crypto_memneq(ltk, exp_ltk, 16)) + return -EINVAL; + + return 0; +@@ -3620,7 +3621,7 @@ static int __init test_f6(struct crypto_hash *tfm_cmac) + if (err) + return err; + +- if (memcmp(res, exp, 16)) ++ if (crypto_memneq(res, exp, 16)) + return -EINVAL; + + return 0; 
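/*
 * Illustrative aside: every smp.c hunk above swaps memcmp() for
 * crypto_memneq() where secret material is compared. memcmp() may
 * return as soon as it finds a differing byte, so its runtime leaks
 * the position of the first mismatch; crypto_memneq() accumulates the
 * differences and always touches every byte. A minimal sketch of the
 * idea (the real crypto_memneq() is more carefully optimized; the
 * function name here is invented):
 */
#include <stddef.h>

static unsigned long example_memneq(const void *a, const void *b, size_t size)
{
	const unsigned char *pa = a, *pb = b;
	unsigned long neq = 0;

	/* OR together the XOR of every byte pair; never return early */
	while (size--)
		neq |= *pa++ ^ *pb++;

	return neq;	/* zero if and only if the buffers match */
}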
+@@ -3674,7 +3675,7 @@ static int __init test_h6(struct crypto_hash *tfm_cmac) + if (err) + return err; + +- if (memcmp(res, exp, 16)) ++ if (crypto_memneq(res, exp, 16)) + return -EINVAL; + + return 0; +diff --git a/net/key/af_key.c b/net/key/af_key.c +index e67c28e614b9..d8d95b6415e4 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -65,6 +65,10 @@ struct pfkey_sock { + } dump; + }; + ++static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len, ++ xfrm_address_t *saddr, xfrm_address_t *daddr, ++ u16 *family); ++ + static inline struct pfkey_sock *pfkey_sk(struct sock *sk) + { + return (struct pfkey_sock *)sk; +@@ -1922,19 +1926,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) + + /* addresses present only in tunnel mode */ + if (t->mode == XFRM_MODE_TUNNEL) { +- u8 *sa = (u8 *) (rq + 1); +- int family, socklen; ++ int err; + +- family = pfkey_sockaddr_extract((struct sockaddr *)sa, +- &t->saddr); +- if (!family) +- return -EINVAL; +- +- socklen = pfkey_sockaddr_len(family); +- if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen), +- &t->id.daddr) != family) +- return -EINVAL; +- t->encap_family = family; ++ err = parse_sockaddr_pair( ++ (struct sockaddr *)(rq + 1), ++ rq->sadb_x_ipsecrequest_len - sizeof(*rq), ++ &t->saddr, &t->id.daddr, &t->encap_family); ++ if (err) ++ return err; + } else + t->encap_family = xp->family; + +@@ -1954,7 +1953,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol) + if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy)) + return -EINVAL; + +- while (len >= sizeof(struct sadb_x_ipsecrequest)) { ++ while (len >= sizeof(*rq)) { ++ if (len < rq->sadb_x_ipsecrequest_len || ++ rq->sadb_x_ipsecrequest_len < sizeof(*rq)) ++ return -EINVAL; ++ + if ((err = parse_ipsecrequest(xp, rq)) < 0) + return err; + len -= rq->sadb_x_ipsecrequest_len; +@@ -2417,7 +2420,6 @@ out: + return err; + } + +-#ifdef CONFIG_NET_KEY_MIGRATE + static int pfkey_sockaddr_pair_size(sa_family_t family) + { + return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2); +@@ -2429,7 +2431,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len, + { + int af, socklen; + +- if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family)) ++ if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family)) + return -EINVAL; + + af = pfkey_sockaddr_extract(sa, saddr); +@@ -2445,6 +2447,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len, + return 0; + } + ++#ifdef CONFIG_NET_KEY_MIGRATE + static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len, + struct xfrm_migrate *m) + { +@@ -2452,13 +2455,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len, + struct sadb_x_ipsecrequest *rq2; + int mode; + +- if (len <= sizeof(struct sadb_x_ipsecrequest) || +- len < rq1->sadb_x_ipsecrequest_len) ++ if (len < sizeof(*rq1) || ++ len < rq1->sadb_x_ipsecrequest_len || ++ rq1->sadb_x_ipsecrequest_len < sizeof(*rq1)) + return -EINVAL; + + /* old endoints */ + err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1), +- rq1->sadb_x_ipsecrequest_len, ++ rq1->sadb_x_ipsecrequest_len - sizeof(*rq1), + &m->old_saddr, &m->old_daddr, + &m->old_family); + if (err) +@@ -2467,13 +2471,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len, + rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len); + len -= rq1->sadb_x_ipsecrequest_len; + +- if (len <= sizeof(struct sadb_x_ipsecrequest) || +- len < rq2->sadb_x_ipsecrequest_len) ++ 
if (len <= sizeof(*rq2) || ++ len < rq2->sadb_x_ipsecrequest_len || ++ rq2->sadb_x_ipsecrequest_len < sizeof(*rq2)) + return -EINVAL; + + /* new endpoints */ + err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1), +- rq2->sadb_x_ipsecrequest_len, ++ rq2->sadb_x_ipsecrequest_len - sizeof(*rq2), + &m->new_saddr, &m->new_daddr, + &m->new_family); + if (err) +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c +index 4da560005b0e..dd1649caa2b2 100644 +--- a/net/netfilter/ipvs/ip_vs_core.c ++++ b/net/netfilter/ipvs/ip_vs_core.c +@@ -845,10 +845,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb, + { + unsigned int verdict = NF_DROP; + +- if (IP_VS_FWD_METHOD(cp) != 0) { +- pr_err("shouldn't reach here, because the box is on the " +- "half connection in the tun/dr module.\n"); +- } ++ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) ++ goto ignore_cp; + + /* Ensure the checksum is correct */ + if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { +@@ -882,6 +880,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb, + ip_vs_notrack(skb); + else + ip_vs_update_conntrack(skb, cp, 0); ++ ++ignore_cp: + verdict = NF_ACCEPT; + + out: +@@ -1242,8 +1242,11 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in + */ + cp = pp->conn_out_get(ipvs, af, skb, &iph); + +- if (likely(cp)) ++ if (likely(cp)) { ++ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) ++ goto ignore_cp; + return handle_response(af, skb, pd, cp, &iph, hooknum); ++ } + if (sysctl_nat_icmp_send(ipvs) && + (pp->protocol == IPPROTO_TCP || + pp->protocol == IPPROTO_UDP || +@@ -1285,9 +1288,15 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in + } + } + } ++ ++out: + IP_VS_DBG_PKT(12, af, pp, skb, iph.off, + "ip_vs_out: packet continues traversal as normal"); + return NF_ACCEPT; ++ ++ignore_cp: ++ __ip_vs_conn_put(cp); ++ goto out; + } + + /* +diff --git a/net/nfc/core.c b/net/nfc/core.c +index 1fe3d3b362c0..c5a2c7e733b3 100644 +--- a/net/nfc/core.c ++++ b/net/nfc/core.c +@@ -969,6 +969,8 @@ static void nfc_release(struct device *d) + kfree(se); + } + ++ ida_simple_remove(&nfc_index_ida, dev->idx); ++ + kfree(dev); + } + +@@ -1043,6 +1045,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, + int tx_headroom, int tx_tailroom) + { + struct nfc_dev *dev; ++ int rc; + + if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || + !ops->deactivate_target || !ops->im_transceive) +@@ -1055,6 +1058,15 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, + if (!dev) + return NULL; + ++ rc = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL); ++ if (rc < 0) ++ goto err_free_dev; ++ dev->idx = rc; ++ ++ dev->dev.class = &nfc_class; ++ dev_set_name(&dev->dev, "nfc%d", dev->idx); ++ device_initialize(&dev->dev); ++ + dev->ops = ops; + dev->supported_protocols = supported_protocols; + dev->tx_headroom = tx_headroom; +@@ -1077,6 +1089,11 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, + } + + return dev; ++ ++err_free_dev: ++ kfree(dev); ++ ++ return ERR_PTR(rc); + } + EXPORT_SYMBOL(nfc_allocate_device); + +@@ -1091,14 +1108,6 @@ int nfc_register_device(struct nfc_dev *dev) + + pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + +- dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL); +- if (dev->idx < 0) +- return dev->idx; +- +- dev->dev.class = &nfc_class; +- dev_set_name(&dev->dev, "nfc%d", dev->idx); +- device_initialize(&dev->dev); +- + mutex_lock(&nfc_devlist_mutex); + 
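/*
 * Illustrative aside on the af_key hunks above: every
 * sadb_x_ipsecrequest carries its own length field, and the fixes
 * validate that field against both the bytes remaining in the buffer
 * and the header size before trusting it to advance the cursor.
 * The general shape of walking such self-sized records safely
 * (record layout and return codes invented):
 */
struct example_rec {
	unsigned short len;	/* total record size, header included */
	/* payload follows */
};

static int example_walk(const unsigned char *buf, int remaining)
{
	while (remaining >= (int)sizeof(struct example_rec)) {
		const struct example_rec *rec = (const void *)buf;

		/* never use rec->len before checking it both ways */
		if (remaining < rec->len ||
		    rec->len < sizeof(struct example_rec))
			return -1;	/* malformed input */

		/* ... consume the record here ... */

		buf += rec->len;
		remaining -= rec->len;
	}

	return 0;
}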
nfc_devlist_generation++; + rc = device_add(&dev->dev); +@@ -1136,12 +1145,10 @@ EXPORT_SYMBOL(nfc_register_device); + */ + void nfc_unregister_device(struct nfc_dev *dev) + { +- int rc, id; ++ int rc; + + pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + +- id = dev->idx; +- + if (dev->rfkill) { + rfkill_unregister(dev->rfkill); + rfkill_destroy(dev->rfkill); +@@ -1166,8 +1173,6 @@ void nfc_unregister_device(struct nfc_dev *dev) + nfc_devlist_generation++; + device_del(&dev->dev); + mutex_unlock(&nfc_devlist_mutex); +- +- ida_simple_remove(&nfc_index_ida, id); + } + EXPORT_SYMBOL(nfc_unregister_device); + +diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c +index ecf0a0196f18..9c222a106c7f 100644 +--- a/net/nfc/llcp_sock.c ++++ b/net/nfc/llcp_sock.c +@@ -76,7 +76,8 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) + struct sockaddr_nfc_llcp llcp_addr; + int len, ret = 0; + +- if (!addr || addr->sa_family != AF_NFC) ++ if (!addr || alen < offsetofend(struct sockaddr, sa_family) || ++ addr->sa_family != AF_NFC) + return -EINVAL; + + pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); +@@ -150,7 +151,8 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr, + struct sockaddr_nfc_llcp llcp_addr; + int len, ret = 0; + +- if (!addr || addr->sa_family != AF_NFC) ++ if (!addr || alen < offsetofend(struct sockaddr, sa_family) || ++ addr->sa_family != AF_NFC) + return -EINVAL; + + pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); +@@ -655,8 +657,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, + + pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); + +- if (!addr || len < sizeof(struct sockaddr_nfc) || +- addr->sa_family != AF_NFC) ++ if (!addr || len < sizeof(*addr) || addr->sa_family != AF_NFC) + return -EINVAL; + + if (addr->service_name_len == 0 && addr->dsap == 0) +diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c +index 10c99a578421..67583ad7f610 100644 +--- a/net/nfc/nci/core.c ++++ b/net/nfc/nci/core.c +@@ -1084,8 +1084,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops, + return ndev; + + free_nfc: +- kfree(ndev->nfc_dev); +- ++ nfc_free_device(ndev->nfc_dev); + free_nci: + kfree(ndev); + return NULL; +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c +index f58c1fba1026..12dfb457275d 100644 +--- a/net/nfc/netlink.c ++++ b/net/nfc/netlink.c +@@ -873,7 +873,9 @@ static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info *info) + u32 device_idx, target_idx, protocol; + int rc; + +- if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) ++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || ++ !info->attrs[NFC_ATTR_TARGET_INDEX] || ++ !info->attrs[NFC_ATTR_PROTOCOLS]) + return -EINVAL; + + device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); +diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c +index bb82bb966000..c1addf49c4f2 100644 +--- a/sound/soc/soc-compress.c ++++ b/sound/soc/soc-compress.c +@@ -68,7 +68,8 @@ out: + static int soc_compr_open_fe(struct snd_compr_stream *cstream) + { + struct snd_soc_pcm_runtime *fe = cstream->private_data; +- struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream; ++ struct snd_pcm_substream *fe_substream = ++ fe->pcm->streams[cstream->direction].substream; + struct snd_soc_platform *platform = fe->platform; + struct snd_soc_dpcm *dpcm; + struct snd_soc_dapm_widget_list *list; +@@ -412,7 +413,8 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream, + struct snd_compr_params 
*params) + { + struct snd_soc_pcm_runtime *fe = cstream->private_data; +- struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream; ++ struct snd_pcm_substream *fe_substream = ++ fe->pcm->streams[cstream->direction].substream; + struct snd_soc_platform *platform = fe->platform; + int ret = 0, stream; + +diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c +index e9703c0829f1..07b5f5951b25 100644 +--- a/tools/perf/ui/browser.c ++++ b/tools/perf/ui/browser.c +@@ -702,7 +702,7 @@ static void __ui_browser__line_arrow_down(struct ui_browser *browser, + ui_browser__gotorc(browser, row, column + 1); + SLsmg_draw_hline(2); + +- if (row++ == 0) ++ if (++row == 0) + goto out; + } else + row = 0; +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +index 933a509a90f8..67282a759496 100644 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +@@ -64,6 +64,25 @@ enum intel_pt_pkt_state { + INTEL_PT_STATE_FUP_NO_TIP, + }; + ++static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state) ++{ ++ switch (pkt_state) { ++ case INTEL_PT_STATE_NO_PSB: ++ case INTEL_PT_STATE_NO_IP: ++ case INTEL_PT_STATE_ERR_RESYNC: ++ case INTEL_PT_STATE_IN_SYNC: ++ case INTEL_PT_STATE_TNT: ++ return true; ++ case INTEL_PT_STATE_TIP: ++ case INTEL_PT_STATE_TIP_PGD: ++ case INTEL_PT_STATE_FUP: ++ case INTEL_PT_STATE_FUP_NO_TIP: ++ return false; ++ default: ++ return true; ++ }; ++} ++ + #ifdef INTEL_PT_STRICT + #define INTEL_PT_STATE_ERR1 INTEL_PT_STATE_NO_PSB + #define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_PSB +@@ -98,6 +117,7 @@ struct intel_pt_decoder { + uint64_t timestamp; + uint64_t tsc_timestamp; + uint64_t ref_timestamp; ++ uint64_t sample_timestamp; + uint64_t ret_addr; + uint64_t ctc_timestamp; + uint64_t ctc_delta; +@@ -140,6 +160,7 @@ struct intel_pt_decoder { + unsigned int fup_tx_flags; + unsigned int tx_flags; + uint64_t timestamp_insn_cnt; ++ uint64_t sample_insn_cnt; + uint64_t stuck_ip; + int no_progress; + int stuck_ip_prd; +@@ -896,6 +917,7 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder, + + decoder->tot_insn_cnt += insn_cnt; + decoder->timestamp_insn_cnt += insn_cnt; ++ decoder->sample_insn_cnt += insn_cnt; + decoder->period_insn_cnt += insn_cnt; + + if (err) { +@@ -1876,6 +1898,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) + break; + + case INTEL_PT_PSB: ++ intel_pt_clear_stack(&decoder->stack); + err = intel_pt_walk_psb(decoder); + if (err) + return err; +@@ -1901,6 +1924,8 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder) + { + int err; + ++ decoder->set_fup_tx_flags = false; ++ + intel_pt_log("Scanning for full IP\n"); + err = intel_pt_walk_to_ip(decoder); + if (err) +@@ -2035,7 +2060,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder) + + static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder) + { +- uint64_t est = decoder->timestamp_insn_cnt << 1; ++ uint64_t est = decoder->sample_insn_cnt << 1; + + if (!decoder->cbr || !decoder->max_non_turbo_ratio) + goto out; +@@ -2043,7 +2068,7 @@ static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder) + est *= decoder->max_non_turbo_ratio; + est /= decoder->cbr; + out: +- return decoder->timestamp + est; ++ return decoder->sample_timestamp + est; + } + + const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) +@@ -2060,6 +2085,7 @@ const struct intel_pt_state 
*intel_pt_decode(struct intel_pt_decoder *decoder) + break; + case INTEL_PT_STATE_NO_IP: + decoder->last_ip = 0; ++ decoder->ip = 0; + /* Fall through */ + case INTEL_PT_STATE_ERR_RESYNC: + err = intel_pt_sync_ip(decoder); +@@ -2096,15 +2122,24 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) + } + } while (err == -ENOLINK); + +- decoder->state.err = err ? intel_pt_ext_err(err) : 0; +- decoder->state.timestamp = decoder->timestamp; ++ if (err) { ++ decoder->state.err = intel_pt_ext_err(err); ++ decoder->state.from_ip = decoder->ip; ++ decoder->sample_timestamp = decoder->timestamp; ++ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt; ++ } else { ++ decoder->state.err = 0; ++ if (intel_pt_sample_time(decoder->pkt_state)) { ++ decoder->sample_timestamp = decoder->timestamp; ++ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt; ++ } ++ } ++ ++ decoder->state.timestamp = decoder->sample_timestamp; + decoder->state.est_timestamp = intel_pt_est_timestamp(decoder); + decoder->state.cr3 = decoder->cr3; + decoder->state.tot_insn_cnt = decoder->tot_insn_cnt; + +- if (err) +- decoder->state.from_ip = decoder->ip; +- + return &decoder->state; + } + +diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c +index 1dd087da6f31..111e09c3f4bf 100644 +--- a/virt/kvm/vfio.c ++++ b/virt/kvm/vfio.c +@@ -47,6 +47,22 @@ static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep) + return vfio_group; + } + ++static bool kvm_vfio_external_group_match_file(struct vfio_group *group, ++ struct file *filep) ++{ ++ bool ret, (*fn)(struct vfio_group *, struct file *); ++ ++ fn = symbol_get(vfio_external_group_match_file); ++ if (!fn) ++ return false; ++ ++ ret = fn(group, filep); ++ ++ symbol_put(vfio_external_group_match_file); ++ ++ return ret; ++} ++ + static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group) + { + void (*fn)(struct vfio_group *); +@@ -171,18 +187,13 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg) + if (!f.file) + return -EBADF; + +- vfio_group = kvm_vfio_group_get_external_user(f.file); +- fdput(f); +- +- if (IS_ERR(vfio_group)) +- return PTR_ERR(vfio_group); +- + ret = -ENOENT; + + mutex_lock(&kv->lock); + + list_for_each_entry(kvg, &kv->group_list, node) { +- if (kvg->vfio_group != vfio_group) ++ if (!kvm_vfio_external_group_match_file(kvg->vfio_group, ++ f.file)) + continue; + + list_del(&kvg->node); +@@ -196,7 +207,7 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg) + + mutex_unlock(&kv->lock); + +- kvm_vfio_group_put_external_user(vfio_group); ++ fdput(f); + + kvm_vfio_update_coherency(dev); + diff --git a/patch/kernel/rk3328-default/patch-4.4.79-80.patch b/patch/kernel/rk3328-default/patch-4.4.79-80.patch new file mode 100644 index 000000000..3594fbc32 --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.79-80.patch @@ -0,0 +1,3287 @@ +diff --git a/Makefile b/Makefile +index 1440a94b2474..dddd55adde24 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 79 ++SUBLEVEL = 80 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts +index 5f5e0f3d5b64..27cd4abfc74d 100644 +--- a/arch/arm/boot/dts/omap3-n900.dts ++++ b/arch/arm/boot/dts/omap3-n900.dts +@@ -697,6 +697,8 @@ + vmmc_aux-supply = <&vsim>; + bus-width = <8>; + non-removable; ++ no-sdio; ++ no-sd; + }; + + &mmc3 { +diff --git a/arch/arm/configs/s3c2410_defconfig 
b/arch/arm/configs/s3c2410_defconfig +index f3142369f594..01116ee1284b 100644 +--- a/arch/arm/configs/s3c2410_defconfig ++++ b/arch/arm/configs/s3c2410_defconfig +@@ -87,9 +87,9 @@ CONFIG_IPV6_TUNNEL=m + CONFIG_NETFILTER=y + CONFIG_NF_CONNTRACK=m + CONFIG_NF_CONNTRACK_EVENTS=y +-CONFIG_NF_CT_PROTO_DCCP=m +-CONFIG_NF_CT_PROTO_SCTP=m +-CONFIG_NF_CT_PROTO_UDPLITE=m ++CONFIG_NF_CT_PROTO_DCCP=y ++CONFIG_NF_CT_PROTO_SCTP=y ++CONFIG_NF_CT_PROTO_UDPLITE=y + CONFIG_NF_CONNTRACK_AMANDA=m + CONFIG_NF_CONNTRACK_FTP=m + CONFIG_NF_CONNTRACK_H323=m +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts +index ce5d848251fa..7b34822d61e9 100644 +--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts ++++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts +@@ -26,7 +26,7 @@ + stdout-path = "serial0:115200n8"; + }; + +- memory { ++ memory@0 { + device_type = "memory"; + reg = <0x0 0x0 0x40000000>; + }; +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +index 857eda5c7217..172402cc1a0f 100644 +--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi ++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +@@ -71,7 +71,7 @@ + <1 10 0xf01>; + }; + +- amba_apu { ++ amba_apu: amba_apu@0 { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <1>; +@@ -191,7 +191,7 @@ + }; + + i2c0: i2c@ff020000 { +- compatible = "cdns,i2c-r1p10"; ++ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10"; + status = "disabled"; + interrupt-parent = <&gic>; + interrupts = <0 17 4>; +@@ -202,7 +202,7 @@ + }; + + i2c1: i2c@ff030000 { +- compatible = "cdns,i2c-r1p10"; ++ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10"; + status = "disabled"; + interrupt-parent = <&gic>; + interrupts = <0 18 4>; +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c +index 247bae758e1e..a4b466424a32 100644 +--- a/arch/arm64/mm/fault.c ++++ b/arch/arm64/mm/fault.c +@@ -66,21 +66,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr) + break; + + pud = pud_offset(pgd, addr); +- printk(", *pud=%016llx", pud_val(*pud)); ++ pr_cont(", *pud=%016llx", pud_val(*pud)); + if (pud_none(*pud) || pud_bad(*pud)) + break; + + pmd = pmd_offset(pud, addr); +- printk(", *pmd=%016llx", pmd_val(*pmd)); ++ pr_cont(", *pmd=%016llx", pmd_val(*pmd)); + if (pmd_none(*pmd) || pmd_bad(*pmd)) + break; + + pte = pte_offset_map(pmd, addr); +- printk(", *pte=%016llx", pte_val(*pte)); ++ pr_cont(", *pte=%016llx", pte_val(*pte)); + pte_unmap(pte); + } while(0); + +- printk("\n"); ++ pr_cont("\n"); + } + + #ifdef CONFIG_ARM64_HW_AFDBM +diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S +index 2d69a853b742..3a08b55609b6 100644 +--- a/arch/openrisc/kernel/vmlinux.lds.S ++++ b/arch/openrisc/kernel/vmlinux.lds.S +@@ -38,6 +38,8 @@ SECTIONS + /* Read-only sections, merged into text segment: */ + . = LOAD_BASE ; + ++ _text = .; ++ + /* _s_kernel_ro must be page aligned */ + . 
= ALIGN(PAGE_SIZE); + _s_kernel_ro = .; +diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h +index 329771559cbb..8b3b46b7b0f2 100644 +--- a/arch/powerpc/include/asm/topology.h ++++ b/arch/powerpc/include/asm/topology.h +@@ -44,22 +44,8 @@ extern void __init dump_numa_cpu_topology(void); + extern int sysfs_add_device_to_node(struct device *dev, int nid); + extern void sysfs_remove_device_from_node(struct device *dev, int nid); + +-static inline int early_cpu_to_node(int cpu) +-{ +- int nid; +- +- nid = numa_cpu_lookup_table[cpu]; +- +- /* +- * Fall back to node 0 if nid is unset (it should be, except bugs). +- * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)). +- */ +- return (nid < 0) ? 0 : nid; +-} + #else + +-static inline int early_cpu_to_node(int cpu) { return 0; } +- + static inline void dump_numa_cpu_topology(void) {} + + static inline int sysfs_add_device_to_node(struct device *dev, int nid) +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c +index fe6e800c1357..a20823210ac0 100644 +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -751,7 +751,7 @@ void __init setup_arch(char **cmdline_p) + + static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) + { +- return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align, ++ return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align, + __pa(MAX_DMA_ADDRESS)); + } + +@@ -762,7 +762,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size) + + static int pcpu_cpu_distance(unsigned int from, unsigned int to) + { +- if (early_cpu_to_node(from) == early_cpu_to_node(to)) ++ if (cpu_to_node(from) == cpu_to_node(to)) + return LOCAL_DISTANCE; + else + return REMOTE_DISTANCE; +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c +index 396dc44e783b..428563b195c3 100644 +--- a/arch/powerpc/kvm/book3s_hv.c ++++ b/arch/powerpc/kvm/book3s_hv.c +@@ -2687,6 +2687,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) + { + int r; + int srcu_idx; ++ unsigned long ebb_regs[3] = {}; /* shut up GCC */ ++ unsigned long user_tar = 0; ++ unsigned long proc_fscr = 0; ++ unsigned int user_vrsave; + + if (!vcpu->arch.sane) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; +@@ -2707,10 +2711,11 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) + run->fail_entry.hardware_entry_failure_reason = 0; + return -EINVAL; + } ++ /* Enable TM so we can read the TM SPRs */ ++ mtmsr(mfmsr() | MSR_TM); + current->thread.tm_tfhar = mfspr(SPRN_TFHAR); + current->thread.tm_tfiar = mfspr(SPRN_TFIAR); + current->thread.tm_texasr = mfspr(SPRN_TEXASR); +- current->thread.regs->msr &= ~MSR_TM; + } + #endif + +@@ -2736,6 +2741,17 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) + flush_fp_to_thread(current); + flush_altivec_to_thread(current); + flush_vsx_to_thread(current); ++ ++ /* Save userspace EBB and other register values */ ++ if (cpu_has_feature(CPU_FTR_ARCH_207S)) { ++ ebb_regs[0] = mfspr(SPRN_EBBHR); ++ ebb_regs[1] = mfspr(SPRN_EBBRR); ++ ebb_regs[2] = mfspr(SPRN_BESCR); ++ user_tar = mfspr(SPRN_TAR); ++ proc_fscr = mfspr(SPRN_FSCR); ++ } ++ user_vrsave = mfspr(SPRN_VRSAVE); ++ + vcpu->arch.wqp = &vcpu->arch.vcore->wq; + vcpu->arch.pgdir = current->mm->pgd; + vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; +@@ -2757,6 +2773,29 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) + } + } while 
(is_kvmppc_resume_guest(r)); + ++ /* Restore userspace EBB and other register values */ ++ if (cpu_has_feature(CPU_FTR_ARCH_207S)) { ++ mtspr(SPRN_EBBHR, ebb_regs[0]); ++ mtspr(SPRN_EBBRR, ebb_regs[1]); ++ mtspr(SPRN_BESCR, ebb_regs[2]); ++ mtspr(SPRN_TAR, user_tar); ++ mtspr(SPRN_FSCR, proc_fscr); ++ } ++ mtspr(SPRN_VRSAVE, user_vrsave); ++ ++ /* ++ * Since we don't do lazy TM reload, we need to reload ++ * the TM registers here. ++ */ ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM ++ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && ++ (current->thread.regs->msr & MSR_TM)) { ++ mtspr(SPRN_TFHAR, current->thread.tm_tfhar); ++ mtspr(SPRN_TFIAR, current->thread.tm_tfiar); ++ mtspr(SPRN_TEXASR, current->thread.tm_texasr); ++ } ++#endif ++ + out: + vcpu->arch.state = KVMPPC_VCPU_NOTREADY; + atomic_dec(&vcpu->kvm->arch.vcpus_running); +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +index 1a743f87b37d..ffab9269bfe4 100644 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S +@@ -36,6 +36,13 @@ + #define NAPPING_CEDE 1 + #define NAPPING_NOVCPU 2 + ++/* Stack frame offsets for kvmppc_hv_entry */ ++#define SFS 112 ++#define STACK_SLOT_TRAP (SFS-4) ++#define STACK_SLOT_CIABR (SFS-16) ++#define STACK_SLOT_DAWR (SFS-24) ++#define STACK_SLOT_DAWRX (SFS-32) ++ + /* + * Call kvmppc_hv_entry in real mode. + * Must be called with interrupts hard-disabled. +@@ -274,10 +281,10 @@ kvm_novcpu_exit: + bl kvmhv_accumulate_time + #endif + 13: mr r3, r12 +- stw r12, 112-4(r1) ++ stw r12, STACK_SLOT_TRAP(r1) + bl kvmhv_commence_exit + nop +- lwz r12, 112-4(r1) ++ lwz r12, STACK_SLOT_TRAP(r1) + b kvmhv_switch_to_host + + /* +@@ -489,7 +496,7 @@ kvmppc_hv_entry: + */ + mflr r0 + std r0, PPC_LR_STKOFF(r1) +- stdu r1, -112(r1) ++ stdu r1, -SFS(r1) + + /* Save R1 in the PACA */ + std r1, HSTATE_HOST_R1(r13) +@@ -643,6 +650,16 @@ kvmppc_got_guest: + mtspr SPRN_PURR,r7 + mtspr SPRN_SPURR,r8 + ++ /* Save host values of some registers */ ++BEGIN_FTR_SECTION ++ mfspr r5, SPRN_CIABR ++ mfspr r6, SPRN_DAWR ++ mfspr r7, SPRN_DAWRX ++ std r5, STACK_SLOT_CIABR(r1) ++ std r6, STACK_SLOT_DAWR(r1) ++ std r7, STACK_SLOT_DAWRX(r1) ++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ++ + BEGIN_FTR_SECTION + /* Set partition DABR */ + /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ +@@ -1266,8 +1283,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) + */ + li r0, 0 + mtspr SPRN_IAMR, r0 +- mtspr SPRN_CIABR, r0 +- mtspr SPRN_DAWRX, r0 ++ mtspr SPRN_PSPB, r0 + mtspr SPRN_TCSCR, r0 + mtspr SPRN_WORT, r0 + /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ +@@ -1283,6 +1299,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) + std r6,VCPU_UAMOR(r9) + li r6,0 + mtspr SPRN_AMR,r6 ++ mtspr SPRN_UAMOR, r6 + + /* Switch DSCR back to host value */ + mfspr r8, SPRN_DSCR +@@ -1424,6 +1441,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + slbia + ptesync + ++ /* Restore host values of some registers */ ++BEGIN_FTR_SECTION ++ ld r5, STACK_SLOT_CIABR(r1) ++ ld r6, STACK_SLOT_DAWR(r1) ++ ld r7, STACK_SLOT_DAWRX(r1) ++ mtspr SPRN_CIABR, r5 ++ mtspr SPRN_DAWR, r6 ++ mtspr SPRN_DAWRX, r7 ++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ++ + /* + * POWER7/POWER8 guest -> host partition switch code. 
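/*
 * Illustrative aside: the book3s_hv hunks above follow one discipline:
 * snapshot every userspace-visible special-purpose register the guest
 * may clobber before entering it, and restore the full set on every
 * exit path. A hedged sketch of that shape; the accessors and SPR
 * names below are invented stand-ins for mfspr()/mtspr():
 */
enum { EXAMPLE_EBBHR, EXAMPLE_EBBRR, EXAMPLE_BESCR, EXAMPLE_TAR };

extern unsigned long example_read_spr(int spr);
extern void example_write_spr(int spr, unsigned long val);
extern void example_enter_guest(void);

static void example_run_guest_once(void)
{
	unsigned long ebbhr, ebbrr, bescr, tar;

	/* snapshot user state before the guest can touch it */
	ebbhr = example_read_spr(EXAMPLE_EBBHR);
	ebbrr = example_read_spr(EXAMPLE_EBBRR);
	bescr = example_read_spr(EXAMPLE_BESCR);
	tar   = example_read_spr(EXAMPLE_TAR);

	example_enter_guest();		/* may clobber all four */

	/* restore unconditionally on every exit path */
	example_write_spr(EXAMPLE_EBBHR, ebbhr);
	example_write_spr(EXAMPLE_EBBRR, ebbrr);
	example_write_spr(EXAMPLE_BESCR, bescr);
	example_write_spr(EXAMPLE_TAR, tar);
}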
+ * We don't have to lock against tlbies but we do +@@ -1533,8 +1560,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + li r0, KVM_GUEST_MODE_NONE + stb r0, HSTATE_IN_GUEST(r13) + +- ld r0, 112+PPC_LR_STKOFF(r1) +- addi r1, r1, 112 ++ ld r0, SFS+PPC_LR_STKOFF(r1) ++ addi r1, r1, SFS + mtlr r0 + blr + +diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c +index 7c7fcc042549..fb695f142563 100644 +--- a/arch/powerpc/platforms/pseries/reconfig.c ++++ b/arch/powerpc/platforms/pseries/reconfig.c +@@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np) + + of_detach_node(np); + of_node_put(parent); +- of_node_put(np); /* Must decrement the refcount */ + return 0; + } + +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c +index 62aca448726a..2116176c1721 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c +@@ -682,6 +682,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) + const char *name = th_names[bank]; + int err = 0; + ++ if (!dev) ++ return -ENODEV; ++ + if (is_shared_bank(bank)) { + nb = node_to_amd_nb(amd_get_nb_id(cpu)); + +diff --git a/crypto/authencesn.c b/crypto/authencesn.c +index 0c0468869e25..52154ef21b5e 100644 +--- a/crypto/authencesn.c ++++ b/crypto/authencesn.c +@@ -245,6 +245,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, + u8 *ihash = ohash + crypto_ahash_digestsize(auth); + u32 tmp[2]; + ++ if (!authsize) ++ goto decrypt; ++ + /* Move high-order bits of sequence number back. */ + scatterwalk_map_and_copy(tmp, dst, 4, 4, 0); + scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0); +@@ -253,6 +256,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, + if (crypto_memneq(ihash, ohash, authsize)) + return -EBADMSG; + ++decrypt: ++ + sg_init_table(areq_ctx->dst, 2); + dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen); + +diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c +index 5ea5dc219f56..73c9c7fa9001 100644 +--- a/drivers/acpi/glue.c ++++ b/drivers/acpi/glue.c +@@ -98,7 +98,15 @@ static int find_child_checks(struct acpi_device *adev, bool check_children) + if (check_children && list_empty(&adev->children)) + return -ENODEV; + +- return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; ++ /* ++ * If the device has a _HID (or _CID) returning a valid ACPI/PNP ++ * device ID, it is better to make it look less attractive here, so that ++ * the other device with the same _ADR value (that may not have a valid ++ * device ID) can be matched going forward. [This means a second spec ++ * violation in a row, so whatever we do here is best effort anyway.] ++ */ ++ return sta_present && list_empty(&adev->pnp.ids) ? 
++ FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; + } + + struct acpi_device *acpi_find_child_device(struct acpi_device *parent, +diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c +index 3f94e5fc110a..78b0ece0c867 100644 +--- a/drivers/base/power/domain.c ++++ b/drivers/base/power/domain.c +@@ -1188,7 +1188,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, + } + + dev->power.subsys_data->domain_data = &gpd_data->base; +- dev->pm_domain = &genpd->domain; + + spin_unlock_irq(&dev->power.lock); + +@@ -1207,7 +1206,6 @@ static void genpd_free_dev_data(struct device *dev, + { + spin_lock_irq(&dev->power.lock); + +- dev->pm_domain = NULL; + dev->power.subsys_data->domain_data = NULL; + + spin_unlock_irq(&dev->power.lock); +@@ -1248,6 +1246,8 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, + if (ret) + goto out; + ++ dev->pm_domain = &genpd->domain; ++ + genpd->device_count++; + genpd->max_off_time_changed = true; + +@@ -1299,6 +1299,8 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, + if (genpd->detach_dev) + genpd->detach_dev(genpd, dev); + ++ dev->pm_domain = NULL; ++ + list_del_init(&pdd->list_node); + + mutex_unlock(&genpd->lock); +diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c +index 41fb1a917b17..33e23a7a691f 100644 +--- a/drivers/block/xen-blkback/blkback.c ++++ b/drivers/block/xen-blkback/blkback.c +@@ -595,8 +595,6 @@ int xen_blkif_schedule(void *arg) + unsigned long timeout; + int ret; + +- xen_blkif_get(blkif); +- + while (!kthread_should_stop()) { + if (try_to_freeze()) + continue; +@@ -650,7 +648,6 @@ purge_gnt_list: + print_stats(blkif); + + blkif->xenblkd = NULL; +- xen_blkif_put(blkif); + + return 0; + } +diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c +index f53cff42f8da..923308201375 100644 +--- a/drivers/block/xen-blkback/xenbus.c ++++ b/drivers/block/xen-blkback/xenbus.c +@@ -221,7 +221,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) + if (blkif->xenblkd) { + kthread_stop(blkif->xenblkd); + wake_up(&blkif->shutdown_wq); +- blkif->xenblkd = NULL; + } + + /* The above kthread_stop() guarantees that at this point we +@@ -266,9 +265,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) + + static void xen_blkif_free(struct xen_blkif *blkif) + { +- +- xen_blkif_disconnect(blkif); ++ WARN_ON(xen_blkif_disconnect(blkif)); + xen_vbd_free(&blkif->vbd); ++ kfree(blkif->be->mode); ++ kfree(blkif->be); + + /* Make sure everything is drained before shutting down */ + BUG_ON(blkif->persistent_gnt_c != 0); +@@ -445,8 +445,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev) + xen_blkif_put(be->blkif); + } + +- kfree(be->mode); +- kfree(be); + return 0; + } + +diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c +index 096f0cef4da1..40d400fe5bb7 100644 +--- a/drivers/char/ipmi/ipmi_watchdog.c ++++ b/drivers/char/ipmi/ipmi_watchdog.c +@@ -1162,10 +1162,11 @@ static int wdog_reboot_handler(struct notifier_block *this, + ipmi_watchdog_state = WDOG_TIMEOUT_NONE; + ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); + } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { +- /* Set a long timer to let the reboot happens, but +- reboot if it hangs, but only if the watchdog ++ /* Set a long timer to let the reboot happen or ++ reset if it hangs, but only if the watchdog + timer was already running. 
*/ +- timeout = 120; ++ if (timeout < 120) ++ timeout = 120; + pretimeout = 0; + ipmi_watchdog_state = WDOG_TIMEOUT_RESET; + ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); +diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c +index 6d56877b2e0a..a0d9ac6b6cc9 100644 +--- a/drivers/char/tpm/tpm-chip.c ++++ b/drivers/char/tpm/tpm-chip.c +@@ -29,9 +29,8 @@ + #include "tpm.h" + #include "tpm_eventlog.h" + +-static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES); +-static LIST_HEAD(tpm_chip_list); +-static DEFINE_SPINLOCK(driver_lock); ++DEFINE_IDR(dev_nums_idr); ++static DEFINE_MUTEX(idr_lock); + + struct class *tpm_class; + dev_t tpm_devt; +@@ -92,20 +91,30 @@ EXPORT_SYMBOL_GPL(tpm_put_ops); + */ + struct tpm_chip *tpm_chip_find_get(int chip_num) + { +- struct tpm_chip *pos, *chip = NULL; ++ struct tpm_chip *chip, *res = NULL; ++ int chip_prev; ++ ++ mutex_lock(&idr_lock); ++ ++ if (chip_num == TPM_ANY_NUM) { ++ chip_num = 0; ++ do { ++ chip_prev = chip_num; ++ chip = idr_get_next(&dev_nums_idr, &chip_num); ++ if (chip && !tpm_try_get_ops(chip)) { ++ res = chip; ++ break; ++ } ++ } while (chip_prev != chip_num); ++ } else { ++ chip = idr_find_slowpath(&dev_nums_idr, chip_num); ++ if (chip && !tpm_try_get_ops(chip)) ++ res = chip; ++ } + +- rcu_read_lock(); +- list_for_each_entry_rcu(pos, &tpm_chip_list, list) { +- if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num) +- continue; ++ mutex_unlock(&idr_lock); + +- /* rcu prevents chip from being free'd */ +- if (!tpm_try_get_ops(pos)) +- chip = pos; +- break; +- } +- rcu_read_unlock(); +- return chip; ++ return res; + } + + /** +@@ -118,9 +127,10 @@ static void tpm_dev_release(struct device *dev) + { + struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); + +- spin_lock(&driver_lock); +- clear_bit(chip->dev_num, dev_mask); +- spin_unlock(&driver_lock); ++ mutex_lock(&idr_lock); ++ idr_remove(&dev_nums_idr, chip->dev_num); ++ mutex_unlock(&idr_lock); ++ + kfree(chip); + } + +@@ -173,6 +183,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, + const struct tpm_class_ops *ops) + { + struct tpm_chip *chip; ++ int rc; + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (chip == NULL) +@@ -180,21 +191,18 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, + + mutex_init(&chip->tpm_mutex); + init_rwsem(&chip->ops_sem); +- INIT_LIST_HEAD(&chip->list); + + chip->ops = ops; + +- spin_lock(&driver_lock); +- chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES); +- spin_unlock(&driver_lock); +- +- if (chip->dev_num >= TPM_NUM_DEVICES) { ++ mutex_lock(&idr_lock); ++ rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL); ++ mutex_unlock(&idr_lock); ++ if (rc < 0) { + dev_err(dev, "No available tpm device numbers\n"); + kfree(chip); +- return ERR_PTR(-ENOMEM); ++ return ERR_PTR(rc); + } +- +- set_bit(chip->dev_num, dev_mask); ++ chip->dev_num = rc; + + scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num); + +@@ -252,19 +260,28 @@ static int tpm_add_char_device(struct tpm_chip *chip) + return rc; + } + ++ /* Make the chip available. */ ++ mutex_lock(&idr_lock); ++ idr_replace(&dev_nums_idr, chip, chip->dev_num); ++ mutex_unlock(&idr_lock); ++ + return rc; + } + + static void tpm_del_char_device(struct tpm_chip *chip) + { + cdev_del(&chip->cdev); ++ device_del(&chip->dev); ++ ++ /* Make the chip unavailable. */ ++ mutex_lock(&idr_lock); ++ idr_replace(&dev_nums_idr, NULL, chip->dev_num); ++ mutex_unlock(&idr_lock); + + /* Make the driver uncallable. 
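/*
 * Illustrative aside: the tpm-chip hunks above replace the global chip
 * list plus device-number bitmap with a single IDR, which provides
 * both number allocation and number->chip lookup. The lookup pattern
 * is "walk the IDR under a lock and return the first entry we can
 * still take a reference on". A hedged sketch (the object type and
 * try-get helper are invented):
 */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_obj;
extern bool example_try_get(struct example_obj *obj);

static struct example_obj *example_find_get(struct idr *idr,
					    struct mutex *lock)
{
	struct example_obj *obj, *res = NULL;
	int id = 0;

	mutex_lock(lock);
	while ((obj = idr_get_next(idr, &id)) != NULL) {
		/* the try-get fails while the object is being torn down */
		if (example_try_get(obj)) {
			res = obj;
			break;
		}
		id++;	/* skip the dying entry and keep walking */
	}
	mutex_unlock(lock);

	return res;
}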
*/ + down_write(&chip->ops_sem); + chip->ops = NULL; + up_write(&chip->ops_sem); +- +- device_del(&chip->dev); + } + + static int tpm1_chip_register(struct tpm_chip *chip) +@@ -319,11 +336,6 @@ int tpm_chip_register(struct tpm_chip *chip) + if (rc) + goto out_err; + +- /* Make the chip available. */ +- spin_lock(&driver_lock); +- list_add_tail_rcu(&chip->list, &tpm_chip_list); +- spin_unlock(&driver_lock); +- + chip->flags |= TPM_CHIP_FLAG_REGISTERED; + + if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { +@@ -360,11 +372,6 @@ void tpm_chip_unregister(struct tpm_chip *chip) + if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED)) + return; + +- spin_lock(&driver_lock); +- list_del_rcu(&chip->list); +- spin_unlock(&driver_lock); +- synchronize_rcu(); +- + if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) + sysfs_remove_link(&chip->dev.parent->kobj, "ppi"); + +diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c +index 8588f2e4b9af..aaa5fa95dede 100644 +--- a/drivers/char/tpm/tpm-interface.c ++++ b/drivers/char/tpm/tpm-interface.c +@@ -1127,6 +1127,7 @@ static int __init tpm_init(void) + + static void __exit tpm_exit(void) + { ++ idr_destroy(&dev_nums_idr); + class_destroy(tpm_class); + unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES); + } +diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c +index 6a4056a3f7ee..06ac6e9657d2 100644 +--- a/drivers/char/tpm/tpm-sysfs.c ++++ b/drivers/char/tpm/tpm-sysfs.c +@@ -38,6 +38,8 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, + + struct tpm_chip *chip = dev_get_drvdata(dev); + ++ memset(&tpm_cmd, 0, sizeof(tpm_cmd)); ++ + tpm_cmd.header.in = tpm_readpubek_header; + err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, 0, + "attempting to read the PUBEK"); +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h +index e21e2c599e66..772d99b3a8e4 100644 +--- a/drivers/char/tpm/tpm.h ++++ b/drivers/char/tpm/tpm.h +@@ -34,7 +34,7 @@ + enum tpm_const { + TPM_MINOR = 224, /* officially assigned */ + TPM_BUFSIZE = 4096, +- TPM_NUM_DEVICES = 256, ++ TPM_NUM_DEVICES = 65536, + TPM_RETRY = 50, /* 5 seconds */ + }; + +@@ -200,8 +200,6 @@ struct tpm_chip { + acpi_handle acpi_dev_handle; + char ppi_version[TPM_PPI_VERSION_LEN + 1]; + #endif /* CONFIG_ACPI */ +- +- struct list_head list; + }; + + #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) +@@ -497,6 +495,7 @@ static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value) + extern struct class *tpm_class; + extern dev_t tpm_devt; + extern const struct file_operations tpm_fops; ++extern struct idr dev_nums_idr; + + enum tpm_transmit_flags { + TPM_TRANSMIT_UNLOCKED = BIT(0), +diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h +index 690e3b4f8202..b36da3c1073f 100644 +--- a/drivers/dma/ioat/hw.h ++++ b/drivers/dma/ioat/hw.h +@@ -64,6 +64,8 @@ + #define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e + #define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f + ++#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021 ++ + #define IOAT_VER_1_2 0x12 /* Version 1.2 */ + #define IOAT_VER_2_0 0x20 /* Version 2.0 */ + #define IOAT_VER_3_0 0x30 /* Version 3.0 */ +diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c +index 4ef0c5e07912..abb75ebd65ea 100644 +--- a/drivers/dma/ioat/init.c ++++ b/drivers/dma/ioat/init.c +@@ -105,6 +105,8 @@ static struct pci_device_id ioat_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) }, + ++ { PCI_VDEVICE(INTEL, 
PCI_DEVICE_ID_INTEL_IOAT_SKX) }, ++ + /* I/OAT v3.3 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, +@@ -250,10 +252,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev) + } + } + ++static inline bool is_skx_ioat(struct pci_dev *pdev) ++{ ++ return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false; ++} ++ + static bool is_xeon_cb32(struct pci_dev *pdev) + { + return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || +- is_hsw_ioat(pdev) || is_bdx_ioat(pdev); ++ is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev); + } + + bool is_bwd_ioat(struct pci_dev *pdev) +@@ -1350,6 +1357,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + device->version = readb(device->reg_base + IOAT_VER_OFFSET); + if (device->version >= IOAT_VER_3_0) { ++ if (is_skx_ioat(pdev)) ++ device->version = IOAT_VER_3_2; + err = ioat3_dma_probe(device, ioat_dca_enabled); + + if (device->version >= IOAT_VER_3_3) +diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c +index a415edbe61b1..149ec2bd9bc6 100644 +--- a/drivers/dma/ti-dma-crossbar.c ++++ b/drivers/dma/ti-dma-crossbar.c +@@ -146,6 +146,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev) + match = of_match_node(ti_am335x_master_match, dma_node); + if (!match) { + dev_err(&pdev->dev, "DMA master is not supported\n"); ++ of_node_put(dma_node); + return -EINVAL; + } + +@@ -310,6 +311,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) + match = of_match_node(ti_dra7_master_match, dma_node); + if (!match) { + dev_err(&pdev->dev, "DMA master is not supported\n"); ++ of_node_put(dma_node); + return -EINVAL; + } + +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +index a3b54cc76495..b66ffd44ff26 100644 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +@@ -204,7 +204,14 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, + void adreno_flush(struct msm_gpu *gpu) + { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); +- uint32_t wptr = get_wptr(gpu->rb); ++ uint32_t wptr; ++ ++ /* ++ * Mask wptr value that we calculate to fit in the HW range. 
This is ++ * to account for the possibility that the last command fit exactly into ++ * the ringbuffer and rb->next hasn't wrapped to zero yet ++ */ ++ wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1); + + /* ensure writes to ringbuffer have hit system memory: */ + mb(); +diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c +index 1847f83b1e33..fed44d4e5b72 100644 +--- a/drivers/gpu/drm/msm/msm_gem_submit.c ++++ b/drivers/gpu/drm/msm/msm_gem_submit.c +@@ -90,7 +90,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, + pagefault_disable(); + } + +- if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) { ++ if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) || ++ !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) { + DRM_ERROR("invalid flags: %x\n", submit_bo.flags); + ret = -EINVAL; + goto out_unlock; +diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c +index 1f14b908b221..ae317271cf81 100644 +--- a/drivers/gpu/drm/msm/msm_ringbuffer.c ++++ b/drivers/gpu/drm/msm/msm_ringbuffer.c +@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) + struct msm_ringbuffer *ring; + int ret; + +- size = ALIGN(size, 4); /* size should be dword aligned */ ++ if (WARN_ON(!is_power_of_2(size))) ++ return ERR_PTR(-EINVAL); + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +index c794b2c2d21e..6d8f21290aa2 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +@@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base) + + if (bar->bar[0].mem) { + addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; +- nvkm_wr32(device, 0x001714, 0xc0000000 | addr); ++ nvkm_wr32(device, 0x001714, 0x80000000 | addr); + } + + return 0; +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +index 48cb19949ca3..9255b9c096b6 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +@@ -282,26 +282,6 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc) + * Page Flip + */ + +-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, +- struct drm_file *file) +-{ +- struct drm_pending_vblank_event *event; +- struct drm_device *dev = rcrtc->crtc.dev; +- unsigned long flags; +- +- /* Destroy the pending vertical blanking event associated with the +- * pending page flip, if any, and disable vertical blanking interrupts. 
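/*
 * Illustrative aside on the msm ringbuffer hunks above: once the ring
 * size is enforced to be a power of two, the write pointer can wrap
 * with a cheap mask, and a command stream that ends exactly at the top
 * of the ring produces wptr == size/4, which the mask folds back to 0
 * as the hardware expects. A minimal sketch in dwords (function name
 * invented):
 */
static unsigned int example_wrap_wptr(unsigned int wptr_dwords,
				      unsigned int ring_bytes)
{
	/* valid only because ring_bytes is a power of two */
	unsigned int mask = (ring_bytes / 4) - 1;

	return wptr_dwords & mask;	/* size/4 wraps to 0 */
}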
+- */ +- spin_lock_irqsave(&dev->event_lock, flags); +- event = rcrtc->event; +- if (event && event->base.file_priv == file) { +- rcrtc->event = NULL; +- event->base.destroy(&event->base); +- drm_crtc_vblank_put(&rcrtc->crtc); +- } +- spin_unlock_irqrestore(&dev->event_lock, flags); +-} +- + static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc) + { + struct drm_pending_vblank_event *event; +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +index 4b95d9d08c49..2bbe3f5aab65 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h ++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +@@ -67,8 +67,6 @@ enum rcar_du_output { + + int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index); + void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable); +-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, +- struct drm_file *file); + void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); + void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); + +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c +index 40422f6b645e..bf4674aa6405 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c +@@ -144,91 +144,6 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table); + * DRM operations + */ + +-static int rcar_du_unload(struct drm_device *dev) +-{ +- struct rcar_du_device *rcdu = dev->dev_private; +- +- if (rcdu->fbdev) +- drm_fbdev_cma_fini(rcdu->fbdev); +- +- drm_kms_helper_poll_fini(dev); +- drm_mode_config_cleanup(dev); +- drm_vblank_cleanup(dev); +- +- dev->irq_enabled = 0; +- dev->dev_private = NULL; +- +- return 0; +-} +- +-static int rcar_du_load(struct drm_device *dev, unsigned long flags) +-{ +- struct platform_device *pdev = dev->platformdev; +- struct device_node *np = pdev->dev.of_node; +- struct rcar_du_device *rcdu; +- struct resource *mem; +- int ret; +- +- if (np == NULL) { +- dev_err(dev->dev, "no platform data\n"); +- return -ENODEV; +- } +- +- rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL); +- if (rcdu == NULL) { +- dev_err(dev->dev, "failed to allocate private data\n"); +- return -ENOMEM; +- } +- +- init_waitqueue_head(&rcdu->commit.wait); +- +- rcdu->dev = &pdev->dev; +- rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data; +- rcdu->ddev = dev; +- dev->dev_private = rcdu; +- +- /* I/O resources */ +- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); +- if (IS_ERR(rcdu->mmio)) +- return PTR_ERR(rcdu->mmio); +- +- /* Initialize vertical blanking interrupts handling. Start with vblank +- * disabled for all CRTCs. 
+- */ +- ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1); +- if (ret < 0) { +- dev_err(&pdev->dev, "failed to initialize vblank\n"); +- goto done; +- } +- +- /* DRM/KMS objects */ +- ret = rcar_du_modeset_init(rcdu); +- if (ret < 0) { +- dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret); +- goto done; +- } +- +- dev->irq_enabled = 1; +- +- platform_set_drvdata(pdev, rcdu); +- +-done: +- if (ret) +- rcar_du_unload(dev); +- +- return ret; +-} +- +-static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file) +-{ +- struct rcar_du_device *rcdu = dev->dev_private; +- unsigned int i; +- +- for (i = 0; i < rcdu->num_crtcs; ++i) +- rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file); +-} +- + static void rcar_du_lastclose(struct drm_device *dev) + { + struct rcar_du_device *rcdu = dev->dev_private; +@@ -269,11 +184,7 @@ static const struct file_operations rcar_du_fops = { + static struct drm_driver rcar_du_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME + | DRIVER_ATOMIC, +- .load = rcar_du_load, +- .unload = rcar_du_unload, +- .preclose = rcar_du_preclose, + .lastclose = rcar_du_lastclose, +- .set_busid = drm_platform_set_busid, + .get_vblank_counter = drm_vblank_no_hw_counter, + .enable_vblank = rcar_du_enable_vblank, + .disable_vblank = rcar_du_disable_vblank, +@@ -333,18 +244,104 @@ static const struct dev_pm_ops rcar_du_pm_ops = { + * Platform driver + */ + +-static int rcar_du_probe(struct platform_device *pdev) ++static int rcar_du_remove(struct platform_device *pdev) + { +- return drm_platform_init(&rcar_du_driver, pdev); ++ struct rcar_du_device *rcdu = platform_get_drvdata(pdev); ++ struct drm_device *ddev = rcdu->ddev; ++ ++ mutex_lock(&ddev->mode_config.mutex); ++ drm_connector_unplug_all(ddev); ++ mutex_unlock(&ddev->mode_config.mutex); ++ ++ drm_dev_unregister(ddev); ++ ++ if (rcdu->fbdev) ++ drm_fbdev_cma_fini(rcdu->fbdev); ++ ++ drm_kms_helper_poll_fini(ddev); ++ drm_mode_config_cleanup(ddev); ++ ++ drm_dev_unref(ddev); ++ ++ return 0; + } + +-static int rcar_du_remove(struct platform_device *pdev) ++static int rcar_du_probe(struct platform_device *pdev) + { +- struct rcar_du_device *rcdu = platform_get_drvdata(pdev); ++ struct device_node *np = pdev->dev.of_node; ++ struct rcar_du_device *rcdu; ++ struct drm_connector *connector; ++ struct drm_device *ddev; ++ struct resource *mem; ++ int ret; ++ ++ if (np == NULL) { ++ dev_err(&pdev->dev, "no device tree node\n"); ++ return -ENODEV; ++ } ++ ++ /* Allocate and initialize the DRM and R-Car device structures. 
*/ ++ rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL); ++ if (rcdu == NULL) ++ return -ENOMEM; ++ ++ init_waitqueue_head(&rcdu->commit.wait); ++ ++ rcdu->dev = &pdev->dev; ++ rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data; ++ ++ platform_set_drvdata(pdev, rcdu); + +- drm_put_dev(rcdu->ddev); ++ /* I/O resources */ ++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); ++ if (IS_ERR(rcdu->mmio)) ++ ret = PTR_ERR(rcdu->mmio); ++ ++ /* DRM/KMS objects */ ++ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev); ++ if (!ddev) ++ return -ENOMEM; ++ ++ drm_dev_set_unique(ddev, dev_name(&pdev->dev)); ++ ++ rcdu->ddev = ddev; ++ ddev->dev_private = rcdu; ++ ++ ret = rcar_du_modeset_init(rcdu); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret); ++ goto error; ++ } ++ ++ ddev->irq_enabled = 1; ++ ++ /* Register the DRM device with the core and the connectors with ++ * sysfs. ++ */ ++ ret = drm_dev_register(ddev, 0); ++ if (ret) ++ goto error; ++ ++ mutex_lock(&ddev->mode_config.mutex); ++ drm_for_each_connector(connector, ddev) { ++ ret = drm_connector_register(connector); ++ if (ret < 0) ++ break; ++ } ++ mutex_unlock(&ddev->mode_config.mutex); ++ ++ if (ret < 0) ++ goto error; ++ ++ DRM_INFO("Device %s probed\n", dev_name(&pdev->dev)); + + return 0; ++ ++error: ++ rcar_du_remove(pdev); ++ ++ return ret; + } + + static struct platform_driver rcar_du_platform_driver = { +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c +index 96f2eb43713c..6038be93c58d 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c +@@ -55,12 +55,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { + .best_encoder = rcar_du_connector_best_encoder, + }; + +-static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector) +-{ +- drm_connector_unregister(connector); +- drm_connector_cleanup(connector); +-} +- + static enum drm_connector_status + rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force) + { +@@ -79,7 +73,7 @@ static const struct drm_connector_funcs connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .detect = rcar_du_hdmi_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, +- .destroy = rcar_du_hdmi_connector_destroy, ++ .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + }; +@@ -108,9 +102,6 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu, + return ret; + + drm_connector_helper_add(connector, &connector_helper_funcs); +- ret = drm_connector_register(connector); +- if (ret < 0) +- return ret; + + connector->dpms = DRM_MODE_DPMS_OFF; + drm_object_property_set_value(&connector->base, +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c +index ca12e8ca5552..46429c4be8e5 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c +@@ -761,6 +761,13 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) + if (ret < 0) + return ret; + ++ /* Initialize vertical blanking interrupts handling. Start with vblank ++ * disabled for all CRTCs. ++ */ ++ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1); ++ if (ret < 0) ++ return ret; ++ + /* Initialize the groups. 
*/ + num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2); + +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +index 0c43032fc693..e905f5da7aaa 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +@@ -62,12 +62,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { + .best_encoder = rcar_du_connector_best_encoder, + }; + +-static void rcar_du_lvds_connector_destroy(struct drm_connector *connector) +-{ +- drm_connector_unregister(connector); +- drm_connector_cleanup(connector); +-} +- + static enum drm_connector_status + rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force) + { +@@ -79,7 +73,7 @@ static const struct drm_connector_funcs connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .detect = rcar_du_lvds_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, +- .destroy = rcar_du_lvds_connector_destroy, ++ .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + }; +@@ -117,9 +111,6 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, + return ret; + + drm_connector_helper_add(connector, &connector_helper_funcs); +- ret = drm_connector_register(connector); +- if (ret < 0) +- return ret; + + connector->dpms = DRM_MODE_DPMS_OFF; + drm_object_property_set_value(&connector->base, +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +index e0a5d8f93963..9d7e5c99caf6 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +@@ -31,12 +31,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { + .best_encoder = rcar_du_connector_best_encoder, + }; + +-static void rcar_du_vga_connector_destroy(struct drm_connector *connector) +-{ +- drm_connector_unregister(connector); +- drm_connector_cleanup(connector); +-} +- + static enum drm_connector_status + rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) + { +@@ -48,7 +42,7 @@ static const struct drm_connector_funcs connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .detect = rcar_du_vga_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, +- .destroy = rcar_du_vga_connector_destroy, ++ .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + }; +@@ -76,9 +70,6 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, + return ret; + + drm_connector_helper_add(connector, &connector_helper_funcs); +- ret = drm_connector_register(connector); +- if (ret < 0) +- return ret; + + connector->dpms = DRM_MODE_DPMS_OFF; + drm_object_property_set_value(&connector->base, +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +index ecf15cf0c3fd..04fd0f2b6af0 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +@@ -471,7 +471,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) + { +- return capable(CAP_SYS_ADMIN) ? 
: -EINVAL; ++ return -EINVAL; + } + + static int vmw_cmd_ok(struct vmw_private *dev_priv, +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 936960202cf4..11a051bd8a8b 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -2427,6 +2427,7 @@ static const struct hid_device_id hid_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, + #if defined(CONFIG_MOUSE_SYNAPTICS_USB) || defined(CONFIG_MOUSE_SYNAPTICS_USB_MODULE) +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index c7f8b70d15ee..37cbc2ecfc5f 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -777,6 +777,9 @@ + #define USB_VENDOR_ID_PETALYNX 0x18b1 + #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 + ++#define USB_VENDOR_ID_PETZL 0x2122 ++#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234 ++ + #define USB_VENDOR_ID_PHILIPS 0x0471 + #define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617 + +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c +index 9eca4b41fa0a..b7a73f1a8beb 100644 +--- a/drivers/infiniband/core/uverbs_cmd.c ++++ b/drivers/infiniband/core/uverbs_cmd.c +@@ -2287,8 +2287,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, + if (copy_from_user(&cmd, buf, sizeof cmd)) + return -EFAULT; + +- if (cmd.port_num < rdma_start_port(ib_dev) || +- cmd.port_num > rdma_end_port(ib_dev)) ++ if ((cmd.attr_mask & IB_QP_PORT) && ++ (cmd.port_num < rdma_start_port(ib_dev) || ++ cmd.port_num > rdma_end_port(ib_dev))) + return -EINVAL; + + INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, +diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c +index deb89d63a728..e684be1bb7c0 100644 +--- a/drivers/irqchip/irq-keystone.c ++++ b/drivers/irqchip/irq-keystone.c +@@ -19,9 +19,9 @@ + #include + #include + #include ++#include + #include + #include +-#include + #include + #include + #include +@@ -39,6 +39,7 @@ struct keystone_irq_device { + struct irq_domain *irqd; + struct regmap *devctrl_regs; + u32 devctrl_offset; ++ raw_spinlock_t wa_lock; + }; + + static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq) +@@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d) + /* nothing to do here */ + } + +-static void keystone_irq_handler(struct irq_desc *desc) ++static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq) + { +- unsigned int irq = irq_desc_get_irq(desc); +- struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); ++ struct keystone_irq_device *kirq = keystone_irq; ++ unsigned long wa_lock_flags; + unsigned long pending; + int src, virq; + + dev_dbg(kirq->dev, "start irq %d\n", irq); + +- chained_irq_enter(irq_desc_get_chip(desc), desc); +- + pending = keystone_irq_readl(kirq); + keystone_irq_writel(kirq, pending); + +@@ -111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc) + if (!virq) + dev_warn(kirq->dev, "sporious irq detected hwirq %d, virq %d\n", + src, virq); ++ raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags); + generic_handle_irq(virq); ++ raw_spin_unlock_irqrestore(&kirq->wa_lock, ++ wa_lock_flags); + } + } + +- chained_irq_exit(irq_desc_get_chip(desc), desc); +- + 
dev_dbg(kirq->dev, "end irq %d\n", irq); ++ return IRQ_HANDLED; + } + + static int keystone_irq_map(struct irq_domain *h, unsigned int virq, +@@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev) + return -ENODEV; + } + ++ raw_spin_lock_init(&kirq->wa_lock); ++ + platform_set_drvdata(pdev, kirq); + +- irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq); ++ ret = request_irq(kirq->irq, keystone_irq_handler, ++ 0, dev_name(dev), kirq); ++ if (ret) { ++ irq_domain_remove(kirq->irqd); ++ return ret; ++ } + + /* clear all source bits */ + keystone_irq_writel(kirq, ~0x0); +@@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev) + struct keystone_irq_device *kirq = platform_get_drvdata(pdev); + int hwirq; + ++ free_irq(kirq->irq, kirq); ++ + for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++) + irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq)); + +diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c +index 17304705f2cf..05fa9f7af53c 100644 +--- a/drivers/irqchip/irq-mxs.c ++++ b/drivers/irqchip/irq-mxs.c +@@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = { + .irq_ack = icoll_ack_irq, + .irq_mask = icoll_mask_irq, + .irq_unmask = icoll_unmask_irq, ++ .flags = IRQCHIP_MASK_ON_SUSPEND | ++ IRQCHIP_SKIP_SET_WAKE, + }; + + static struct irq_chip asm9260_icoll_chip = { + .irq_ack = icoll_ack_irq, + .irq_mask = asm9260_mask_irq, + .irq_unmask = asm9260_unmask_irq, ++ .flags = IRQCHIP_MASK_ON_SUSPEND | ++ IRQCHIP_SKIP_SET_WAKE, + }; + + asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) +diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c +index 9b856e1890d1..e4c43a17b333 100644 +--- a/drivers/isdn/i4l/isdn_common.c ++++ b/drivers/isdn/i4l/isdn_common.c +@@ -1379,6 +1379,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) + if (arg) { + if (copy_from_user(bname, argp, sizeof(bname) - 1)) + return -EFAULT; ++ bname[sizeof(bname)-1] = 0; + } else + return -EINVAL; + ret = mutex_lock_interruptible(&dev->mtx); +diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c +index aa5dd5668528..dbad5c431bcb 100644 +--- a/drivers/isdn/i4l/isdn_net.c ++++ b/drivers/isdn/i4l/isdn_net.c +@@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm) + char newname[10]; + + if (p) { +- /* Slave-Name MUST not be empty */ +- if (!strlen(p + 1)) ++ /* Slave-Name MUST not be empty or overflow 'newname' */ ++ if (strscpy(newname, p + 1, sizeof(newname)) <= 0) + return NULL; +- strcpy(newname, p + 1); + *p = 0; + /* Master must already exist */ + if (!(n = isdn_net_findif(parm))) +diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c +index 9c1e8adaf4fc..bf3fbd00a091 100644 +--- a/drivers/isdn/i4l/isdn_ppp.c ++++ b/drivers/isdn/i4l/isdn_ppp.c +@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s + id); + return NULL; + } else { +- rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); ++ rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC); + if (!rs) + return NULL; + rs->state = CCPResetIdle; +diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c +index 6a4811f85705..9cf826df89b1 100644 +--- a/drivers/mailbox/mailbox.c ++++ b/drivers/mailbox/mailbox.c +@@ -104,11 +104,14 @@ static void tx_tick(struct mbox_chan *chan, int r) + /* Submit next message */ + msg_submit(chan); + ++ if (!mssg) ++ return; ++ + /* Notify the client */ +- if (mssg && chan->cl->tx_done) ++ if 
(chan->cl->tx_done) + chan->cl->tx_done(chan->cl, mssg, r); + +- if (chan->cl->tx_block) ++ if (r != -ETIME && chan->cl->tx_block) + complete(&chan->tx_complete); + } + +@@ -261,7 +264,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg) + + msg_submit(chan); + +- if (chan->cl->tx_block && chan->active_req) { ++ if (chan->cl->tx_block) { + unsigned long wait; + int ret; + +@@ -272,8 +275,8 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg) + + ret = wait_for_completion_timeout(&chan->tx_complete, wait); + if (ret == 0) { +- t = -EIO; +- tx_tick(chan, -EIO); ++ t = -ETIME; ++ tx_tick(chan, t); + } + } + +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index fc182c4f2619..8f60520c8392 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -5821,6 +5821,8 @@ static void raid5_do_work(struct work_struct *work) + pr_debug("%d stripes handled\n", handled); + + spin_unlock_irq(&conf->device_lock); ++ ++ async_tx_issue_pending_all(); + blk_finish_plug(&plug); + + pr_debug("--- raid5worker inactive\n"); +diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c +index 8001cde1db1e..503135a4f47a 100644 +--- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c ++++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c +@@ -211,7 +211,7 @@ static int s5c73m3_3a_lock(struct s5c73m3 *state, struct v4l2_ctrl *ctrl) + } + + if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS) +- ret = s5c73m3_af_run(state, ~af_lock); ++ ret = s5c73m3_af_run(state, !af_lock); + + return ret; + } +diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c +index 2a9dd460a95f..e1f9e7cebf8f 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/icm.c ++++ b/drivers/net/ethernet/mellanox/mlx4/icm.c +@@ -118,8 +118,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, + if (!buf) + return -ENOMEM; + ++ if (offset_in_page(buf)) { ++ dma_free_coherent(dev, PAGE_SIZE << order, ++ buf, sg_dma_address(mem)); ++ return -ENOMEM; ++ } ++ + sg_set_buf(mem, buf, PAGE_SIZE << order); +- BUG_ON(mem->offset); + sg_dma_len(mem) = PAGE_SIZE << order; + return 0; + } +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c +index 79ef799f88ab..c5ea1018cb47 100644 +--- a/drivers/net/ethernet/realtek/r8169.c ++++ b/drivers/net/ethernet/realtek/r8169.c +@@ -326,6 +326,7 @@ enum cfg_version { + static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, ++ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c +index 480f3dae0780..4296066a7ad3 100644 +--- a/drivers/net/ethernet/renesas/sh_eth.c ++++ b/drivers/net/ethernet/renesas/sh_eth.c +@@ -750,6 +750,7 @@ static struct sh_eth_cpu_data sh7734_data = { + .tsu = 1, + .hw_crc = 1, + .select_mii = 1, ++ .shift_rd0 = 1, + }; + + /* SH7763 */ +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index 0bfbabad4431..1d1e5f7723ab 100644 +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -1442,7 +1442,7 @@ static struct phy_driver genphy_driver[] = { + .phy_id = 0xffffffff, + .phy_id_mask = 
0xffffffff, + .name = "Generic PHY", +- .soft_reset = genphy_soft_reset, ++ .soft_reset = genphy_no_soft_reset, + .config_init = genphy_config_init, + .features = PHY_GBIT_FEATURES | SUPPORTED_MII | + SUPPORTED_AUI | SUPPORTED_FIBRE | +diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c +index f64b25c221e8..cd93220c9b45 100644 +--- a/drivers/net/usb/kaweth.c ++++ b/drivers/net/usb/kaweth.c +@@ -1009,6 +1009,7 @@ static int kaweth_probe( + struct net_device *netdev; + const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + int result = 0; ++ int rv = -EIO; + + dev_dbg(dev, + "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", +@@ -1029,6 +1030,7 @@ static int kaweth_probe( + kaweth = netdev_priv(netdev); + kaweth->dev = udev; + kaweth->net = netdev; ++ kaweth->intf = intf; + + spin_lock_init(&kaweth->device_lock); + init_waitqueue_head(&kaweth->term_wait); +@@ -1048,6 +1050,10 @@ static int kaweth_probe( + /* Download the firmware */ + dev_info(dev, "Downloading firmware...\n"); + kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); ++ if (!kaweth->firmware_buf) { ++ rv = -ENOMEM; ++ goto err_free_netdev; ++ } + if ((result = kaweth_download_firmware(kaweth, + "kaweth/new_code.bin", + 100, +@@ -1139,8 +1145,6 @@ err_fw: + + dev_dbg(dev, "Initializing net device.\n"); + +- kaweth->intf = intf; +- + kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kaweth->tx_urb) + goto err_free_netdev; +@@ -1204,7 +1208,7 @@ err_only_tx: + err_free_netdev: + free_netdev(netdev); + +- return -EIO; ++ return rv; + } + + /**************************************************************** +diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h +index 8f4f6a892581..cfed5808bc4e 100644 +--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h ++++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h +@@ -639,6 +639,9 @@ ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, + struct sk_buff *skb; + u32 cmd_id; + ++ if (!ar->wmi.ops->gen_vdev_spectral_conf) ++ return -EOPNOTSUPP; ++ + skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); +@@ -654,6 +657,9 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, + struct sk_buff *skb; + u32 cmd_id; + ++ if (!ar->wmi.ops->gen_vdev_spectral_enable) ++ return -EOPNOTSUPP; ++ + skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger, + enable); + if (IS_ERR(skb)) +diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c +index bb69a5949aea..85bca557a339 100644 +--- a/drivers/net/wireless/ath/wil6210/main.c ++++ b/drivers/net/wireless/ath/wil6210/main.c +@@ -330,18 +330,19 @@ static void wil_fw_error_worker(struct work_struct *work) + + wil->last_fw_recovery = jiffies; + ++ wil_info(wil, "fw error recovery requested (try %d)...\n", ++ wil->recovery_count); ++ if (!no_fw_recovery) ++ wil->recovery_state = fw_recovery_running; ++ if (wil_wait_for_recovery(wil) != 0) ++ return; ++ + mutex_lock(&wil->mutex); + switch (wdev->iftype) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_MONITOR: +- wil_info(wil, "fw error recovery requested (try %d)...\n", +- wil->recovery_count); +- if (!no_fw_recovery) +- wil->recovery_state = fw_recovery_running; +- if (0 != wil_wait_for_recovery(wil)) +- break; +- ++ /* silent recovery, upper layers will see disconnect */ + __wil_down(wil); + __wil_up(wil); + break; +diff --git a/drivers/nfc/fdp/i2c.c 
b/drivers/nfc/fdp/i2c.c +index 532db28145c7..a5d7332dfce5 100644 +--- a/drivers/nfc/fdp/i2c.c ++++ b/drivers/nfc/fdp/i2c.c +@@ -210,14 +210,14 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id) + struct sk_buff *skb; + int r; + +- client = phy->i2c_dev; +- dev_dbg(&client->dev, "%s\n", __func__); +- + if (!phy || irq != phy->i2c_dev->irq) { + WARN_ON_ONCE(1); + return IRQ_NONE; + } + ++ client = phy->i2c_dev; ++ dev_dbg(&client->dev, "%s\n", __func__); ++ + r = fdp_nci_i2c_read(phy, &skb); + + if (r == -EREMOTEIO) +diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c +index efb2c1ceef98..957234272ef7 100644 +--- a/drivers/nvdimm/btt.c ++++ b/drivers/nvdimm/btt.c +@@ -1205,10 +1205,13 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector, + struct page *page, int rw) + { + struct btt *btt = bdev->bd_disk->private_data; ++ int rc; + +- btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector); +- page_endio(page, rw & WRITE, 0); +- return 0; ++ rc = btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector); ++ if (rc == 0) ++ page_endio(page, rw & WRITE, 0); ++ ++ return rc; + } + + +diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c +index b7971d410b60..74e5360c53f0 100644 +--- a/drivers/nvmem/imx-ocotp.c ++++ b/drivers/nvmem/imx-ocotp.c +@@ -88,7 +88,7 @@ static struct nvmem_config imx_ocotp_nvmem_config = { + + static const struct of_device_id imx_ocotp_dt_ids[] = { + { .compatible = "fsl,imx6q-ocotp", (void *)128 }, +- { .compatible = "fsl,imx6sl-ocotp", (void *)32 }, ++ { .compatible = "fsl,imx6sl-ocotp", (void *)64 }, + { .compatible = "fsl,imx6sx-ocotp", (void *)128 }, + { }, + }; +diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h +index ce129e595b55..5c935847599c 100644 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@ -248,6 +248,7 @@ struct fnic { + struct completion *remove_wait; /* device remove thread blocks */ + + atomic_t in_flight; /* io counter */ ++ bool internal_reset_inprogress; + u32 _reserved; /* fill hole */ + unsigned long state_flags; /* protected by host lock */ + enum fnic_state state; +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c +index 266b909fe854..82e4bc8c11c5 100644 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@ -2533,6 +2533,19 @@ int fnic_host_reset(struct scsi_cmnd *sc) + unsigned long wait_host_tmo; + struct Scsi_Host *shost = sc->device->host; + struct fc_lport *lp = shost_priv(shost); ++ struct fnic *fnic = lport_priv(lp); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (fnic->internal_reset_inprogress == 0) { ++ fnic->internal_reset_inprogress = 1; ++ } else { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ++ "host reset in progress skipping another host reset\n"); ++ return SUCCESS; ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + /* + * If fnic_reset is successful, wait for fabric login to complete +@@ -2553,6 +2566,9 @@ int fnic_host_reset(struct scsi_cmnd *sc) + } + } + ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ fnic->internal_reset_inprogress = 0; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return ret; + } + +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 5b2c37f1e908..9b5367294116 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -4981,15 +4981,14 @@ _base_make_ioc_ready(struct 
MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ static int
+ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+ {
+- int r, i;
++ int r, i, index;
+ unsigned long flags;
+ u32 reply_address;
+ u16 smid;
+ struct _tr_list *delayed_tr, *delayed_tr_next;
+ u8 hide_flag;
+ struct adapter_reply_queue *reply_q;
+- long reply_post_free;
+- u32 reply_post_free_sz, index = 0;
++ Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
+ 
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+ 
+@@ -5061,27 +5060,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+ _base_assign_reply_queues(ioc);
+ 
+ /* initialize Reply Post Free Queue */
+- reply_post_free_sz = ioc->reply_post_queue_depth *
+- sizeof(Mpi2DefaultReplyDescriptor_t);
+- reply_post_free = (long)ioc->reply_post[index].reply_post_free;
++ index = 0;
++ reply_post_free_contig = ioc->reply_post[0].reply_post_free;
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
++ /*
++ * If RDPQ is enabled, switch to the next allocation.
++ * Otherwise advance within the contiguous region.
++ */
++ if (ioc->rdpq_array_enable) {
++ reply_q->reply_post_free =
++ ioc->reply_post[index++].reply_post_free;
++ } else {
++ reply_q->reply_post_free = reply_post_free_contig;
++ reply_post_free_contig += ioc->reply_post_queue_depth;
++ }
++
+ reply_q->reply_post_host_index = 0;
+- reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+- reply_post_free;
+ for (i = 0; i < ioc->reply_post_queue_depth; i++)
+ reply_q->reply_post_free[i].Words =
+ cpu_to_le64(ULLONG_MAX);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_free_queue;
+- /*
+- * If RDPQ is enabled, switch to the next allocation.
+- * Otherwise advance within the contiguous region.
+- */ +- if (ioc->rdpq_array_enable) +- reply_post_free = (long) +- ioc->reply_post[++index].reply_post_free; +- else +- reply_post_free += reply_post_free_sz; + } + skip_init_reply_post_free_queue: + +diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c +index 2b3c25371d76..8175f997e82c 100644 +--- a/drivers/scsi/snic/snic_main.c ++++ b/drivers/scsi/snic/snic_main.c +@@ -584,6 +584,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (!pool) { + SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n"); + ++ ret = -ENOMEM; + goto err_free_res; + } + +@@ -594,6 +595,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (!pool) { + SNIC_HOST_ERR(shost, "max sgl pool creation failed\n"); + ++ ret = -ENOMEM; + goto err_free_dflt_sgl_pool; + } + +@@ -604,6 +606,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (!pool) { + SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n"); + ++ ret = -ENOMEM; + goto err_free_max_sgl_pool; + } + +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c +index 882cd6618cd5..87a0e47eeae6 100644 +--- a/drivers/spi/spi-dw.c ++++ b/drivers/spi/spi-dw.c +@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = { + + static int dw_spi_debugfs_init(struct dw_spi *dws) + { +- dws->debugfs = debugfs_create_dir("dw_spi", NULL); ++ char name[128]; ++ ++ snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev)); ++ dws->debugfs = debugfs_create_dir(name, NULL); + if (!dws->debugfs) + return -ENOMEM; + +diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c +index 8fed55342b0f..b831f08e2769 100644 +--- a/drivers/staging/comedi/comedi_fops.c ++++ b/drivers/staging/comedi/comedi_fops.c +@@ -2901,9 +2901,6 @@ static int __init comedi_init(void) + + comedi_class->dev_groups = comedi_dev_groups; + +- /* XXX requires /proc interface */ +- comedi_proc_init(); +- + /* create devices files for legacy/manual use */ + for (i = 0; i < comedi_num_legacy_minors; i++) { + struct comedi_device *dev; +@@ -2921,6 +2918,9 @@ static int __init comedi_init(void) + mutex_unlock(&dev->mutex); + } + ++ /* XXX requires /proc interface */ ++ comedi_proc_init(); ++ + return 0; + } + module_init(comedi_init); +diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c +index 99285b416308..ee579ba2b59e 100644 +--- a/drivers/usb/gadget/function/f_hid.c ++++ b/drivers/usb/gadget/function/f_hid.c +@@ -539,7 +539,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) + } + status = usb_ep_enable(hidg->out_ep); + if (status < 0) { +- ERROR(cdev, "Enable IN endpoint FAILED!\n"); ++ ERROR(cdev, "Enable OUT endpoint FAILED!\n"); + goto fail; + } + hidg->out_ep->driver_data = hidg; +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c +index 830e2fd47642..b31b84f56e8f 100644 +--- a/drivers/vfio/pci/vfio_pci.c ++++ b/drivers/vfio/pci/vfio_pci.c +@@ -902,6 +902,10 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) + return ret; + + vdev->barmap[index] = pci_iomap(pdev, index, 0); ++ if (!vdev->barmap[index]) { ++ pci_release_selected_regions(pdev, 1 << index); ++ return -ENOMEM; ++ } + } + + vma->vm_private_data = vdev; +diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c +index 210db24d2204..4d39f7959adf 100644 +--- a/drivers/vfio/pci/vfio_pci_rdwr.c ++++ b/drivers/vfio/pci/vfio_pci_rdwr.c +@@ -190,7 +190,10 @@ ssize_t 
vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, + if (!vdev->has_vga) + return -EINVAL; + +- switch (pos) { ++ if (pos > 0xbfffful) ++ return -EINVAL; ++ ++ switch ((u32)pos) { + case 0xa0000 ... 0xbffff: + count = min(count, (size_t)(0xc0000 - pos)); + iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1); +diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c +index 07675d6f323e..d4530b54479c 100644 +--- a/drivers/video/fbdev/cobalt_lcdfb.c ++++ b/drivers/video/fbdev/cobalt_lcdfb.c +@@ -350,6 +350,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev) + info->screen_size = resource_size(res); + info->screen_base = devm_ioremap(&dev->dev, res->start, + info->screen_size); ++ if (!info->screen_base) { ++ framebuffer_release(info); ++ return -ENOMEM; ++ } ++ + info->fbops = &cobalt_lcd_fbops; + info->fix = cobalt_lcdfb_fix; + info->fix.smem_start = res->start; +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index a61926cb01c0..bebd6517355d 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -7521,11 +7521,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode, + * within our reservation, otherwise we need to adjust our inode + * counter appropriately. + */ +- if (dio_data->outstanding_extents) { ++ if (dio_data->outstanding_extents >= num_extents) { + dio_data->outstanding_extents -= num_extents; + } else { ++ /* ++ * If dio write length has been split due to no large enough ++ * contiguous space, we need to compensate our inode counter ++ * appropriately. ++ */ ++ u64 num_needed = num_extents - dio_data->outstanding_extents; ++ + spin_lock(&BTRFS_I(inode)->lock); +- BTRFS_I(inode)->outstanding_extents += num_extents; ++ BTRFS_I(inode)->outstanding_extents += num_needed; + spin_unlock(&BTRFS_I(inode)->lock); + } + } +diff --git a/fs/dcache.c b/fs/dcache.c +index 3000cbb54949..3ed642e0a0c2 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -269,6 +269,33 @@ static inline int dname_external(const struct dentry *dentry) + return dentry->d_name.name != dentry->d_iname; + } + ++void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry) ++{ ++ spin_lock(&dentry->d_lock); ++ if (unlikely(dname_external(dentry))) { ++ struct external_name *p = external_name(dentry); ++ atomic_inc(&p->u.count); ++ spin_unlock(&dentry->d_lock); ++ name->name = p->name; ++ } else { ++ memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN); ++ spin_unlock(&dentry->d_lock); ++ name->name = name->inline_name; ++ } ++} ++EXPORT_SYMBOL(take_dentry_name_snapshot); ++ ++void release_dentry_name_snapshot(struct name_snapshot *name) ++{ ++ if (unlikely(name->name != name->inline_name)) { ++ struct external_name *p; ++ p = container_of(name->name, struct external_name, name[0]); ++ if (unlikely(atomic_dec_and_test(&p->u.count))) ++ kfree_rcu(p, u.head); ++ } ++} ++EXPORT_SYMBOL(release_dentry_name_snapshot); ++ + static inline void __d_set_inode_and_type(struct dentry *dentry, + struct inode *inode, + unsigned type_flags) +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c +index 0f5d05bf2131..e49ba072bd64 100644 +--- a/fs/debugfs/inode.c ++++ b/fs/debugfs/inode.c +@@ -669,7 +669,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, + { + int error; + struct dentry *dentry = NULL, *trap; +- const char *old_name; ++ struct name_snapshot old_name; + + trap = lock_rename(new_dir, old_dir); + /* Source or destination directories don't exist? 
*/ +@@ -684,19 +684,19 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, + if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry)) + goto exit; + +- old_name = fsnotify_oldname_init(old_dentry->d_name.name); ++ take_dentry_name_snapshot(&old_name, old_dentry); + + error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir), + dentry); + if (error) { +- fsnotify_oldname_free(old_name); ++ release_dentry_name_snapshot(&old_name); + goto exit; + } + d_move(old_dentry, dentry); +- fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name, ++ fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name, + d_is_dir(old_dentry), + NULL, old_dentry); +- fsnotify_oldname_free(old_name); ++ release_dentry_name_snapshot(&old_name); + unlock_rename(new_dir, old_dir); + dput(dentry); + return old_dentry; +diff --git a/fs/namei.c b/fs/namei.c +index 0b0acba72a71..3f96ae087488 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -4179,11 +4179,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, + { + int error; + bool is_dir = d_is_dir(old_dentry); +- const unsigned char *old_name; + struct inode *source = old_dentry->d_inode; + struct inode *target = new_dentry->d_inode; + bool new_is_dir = false; + unsigned max_links = new_dir->i_sb->s_max_links; ++ struct name_snapshot old_name; + + /* + * Check source == target. +@@ -4237,7 +4237,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, + if (error) + return error; + +- old_name = fsnotify_oldname_init(old_dentry->d_name.name); ++ take_dentry_name_snapshot(&old_name, old_dentry); + dget(new_dentry); + if (!is_dir || (flags & RENAME_EXCHANGE)) + lock_two_nondirectories(source, target); +@@ -4298,14 +4298,14 @@ out: + mutex_unlock(&target->i_mutex); + dput(new_dentry); + if (!error) { +- fsnotify_move(old_dir, new_dir, old_name, is_dir, ++ fsnotify_move(old_dir, new_dir, old_name.name, is_dir, + !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry); + if (flags & RENAME_EXCHANGE) { + fsnotify_move(new_dir, old_dir, old_dentry->d_name.name, + new_is_dir, NULL, new_dentry); + } + } +- fsnotify_oldname_free(old_name); ++ release_dentry_name_snapshot(&old_name); + + return error; + } +diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c +index db39de2dd4cb..a64adc2fced9 100644 +--- a/fs/notify/fsnotify.c ++++ b/fs/notify/fsnotify.c +@@ -104,16 +104,20 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) + if (unlikely(!fsnotify_inode_watches_children(p_inode))) + __fsnotify_update_child_dentry_flags(p_inode); + else if (p_inode->i_fsnotify_mask & mask) { ++ struct name_snapshot name; ++ + /* we are notifying a parent so come up with the new mask which + * specifies these are events which came from a child. 
*/ + mask |= FS_EVENT_ON_CHILD; + ++ take_dentry_name_snapshot(&name, dentry); + if (path) + ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH, +- dentry->d_name.name, 0); ++ name.name, 0); + else + ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE, +- dentry->d_name.name, 0); ++ name.name, 0); ++ release_dentry_name_snapshot(&name); + } + + dput(parent); +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c +index 905caba36529..59d93acc29c7 100644 +--- a/fs/pstore/ram.c ++++ b/fs/pstore/ram.c +@@ -413,7 +413,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, + for (i = 0; i < cxt->max_dump_cnt; i++) { + cxt->przs[i] = persistent_ram_new(*paddr, cxt->record_size, 0, + &cxt->ecc_info, +- cxt->memtype); ++ cxt->memtype, 0); + if (IS_ERR(cxt->przs[i])) { + err = PTR_ERR(cxt->przs[i]); + dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n", +@@ -450,7 +450,8 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt, + return -ENOMEM; + } + +- *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype); ++ *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, ++ cxt->memtype, 0); + if (IS_ERR(*prz)) { + int err = PTR_ERR(*prz); + +diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c +index 364d2dffe5a6..27300533c2dd 100644 +--- a/fs/pstore/ram_core.c ++++ b/fs/pstore/ram_core.c +@@ -47,16 +47,15 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz) + return atomic_read(&prz->buffer->start); + } + +-static DEFINE_RAW_SPINLOCK(buffer_lock); +- + /* increase and wrap the start pointer, returning the old value */ + static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a) + { + int old; + int new; +- unsigned long flags; ++ unsigned long flags = 0; + +- raw_spin_lock_irqsave(&buffer_lock, flags); ++ if (!(prz->flags & PRZ_FLAG_NO_LOCK)) ++ raw_spin_lock_irqsave(&prz->buffer_lock, flags); + + old = atomic_read(&prz->buffer->start); + new = old + a; +@@ -64,7 +63,8 @@ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a) + new -= prz->buffer_size; + atomic_set(&prz->buffer->start, new); + +- raw_spin_unlock_irqrestore(&buffer_lock, flags); ++ if (!(prz->flags & PRZ_FLAG_NO_LOCK)) ++ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags); + + return old; + } +@@ -74,9 +74,10 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a) + { + size_t old; + size_t new; +- unsigned long flags; ++ unsigned long flags = 0; + +- raw_spin_lock_irqsave(&buffer_lock, flags); ++ if (!(prz->flags & PRZ_FLAG_NO_LOCK)) ++ raw_spin_lock_irqsave(&prz->buffer_lock, flags); + + old = atomic_read(&prz->buffer->size); + if (old == prz->buffer_size) +@@ -88,7 +89,8 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a) + atomic_set(&prz->buffer->size, new); + + exit: +- raw_spin_unlock_irqrestore(&buffer_lock, flags); ++ if (!(prz->flags & PRZ_FLAG_NO_LOCK)) ++ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags); + } + + static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz, +@@ -448,6 +450,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig, + prz->buffer->sig); + } + ++ /* Rewind missing or invalid memory area. 
*/ + prz->buffer->sig = sig; + persistent_ram_zap(prz); + +@@ -474,7 +477,7 @@ void persistent_ram_free(struct persistent_ram_zone *prz) + + struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, + u32 sig, struct persistent_ram_ecc_info *ecc_info, +- unsigned int memtype) ++ unsigned int memtype, u32 flags) + { + struct persistent_ram_zone *prz; + int ret = -ENOMEM; +@@ -485,6 +488,10 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, + goto err; + } + ++ /* Initialize general buffer state. */ ++ raw_spin_lock_init(&prz->buffer_lock); ++ prz->flags = flags; ++ + ret = persistent_ram_buffer_map(start, size, prz, memtype); + if (ret) + goto err; +diff --git a/fs/seq_file.c b/fs/seq_file.c +index d672e2fec459..6dc4296eed62 100644 +--- a/fs/seq_file.c ++++ b/fs/seq_file.c +@@ -72,9 +72,10 @@ int seq_open(struct file *file, const struct seq_operations *op) + + mutex_init(&p->lock); + p->op = op; +-#ifdef CONFIG_USER_NS +- p->user_ns = file->f_cred->user_ns; +-#endif ++ ++ // No refcounting: the lifetime of 'p' is constrained ++ // to the lifetime of the file. ++ p->file = file; + + /* + * Wrappers around seq_open(e.g. swaps_open) need to be +diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c +index 187b80267ff9..a9063ac50c4e 100644 +--- a/fs/xfs/xfs_aops.c ++++ b/fs/xfs/xfs_aops.c +@@ -1426,6 +1426,26 @@ __xfs_get_blocks( + if (error) + goto out_unlock; + ++ /* ++ * The only time we can ever safely find delalloc blocks on direct I/O ++ * is a dio write to post-eof speculative preallocation. All other ++ * scenarios are indicative of a problem or misuse (such as mixing ++ * direct and mapped I/O). ++ * ++ * The file may be unmapped by the time we get here so we cannot ++ * reliably fail the I/O based on mapping. Instead, fail the I/O if this ++ * is a read or a write within eof. Otherwise, carry on but warn as a ++ * precuation if the file happens to be mapped. 
++ */
++ if (direct && imap.br_startblock == DELAYSTARTBLOCK) {
++ if (!create || offset < i_size_read(VFS_I(ip))) {
++ WARN_ON_ONCE(1);
++ error = -EIO;
++ goto out_unlock;
++ }
++ WARN_ON_ONCE(mapping_mapped(VFS_I(ip)->i_mapping));
++ }
++
+ /* for DAX, we convert unwritten extents directly */
+ if (create &&
+ (!nimaps ||
+@@ -1525,7 +1545,6 @@ __xfs_get_blocks(
+ set_buffer_new(bh_result);
+ 
+ if (imap.br_startblock == DELAYSTARTBLOCK) {
+- BUG_ON(direct);
+ if (create) {
+ set_buffer_uptodate(bh_result);
+ set_buffer_mapped(bh_result);
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 8d7151eb6ceb..d516847e0fae 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -615,5 +615,11 @@ static inline struct inode *d_real_inode(struct dentry *dentry)
+ return d_backing_inode(d_real(dentry));
+ }
+ 
++struct name_snapshot {
++ const char *name;
++ char inline_name[DNAME_INLINE_LEN];
++};
++void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
++void release_dentry_name_snapshot(struct name_snapshot *);
+ 
+ #endif /* __LINUX_DCACHE_H */
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index 7ee1774edee5..a7789559078b 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -310,35 +310,4 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
+ }
+ }
+ 
+-#if defined(CONFIG_FSNOTIFY) /* notify helpers */
+-
+-/*
+- * fsnotify_oldname_init - save off the old filename before we change it
+- */
+-static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
+-{
+- return kstrdup(name, GFP_KERNEL);
+-}
+-
+-/*
+- * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
+- */
+-static inline void fsnotify_oldname_free(const unsigned char *old_name)
+-{
+- kfree(old_name);
+-}
+-
+-#else /* CONFIG_FSNOTIFY */
+-
+-static inline const char *fsnotify_oldname_init(const unsigned char *name)
+-{
+- return NULL;
+-}
+-
+-static inline void fsnotify_oldname_free(const unsigned char *old_name)
+-{
+-}
+-
+-#endif /* CONFIG_FSNOTIFY */
+-
+ #endif /* _LINUX_FS_NOTIFY_H */
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 05fde31b6dc6..b64825d6ad26 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -785,6 +785,10 @@ int genphy_read_status(struct phy_device *phydev);
+ int genphy_suspend(struct phy_device *phydev);
+ int genphy_resume(struct phy_device *phydev);
+ int genphy_soft_reset(struct phy_device *phydev);
++static inline int genphy_no_soft_reset(struct phy_device *phydev)
++{
++ return 0;
++}
+ void phy_driver_unregister(struct phy_driver *drv);
+ void phy_drivers_unregister(struct phy_driver *drv, int n);
+ int phy_driver_register(struct phy_driver *new_driver);
+diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
+index 9c9d6c154c8e..6f4520a82197 100644
+--- a/include/linux/pstore_ram.h
++++ b/include/linux/pstore_ram.h
+@@ -23,6 +23,13 @@
+ #include 
+ #include 
+ 
++/*
++ * Choose whether access to the RAM zone requires locking or not. If a zone
++ * can be written to from different CPUs like with ftrace for example, then
++ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
++ */ ++#define PRZ_FLAG_NO_LOCK BIT(0) ++ + struct persistent_ram_buffer; + struct rs_control; + +@@ -39,6 +46,8 @@ struct persistent_ram_zone { + void *vaddr; + struct persistent_ram_buffer *buffer; + size_t buffer_size; ++ u32 flags; ++ raw_spinlock_t buffer_lock; + + /* ECC correction */ + char *par_buffer; +@@ -54,7 +63,7 @@ struct persistent_ram_zone { + + struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, + u32 sig, struct persistent_ram_ecc_info *ecc_info, +- unsigned int memtype); ++ unsigned int memtype, u32 flags); + void persistent_ram_free(struct persistent_ram_zone *prz); + void persistent_ram_zap(struct persistent_ram_zone *prz); + +diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h +index dde00defbaa5..f3d45dd42695 100644 +--- a/include/linux/seq_file.h ++++ b/include/linux/seq_file.h +@@ -7,13 +7,10 @@ + #include + #include + #include ++#include ++#include + + struct seq_operations; +-struct file; +-struct path; +-struct inode; +-struct dentry; +-struct user_namespace; + + struct seq_file { + char *buf; +@@ -27,9 +24,7 @@ struct seq_file { + struct mutex lock; + const struct seq_operations *op; + int poll_event; +-#ifdef CONFIG_USER_NS +- struct user_namespace *user_ns; +-#endif ++ const struct file *file; + void *private; + }; + +@@ -147,7 +142,7 @@ int seq_release_private(struct inode *, struct file *); + static inline struct user_namespace *seq_user_ns(struct seq_file *seq) + { + #ifdef CONFIG_USER_NS +- return seq->user_ns; ++ return seq->file->f_cred->user_ns; + #else + extern struct user_namespace init_user_ns; + return &init_user_ns; +diff --git a/kernel/resource.c b/kernel/resource.c +index 249b1eb1e6e1..a4a94e700fb9 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -105,16 +105,25 @@ static int r_show(struct seq_file *m, void *v) + { + struct resource *root = m->private; + struct resource *r = v, *p; ++ unsigned long long start, end; + int width = root->end < 0x10000 ? 4 : 8; + int depth; + + for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent) + if (p->parent == root) + break; ++ ++ if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) { ++ start = r->start; ++ end = r->end; ++ } else { ++ start = end = 0; ++ } ++ + seq_printf(m, "%*s%0*llx-%0*llx : %s\n", + depth * 2, "", +- width, (unsigned long long) r->start, +- width, (unsigned long long) r->end, ++ width, start, ++ width, end, + r->name ? 
r->name : ""); + return 0; + } +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index c436426a80dd..dece705b7f8c 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -5553,7 +5553,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) + + case CPU_UP_PREPARE: + rq->calc_load_update = calc_load_update; +- account_reset_rq(rq); + break; + + case CPU_ONLINE: +@@ -8253,11 +8252,20 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + if (IS_ERR(tg)) + return ERR_PTR(-ENOMEM); + +- sched_online_group(tg, parent); +- + return &tg->css; + } + ++/* Expose task group only after completing cgroup initialization */ ++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) ++{ ++ struct task_group *tg = css_tg(css); ++ struct task_group *parent = css_tg(css->parent); ++ ++ if (parent) ++ sched_online_group(tg, parent); ++ return 0; ++} ++ + static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) + { + struct task_group *tg = css_tg(css); +@@ -8632,6 +8640,7 @@ static struct cftype cpu_files[] = { + + struct cgroup_subsys cpu_cgrp_subsys = { + .css_alloc = cpu_cgroup_css_alloc, ++ .css_online = cpu_cgroup_css_online, + .css_released = cpu_cgroup_css_released, + .css_free = cpu_cgroup_css_free, + .fork = cpu_cgroup_fork, +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 4e5db65d1aab..55d92a1ca070 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1770,16 +1770,3 @@ static inline u64 irq_time_read(int cpu) + } + #endif /* CONFIG_64BIT */ + #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ +- +-static inline void account_reset_rq(struct rq *rq) +-{ +-#ifdef CONFIG_IRQ_TIME_ACCOUNTING +- rq->prev_irq_time = 0; +-#endif +-#ifdef CONFIG_PARAVIRT +- rq->prev_steal_time = 0; +-#endif +-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING +- rq->prev_steal_time_rq = 0; +-#endif +-} +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c +index e20ae2d3c498..5e4199d5a388 100644 +--- a/net/8021q/vlan.c ++++ b/net/8021q/vlan.c +@@ -292,6 +292,10 @@ static void vlan_sync_address(struct net_device *dev, + if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) + return; + ++ /* vlan continues to inherit address of lower device */ ++ if (vlan_dev_inherit_address(vlandev, dev)) ++ goto out; ++ + /* vlan address was different from the old address and is equal to + * the new address */ + if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && +@@ -304,6 +308,7 @@ static void vlan_sync_address(struct net_device *dev, + !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) + dev_uc_add(dev, vlandev->dev_addr); + ++out: + ether_addr_copy(vlan->real_dev_addr, dev->dev_addr); + } + +diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h +index 9d010a09ab98..cc1557978066 100644 +--- a/net/8021q/vlan.h ++++ b/net/8021q/vlan.h +@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev, + void vlan_setup(struct net_device *dev); + int register_vlan_dev(struct net_device *dev); + void unregister_vlan_dev(struct net_device *dev, struct list_head *head); ++bool vlan_dev_inherit_address(struct net_device *dev, ++ struct net_device *real_dev); + + static inline u32 vlan_get_ingress_priority(struct net_device *dev, + u16 vlan_tci) +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c +index fded86508117..ca4dc9031073 100644 +--- a/net/8021q/vlan_dev.c ++++ b/net/8021q/vlan_dev.c +@@ -244,6 +244,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) + strncpy(result, vlan_dev_priv(dev)->real_dev->name, 
23); + } + ++bool vlan_dev_inherit_address(struct net_device *dev, ++ struct net_device *real_dev) ++{ ++ if (dev->addr_assign_type != NET_ADDR_STOLEN) ++ return false; ++ ++ ether_addr_copy(dev->dev_addr, real_dev->dev_addr); ++ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); ++ return true; ++} ++ + static int vlan_dev_open(struct net_device *dev) + { + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); +@@ -254,7 +265,8 @@ static int vlan_dev_open(struct net_device *dev) + !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) + return -ENETDOWN; + +- if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) { ++ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) && ++ !vlan_dev_inherit_address(dev, real_dev)) { + err = dev_uc_add(real_dev, dev->dev_addr); + if (err < 0) + goto out; +@@ -558,8 +570,10 @@ static int vlan_dev_init(struct net_device *dev) + /* ipv6 shared card related stuff */ + dev->dev_id = real_dev->dev_id; + +- if (is_zero_ether_addr(dev->dev_addr)) +- eth_hw_addr_inherit(dev, real_dev); ++ if (is_zero_ether_addr(dev->dev_addr)) { ++ ether_addr_copy(dev->dev_addr, real_dev->dev_addr); ++ dev->addr_assign_type = NET_ADDR_STOLEN; ++ } + if (is_zero_ether_addr(dev->broadcast)) + memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); + +diff --git a/net/core/dev.c b/net/core/dev.c +index dc5d3d546150..4b0853194a03 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -2550,9 +2550,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment); + static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) + { + if (tx_path) +- return skb->ip_summed != CHECKSUM_PARTIAL; +- else +- return skb->ip_summed == CHECKSUM_NONE; ++ return skb->ip_summed != CHECKSUM_PARTIAL && ++ skb->ip_summed != CHECKSUM_NONE; ++ ++ return skb->ip_summed == CHECKSUM_NONE; + } + + /** +@@ -2571,11 +2572,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) + struct sk_buff *__skb_gso_segment(struct sk_buff *skb, + netdev_features_t features, bool tx_path) + { ++ struct sk_buff *segs; ++ + if (unlikely(skb_needs_check(skb, tx_path))) { + int err; + +- skb_warn_bad_offload(skb); +- ++ /* We're going to init ->check field in TCP or UDP header */ + err = skb_cow_head(skb, 0); + if (err < 0) + return ERR_PTR(err); +@@ -2590,7 +2592,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, + skb_reset_mac_header(skb); + skb_reset_mac_len(skb); + +- return skb_mac_gso_segment(skb, features); ++ segs = skb_mac_gso_segment(skb, features); ++ ++ if (unlikely(skb_needs_check(skb, tx_path))) ++ skb_warn_bad_offload(skb); ++ ++ return segs; + } + EXPORT_SYMBOL(__skb_gso_segment); + +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 7d339fc1057f..150b4923fb72 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1361,7 +1361,7 @@ emsgsize: + */ + + cork->length += length; +- if (((length > mtu) || ++ if ((((length + fragheaderlen) > mtu) || + (skb && skb_is_gso(skb))) && + (sk->sk_protocol == IPPROTO_UDP) && + (rt->dst.dev->features & NETIF_F_UFO) && +diff --git a/net/key/af_key.c b/net/key/af_key.c +index d8d95b6415e4..2e1050ec2cf0 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -63,6 +63,7 @@ struct pfkey_sock { + } u; + struct sk_buff *skb; + } dump; ++ struct mutex dump_lock; + }; + + static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len, +@@ -143,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol, + { + struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); + struct sock *sk; ++ struct pfkey_sock 
*pfk; + int err; + + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) +@@ -157,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol, + if (sk == NULL) + goto out; + ++ pfk = pfkey_sk(sk); ++ mutex_init(&pfk->dump_lock); ++ + sock->ops = &pfkey_ops; + sock_init_data(sock, sk); + +@@ -285,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) + struct sadb_msg *hdr; + int rc; + ++ mutex_lock(&pfk->dump_lock); ++ if (!pfk->dump.dump) { ++ rc = 0; ++ goto out; ++ } ++ + rc = pfk->dump.dump(pfk); +- if (rc == -ENOBUFS) +- return 0; ++ if (rc == -ENOBUFS) { ++ rc = 0; ++ goto out; ++ } + + if (pfk->dump.skb) { +- if (!pfkey_can_dump(&pfk->sk)) +- return 0; ++ if (!pfkey_can_dump(&pfk->sk)) { ++ rc = 0; ++ goto out; ++ } + + hdr = (struct sadb_msg *) pfk->dump.skb->data; + hdr->sadb_msg_seq = 0; +@@ -302,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) + } + + pfkey_terminate_dump(pfk); ++ ++out: ++ mutex_unlock(&pfk->dump_lock); + return rc; + } + +@@ -1806,19 +1824,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms + struct xfrm_address_filter *filter = NULL; + struct pfkey_sock *pfk = pfkey_sk(sk); + +- if (pfk->dump.dump != NULL) ++ mutex_lock(&pfk->dump_lock); ++ if (pfk->dump.dump != NULL) { ++ mutex_unlock(&pfk->dump_lock); + return -EBUSY; ++ } + + proto = pfkey_satype2proto(hdr->sadb_msg_satype); +- if (proto == 0) ++ if (proto == 0) { ++ mutex_unlock(&pfk->dump_lock); + return -EINVAL; ++ } + + if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { + struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; + + filter = kmalloc(sizeof(*filter), GFP_KERNEL); +- if (filter == NULL) ++ if (filter == NULL) { ++ mutex_unlock(&pfk->dump_lock); + return -ENOMEM; ++ } + + memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr, + sizeof(xfrm_address_t)); +@@ -1834,6 +1859,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms + pfk->dump.dump = pfkey_dump_sa; + pfk->dump.done = pfkey_dump_sa_done; + xfrm_state_walk_init(&pfk->dump.u.state, proto, filter); ++ mutex_unlock(&pfk->dump_lock); + + return pfkey_do_dump(pfk); + } +@@ -2693,14 +2719,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb + { + struct pfkey_sock *pfk = pfkey_sk(sk); + +- if (pfk->dump.dump != NULL) ++ mutex_lock(&pfk->dump_lock); ++ if (pfk->dump.dump != NULL) { ++ mutex_unlock(&pfk->dump_lock); + return -EBUSY; ++ } + + pfk->dump.msg_version = hdr->sadb_msg_version; + pfk->dump.msg_portid = hdr->sadb_msg_pid; + pfk->dump.dump = pfkey_dump_sp; + pfk->dump.done = pfkey_dump_sp_done; + xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN); ++ mutex_unlock(&pfk->dump_lock); + + return pfkey_do_dump(pfk); + } +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 8a0fdd870395..77055a362041 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -1216,7 +1216,7 @@ static inline int policy_to_flow_dir(int dir) + } + + static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, +- const struct flowi *fl) ++ const struct flowi *fl, u16 family) + { + struct xfrm_policy *pol; + struct net *net = sock_net(sk); +@@ -1225,8 +1225,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, + read_lock_bh(&net->xfrm.xfrm_policy_lock); + pol = rcu_dereference(sk->sk_policy[dir]); + if (pol != NULL) { +- bool match = xfrm_selector_match(&pol->selector, fl, +- sk->sk_family); ++ bool match = 
xfrm_selector_match(&pol->selector, fl, family); + int err = 0; + + if (match) { +@@ -2174,7 +2173,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, + sk = sk_const_to_full_sk(sk); + if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { + num_pols = 1; +- pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); ++ pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family); + err = xfrm_expand_policies(fl, family, pols, + &num_pols, &num_xfrms); + if (err < 0) +@@ -2453,7 +2452,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, + pol = NULL; + sk = sk_to_full_sk(sk); + if (sk && sk->sk_policy[dir]) { +- pol = xfrm_sk_policy_lookup(sk, dir, &fl); ++ pol = xfrm_sk_policy_lookup(sk, dir, &fl, family); + if (IS_ERR(pol)) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); + return 0; +diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c +index c1b87c5800b1..b3fddba4c084 100644 +--- a/sound/soc/codecs/nau8825.c ++++ b/sound/soc/codecs/nau8825.c +@@ -936,7 +936,8 @@ static void nau8825_fll_apply(struct nau8825 *nau8825, + NAU8825_FLL_INTEGER_MASK, fll_param->fll_int); + /* FLL pre-scaler */ + regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL4, +- NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div); ++ NAU8825_FLL_REF_DIV_MASK, ++ fll_param->clk_ref_div << NAU8825_FLL_REF_DIV_SFT); + /* select divided VCO input */ + regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5, + NAU8825_FLL_FILTER_SW_MASK, 0x0000); +diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h +index dff8edb83bfd..a0b220726a63 100644 +--- a/sound/soc/codecs/nau8825.h ++++ b/sound/soc/codecs/nau8825.h +@@ -114,7 +114,8 @@ + #define NAU8825_FLL_INTEGER_MASK (0x3ff << 0) + + /* FLL4 (0x07) */ +-#define NAU8825_FLL_REF_DIV_MASK (0x3 << 10) ++#define NAU8825_FLL_REF_DIV_SFT 10 ++#define NAU8825_FLL_REF_DIV_MASK (0x3 << NAU8825_FLL_REF_DIV_SFT) + + /* FLL5 (0x08) */ + #define NAU8825_FLL_FILTER_SW_MASK (0x1 << 14) +diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c +index a564759845f9..5a3f544bb3a8 100644 +--- a/sound/soc/codecs/tlv320aic3x.c ++++ b/sound/soc/codecs/tlv320aic3x.c +@@ -126,6 +126,16 @@ static const struct reg_default aic3x_reg[] = { + { 108, 0x00 }, { 109, 0x00 }, + }; + ++static bool aic3x_volatile_reg(struct device *dev, unsigned int reg) ++{ ++ switch (reg) { ++ case AIC3X_RESET: ++ return true; ++ default: ++ return false; ++ } ++} ++ + static const struct regmap_config aic3x_regmap = { + .reg_bits = 8, + .val_bits = 8, +@@ -133,6 +143,9 @@ static const struct regmap_config aic3x_regmap = { + .max_register = DAC_ICC_ADJ, + .reg_defaults = aic3x_reg, + .num_reg_defaults = ARRAY_SIZE(aic3x_reg), ++ ++ .volatile_reg = aic3x_volatile_reg, ++ + .cache_type = REGCACHE_RBTREE, + }; + +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 65b936e251ea..a1e605bbc465 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -2073,9 +2073,11 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) + break; + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: +- case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP; + break; ++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ++ fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED; ++ break; + } + + out: +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c +index be1f511e4f54..ae2981460cd8 100644 +--- a/sound/usb/endpoint.c ++++ b/sound/usb/endpoint.c +@@ -384,6 +384,9 @@ static void 
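The nau8825 hunks above fix a classic register-field bug: the divider value was applied under its mask without first being shifted into the field position, so it was silently masked away. A stand-alone sketch of the read-modify-write pattern, with simulated register values rather than real regmap I/O:

    #include <stdio.h>
    #include <stdint.h>

    #define FLL_REF_DIV_SFT  10
    #define FLL_REF_DIV_MASK (0x3u << FLL_REF_DIV_SFT)

    static uint16_t update_bits(uint16_t reg, uint16_t mask, uint16_t val)
    {
        return (uint16_t)((reg & ~mask) | (val & mask));
    }

    int main(void)
    {
        uint16_t reg = 0;
        uint16_t div = 2;            /* desired divider field value */

        /* Buggy: the unshifted value lands in bits 0-1 and is masked away. */
        printf("unshifted: 0x%04x\n", update_bits(reg, FLL_REF_DIV_MASK, div));
        /* Fixed: shift into the field position first. */
        printf("shifted:   0x%04x\n",
               update_bits(reg, FLL_REF_DIV_MASK, div << FLL_REF_DIV_SFT));
        return 0;
    }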
snd_complete_urb(struct urb *urb) + if (unlikely(atomic_read(&ep->chip->shutdown))) + goto exit_clear; + ++ if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags))) ++ goto exit_clear; ++ + if (usb_pipeout(ep->pipe)) { + retire_outbound_urb(ep, ctx); + /* can be stopped during retire callback */ +diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c +index f1ce60065258..ec30c2fcbac0 100644 +--- a/tools/lib/traceevent/plugin_sched_switch.c ++++ b/tools/lib/traceevent/plugin_sched_switch.c +@@ -111,7 +111,7 @@ static int sched_switch_handler(struct trace_seq *s, + trace_seq_printf(s, "%lld ", val); + + if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0) +- trace_seq_printf(s, "[%lld] ", val); ++ trace_seq_printf(s, "[%d] ", (int) val); + + if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0) + write_state(s, val); +@@ -129,7 +129,7 @@ static int sched_switch_handler(struct trace_seq *s, + trace_seq_printf(s, "%lld", val); + + if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0) +- trace_seq_printf(s, " [%lld]", val); ++ trace_seq_printf(s, " [%d]", (int) val); + + return 0; + } +diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf +index 929a32ba15f5..b67e006d56cc 100644 +--- a/tools/perf/Makefile.perf ++++ b/tools/perf/Makefile.perf +@@ -563,9 +563,9 @@ install-tests: all install-gtk + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \ + $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' + +-install-bin: install-tools install-tests ++install-bin: install-tools install-tests install-traceevent-plugins + +-install: install-bin try-install-man install-traceevent-plugins ++install: install-bin try-install-man + + install-python_ext: + $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)' +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +index 67282a759496..eeeae0629ad3 100644 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +@@ -110,6 +110,7 @@ struct intel_pt_decoder { + bool have_tma; + bool have_cyc; + bool fixup_last_mtc; ++ bool have_last_ip; + uint64_t pos; + uint64_t last_ip; + uint64_t ip; +@@ -145,8 +146,6 @@ struct intel_pt_decoder { + bool have_calc_cyc_to_tsc; + int exec_mode; + unsigned int insn_bytes; +- uint64_t sign_bit; +- uint64_t sign_bits; + uint64_t period; + enum intel_pt_period_type period_type; + uint64_t tot_insn_cnt; +@@ -214,9 +213,6 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params) + decoder->data = params->data; + decoder->return_compression = params->return_compression; + +- decoder->sign_bit = (uint64_t)1 << 47; +- decoder->sign_bits = ~(((uint64_t)1 << 48) - 1); +- + decoder->period = params->period; + decoder->period_type = params->period_type; + +@@ -385,21 +381,30 @@ int intel_pt__strerror(int code, char *buf, size_t buflen) + return 0; + } + +-static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder, +- const struct intel_pt_pkt *packet, ++static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet, + uint64_t last_ip) + { + uint64_t ip; + + switch (packet->count) { +- case 2: ++ case 1: + ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) | + packet->payload; + break; +- case 4: ++ case 2: + ip = (last_ip & (uint64_t)0xffffffff00000000ULL) | + packet->payload; + break; ++ case 3: ++ ip = packet->payload; ++ 
/* Sign-extend 6-byte ip */ ++ if (ip & (uint64_t)0x800000000000ULL) ++ ip |= (uint64_t)0xffff000000000000ULL; ++ break; ++ case 4: ++ ip = (last_ip & (uint64_t)0xffff000000000000ULL) | ++ packet->payload; ++ break; + case 6: + ip = packet->payload; + break; +@@ -407,16 +412,13 @@ static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder, + return 0; + } + +- if (ip & decoder->sign_bit) +- return ip | decoder->sign_bits; +- + return ip; + } + + static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder) + { +- decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet, +- decoder->last_ip); ++ decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip); ++ decoder->have_last_ip = true; + } + + static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder) +@@ -1436,7 +1438,8 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder) + + case INTEL_PT_FUP: + decoder->pge = true; +- intel_pt_set_last_ip(decoder); ++ if (decoder->packet.count) ++ intel_pt_set_last_ip(decoder); + break; + + case INTEL_PT_MODE_TSX: +@@ -1640,6 +1643,8 @@ next: + break; + + case INTEL_PT_PSB: ++ decoder->last_ip = 0; ++ decoder->have_last_ip = true; + intel_pt_clear_stack(&decoder->stack); + err = intel_pt_walk_psbend(decoder); + if (err == -EAGAIN) +@@ -1718,6 +1723,13 @@ next: + } + } + ++static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder) ++{ ++ return decoder->packet.count && ++ (decoder->have_last_ip || decoder->packet.count == 3 || ++ decoder->packet.count == 6); ++} ++ + /* Walk PSB+ packets to get in sync. */ + static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) + { +@@ -1739,8 +1751,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) + + case INTEL_PT_FUP: + decoder->pge = true; +- if (decoder->last_ip || decoder->packet.count == 6 || +- decoder->packet.count == 0) { ++ if (intel_pt_have_ip(decoder)) { + uint64_t current_ip = decoder->ip; + + intel_pt_set_ip(decoder); +@@ -1832,24 +1843,17 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) + case INTEL_PT_TIP_PGE: + case INTEL_PT_TIP: + decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD; +- if (decoder->last_ip || decoder->packet.count == 6 || +- decoder->packet.count == 0) ++ if (intel_pt_have_ip(decoder)) + intel_pt_set_ip(decoder); + if (decoder->ip) + return 0; + break; + + case INTEL_PT_FUP: +- if (decoder->overflow) { +- if (decoder->last_ip || +- decoder->packet.count == 6 || +- decoder->packet.count == 0) +- intel_pt_set_ip(decoder); +- if (decoder->ip) +- return 0; +- } +- if (decoder->packet.count) +- intel_pt_set_last_ip(decoder); ++ if (intel_pt_have_ip(decoder)) ++ intel_pt_set_ip(decoder); ++ if (decoder->ip) ++ return 0; + break; + + case INTEL_PT_MTC: +@@ -1898,6 +1902,8 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) + break; + + case INTEL_PT_PSB: ++ decoder->last_ip = 0; ++ decoder->have_last_ip = true; + intel_pt_clear_stack(&decoder->stack); + err = intel_pt_walk_psb(decoder); + if (err) +@@ -2034,6 +2040,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder) + + decoder->pge = false; + decoder->continuous_period = false; ++ decoder->have_last_ip = false; + decoder->last_ip = 0; + decoder->ip = 0; + intel_pt_clear_stack(&decoder->stack); +@@ -2042,6 +2049,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder) + if (err) + return err; + ++ decoder->have_last_ip = true; + decoder->pkt_state = INTEL_PT_STATE_NO_IP; + + err = intel_pt_walk_psb(decoder); +@@ -2084,6 +2092,7 @@ const 
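Case 3 above sign-extends a 6-byte Intel PT IP payload by propagating bit 47 into the upper 16 bits. The same operation as a self-contained function, with two illustrative test values:

    #include <stdio.h>
    #include <stdint.h>

    /* Sign-extend a 48-bit value to 64 bits, as the decoder does for case 3. */
    static uint64_t sext48(uint64_t ip)
    {
        if (ip & 0x800000000000ULL)
            ip |= 0xffff000000000000ULL;
        return ip;
    }

    int main(void)
    {
        /* bit 47 clear: value is unchanged */
        printf("%016llx\n", (unsigned long long)sext48(0x00007fffffffffffULL));
        /* bit 47 set: top 16 bits are filled in */
        printf("%016llx\n", (unsigned long long)sext48(0x0000800000000000ULL));
        return 0;
    }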
struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) + err = intel_pt_sync(decoder); + break; + case INTEL_PT_STATE_NO_IP: ++ decoder->have_last_ip = false; + decoder->last_ip = 0; + decoder->ip = 0; + /* Fall through */ +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c +index 9b2fce25162b..7528ae4f7e28 100644 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c +@@ -293,36 +293,46 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte, + const unsigned char *buf, size_t len, + struct intel_pt_pkt *packet) + { +- switch (byte >> 5) { ++ int ip_len; ++ ++ packet->count = byte >> 5; ++ ++ switch (packet->count) { + case 0: +- packet->count = 0; ++ ip_len = 0; + break; + case 1: + if (len < 3) + return INTEL_PT_NEED_MORE_BYTES; +- packet->count = 2; ++ ip_len = 2; + packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1)); + break; + case 2: + if (len < 5) + return INTEL_PT_NEED_MORE_BYTES; +- packet->count = 4; ++ ip_len = 4; + packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1)); + break; + case 3: +- case 6: ++ case 4: + if (len < 7) + return INTEL_PT_NEED_MORE_BYTES; +- packet->count = 6; ++ ip_len = 6; + memcpy_le64(&packet->payload, buf + 1, 6); + break; ++ case 6: ++ if (len < 9) ++ return INTEL_PT_NEED_MORE_BYTES; ++ ip_len = 8; ++ packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1)); ++ break; + default: + return INTEL_PT_BAD_PACKET; + } + + packet->type = type; + +- return packet->count + 1; ++ return ip_len + 1; + } + + static int intel_pt_get_mode(const unsigned char *buf, size_t len, +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c +index 27ae382feb2d..7c97ecaeae48 100644 +--- a/tools/perf/util/symbol-elf.c ++++ b/tools/perf/util/symbol-elf.c +@@ -488,6 +488,12 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size) + break; + } else { + int n = namesz + descsz; ++ ++ if (n > (int)sizeof(bf)) { ++ n = sizeof(bf); ++ pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n", ++ __func__, filename, nhdr.n_namesz, nhdr.n_descsz); ++ } + if (read(fd, bf, n) != n) + break; + } diff --git a/patch/kernel/rk3328-default/patch-4.4.80-81.patch b/patch/kernel/rk3328-default/patch-4.4.80-81.patch new file mode 100644 index 000000000..1855567f0 --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.80-81.patch @@ -0,0 +1,2077 @@ +diff --git a/Makefile b/Makefile +index dddd55adde24..d049e53a6960 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 80 ++SUBLEVEL = 81 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts +index cd316021d6ce..6c1b45c1af66 100644 +--- a/arch/arm/boot/dts/armada-388-gp.dts ++++ b/arch/arm/boot/dts/armada-388-gp.dts +@@ -89,7 +89,7 @@ + pinctrl-names = "default"; + pinctrl-0 = <&pca0_pins>; + interrupt-parent = <&gpio0>; +- interrupts = <18 IRQ_TYPE_EDGE_FALLING>; ++ interrupts = <18 IRQ_TYPE_LEVEL_LOW>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; +@@ -101,7 +101,7 @@ + compatible = "nxp,pca9555"; + pinctrl-names = "default"; + interrupt-parent = <&gpio0>; +- interrupts = <18 IRQ_TYPE_EDGE_FALLING>; ++ interrupts = <18 IRQ_TYPE_LEVEL_LOW>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; +diff --git a/arch/arm/include/asm/ftrace.h 
b/arch/arm/include/asm/ftrace.h +index bfe2a2f5a644..22b73112b75f 100644 +--- a/arch/arm/include/asm/ftrace.h ++++ b/arch/arm/include/asm/ftrace.h +@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level) + + #define ftrace_return_address(n) return_address(n) + ++#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME ++ ++static inline bool arch_syscall_match_sym_name(const char *sym, ++ const char *name) ++{ ++ if (!strcmp(sym, "sys_mmap2")) ++ sym = "sys_mmap_pgoff"; ++ else if (!strcmp(sym, "sys_statfs64_wrapper")) ++ sym = "sys_statfs64"; ++ else if (!strcmp(sym, "sys_fstatfs64_wrapper")) ++ sym = "sys_fstatfs64"; ++ else if (!strcmp(sym, "sys_arm_fadvise64_64")) ++ sym = "sys_fadvise64_64"; ++ ++ /* Ignore case since sym may start with "SyS" instead of "sys" */ ++ return !strcasecmp(sym, name); ++} ++ + #endif /* ifndef __ASSEMBLY__ */ + + #endif /* _ASM_ARM_FTRACE */ +diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h +index ec9c04de3664..ff05992dae7a 100644 +--- a/arch/sparc/include/asm/trap_block.h ++++ b/arch/sparc/include/asm/trap_block.h +@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS]; + void init_cur_cpu_trap(struct thread_info *); + void setup_tba(void); + extern int ncpus_probed; ++extern u64 cpu_mondo_counter[NR_CPUS]; + + unsigned long real_hard_smp_processor_id(void); + +diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c +index 95a9fa0d2195..4511caa3b7e9 100644 +--- a/arch/sparc/kernel/smp_64.c ++++ b/arch/sparc/kernel/smp_64.c +@@ -617,22 +617,48 @@ retry: + } + } + +-/* Multi-cpu list version. */ ++#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid]) ++#define MONDO_USEC_WAIT_MIN 2 ++#define MONDO_USEC_WAIT_MAX 100 ++#define MONDO_RETRY_LIMIT 500000 ++ ++/* Multi-cpu list version. ++ * ++ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'. ++ * Sometimes not all cpus receive the mondo, requiring us to re-send ++ * the mondo until all cpus have received, or cpus are truly stuck ++ * unable to receive mondo, and we timeout. ++ * Occasionally a target cpu strand is borrowed briefly by hypervisor to ++ * perform guest service, such as PCIe error handling. Consider the ++ * service time, 1 second overall wait is reasonable for 1 cpu. ++ * Here two in-between mondo check wait time are defined: 2 usec for ++ * single cpu quick turn around and up to 100usec for large cpu count. ++ * Deliver mondo to large number of cpus could take longer, we adjusts ++ * the retry count as long as target cpus are making forward progress. ++ */ + static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) + { +- int retries, this_cpu, prev_sent, i, saw_cpu_error; ++ int this_cpu, tot_cpus, prev_sent, i, rem; ++ int usec_wait, retries, tot_retries; ++ u16 first_cpu = 0xffff; ++ unsigned long xc_rcvd = 0; + unsigned long status; ++ int ecpuerror_id = 0; ++ int enocpu_id = 0; + u16 *cpu_list; ++ u16 cpu; + + this_cpu = smp_processor_id(); +- + cpu_list = __va(tb->cpu_list_pa); +- +- saw_cpu_error = 0; +- retries = 0; ++ usec_wait = cnt * MONDO_USEC_WAIT_MIN; ++ if (usec_wait > MONDO_USEC_WAIT_MAX) ++ usec_wait = MONDO_USEC_WAIT_MAX; ++ retries = tot_retries = 0; ++ tot_cpus = cnt; + prev_sent = 0; ++ + do { +- int forward_progress, n_sent; ++ int n_sent, mondo_delivered, target_cpu_busy; + + status = sun4v_cpu_mondo_send(cnt, + tb->cpu_list_pa, +@@ -640,94 +666,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) + + /* HV_EOK means all cpus received the xcall, we're done. 
*/ + if (likely(status == HV_EOK)) +- break; ++ goto xcall_done; ++ ++ /* If not these non-fatal errors, panic */ ++ if (unlikely((status != HV_EWOULDBLOCK) && ++ (status != HV_ECPUERROR) && ++ (status != HV_ENOCPU))) ++ goto fatal_errors; + + /* First, see if we made any forward progress. ++ * ++ * Go through the cpu_list, count the target cpus that have ++ * received our mondo (n_sent), and those that did not (rem). ++ * Re-pack cpu_list with the cpus remain to be retried in the ++ * front - this simplifies tracking the truly stalled cpus. + * + * The hypervisor indicates successful sends by setting + * cpu list entries to the value 0xffff. ++ * ++ * EWOULDBLOCK means some target cpus did not receive the ++ * mondo and retry usually helps. ++ * ++ * ECPUERROR means at least one target cpu is in error state, ++ * it's usually safe to skip the faulty cpu and retry. ++ * ++ * ENOCPU means one of the target cpu doesn't belong to the ++ * domain, perhaps offlined which is unexpected, but not ++ * fatal and it's okay to skip the offlined cpu. + */ ++ rem = 0; + n_sent = 0; + for (i = 0; i < cnt; i++) { +- if (likely(cpu_list[i] == 0xffff)) ++ cpu = cpu_list[i]; ++ if (likely(cpu == 0xffff)) { + n_sent++; ++ } else if ((status == HV_ECPUERROR) && ++ (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) { ++ ecpuerror_id = cpu + 1; ++ } else if (status == HV_ENOCPU && !cpu_online(cpu)) { ++ enocpu_id = cpu + 1; ++ } else { ++ cpu_list[rem++] = cpu; ++ } + } + +- forward_progress = 0; +- if (n_sent > prev_sent) +- forward_progress = 1; ++ /* No cpu remained, we're done. */ ++ if (rem == 0) ++ break; + +- prev_sent = n_sent; ++ /* Otherwise, update the cpu count for retry. */ ++ cnt = rem; + +- /* If we get a HV_ECPUERROR, then one or more of the cpus +- * in the list are in error state. Use the cpu_state() +- * hypervisor call to find out which cpus are in error state. ++ /* Record the overall number of mondos received by the ++ * first of the remaining cpus. + */ +- if (unlikely(status == HV_ECPUERROR)) { +- for (i = 0; i < cnt; i++) { +- long err; +- u16 cpu; ++ if (first_cpu != cpu_list[0]) { ++ first_cpu = cpu_list[0]; ++ xc_rcvd = CPU_MONDO_COUNTER(first_cpu); ++ } + +- cpu = cpu_list[i]; +- if (cpu == 0xffff) +- continue; ++ /* Was any mondo delivered successfully? */ ++ mondo_delivered = (n_sent > prev_sent); ++ prev_sent = n_sent; + +- err = sun4v_cpu_state(cpu); +- if (err == HV_CPU_STATE_ERROR) { +- saw_cpu_error = (cpu + 1); +- cpu_list[i] = 0xffff; +- } +- } +- } else if (unlikely(status != HV_EWOULDBLOCK)) +- goto fatal_mondo_error; ++ /* or, was any target cpu busy processing other mondos? */ ++ target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu)); ++ xc_rcvd = CPU_MONDO_COUNTER(first_cpu); + +- /* Don't bother rewriting the CPU list, just leave the +- * 0xffff and non-0xffff entries in there and the +- * hypervisor will do the right thing. +- * +- * Only advance timeout state if we didn't make any +- * forward progress. ++ /* Retry count is for no progress. If we're making progress, ++ * reset the retry count. + */ +- if (unlikely(!forward_progress)) { +- if (unlikely(++retries > 10000)) +- goto fatal_mondo_timeout; +- +- /* Delay a little bit to let other cpus catch up +- * on their cpu mondo queue work. 
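The reworked mondo sender above bounds its timeout by lack of forward progress rather than by a fixed number of attempts: as long as targets keep acknowledging, the retry budget is reset. A toy retry loop with the same policy; deliver() is a stand-in for the hypervisor call and the counts are made up:

    #include <stdio.h>

    #define RETRY_LIMIT 5

    static int remaining = 3;        /* targets still to reach (simulated) */

    static int deliver(void)         /* returns targets left after one try */
    {
        if (remaining > 0)
            remaining--;             /* simulated: one target per attempt */
        return remaining;
    }

    int main(void)
    {
        int retries = 0, prev = remaining;

        while (remaining > 0) {
            int left = deliver();

            if (left < prev)         /* progress: reset the retry budget */
                retries = 0;
            else if (++retries > RETRY_LIMIT) {
                puts("timeout: no forward progress");
                return 1;
            }
            prev = left;
        }
        puts("all targets reached");
        return 0;
    }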
+- */ +- udelay(2 * cnt); ++ if (likely(mondo_delivered || target_cpu_busy)) { ++ tot_retries += retries; ++ retries = 0; ++ } else if (unlikely(retries > MONDO_RETRY_LIMIT)) { ++ goto fatal_mondo_timeout; + } +- } while (1); + +- if (unlikely(saw_cpu_error)) +- goto fatal_mondo_cpu_error; ++ /* Delay a little bit to let other cpus catch up on ++ * their cpu mondo queue work. ++ */ ++ if (!mondo_delivered) ++ udelay(usec_wait); + +- return; ++ retries++; ++ } while (1); + +-fatal_mondo_cpu_error: +- printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " +- "(including %d) were in error state\n", +- this_cpu, saw_cpu_error - 1); ++xcall_done: ++ if (unlikely(ecpuerror_id > 0)) { ++ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n", ++ this_cpu, ecpuerror_id - 1); ++ } else if (unlikely(enocpu_id > 0)) { ++ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n", ++ this_cpu, enocpu_id - 1); ++ } + return; + ++fatal_errors: ++ /* fatal errors include bad alignment, etc */ ++ pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n", ++ this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa); ++ panic("Unexpected SUN4V mondo error %lu\n", status); ++ + fatal_mondo_timeout: +- printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " +- " progress after %d retries.\n", +- this_cpu, retries); +- goto dump_cpu_list_and_out; +- +-fatal_mondo_error: +- printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n", +- this_cpu, status); +- printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) " +- "mondo_block_pa(%lx)\n", +- this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); +- +-dump_cpu_list_and_out: +- printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu); +- for (i = 0; i < cnt; i++) +- printk("%u ", cpu_list[i]); +- printk("]\n"); ++ /* some cpus being non-responsive to the cpu mondo */ ++ pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n", ++ this_cpu, first_cpu, (tot_retries + retries), tot_cpus); ++ panic("SUN4V mondo timeout panic\n"); + } + + static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); +diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S +index 559bc5e9c199..34631995859a 100644 +--- a/arch/sparc/kernel/sun4v_ivec.S ++++ b/arch/sparc/kernel/sun4v_ivec.S +@@ -26,6 +26,21 @@ sun4v_cpu_mondo: + ldxa [%g0] ASI_SCRATCHPAD, %g4 + sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 + ++ /* Get smp_processor_id() into %g3 */ ++ sethi %hi(trap_block), %g5 ++ or %g5, %lo(trap_block), %g5 ++ sub %g4, %g5, %g3 ++ srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 ++ ++ /* Increment cpu_mondo_counter[smp_processor_id()] */ ++ sethi %hi(cpu_mondo_counter), %g5 ++ or %g5, %lo(cpu_mondo_counter), %g5 ++ sllx %g3, 3, %g3 ++ add %g5, %g3, %g5 ++ ldx [%g5], %g3 ++ add %g3, 1, %g3 ++ stx %g3, [%g5] ++ + /* Get CPU mondo queue base phys address into %g7. 
*/ + ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 + +diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c +index cc97a43268ee..d883c5951e8b 100644 +--- a/arch/sparc/kernel/traps_64.c ++++ b/arch/sparc/kernel/traps_64.c +@@ -2659,6 +2659,7 @@ void do_getpsr(struct pt_regs *regs) + } + } + ++u64 cpu_mondo_counter[NR_CPUS] = {0}; + struct trap_per_cpu trap_block[NR_CPUS]; + EXPORT_SYMBOL(trap_block); + +diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c +index 318b8465d302..06ceddb3a22e 100644 +--- a/arch/x86/boot/string.c ++++ b/arch/x86/boot/string.c +@@ -14,6 +14,7 @@ + + #include + #include "ctype.h" ++#include "string.h" + + int memcmp(const void *s1, const void *s2, size_t len) + { +diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h +index 725e820602b1..113588ddb43f 100644 +--- a/arch/x86/boot/string.h ++++ b/arch/x86/boot/string.h +@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len); + #define memset(d,c,l) __builtin_memset(d,c,l) + #define memcmp __builtin_memcmp + ++extern int strcmp(const char *str1, const char *str2); ++extern int strncmp(const char *cs, const char *ct, size_t count); ++extern size_t strlen(const char *s); ++extern char *strstr(const char *s1, const char *s2); ++extern size_t strnlen(const char *s, size_t maxlen); ++extern unsigned int atou(const char *s); ++extern unsigned long long simple_strtoull(const char *cp, char **endp, ++ unsigned int base); ++ + #endif /* BOOT_STRING_H */ +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c +index cec49ecf5f31..32187f8a49b4 100644 +--- a/arch/x86/kernel/kvm.c ++++ b/arch/x86/kernel/kvm.c +@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token) + if (hlist_unhashed(&n.link)) + break; + ++ rcu_irq_exit(); ++ + if (!n.halted) { + local_irq_enable(); + schedule(); +@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token) + /* + * We cannot reschedule. So halt. 
+ */ +- rcu_irq_exit(); + native_safe_halt(); + local_irq_disable(); +- rcu_irq_enter(); + } ++ ++ rcu_irq_enter(); + } + if (!n.halted) + finish_wait(&n.wq, &wait); +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index e417e1a1d02c..5b2aee83d776 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -2832,10 +2832,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) + static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) + { + if (!sata_pmp_attached(ap)) { +- if (likely(devno < ata_link_max_devices(&ap->link))) ++ if (likely(devno >= 0 && ++ devno < ata_link_max_devices(&ap->link))) + return &ap->link.device[devno]; + } else { +- if (likely(devno < ap->nr_pmp_links)) ++ if (likely(devno >= 0 && ++ devno < ap->nr_pmp_links)) + return &ap->pmp_link[devno].device[0]; + } + +diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c +index 6ca35495a5be..1e5cd39d0cc2 100644 +--- a/drivers/block/virtio_blk.c ++++ b/drivers/block/virtio_blk.c +@@ -641,11 +641,12 @@ static int virtblk_probe(struct virtio_device *vdev) + if (err) + goto out_put_disk; + +- q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); ++ q = blk_mq_init_queue(&vblk->tag_set); + if (IS_ERR(q)) { + err = -ENOMEM; + goto out_free_tags; + } ++ vblk->disk->queue = q; + + q->queuedata = vblk; + +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c +index bf4674aa6405..bb9cd35d7fdf 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c +@@ -296,7 +296,7 @@ static int rcar_du_probe(struct platform_device *pdev) + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(rcdu->mmio)) +- ret = PTR_ERR(rcdu->mmio); ++ return PTR_ERR(rcdu->mmio); + + /* DRM/KMS objects */ + ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev); +diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c +index 6a81e084593b..2b59d80a09b8 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_fb.c ++++ b/drivers/gpu/drm/virtio/virtgpu_fb.c +@@ -338,7 +338,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, + info->fbops = &virtio_gpufb_ops; + info->pixmap.flags = FB_PIXMAP_SYSTEM; + +- info->screen_base = obj->vmap; ++ info->screen_buffer = obj->vmap; + info->screen_size = obj->gem_base.size; + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_var(info, &vfbdev->helper, +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index b0edb66a291b..0b7f5a701c60 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -1581,7 +1581,7 @@ isert_rcv_completion(struct iser_rx_desc *desc, + struct isert_conn *isert_conn, + u32 xfer_len) + { +- struct ib_device *ib_dev = isert_conn->cm_id->device; ++ struct ib_device *ib_dev = isert_conn->device->ib_device; + struct iscsi_hdr *hdr; + u64 rx_dma; + int rx_buflen; +diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c +index a18fe5d47238..b4857cd7069e 100644 +--- a/drivers/media/pci/saa7164/saa7164-bus.c ++++ b/drivers/media/pci/saa7164/saa7164-bus.c +@@ -393,11 +393,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, + msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size); + msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command); + msg_tmp.controlselector = 
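Several hunks above (libata, virtio_blk, rcar-du) share one shape: an error result must actually terminate the flow, and a pointer should only be published to long-lived state after it has been validated. A condensed sketch using a simulated ERR_PTR-style encoding, not the kernel's headers:

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
    #define ERR_PTR(e) ((void *)(long)(e))
    #define PTR_ERR(p) ((long)(p))

    static void *init_queue(int fail)
    {
        return fail ? ERR_PTR(-12) : (void *)0x1000; /* -ENOMEM or a queue */
    }

    int main(void)
    {
        void *q = init_queue(1);

        if (IS_ERR(q))
            return (int)-PTR_ERR(q); /* return, don't just record the error */

        /* only now is it safe to publish q to long-lived state */
        printf("queue ready: %p\n", q);
        return 0;
    }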
le16_to_cpu((__force __le16)msg_tmp.controlselector); ++ memcpy(msg, &msg_tmp, sizeof(*msg)); + + /* No need to update the read positions, because this was a peek */ + /* If the caller specifically want to peek, return */ + if (peekonly) { +- memcpy(msg, &msg_tmp, sizeof(*msg)); + goto peekout; + } + +@@ -442,21 +442,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, + space_rem = bus->m_dwSizeGetRing - curr_grp; + + if (space_rem < sizeof(*msg)) { +- /* msg wraps around the ring */ +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem); +- memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing, +- sizeof(*msg) - space_rem); + if (buf) + memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) - + space_rem, buf_size); + + } else if (space_rem == sizeof(*msg)) { +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); + if (buf) + memcpy_fromio(buf, bus->m_pdwGetRing, buf_size); + } else { + /* Additional data wraps around the ring */ +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); + if (buf) { + memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + + sizeof(*msg), space_rem - sizeof(*msg)); +@@ -469,15 +463,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, + + } else { + /* No wrapping */ +- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); + if (buf) + memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), + buf_size); + } +- /* Convert from little endian to CPU */ +- msg->size = le16_to_cpu((__force __le16)msg->size); +- msg->command = le32_to_cpu((__force __le32)msg->command); +- msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector); + + /* Update the read positions, adjusting the ring */ + saa7164_writel(bus->m_dwGetReadPos, new_grp); +diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c +index 7767e072d623..1f656a3a84b9 100644 +--- a/drivers/media/platform/davinci/vpfe_capture.c ++++ b/drivers/media/platform/davinci/vpfe_capture.c +@@ -1709,27 +1709,9 @@ static long vpfe_param_handler(struct file *file, void *priv, + + switch (cmd) { + case VPFE_CMD_S_CCDC_RAW_PARAMS: ++ ret = -EINVAL; + v4l2_warn(&vpfe_dev->v4l2_dev, +- "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); +- if (ccdc_dev->hw_ops.set_params) { +- ret = ccdc_dev->hw_ops.set_params(param); +- if (ret) { +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, +- "Error setting parameters in CCDC\n"); +- goto unlock_out; +- } +- ret = vpfe_get_ccdc_image_format(vpfe_dev, +- &vpfe_dev->fmt); +- if (ret < 0) { +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, +- "Invalid image format at CCDC\n"); +- goto unlock_out; +- } +- } else { +- ret = -EINVAL; +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, +- "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); +- } ++ "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); + break; + default: + ret = -ENOTTY; +diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c +index a32659fcd266..efc21b1da211 100644 +--- a/drivers/media/rc/ir-lirc-codec.c ++++ b/drivers/media/rc/ir-lirc-codec.c +@@ -254,7 +254,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, + return 0; + + case LIRC_GET_REC_RESOLUTION: +- val = dev->rx_resolution; ++ val = dev->rx_resolution / 1000; + break; + + case LIRC_SET_WIDEBAND_RECEIVER: +diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c +index ecc4a334c507..0a54e7dac0ab 100644 +--- a/drivers/net/ethernet/aurora/nb8800.c ++++ 
b/drivers/net/ethernet/aurora/nb8800.c +@@ -608,7 +608,7 @@ static void nb8800_mac_config(struct net_device *dev) + mac_mode |= HALF_DUPLEX; + + if (gigabit) { +- if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) ++ if (phy_interface_is_rgmii(dev->phydev)) + mac_mode |= RGMII_MODE; + + mac_mode |= GMAC_MODE; +@@ -1295,11 +1295,10 @@ static int nb8800_tangox_init(struct net_device *dev) + break; + + case PHY_INTERFACE_MODE_RGMII: +- pad_mode = PAD_MODE_RGMII; +- break; +- ++ case PHY_INTERFACE_MODE_RGMII_ID: ++ case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: +- pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; ++ pad_mode = PAD_MODE_RGMII; + break; + + default: +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index 21e5b9ed1ead..3613469dc5c6 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -8722,11 +8722,14 @@ static void tg3_free_consistent(struct tg3 *tp) + tg3_mem_rx_release(tp); + tg3_mem_tx_release(tp); + ++ /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */ ++ tg3_full_lock(tp, 0); + if (tp->hw_stats) { + dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), + tp->hw_stats, tp->stats_mapping); + tp->hw_stats = NULL; + } ++ tg3_full_unlock(tp); + } + + /* +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index cc199063612a..6c66d2979795 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -630,6 +630,10 @@ static void dump_command(struct mlx5_core_dev *dev, + pr_debug("\n"); + } + ++static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); ++static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, ++ struct mlx5_cmd_msg *msg); ++ + static void cmd_work_handler(struct work_struct *work) + { + struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); +@@ -638,16 +642,27 @@ static void cmd_work_handler(struct work_struct *work) + struct mlx5_cmd_layout *lay; + struct semaphore *sem; + unsigned long flags; ++ int alloc_ret; + + sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; + down(sem); + if (!ent->page_queue) { +- ent->idx = alloc_ent(cmd); +- if (ent->idx < 0) { ++ alloc_ret = alloc_ent(cmd); ++ if (alloc_ret < 0) { ++ if (ent->callback) { ++ ent->callback(-EAGAIN, ent->context); ++ mlx5_free_cmd_msg(dev, ent->out); ++ free_msg(dev, ent->in); ++ free_cmd(ent); ++ } else { ++ ent->ret = -EAGAIN; ++ complete(&ent->done); ++ } + mlx5_core_err(dev, "failed to allocate command entry\n"); + up(sem); + return; + } ++ ent->idx = alloc_ret; + } else { + ent->idx = cmd->max_reg_cmds; + spin_lock_irqsave(&cmd->alloc_lock, flags); +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c +index 4296066a7ad3..479af106aaeb 100644 +--- a/drivers/net/ethernet/renesas/sh_eth.c ++++ b/drivers/net/ethernet/renesas/sh_eth.c +@@ -819,6 +819,7 @@ static struct sh_eth_cpu_data r8a7740_data = { + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, ++ .hw_crc = 1, + .tsu = 1, + .select_mii = 1, + .shift_rd0 = 1, +diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c +index bca6a1e72d1d..e1bb802d4a4d 100644 +--- a/drivers/net/irda/mcs7780.c ++++ b/drivers/net/irda/mcs7780.c +@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val) + static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val) + { + struct usb_device *dev = mcs->usbdev; +- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, +- MCS_RD_RTYPE, 0, reg, val, 2, +- msecs_to_jiffies(MCS_CTRL_TIMEOUT)); ++ void *dmabuf; ++ int ret; ++ ++ dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL); ++ if (!dmabuf) ++ return -ENOMEM; ++ ++ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, ++ MCS_RD_RTYPE, 0, reg, dmabuf, 2, ++ msecs_to_jiffies(MCS_CTRL_TIMEOUT)); ++ ++ memcpy(val, dmabuf, sizeof(__u16)); ++ kfree(dmabuf); + + return ret; + } +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c +index 851c0e121807..49d9f0a789fe 100644 +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -541,6 +541,9 @@ void phy_stop_machine(struct phy_device *phydev) + if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) + phydev->state = PHY_UP; + mutex_unlock(&phydev->lock); ++ ++ /* Now we can run the state machine synchronously */ ++ phy_state_machine(&phydev->state_queue.work); + } + + /** +@@ -918,6 +921,15 @@ void phy_state_machine(struct work_struct *work) + if (old_link != phydev->link) + phydev->state = PHY_CHANGELINK; + } ++ /* ++ * Failsafe: check that nobody set phydev->link=0 between two ++ * poll cycles, otherwise we won't leave RUNNING state as long ++ * as link remains down. 
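The mcs7780 fix above stops passing the caller's buffer straight into usb_control_msg() and instead uses a dedicated heap allocation, copying the result out afterwards, because stack buffers are not DMA-safe. A stripped-down sketch of the pattern with the USB transfer stubbed out:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    static int usb_control_read(void *buf, size_t len)  /* stub transfer */
    {
        memset(buf, 0xab, len);
        return (int)len;
    }

    static int get_reg(uint16_t *val)
    {
        void *dmabuf = malloc(sizeof(*val)); /* DMA-safe in the real driver */
        int ret;

        if (!dmabuf)
            return -1;
        ret = usb_control_read(dmabuf, sizeof(*val));
        memcpy(val, dmabuf, sizeof(*val));   /* copy out before freeing */
        free(dmabuf);
        return ret;
    }

    int main(void)
    {
        uint16_t v = 0;

        if (get_reg(&v) > 0)
            printf("reg = 0x%04x\n", v);
        return 0;
    }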
++ */ ++ if (!phydev->link && phydev->state == PHY_RUNNING) { ++ phydev->state = PHY_CHANGELINK; ++ dev_err(&phydev->dev, "no link in PHY_RUNNING\n"); ++ } + break; + case PHY_CHANGELINK: + err = phy_read_status(phydev); +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index 1d1e5f7723ab..8179727d3423 100644 +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -1368,6 +1368,8 @@ static int phy_remove(struct device *dev) + { + struct phy_device *phydev = to_phy_device(dev); + ++ cancel_delayed_work_sync(&phydev->state_queue); ++ + mutex_lock(&phydev->lock); + phydev->state = PHY_DOWN; + mutex_unlock(&phydev->lock); +diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h +index 0333ab0fd926..34173b5e886f 100644 +--- a/drivers/net/xen-netback/common.h ++++ b/drivers/net/xen-netback/common.h +@@ -201,6 +201,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ + unsigned long remaining_credit; + struct timer_list credit_timeout; + u64 credit_window_start; ++ bool rate_limited; + + /* Statistics */ + struct xenvif_stats stats; +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c +index e7bd63eb2876..60b26f32d31d 100644 +--- a/drivers/net/xen-netback/interface.c ++++ b/drivers/net/xen-netback/interface.c +@@ -105,7 +105,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget) + + if (work_done < budget) { + napi_complete(napi); +- xenvif_napi_schedule_or_enable_events(queue); ++ /* If the queue is rate-limited, it shall be ++ * rescheduled in the timer callback. ++ */ ++ if (likely(!queue->rate_limited)) ++ xenvif_napi_schedule_or_enable_events(queue); + } + + return work_done; +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c +index 1049c34e7d43..72ee1c305cc4 100644 +--- a/drivers/net/xen-netback/netback.c ++++ b/drivers/net/xen-netback/netback.c +@@ -687,6 +687,7 @@ static void tx_add_credit(struct xenvif_queue *queue) + max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ + + queue->remaining_credit = min(max_credit, max_burst); ++ queue->rate_limited = false; + } + + void xenvif_tx_credit_callback(unsigned long data) +@@ -1184,8 +1185,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) + msecs_to_jiffies(queue->credit_usec / 1000); + + /* Timer could already be pending in rare cases. */ +- if (timer_pending(&queue->credit_timeout)) ++ if (timer_pending(&queue->credit_timeout)) { ++ queue->rate_limited = true; + return true; ++ } + + /* Passed the point where we can replenish credit? 
*/ + if (time_after_eq64(now, next_credit)) { +@@ -1200,6 +1203,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) + mod_timer(&queue->credit_timeout, + next_credit); + queue->credit_window_start = next_credit; ++ queue->rate_limited = true; + + return true; + } +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c +index 6b942d9e5b74..1ed85dfc008d 100644 +--- a/drivers/scsi/qla2xxx/qla_attr.c ++++ b/drivers/scsi/qla2xxx/qla_attr.c +@@ -329,12 +329,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, + struct qla_hw_data *ha = vha->hw; + ssize_t rval = 0; + ++ mutex_lock(&ha->optrom_mutex); ++ + if (ha->optrom_state != QLA_SREADING) +- return 0; ++ goto out; + +- mutex_lock(&ha->optrom_mutex); + rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, + ha->optrom_region_size); ++ ++out: + mutex_unlock(&ha->optrom_mutex); + + return rval; +@@ -349,14 +352,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + +- if (ha->optrom_state != QLA_SWRITING) ++ mutex_lock(&ha->optrom_mutex); ++ ++ if (ha->optrom_state != QLA_SWRITING) { ++ mutex_unlock(&ha->optrom_mutex); + return -EINVAL; +- if (off > ha->optrom_region_size) ++ } ++ if (off > ha->optrom_region_size) { ++ mutex_unlock(&ha->optrom_mutex); + return -ERANGE; ++ } + if (off + count > ha->optrom_region_size) + count = ha->optrom_region_size - off; + +- mutex_lock(&ha->optrom_mutex); + memcpy(&ha->optrom_buffer[off], buf, count); + mutex_unlock(&ha->optrom_mutex); + +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index a180c000e246..31d5d9c0e10b 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -3965,6 +3965,8 @@ int iscsi_target_tx_thread(void *arg) + { + int ret = 0; + struct iscsi_conn *conn = arg; ++ bool conn_freed = false; ++ + /* + * Allow ourselves to be interrupted by SIGINT so that a + * connection recovery / failure event can be triggered externally. +@@ -3990,12 +3992,14 @@ get_immediate: + goto transport_err; + + ret = iscsit_handle_response_queue(conn); +- if (ret == 1) ++ if (ret == 1) { + goto get_immediate; +- else if (ret == -ECONNRESET) ++ } else if (ret == -ECONNRESET) { ++ conn_freed = true; + goto out; +- else if (ret < 0) ++ } else if (ret < 0) { + goto transport_err; ++ } + } + + transport_err: +@@ -4005,8 +4009,13 @@ transport_err: + * responsible for cleaning up the early connection failure. + */ + if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) +- iscsit_take_action_for_connection_exit(conn); ++ iscsit_take_action_for_connection_exit(conn, &conn_freed); + out: ++ if (!conn_freed) { ++ while (!kthread_should_stop()) { ++ msleep(100); ++ } ++ } + return 0; + } + +@@ -4105,6 +4114,7 @@ int iscsi_target_rx_thread(void *arg) + u32 checksum = 0, digest = 0; + struct iscsi_conn *conn = arg; + struct kvec iov; ++ bool conn_freed = false; + /* + * Allow ourselves to be interrupted by SIGINT so that a + * connection recovery / failure event can be triggered externally. 
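The xen-netback hunks above track a rate_limited flag next to the existing credit window so a rate-limited queue is only rescheduled from the timer. The underlying accounting is a token bucket; a minimal sketch with simulated ticks instead of jiffies, and made-up rates:

    #include <stdio.h>

    static unsigned long credit;        /* bytes we may still send */
    static unsigned long window_start;  /* tick when credit was last granted */

    static void replenish(unsigned long now, unsigned long bytes_per_tick)
    {
        credit = bytes_per_tick * (now - window_start);
        window_start = now;
    }

    static int try_send(unsigned long size, unsigned long now)
    {
        if (size > credit) {
            replenish(now, 100);
            if (size > credit)
                return 0;               /* rate-limited: wait for the timer */
        }
        credit -= size;
        return 1;
    }

    int main(void)
    {
        printf("t=0 send 150 -> %d\n", try_send(150, 0));
        printf("t=2 send 150 -> %d\n", try_send(150, 2));
        return 0;
    }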
+@@ -4116,7 +4126,7 @@ int iscsi_target_rx_thread(void *arg) + */ + rc = wait_for_completion_interruptible(&conn->rx_login_comp); + if (rc < 0 || iscsi_target_check_conn_state(conn)) +- return 0; ++ goto out; + + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { + struct completion comp; +@@ -4201,7 +4211,13 @@ int iscsi_target_rx_thread(void *arg) + transport_err: + if (!signal_pending(current)) + atomic_set(&conn->transport_failed, 1); +- iscsit_take_action_for_connection_exit(conn); ++ iscsit_take_action_for_connection_exit(conn, &conn_freed); ++out: ++ if (!conn_freed) { ++ while (!kthread_should_stop()) { ++ msleep(100); ++ } ++ } + return 0; + } + +@@ -4575,8 +4591,11 @@ static void iscsit_logout_post_handler_closesession( + * always sleep waiting for RX/TX thread shutdown to complete + * within iscsit_close_connection(). + */ +- if (conn->conn_transport->transport_type == ISCSI_TCP) ++ if (conn->conn_transport->transport_type == ISCSI_TCP) { + sleep = cmpxchg(&conn->tx_thread_active, true, false); ++ if (!sleep) ++ return; ++ } + + atomic_set(&conn->conn_logout_remove, 0); + complete(&conn->conn_logout_comp); +@@ -4592,8 +4611,11 @@ static void iscsit_logout_post_handler_samecid( + { + int sleep = 1; + +- if (conn->conn_transport->transport_type == ISCSI_TCP) ++ if (conn->conn_transport->transport_type == ISCSI_TCP) { + sleep = cmpxchg(&conn->tx_thread_active, true, false); ++ if (!sleep) ++ return; ++ } + + atomic_set(&conn->conn_logout_remove, 0); + complete(&conn->conn_logout_comp); +diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c +index 210f6e4830e3..6c88fb021444 100644 +--- a/drivers/target/iscsi/iscsi_target_erl0.c ++++ b/drivers/target/iscsi/iscsi_target_erl0.c +@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn) + } + } + +-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) ++void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed) + { ++ *conn_freed = false; ++ + spin_lock_bh(&conn->state_lock); + if (atomic_read(&conn->connection_exit)) { + spin_unlock_bh(&conn->state_lock); +@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) + if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { + spin_unlock_bh(&conn->state_lock); + iscsit_close_connection(conn); ++ *conn_freed = true; + return; + } + +@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) + spin_unlock_bh(&conn->state_lock); + + iscsit_handle_connection_cleanup(conn); ++ *conn_freed = true; + } +diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h +index a9e2f9497fb2..fbc1d84a63c3 100644 +--- a/drivers/target/iscsi/iscsi_target_erl0.h ++++ b/drivers/target/iscsi/iscsi_target_erl0.h +@@ -9,6 +9,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *); + extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); + extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); + extern void iscsit_fall_back_to_erl0(struct iscsi_session *); +-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); ++extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *); + + #endif /*** ISCSI_TARGET_ERL0_H ***/ +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index 4a137b0ae3dc..b19edffa7d98 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c 
++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -1436,5 +1436,9 @@ int iscsi_target_login_thread(void *arg) + break; + } + ++ while (!kthread_should_stop()) { ++ msleep(100); ++ } ++ + return 0; + } +diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c +index 549a2bbbf4df..58c629aec73c 100644 +--- a/drivers/target/iscsi/iscsi_target_nego.c ++++ b/drivers/target/iscsi/iscsi_target_nego.c +@@ -489,14 +489,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn) + + static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); + +-static bool iscsi_target_sk_state_check(struct sock *sk) ++static bool __iscsi_target_sk_check_close(struct sock *sk) + { + if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { +- pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," ++ pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE," + "returning FALSE\n"); +- return false; ++ return true; + } +- return true; ++ return false; ++} ++ ++static bool iscsi_target_sk_check_close(struct iscsi_conn *conn) ++{ ++ bool state = false; ++ ++ if (conn->sock) { ++ struct sock *sk = conn->sock->sk; ++ ++ read_lock_bh(&sk->sk_callback_lock); ++ state = (__iscsi_target_sk_check_close(sk) || ++ test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); ++ read_unlock_bh(&sk->sk_callback_lock); ++ } ++ return state; ++} ++ ++static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag) ++{ ++ bool state = false; ++ ++ if (conn->sock) { ++ struct sock *sk = conn->sock->sk; ++ ++ read_lock_bh(&sk->sk_callback_lock); ++ state = test_bit(flag, &conn->login_flags); ++ read_unlock_bh(&sk->sk_callback_lock); ++ } ++ return state; ++} ++ ++static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag) ++{ ++ bool state = false; ++ ++ if (conn->sock) { ++ struct sock *sk = conn->sock->sk; ++ ++ write_lock_bh(&sk->sk_callback_lock); ++ state = (__iscsi_target_sk_check_close(sk) || ++ test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); ++ if (!state) ++ clear_bit(flag, &conn->login_flags); ++ write_unlock_bh(&sk->sk_callback_lock); ++ } ++ return state; + } + + static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) +@@ -536,6 +582,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work) + + pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", + conn, current->comm, current->pid); ++ /* ++ * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() ++ * before initial PDU processing in iscsi_target_start_negotiation() ++ * has completed, go ahead and retry until it's cleared. ++ * ++ * Otherwise if the TCP connection drops while this is occuring, ++ * iscsi_target_start_negotiation() will detect the failure, call ++ * cancel_delayed_work_sync(&conn->login_work), and cleanup the ++ * remaining iscsi connection resources from iscsi_np process context. 
++ */ ++ if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) { ++ schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10)); ++ return; ++ } + + spin_lock(&tpg->tpg_state_lock); + state = (tpg->tpg_state == TPG_STATE_ACTIVE); +@@ -543,26 +603,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work) + + if (!state) { + pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); +- iscsi_target_restore_sock_callbacks(conn); +- iscsi_target_login_drop(conn, login); +- iscsit_deaccess_np(np, tpg, tpg_np); +- return; ++ goto err; + } + +- if (conn->sock) { +- struct sock *sk = conn->sock->sk; +- +- read_lock_bh(&sk->sk_callback_lock); +- state = iscsi_target_sk_state_check(sk); +- read_unlock_bh(&sk->sk_callback_lock); +- +- if (!state) { +- pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); +- iscsi_target_restore_sock_callbacks(conn); +- iscsi_target_login_drop(conn, login); +- iscsit_deaccess_np(np, tpg, tpg_np); +- return; +- } ++ if (iscsi_target_sk_check_close(conn)) { ++ pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); ++ goto err; + } + + conn->login_kworker = current; +@@ -580,34 +626,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work) + flush_signals(current); + conn->login_kworker = NULL; + +- if (rc < 0) { +- iscsi_target_restore_sock_callbacks(conn); +- iscsi_target_login_drop(conn, login); +- iscsit_deaccess_np(np, tpg, tpg_np); +- return; +- } ++ if (rc < 0) ++ goto err; + + pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", + conn, current->comm, current->pid); + + rc = iscsi_target_do_login(conn, login); + if (rc < 0) { +- iscsi_target_restore_sock_callbacks(conn); +- iscsi_target_login_drop(conn, login); +- iscsit_deaccess_np(np, tpg, tpg_np); ++ goto err; + } else if (!rc) { +- if (conn->sock) { +- struct sock *sk = conn->sock->sk; +- +- write_lock_bh(&sk->sk_callback_lock); +- clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); +- write_unlock_bh(&sk->sk_callback_lock); +- } ++ if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE)) ++ goto err; + } else if (rc == 1) { + iscsi_target_nego_release(conn); + iscsi_post_login_handler(np, conn, zero_tsih); + iscsit_deaccess_np(np, tpg, tpg_np); + } ++ return; ++ ++err: ++ iscsi_target_restore_sock_callbacks(conn); ++ iscsi_target_login_drop(conn, login); ++ iscsit_deaccess_np(np, tpg, tpg_np); + } + + static void iscsi_target_do_cleanup(struct work_struct *work) +@@ -655,31 +696,54 @@ static void iscsi_target_sk_state_change(struct sock *sk) + orig_state_change(sk); + return; + } ++ state = __iscsi_target_sk_check_close(sk); ++ pr_debug("__iscsi_target_sk_close_change: state: %d\n", state); ++ + if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { + pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" + " conn: %p\n", conn); ++ if (state) ++ set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); + return; + } +- if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { ++ if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { + pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", + conn); + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); + return; + } ++ /* ++ * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED, ++ * but only queue conn->login_work -> iscsi_target_do_login_rx() ++ * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared. 
++ * ++ * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close() ++ * will detect the dropped TCP connection from delayed workqueue context. ++ * ++ * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial ++ * iscsi_target_start_negotiation() is running, iscsi_target_do_login() ++ * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation() ++ * via iscsi_target_sk_check_and_clear() is responsible for detecting the ++ * dropped TCP connection in iscsi_np process context, and cleaning up ++ * the remaining iscsi connection resources. ++ */ ++ if (state) { ++ pr_debug("iscsi_target_sk_state_change got failed state\n"); ++ set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); ++ state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); ++ write_unlock_bh(&sk->sk_callback_lock); + +- state = iscsi_target_sk_state_check(sk); +- write_unlock_bh(&sk->sk_callback_lock); +- +- pr_debug("iscsi_target_sk_state_change: state: %d\n", state); ++ orig_state_change(sk); + +- if (!state) { +- pr_debug("iscsi_target_sk_state_change got failed state\n"); +- schedule_delayed_work(&conn->login_cleanup_work, 0); ++ if (!state) ++ schedule_delayed_work(&conn->login_work, 0); + return; + } ++ write_unlock_bh(&sk->sk_callback_lock); ++ + orig_state_change(sk); + } + +@@ -944,6 +1008,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo + if (iscsi_target_handle_csg_one(conn, login) < 0) + return -1; + if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { ++ /* ++ * Check to make sure the TCP connection has not ++ * dropped asynchronously while session reinstatement ++ * was occuring in this kthread context, before ++ * transitioning to full feature phase operation. ++ */ ++ if (iscsi_target_sk_check_close(conn)) ++ return -1; ++ + login->tsih = conn->sess->tsih; + login->login_complete = 1; + iscsi_target_restore_sock_callbacks(conn); +@@ -970,21 +1043,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo + break; + } + +- if (conn->sock) { +- struct sock *sk = conn->sock->sk; +- bool state; +- +- read_lock_bh(&sk->sk_callback_lock); +- state = iscsi_target_sk_state_check(sk); +- read_unlock_bh(&sk->sk_callback_lock); +- +- if (!state) { +- pr_debug("iscsi_target_do_login() failed state for" +- " conn: %p\n", conn); +- return -1; +- } +- } +- + return 0; + } + +@@ -1248,16 +1306,28 @@ int iscsi_target_start_negotiation( + { + int ret; + ++ if (conn->sock) { ++ struct sock *sk = conn->sock->sk; ++ ++ write_lock_bh(&sk->sk_callback_lock); ++ set_bit(LOGIN_FLAGS_READY, &conn->login_flags); ++ set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); ++ write_unlock_bh(&sk->sk_callback_lock); ++ } ++ /* ++ * If iscsi_target_do_login returns zero to signal more PDU ++ * exchanges are required to complete the login, go ahead and ++ * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection ++ * is still active. ++ * ++ * Otherwise if TCP connection dropped asynchronously, go ahead ++ * and perform connection cleanup now. 
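The login rework above funnels every flag test through the socket callback lock, so a check and the matching clear cannot race with the state-change callback. A compact pthread analogue of the iscsi_target_sk_check_and_clear() shape; the flag names echo the patch, the rest is illustrative:

    #include <pthread.h>
    #include <stdio.h>

    #define FLAG_CLOSED      (1u << 0)
    #define FLAG_INITIAL_PDU (1u << 1)

    static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int login_flags = FLAG_INITIAL_PDU;

    /* Returns nonzero if the connection already failed; otherwise clears
     * the given flag — both decided under the same lock. */
    static int check_and_clear(unsigned int flag)
    {
        int failed;

        pthread_mutex_lock(&cb_lock);
        failed = (login_flags & FLAG_CLOSED) != 0;
        if (!failed)
            login_flags &= ~flag;
        pthread_mutex_unlock(&cb_lock);
        return failed;
    }

    int main(void)
    {
        int failed = check_and_clear(FLAG_INITIAL_PDU);

        printf("failed=%d flags=0x%x\n", failed, login_flags);
        return 0;
    }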
++ */ + ret = iscsi_target_do_login(conn, login); +- if (!ret) { +- if (conn->sock) { +- struct sock *sk = conn->sock->sk; ++ if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) ++ ret = -1; + +- write_lock_bh(&sk->sk_callback_lock); +- set_bit(LOGIN_FLAGS_READY, &conn->login_flags); +- write_unlock_bh(&sk->sk_callback_lock); +- } +- } else if (ret < 0) { ++ if (ret < 0) { + cancel_delayed_work_sync(&conn->login_work); + cancel_delayed_work_sync(&conn->login_cleanup_work); + iscsi_target_restore_sock_callbacks(conn); +diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c +index f916d18ccb48..b070ddf1dc37 100644 +--- a/drivers/target/target_core_fabric_configfs.c ++++ b/drivers/target/target_core_fabric_configfs.c +@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link( + pr_err("Source se_lun->lun_se_dev does not exist\n"); + return -EINVAL; + } ++ if (lun->lun_shutdown) { ++ pr_err("Unable to create mappedlun symlink because" ++ " lun->lun_shutdown=true\n"); ++ return -EINVAL; ++ } + se_tpg = lun->lun_tpg; + + nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; +diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c +index 899c33b3c734..f69f4902dc07 100644 +--- a/drivers/target/target_core_tpg.c ++++ b/drivers/target/target_core_tpg.c +@@ -673,6 +673,8 @@ void core_tpg_remove_lun( + */ + struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); + ++ lun->lun_shutdown = true; ++ + core_clear_lun_from_tpg(lun, tpg); + /* + * Wait for any active I/O references to percpu se_lun->lun_ref to +@@ -694,6 +696,8 @@ void core_tpg_remove_lun( + } + if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) + hlist_del_rcu(&lun->link); ++ ++ lun->lun_shutdown = false; + mutex_unlock(&tpg->tpg_lun_mutex); + + percpu_ref_exit(&lun->lun_ref); +diff --git a/fs/ext4/file.c b/fs/ext4/file.c +index 8772bfc3415b..45ef9975caec 100644 +--- a/fs/ext4/file.c ++++ b/fs/ext4/file.c +@@ -500,6 +500,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, + lastoff = page_offset(page); + bh = head = page_buffers(page); + do { ++ if (lastoff + bh->b_size <= startoff) ++ goto next; + if (buffer_uptodate(bh) || + buffer_unwritten(bh)) { + if (whence == SEEK_DATA) +@@ -514,6 +516,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, + unlock_page(page); + goto out; + } ++next: + lastoff += bh->b_size; + bh = bh->b_this_page; + } while (bh != head); +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c +index 34038e3598d5..74516efd874c 100644 +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -1926,7 +1926,8 @@ retry: + n_desc_blocks = o_desc_blocks + + le16_to_cpu(es->s_reserved_gdt_blocks); + n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb); +- n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb); ++ n_blocks_count = (ext4_fsblk_t)n_group * ++ EXT4_BLOCKS_PER_GROUP(sb); + n_group--; /* set to last group number */ + } + +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 86e1cb899957..4f666368aa85 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -1078,6 +1078,8 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi) + unsigned int total, fsmeta; + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); ++ unsigned int main_segs, blocks_per_seg; ++ int i; + + total = le32_to_cpu(raw_super->segment_count); + fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); +@@ -1089,6 +1091,20 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi) + 
if (unlikely(fsmeta >= total)) + return 1; + ++ main_segs = le32_to_cpu(raw_super->segment_count_main); ++ blocks_per_seg = sbi->blocks_per_seg; ++ ++ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { ++ if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs || ++ le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) ++ return 1; ++ } ++ for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) { ++ if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs || ++ le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) ++ return 1; ++ } ++ + if (unlikely(f2fs_cp_error(sbi))) { + f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); + return 1; +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index 2ccccbfcd532..36f4695aa604 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -503,6 +503,10 @@ struct mm_struct { + * PROT_NONE or PROT_NUMA mapped page. + */ + bool tlb_flush_pending; ++#endif ++#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH ++ /* See flush_tlb_batched_pending() */ ++ bool tlb_flush_batched; + #endif + struct uprobes_state uprobes_state; + #ifdef CONFIG_X86_INTEL_MPX +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 352213b360d7..eff7c1fad26f 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -801,6 +801,16 @@ struct signal_struct { + + #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ + ++#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ ++ SIGNAL_STOP_CONTINUED) ++ ++static inline void signal_set_stop_flags(struct signal_struct *sig, ++ unsigned int flags) ++{ ++ WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); ++ sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; ++} ++ + /* If true, all threads except ->group_exit_task have pending SIGKILL */ + static inline int signal_group_exit(const struct signal_struct *sig) + { +diff --git a/include/linux/slab.h b/include/linux/slab.h +index 2037a861e367..8a2a9ffaf5de 100644 +--- a/include/linux/slab.h ++++ b/include/linux/slab.h +@@ -203,7 +203,7 @@ size_t ksize(const void *); + * (PAGE_SIZE*2). Larger requests are passed to the page allocator. + */ + #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) +-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) ++#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) + #ifndef KMALLOC_SHIFT_LOW + #define KMALLOC_SHIFT_LOW 3 + #endif +@@ -216,7 +216,7 @@ size_t ksize(const void *); + * be allocated from the same page. + */ + #define KMALLOC_SHIFT_HIGH PAGE_SHIFT +-#define KMALLOC_SHIFT_MAX 30 ++#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) + #ifndef KMALLOC_SHIFT_LOW + #define KMALLOC_SHIFT_LOW 3 + #endif +diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h +index 0197358f1e81..262d5c95dfc8 100644 +--- a/include/linux/workqueue.h ++++ b/include/linux/workqueue.h +@@ -311,6 +311,7 @@ enum { + + __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ + __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ ++ __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */ + + WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ + WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ +@@ -408,7 +409,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, + * Pointer to the allocated workqueue on success, %NULL on failure. + */ + #define alloc_ordered_workqueue(fmt, flags, args...) 
\ +- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) ++ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \ ++ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args) + + #define create_workqueue(name) \ + alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name)) +diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h +index e0f4109e64c6..c2aa73e5e6bb 100644 +--- a/include/net/iw_handler.h ++++ b/include/net/iw_handler.h +@@ -556,7 +556,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, + memcpy(stream + lcp_len, + ((char *) &iwe->u) + IW_EV_POINT_OFF, + IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); +- memcpy(stream + point_len, extra, iwe->u.data.length); ++ if (iwe->u.data.length && extra) ++ memcpy(stream + point_len, extra, iwe->u.data.length); + stream += event_len; + } + return stream; +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h +index ce13cf20f625..d33b17ba51d2 100644 +--- a/include/net/sctp/sctp.h ++++ b/include/net/sctp/sctp.h +@@ -444,6 +444,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member) + + #define _sctp_walk_params(pos, chunk, end, member)\ + for (pos.v = chunk->member;\ ++ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\ ++ (void *)chunk + end) &&\ + pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ + ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\ + pos.v += WORD_ROUND(ntohs(pos.p->length))) +@@ -454,6 +456,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length)) + #define _sctp_walk_errors(err, chunk_hdr, end)\ + for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ + sizeof(sctp_chunkhdr_t));\ ++ ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\ ++ (void *)chunk_hdr + end) &&\ + (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ + ntohs(err->length) >= sizeof(sctp_errhdr_t); \ + err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length)))) +diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h +index e0efe3fcf739..fdda45f26f75 100644 +--- a/include/target/iscsi/iscsi_target_core.h ++++ b/include/target/iscsi/iscsi_target_core.h +@@ -562,6 +562,7 @@ struct iscsi_conn { + #define LOGIN_FLAGS_READ_ACTIVE 1 + #define LOGIN_FLAGS_CLOSED 2 + #define LOGIN_FLAGS_READY 4 ++#define LOGIN_FLAGS_INITIAL_PDU 8 + unsigned long login_flags; + struct delayed_work login_work; + struct delayed_work login_cleanup_work; +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index ed66414b91f0..1adf8739980c 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -714,6 +714,7 @@ struct se_lun { + #define SE_LUN_LINK_MAGIC 0xffff7771 + u32 lun_link_magic; + u32 lun_access; ++ bool lun_shutdown; + u32 lun_index; + + /* RELATIVE TARGET PORT IDENTIFER */ +diff --git a/kernel/signal.c b/kernel/signal.c +index b92a047ddc82..5d50ea899b6d 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task) + * fresh group stop. Read comment in do_signal_stop() for details. + */ + if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { +- sig->flags = SIGNAL_STOP_STOPPED; ++ signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED); + return true; + } + return false; +@@ -845,7 +845,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force) + * will take ->siglock, notice SIGNAL_CLD_MASK, and + * notify its parent. 
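+	 * (signal_set_stop_flags(), added in sched.h above, replaces the
+	 * raw flag assignment below: it clears the other SIGNAL_STOP_*
+	 * bits and WARNs if the group is already exiting or core-dumping.)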
See get_signal_to_deliver(). + */ +- signal->flags = why | SIGNAL_STOP_CONTINUED; ++ signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED); + signal->group_stop_count = 0; + signal->group_exit_code = 0; + } +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 2c2f971f3e75..23231237f2e2 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -3647,8 +3647,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, + return -EINVAL; + + /* creating multiple pwqs breaks ordering guarantee */ +- if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) +- return -EINVAL; ++ if (!list_empty(&wq->pwqs)) { ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) ++ return -EINVAL; ++ ++ wq->flags &= ~__WQ_ORDERED; ++ } + + ctx = apply_wqattrs_prepare(wq, attrs); + +@@ -3834,6 +3838,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, + struct workqueue_struct *wq; + struct pool_workqueue *pwq; + ++ /* ++ * Unbound && max_active == 1 used to imply ordered, which is no ++ * longer the case on NUMA machines due to per-node pools. While ++ * alloc_ordered_workqueue() is the right way to create an ordered ++ * workqueue, keep the previous behavior to avoid subtle breakages ++ * on NUMA. ++ */ ++ if ((flags & WQ_UNBOUND) && max_active == 1) ++ flags |= __WQ_ORDERED; ++ + /* see the comment above the definition of WQ_POWER_EFFICIENT */ + if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) + flags |= WQ_UNBOUND; +@@ -4022,13 +4036,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) + struct pool_workqueue *pwq; + + /* disallow meddling with max_active for ordered workqueues */ +- if (WARN_ON(wq->flags & __WQ_ORDERED)) ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) + return; + + max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); + + mutex_lock(&wq->mutex); + ++ wq->flags &= ~__WQ_ORDERED; + wq->saved_max_active = max_active; + + for_each_pwq(pwq, wq) +@@ -5154,7 +5169,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) + * attributes breaks ordering guarantee. Disallow exposing ordered + * workqueues. + */ +- if (WARN_ON(wq->flags & __WQ_ORDERED)) ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) + return -EINVAL; + + wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 8c15b29d5adc..b53b375e14bd 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -145,7 +145,7 @@ config DEBUG_INFO_REDUCED + + config DEBUG_INFO_SPLIT + bool "Produce split debuginfo in .dwo files" +- depends on DEBUG_INFO ++ depends on DEBUG_INFO && !FRV + help + Generate debug info into separate .dwo files. 
This significantly + reduces the build directory size for builds with DEBUG_INFO, +diff --git a/mm/internal.h b/mm/internal.h +index 6979b2bd3227..f63f4393d633 100644 +--- a/mm/internal.h ++++ b/mm/internal.h +@@ -453,6 +453,7 @@ struct tlbflush_unmap_batch; + #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH + void try_to_unmap_flush(void); + void try_to_unmap_flush_dirty(void); ++void flush_tlb_batched_pending(struct mm_struct *mm); + #else + static inline void try_to_unmap_flush(void) + { +@@ -460,6 +461,8 @@ static inline void try_to_unmap_flush(void) + static inline void try_to_unmap_flush_dirty(void) + { + } +- ++static inline void flush_tlb_batched_pending(struct mm_struct *mm) ++{ ++} + #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ + #endif /* __MM_INTERNAL_H */ +diff --git a/mm/memory.c b/mm/memory.c +index e6fa13484447..9ac55172aa7b 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -1127,6 +1127,7 @@ again: + init_rss_vec(rss); + start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + pte = start_pte; ++ flush_tlb_batched_pending(mm); + arch_enter_lazy_mmu_mode(); + do { + pte_t ptent = *pte; +diff --git a/mm/mprotect.c b/mm/mprotect.c +index ef5be8eaab00..c0b4b2a49462 100644 +--- a/mm/mprotect.c ++++ b/mm/mprotect.c +@@ -72,6 +72,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, + if (!pte) + return 0; + ++ flush_tlb_batched_pending(vma->vm_mm); + arch_enter_lazy_mmu_mode(); + do { + oldpte = *pte; +diff --git a/mm/mremap.c b/mm/mremap.c +index c25bc6268e46..fe7b7f65f4f4 100644 +--- a/mm/mremap.c ++++ b/mm/mremap.c +@@ -135,6 +135,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, + new_ptl = pte_lockptr(mm, new_pmd); + if (new_ptl != old_ptl) + spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); ++ flush_tlb_batched_pending(vma->vm_mm); + arch_enter_lazy_mmu_mode(); + + for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index bd17a6bdf131..f9d648fce8cd 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -1527,14 +1527,14 @@ int move_freepages(struct zone *zone, + #endif + + for (page = start_page; page <= end_page;) { +- /* Make sure we are not inadvertently changing nodes */ +- VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); +- + if (!pfn_valid_within(page_to_pfn(page))) { + page++; + continue; + } + ++ /* Make sure we are not inadvertently changing nodes */ ++ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); ++ + if (!PageBuddy(page)) { + page++; + continue; +@@ -5847,8 +5847,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s) + } + + if (pages && s) +- pr_info("Freeing %s memory: %ldK (%p - %p)\n", +- s, pages << (PAGE_SHIFT - 10), start, end); ++ pr_info("Freeing %s memory: %ldK\n", ++ s, pages << (PAGE_SHIFT - 10)); + + return pages; + } +diff --git a/mm/rmap.c b/mm/rmap.c +index b577fbb98d4b..ede183c32f45 100644 +--- a/mm/rmap.c ++++ b/mm/rmap.c +@@ -648,6 +648,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, + cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm)); + tlb_ubc->flush_required = true; + ++ /* ++ * Ensure compiler does not re-order the setting of tlb_flush_batched ++ * before the PTE is cleared. ++ */ ++ barrier(); ++ mm->tlb_flush_batched = true; ++ + /* + * If the PTE was dirty then it's best to assume it's writable. 
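+	 * (Note: mm->tlb_flush_batched is read and cleared only under the
+	 * page table lock, in flush_tlb_batched_pending() below; the
+	 * barrier() above is a compiler barrier and by itself provides no
+	 * ordering between CPUs.)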
The + * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() +@@ -675,6 +682,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) + + return should_defer; + } ++ ++/* ++ * Reclaim unmaps pages under the PTL but do not flush the TLB prior to ++ * releasing the PTL if TLB flushes are batched. It's possible for a parallel ++ * operation such as mprotect or munmap to race between reclaim unmapping ++ * the page and flushing the page. If this race occurs, it potentially allows ++ * access to data via a stale TLB entry. Tracking all mm's that have TLB ++ * batching in flight would be expensive during reclaim so instead track ++ * whether TLB batching occurred in the past and if so then do a flush here ++ * if required. This will cost one additional flush per reclaim cycle paid ++ * by the first operation at risk such as mprotect and mumap. ++ * ++ * This must be called under the PTL so that an access to tlb_flush_batched ++ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise ++ * via the PTL. ++ */ ++void flush_tlb_batched_pending(struct mm_struct *mm) ++{ ++ if (mm->tlb_flush_batched) { ++ flush_tlb_mm(mm); ++ ++ /* ++ * Do not allow the compiler to re-order the clearing of ++ * tlb_flush_batched before the tlb is flushed. ++ */ ++ barrier(); ++ mm->tlb_flush_batched = false; ++ } ++} + #else + static void set_tlb_ubc_flush_pending(struct mm_struct *mm, + struct page *page, bool writable) +diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c +index b94b1d293506..151e047ce072 100644 +--- a/net/core/dev_ioctl.c ++++ b/net/core/dev_ioctl.c +@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) + + if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) + return -EFAULT; ++ ifr.ifr_name[IFNAMSIZ-1] = 0; + + error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex); + if (error) +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 2ec5324a7ff7..5b3d611d8b5f 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -1742,7 +1742,8 @@ static int do_setlink(const struct sk_buff *skb, + struct sockaddr *sa; + int len; + +- len = sizeof(sa_family_t) + dev->addr_len; ++ len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, ++ sizeof(*sa)); + sa = kmalloc(len, GFP_KERNEL); + if (!sa) { + err = -ENOMEM; +diff --git a/net/dccp/feat.c b/net/dccp/feat.c +index 1704948e6a12..f227f002c73d 100644 +--- a/net/dccp/feat.c ++++ b/net/dccp/feat.c +@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk) + * singleton values (which always leads to failure). + * These settings can still (later) be overridden via sockopts. 
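+	 * Note that tx.val and rx.val are allocated independently below,
+	 * so if the second allocation fails the first buffer has to be
+	 * freed explicitly, or it is leaked.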
+ */ +- if (ccid_get_builtin_ccids(&tx.val, &tx.len) || +- ccid_get_builtin_ccids(&rx.val, &rx.len)) ++ if (ccid_get_builtin_ccids(&tx.val, &tx.len)) + return -ENOBUFS; ++ if (ccid_get_builtin_ccids(&rx.val, &rx.len)) { ++ kfree(tx.val); ++ return -ENOBUFS; ++ } + + if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || + !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len)) +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c +index 6467bf392e1b..e217f17997a4 100644 +--- a/net/dccp/ipv4.c ++++ b/net/dccp/ipv4.c +@@ -635,6 +635,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) + goto drop_and_free; + + inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); ++ reqsk_put(req); + return 0; + + drop_and_free: +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 3470ad1843bb..09a9ab65f4e1 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -376,6 +376,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) + goto drop_and_free; + + inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); ++ reqsk_put(req); + return 0; + + drop_and_free: +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 66dcb529fd9c..0cb240c749bf 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -1319,13 +1319,14 @@ static struct pernet_operations fib_net_ops = { + + void __init ip_fib_init(void) + { +- rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); +- rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); +- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); ++ fib_trie_init(); + + register_pernet_subsys(&fib_net_ops); ++ + register_netdevice_notifier(&fib_netdev_notifier); + register_inetaddr_notifier(&fib_inetaddr_notifier); + +- fib_trie_init(); ++ rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); ++ rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); ++ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); + } +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 2b7283303650..5d58a6703a43 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -922,7 +922,8 @@ static int __ip_append_data(struct sock *sk, + csummode = CHECKSUM_PARTIAL; + + cork->length += length; +- if (((length > mtu) || (skb && skb_is_gso(skb))) && ++ if ((((length + (skb ? 
skb->len : fragheaderlen)) > mtu) || ++ (skb && skb_is_gso(skb))) && + (sk->sk_protocol == IPPROTO_UDP) && + (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && + (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c +index 4cbe9f0a4281..731b91409625 100644 +--- a/net/ipv4/syncookies.c ++++ b/net/ipv4/syncookies.c +@@ -337,6 +337,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) + treq = tcp_rsk(req); + treq->rcv_isn = ntohl(th->seq) - 1; + treq->snt_isn = cookie; ++ treq->txhash = net_tx_rndhash(); + req->mss = mss; + ireq->ir_num = ntohs(th->dest); + ireq->ir_rmt_port = th->source; +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 150b4923fb72..0de3245ea42f 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -647,8 +647,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + *prevhdr = NEXTHDR_FRAGMENT; + tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); + if (!tmp_hdr) { +- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), +- IPSTATS_MIB_FRAGFAILS); + err = -ENOMEM; + goto fail; + } +@@ -767,8 +765,6 @@ slow_path: + frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + + hroom + troom, GFP_ATOMIC); + if (!frag) { +- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), +- IPSTATS_MIB_FRAGFAILS); + err = -ENOMEM; + goto fail; + } +@@ -1361,7 +1357,7 @@ emsgsize: + */ + + cork->length += length; +- if ((((length + fragheaderlen) > mtu) || ++ if ((((length + (skb ? skb->len : headersize)) > mtu) || + (skb && skb_is_gso(skb))) && + (sk->sk_protocol == IPPROTO_UDP) && + (rt->dst.dev->features & NETIF_F_UFO) && +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c +index 8b56c5240429..f9f02581c4ca 100644 +--- a/net/ipv6/output_core.c ++++ b/net/ipv6/output_core.c +@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident); + + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) + { +- u16 offset = sizeof(struct ipv6hdr); ++ unsigned int offset = sizeof(struct ipv6hdr); + unsigned int packet_len = skb_tail_pointer(skb) - + skb_network_header(skb); + int found_rhdr = 0; +@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) + + while (offset <= packet_len) { + struct ipv6_opt_hdr *exthdr; ++ unsigned int len; + + switch (**nexthdr) { + +@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) + + exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + + offset); +- offset += ipv6_optlen(exthdr); ++ len = ipv6_optlen(exthdr); ++ if (len + offset >= IPV6_MAXPLEN) ++ return -EINVAL; ++ offset += len; + *nexthdr = &exthdr->nexthdr; + } + +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c +index eaf7ac496d50..aee87282d352 100644 +--- a/net/ipv6/syncookies.c ++++ b/net/ipv6/syncookies.c +@@ -210,6 +210,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) + treq->snt_synack.v64 = 0; + treq->rcv_isn = ntohl(th->seq) - 1; + treq->snt_isn = cookie; ++ treq->txhash = net_tx_rndhash(); + + /* + * We need to lookup the dst_entry to get the correct window size. 
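+
+(A note on the ip6_find_1stfragopt() change above, sketching the failure
+mode rather than extending the fix: ipv6_optlen() expands to roughly
+
+	((exthdr->hdrlen + 1) << 3)	/* at most 2048 bytes per header */
+
+so a long enough chain of extension headers could wrap the old u16
+offset and leave the parsing loop revisiting the same headers forever.
+Making offset an unsigned int and rejecting offsets at or beyond
+IPV6_MAXPLEN, past which no valid option can start, closes both the
+wrap-around and the out-of-bounds walk.)
+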
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c +index ad58d2a6284e..6a2507f24b0f 100644 +--- a/net/openvswitch/conntrack.c ++++ b/net/openvswitch/conntrack.c +@@ -577,8 +577,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, + + nla_for_each_nested(a, attr, rem) { + int type = nla_type(a); +- int maxlen = ovs_ct_attr_lens[type].maxlen; +- int minlen = ovs_ct_attr_lens[type].minlen; ++ int maxlen; ++ int minlen; + + if (type > OVS_CT_ATTR_MAX) { + OVS_NLERR(log, +@@ -586,6 +586,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, + type, OVS_CT_ATTR_MAX); + return -EINVAL; + } ++ ++ maxlen = ovs_ct_attr_lens[type].maxlen; ++ minlen = ovs_ct_attr_lens[type].minlen; + if (nla_len(a) < minlen || nla_len(a) > maxlen) { + OVS_NLERR(log, + "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)", +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index f8d6a0ca9c03..061771ca2582 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -4225,7 +4225,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + register_prot_hook(sk); + } + spin_unlock(&po->bind_lock); +- if (closing && (po->tp_version > TPACKET_V2)) { ++ if (pg_vec && (po->tp_version > TPACKET_V2)) { + /* Because we don't support block-based V3 on tx-ring */ + if (!tx_ring) + prb_shutdown_retire_blk_timer(po, rb_queue); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 46a34039ecdc..5cab24f52825 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -2233,6 +2233,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), + SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), ++ SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), + +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index a1e605bbc465..977066ba1769 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir, + dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n", + be->dai_link->name, event, dir); + ++ if ((event == SND_SOC_DAPM_STREAM_STOP) && ++ (be->dpcm[dir].users >= 1)) ++ continue; ++ + snd_soc_dapm_stream_event(be, dir, event); + } + diff --git a/patch/kernel/rk3328-default/patch-4.4.81-82.patch b/patch/kernel/rk3328-default/patch-4.4.81-82.patch new file mode 100644 index 000000000..f61b7675a --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.81-82.patch @@ -0,0 +1,330 @@ +diff --git a/Makefile b/Makefile +index d049e53a6960..52f2dd8dcebd 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 81 ++SUBLEVEL = 82 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c +index 1f1ff7e7b9cf..ba079e279b58 100644 +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -1629,12 +1629,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) + + int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) + { ++ if (!kvm->arch.pgd) ++ return 0; + trace_kvm_age_hva(start, 
end); + return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); + } + + int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) + { ++ if (!kvm->arch.pgd) ++ return 0; + trace_kvm_test_age_hva(hva); + return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL); + } +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c +index 0e2919dd8df3..1395eeb6005f 100644 +--- a/arch/s390/net/bpf_jit_comp.c ++++ b/arch/s390/net/bpf_jit_comp.c +@@ -1250,7 +1250,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp) + insn_count = bpf_jit_insn(jit, fp, i); + if (insn_count < 0) + return -1; +- jit->addrs[i + 1] = jit->prg; /* Next instruction address */ ++ /* Next instruction address */ ++ jit->addrs[i + insn_count] = jit->prg; + } + bpf_jit_epilogue(jit); + +diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h +index 349dd23e2876..0cdeb2b483a0 100644 +--- a/arch/sparc/include/asm/mmu_context_64.h ++++ b/arch/sparc/include/asm/mmu_context_64.h +@@ -25,9 +25,11 @@ void destroy_context(struct mm_struct *mm); + void __tsb_context_switch(unsigned long pgd_pa, + struct tsb_config *tsb_base, + struct tsb_config *tsb_huge, +- unsigned long tsb_descr_pa); ++ unsigned long tsb_descr_pa, ++ unsigned long secondary_ctx); + +-static inline void tsb_context_switch(struct mm_struct *mm) ++static inline void tsb_context_switch_ctx(struct mm_struct *mm, ++ unsigned long ctx) + { + __tsb_context_switch(__pa(mm->pgd), + &mm->context.tsb_block[0], +@@ -38,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm) + #else + NULL + #endif +- , __pa(&mm->context.tsb_descr[0])); ++ , __pa(&mm->context.tsb_descr[0]), ++ ctx); + } + ++#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0) ++ + void tsb_grow(struct mm_struct *mm, + unsigned long tsb_index, + unsigned long mm_rss); +@@ -110,8 +115,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str + * cpu0 to update it's TSB because at that point the cpu_vm_mask + * only had cpu1 set in it. 
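+	 * Folding the SECONDARY_CONTEXT write into __tsb_context_switch(),
+	 * which runs with interrupts disabled, keeps the context register
+	 * and the TSB registers updated as a single unit.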
+ */ +- load_secondary_context(mm); +- tsb_context_switch(mm); ++ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)); + + /* Any time a processor runs a context on an address space + * for the first time, we must flush that context out of the +diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S +index 395ec1800530..7d961f6e3907 100644 +--- a/arch/sparc/kernel/tsb.S ++++ b/arch/sparc/kernel/tsb.S +@@ -375,6 +375,7 @@ tsb_flush: + * %o1: TSB base config pointer + * %o2: TSB huge config pointer, or NULL if none + * %o3: Hypervisor TSB descriptor physical address ++ * %o4: Secondary context to load, if non-zero + * + * We have to run this whole thing with interrupts + * disabled so that the current cpu doesn't change +@@ -387,6 +388,17 @@ __tsb_context_switch: + rdpr %pstate, %g1 + wrpr %g1, PSTATE_IE, %pstate + ++ brz,pn %o4, 1f ++ mov SECONDARY_CONTEXT, %o5 ++ ++661: stxa %o4, [%o5] ASI_DMMU ++ .section .sun4v_1insn_patch, "ax" ++ .word 661b ++ stxa %o4, [%o5] ASI_MMU ++ .previous ++ flush %g6 ++ ++1: + TRAP_LOAD_TRAP_BLOCK(%g2, %g3) + + stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] +diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c +index 17bd2e167e07..df707a8ad311 100644 +--- a/arch/sparc/power/hibernate.c ++++ b/arch/sparc/power/hibernate.c +@@ -35,6 +35,5 @@ void restore_processor_state(void) + { + struct mm_struct *mm = current->active_mm; + +- load_secondary_context(mm); +- tsb_context_switch(mm); ++ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)); + } +diff --git a/mm/mempool.c b/mm/mempool.c +index 004d42b1dfaf..7924f4f58a6d 100644 +--- a/mm/mempool.c ++++ b/mm/mempool.c +@@ -135,8 +135,8 @@ static void *remove_element(mempool_t *pool) + void *element = pool->elements[--pool->curr_nr]; + + BUG_ON(pool->curr_nr < 0); +- check_element(pool, element); + kasan_unpoison_element(pool, element); ++ check_element(pool, element); + return element; + } + +diff --git a/net/core/dev.c b/net/core/dev.c +index 4b0853194a03..24d243084aab 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -2551,7 +2551,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) + { + if (tx_path) + return skb->ip_summed != CHECKSUM_PARTIAL && +- skb->ip_summed != CHECKSUM_NONE; ++ skb->ip_summed != CHECKSUM_UNNECESSARY; + + return skb->ip_summed == CHECKSUM_NONE; + } +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 5d58a6703a43..09c73dd541c5 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -922,11 +922,12 @@ static int __ip_append_data(struct sock *sk, + csummode = CHECKSUM_PARTIAL; + + cork->length += length; +- if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) || +- (skb && skb_is_gso(skb))) && ++ if ((skb && skb_is_gso(skb)) || ++ (((length + (skb ? 
skb->len : fragheaderlen)) > mtu) && ++ (skb_queue_len(queue) <= 1) && + (sk->sk_protocol == IPPROTO_UDP) && + (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && +- (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { ++ (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) { + err = ip_ufo_append_data(sk, queue, getfrag, from, length, + hh_len, fragheaderlen, transhdrlen, + maxfraglen, flags); +@@ -1242,6 +1243,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, + return -EINVAL; + + if ((size + skb->len > mtu) && ++ (skb_queue_len(&sk->sk_write_queue) == 1) && + (sk->sk_protocol == IPPROTO_UDP) && + (rt->dst.dev->features & NETIF_F_UFO)) { + if (skb->ip_summed != CHECKSUM_PARTIAL) +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 8f13b2eaabf8..f0dabd125c43 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -2503,8 +2503,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk) + struct tcp_sock *tp = tcp_sk(sk); + + /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ +- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || +- (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { ++ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && ++ (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { + tp->snd_cwnd = tp->snd_ssthresh; + tp->snd_cwnd_stamp = tcp_time_stamp; + } +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 3fdcdc730f71..850d1b5bfd81 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -3256,6 +3256,9 @@ int tcp_connect(struct sock *sk) + struct sk_buff *buff; + int err; + ++ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) ++ return -EHOSTUNREACH; /* Routing failure or similar. */ ++ + tcp_connect_init(sk); + + if (unlikely(tp->repair)) { +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c +index ebb34d0c5e80..1ec12a4f327e 100644 +--- a/net/ipv4/tcp_timer.c ++++ b/net/ipv4/tcp_timer.c +@@ -606,7 +606,8 @@ static void tcp_keepalive_timer (unsigned long data) + goto death; + } + +- if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) ++ if (!sock_flag(sk, SOCK_KEEPOPEN) || ++ ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) + goto out; + + elapsed = keepalive_time_when(tp); +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index e9513e397c4f..301e60829c7e 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -819,7 +819,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) + if (is_udplite) /* UDP-Lite */ + csum = udplite_csum(skb); + +- else if (sk->sk_no_check_tx) { /* UDP csum disabled */ ++ else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */ + + skb->ip_summed = CHECKSUM_NONE; + goto send; +diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c +index 6396f1c80ae9..6dfc3daf7c21 100644 +--- a/net/ipv4/udp_offload.c ++++ b/net/ipv4/udp_offload.c +@@ -231,7 +231,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + +- skb->ip_summed = CHECKSUM_NONE; ++ skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* Fragment the skb. IP headers of the fragments are updated in + * inet_gso_segment() +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 0de3245ea42f..e22339fad10b 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1357,11 +1357,12 @@ emsgsize: + */ + + cork->length += length; +- if ((((length + (skb ? 
skb->len : headersize)) > mtu) || +- (skb && skb_is_gso(skb))) && ++ if ((skb && skb_is_gso(skb)) || ++ (((length + (skb ? skb->len : headersize)) > mtu) && ++ (skb_queue_len(queue) <= 1) && + (sk->sk_protocol == IPPROTO_UDP) && + (rt->dst.dev->features & NETIF_F_UFO) && +- (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { ++ (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) { + err = ip6_ufo_append_data(sk, queue, getfrag, from, length, + hh_len, fragheaderlen, exthdrlen, + transhdrlen, mtu, flags, fl6); +diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c +index 01582966ffa0..2e3c12eeca07 100644 +--- a/net/ipv6/udp_offload.c ++++ b/net/ipv6/udp_offload.c +@@ -86,7 +86,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + +- skb->ip_summed = CHECKSUM_NONE; ++ skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* Check if there is enough headroom to insert fragment header. */ + tnl_hlen = skb_tnl_header_len(skb); +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 061771ca2582..148ec130d99d 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -3622,14 +3622,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv + + if (optlen != sizeof(val)) + return -EINVAL; +- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) +- return -EBUSY; + if (copy_from_user(&val, optval, sizeof(val))) + return -EFAULT; + if (val > INT_MAX) + return -EINVAL; +- po->tp_reserve = val; +- return 0; ++ lock_sock(sk); ++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ++ ret = -EBUSY; ++ } else { ++ po->tp_reserve = val; ++ ret = 0; ++ } ++ release_sock(sk); ++ return ret; + } + case PACKET_LOSS: + { +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c +index d05869646515..0915d448ba23 100644 +--- a/net/sched/act_ipt.c ++++ b/net/sched/act_ipt.c +@@ -42,8 +42,8 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int + return PTR_ERR(target); + + t->u.kernel.target = target; ++ memset(&par, 0, sizeof(par)); + par.table = table; +- par.entryinfo = NULL; + par.target = target; + par.targinfo = t->data; + par.hook_mask = hook; diff --git a/patch/kernel/rk3328-default/patch-4.4.82-83.patch b/patch/kernel/rk3328-default/patch-4.4.82-83.patch new file mode 100644 index 000000000..104c0c331 --- /dev/null +++ b/patch/kernel/rk3328-default/patch-4.4.82-83.patch @@ -0,0 +1,476 @@ +diff --git a/Makefile b/Makefile +index 52f2dd8dcebd..7f67b35caf99 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 82 ++SUBLEVEL = 83 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c +index fa24d5196615..c7122919a8c0 100644 +--- a/drivers/iio/accel/bmc150-accel-core.c ++++ b/drivers/iio/accel/bmc150-accel-core.c +@@ -194,7 +194,6 @@ struct bmc150_accel_data { + struct device *dev; + int irq; + struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; +- atomic_t active_intr; + struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; + struct mutex mutex; + u8 fifo_mode, watermark; +@@ -489,11 +488,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i, + goto out_fix_power_state; + } + +- if (state) +- atomic_inc(&data->active_intr); +- else +- atomic_dec(&data->active_intr); +- + return 0; + + out_fix_power_state: +@@ -1704,8 +1698,7 @@ static int bmc150_accel_resume(struct device *dev) + struct 
bmc150_accel_data *data = iio_priv(indio_dev); + + mutex_lock(&data->mutex); +- if (atomic_read(&data->active_intr)) +- bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); ++ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); + bmc150_accel_fifo_set_mode(data); + mutex_unlock(&data->mutex); + +diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c +index b10f629cc44b..1dbc2143cdfc 100644 +--- a/drivers/iio/adc/vf610_adc.c ++++ b/drivers/iio/adc/vf610_adc.c +@@ -77,7 +77,7 @@ + #define VF610_ADC_ADSTS_MASK 0x300 + #define VF610_ADC_ADLPC_EN 0x80 + #define VF610_ADC_ADHSC_EN 0x400 +-#define VF610_ADC_REFSEL_VALT 0x100 ++#define VF610_ADC_REFSEL_VALT 0x800 + #define VF610_ADC_REFSEL_VBG 0x1000 + #define VF610_ADC_ADTRG_HARD 0x2000 + #define VF610_ADC_AVGS_8 0x4000 +diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c +index 12731d6b89ec..ec1b2e798cc1 100644 +--- a/drivers/iio/light/tsl2563.c ++++ b/drivers/iio/light/tsl2563.c +@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private) + struct tsl2563_chip *chip = iio_priv(dev_info); + + iio_push_event(dev_info, +- IIO_UNMOD_EVENT_CODE(IIO_LIGHT, ++ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, + 0, + IIO_EV_TYPE_THRESH, + IIO_EV_DIR_EITHER), +diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c +index 71ccf6a90b22..2551e4adb33f 100644 +--- a/drivers/pinctrl/samsung/pinctrl-exynos.c ++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c +@@ -194,8 +194,6 @@ static int exynos_irq_request_resources(struct irq_data *irqd) + + spin_unlock_irqrestore(&bank->slock, flags); + +- exynos_irq_unmask(irqd); +- + return 0; + } + +@@ -216,8 +214,6 @@ static void exynos_irq_release_resources(struct irq_data *irqd) + shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC]; + mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; + +- exynos_irq_mask(irqd); +- + spin_lock_irqsave(&bank->slock, flags); + + con = readl(d->virt_base + reg_con); +diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +index 862a096c5dba..be5c71df148d 100644 +--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c ++++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +@@ -811,6 +811,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = { + SUNXI_FUNCTION(0x2, "lcd1"), /* D16 */ + SUNXI_FUNCTION(0x3, "pata"), /* ATAD12 */ + SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */ ++ SUNXI_FUNCTION(0x5, "sim"), /* DET */ + SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */ + SUNXI_FUNCTION(0x7, "csi1")), /* D16 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17), +diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c +index d97aa2827412..8eb7179da342 100644 +--- a/drivers/staging/iio/resolver/ad2s1210.c ++++ b/drivers/staging/iio/resolver/ad2s1210.c +@@ -468,7 +468,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev, + long m) + { + struct ad2s1210_state *st = iio_priv(indio_dev); +- bool negative; ++ u16 negative; + int ret = 0; + u16 pos; + s16 vel; +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 31d5d9c0e10b..1ff1c83e2df5 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -418,6 +418,7 @@ int iscsit_reset_np_thread( + return 0; + } + np->np_thread_state = ISCSI_NP_THREAD_RESET; ++ atomic_inc(&np->np_reset_count); + + if (np->np_thread) { + spin_unlock_bh(&np->np_thread_lock); +@@ -1996,6 +1997,7 @@ 
iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); + cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); + cmd->data_direction = DMA_NONE; ++ kfree(cmd->text_in_ptr); + cmd->text_in_ptr = NULL; + + return 0; +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index b19edffa7d98..bc2cbffec27e 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -1219,9 +1219,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) + flush_signals(current); + + spin_lock_bh(&np->np_thread_lock); +- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { ++ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; ++ spin_unlock_bh(&np->np_thread_lock); + complete(&np->np_restart_comp); ++ return 1; + } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { + spin_unlock_bh(&np->np_thread_lock); + goto exit; +@@ -1254,7 +1256,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) + goto exit; + } else if (rc < 0) { + spin_lock_bh(&np->np_thread_lock); +- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { ++ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { ++ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; + spin_unlock_bh(&np->np_thread_lock); + complete(&np->np_restart_comp); + iscsit_put_transport(conn->conn_transport); +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index b403596818db..5c0952995280 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -1851,7 +1851,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev, + /* No more submits can occur */ + spin_lock_irq(&hcd_urb_list_lock); + rescan: +- list_for_each_entry (urb, &ep->urb_list, urb_list) { ++ list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) { + int is_in; + + if (urb->unlinked) +@@ -2448,6 +2448,8 @@ void usb_hc_died (struct usb_hcd *hcd) + } + if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { + hcd = hcd->shared_hcd; ++ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); ++ set_bit(HCD_FLAG_DEAD, &hcd->flags); + if (hcd->rh_registered) { + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 1d59d489a1ad..cdf4be3939f5 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -4661,7 +4661,8 @@ hub_power_remaining(struct usb_hub *hub) + static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, + u16 portchange) + { +- int status, i; ++ int status = -ENODEV; ++ int i; + unsigned unit_load; + struct usb_device *hdev = hub->hdev; + struct usb_hcd *hcd = bus_to_hcd(hdev->bus); +@@ -4865,9 +4866,10 @@ loop: + + done: + hub_port_disable(hub, port1, 1); +- if (hcd->driver->relinquish_port && !hub->hdev->parent) +- hcd->driver->relinquish_port(hcd, port1); +- ++ if (hcd->driver->relinquish_port && !hub->hdev->parent) { ++ if (status != -ENOTCONN && status != -ENODEV) ++ hcd->driver->relinquish_port(hcd, port1); ++ } + } + + /* Handle physical or logical connection change events. 
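+
+(On the hub_port_connect() change above: relinquishing a port hands it to
+a companion controller, which is only useful while a device is still
+attached, so skipping it for -ENOTCONN/-ENODEV avoids bouncing a port
+whose device has already gone away. This is a reading of the intent, not
+an authoritative rationale.)
+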
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 3116edfcdc18..574da2b4529c 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = { + /* appletouch */ + { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, + ++ /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ ++ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, ++ + /* Avision AV600U */ + { USB_DEVICE(0x0638, 0x0a13), .driver_info = + USB_QUIRK_STRING_FETCH_255 }, +@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { + { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, ++ { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Logitech Optical Mouse M90/M100 */ + { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME }, +diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c +index f9400564cb72..03b9a372636f 100644 +--- a/drivers/usb/host/pci-quirks.c ++++ b/drivers/usb/host/pci-quirks.c +@@ -89,6 +89,7 @@ enum amd_chipset_gen { + AMD_CHIPSET_HUDSON2, + AMD_CHIPSET_BOLTON, + AMD_CHIPSET_YANGTZE, ++ AMD_CHIPSET_TAISHAN, + AMD_CHIPSET_UNKNOWN, + }; + +@@ -132,6 +133,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo) + pinfo->sb_type.gen = AMD_CHIPSET_SB700; + else if (rev >= 0x40 && rev <= 0x4f) + pinfo->sb_type.gen = AMD_CHIPSET_SB800; ++ } ++ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, ++ 0x145c, NULL); ++ if (pinfo->smbus_dev) { ++ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN; + } else { + pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); +@@ -251,11 +257,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev) + { + /* Make sure amd chipset type has already been initialized */ + usb_amd_find_chipset_info(); +- if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE) +- return 0; +- +- dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); +- return 1; ++ if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE || ++ amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) { ++ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); ++ return 1; ++ } ++ return 0; + } + EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); + +diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c +index 13d5614f37f1..0d843e0f8055 100644 +--- a/drivers/usb/musb/musb_host.c ++++ b/drivers/usb/musb/musb_host.c +@@ -138,6 +138,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) + "Could not flush host TX%d fifo: csr: %04x\n", + ep->epnum, csr)) + return; ++ mdelay(1); + } + } + +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index b0dc6da3d970..41a6513646de 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -135,6 +135,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ + { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ ++ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default 
*/ + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index ebe51f11105d..fe123153b1a5 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, ++ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c +index 1db4b61bdf7b..a51b28379850 100644 +--- a/drivers/usb/serial/pl2303.c ++++ b/drivers/usb/serial/pl2303.c +@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, ++ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485) }, + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, + { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, + { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h +index 09d9be88209e..3b5a15d1dc0d 100644 +--- a/drivers/usb/serial/pl2303.h ++++ b/drivers/usb/serial/pl2303.h +@@ -27,6 +27,7 @@ + #define ATEN_VENDOR_ID 0x0557 + #define ATEN_VENDOR_ID2 0x0547 + #define ATEN_PRODUCT_ID 0x2008 ++#define ATEN_PRODUCT_UC485 0x2021 + #define ATEN_PRODUCT_ID2 0x2118 + + #define IODATA_VENDOR_ID 0x04bb +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h +index 53341a77d89f..a37ed1e59e99 100644 +--- a/drivers/usb/storage/unusual_uas.h ++++ b/drivers/usb/storage/unusual_uas.h +@@ -123,9 +123,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, + /* Reported-by: Benjamin Tissoires */ + UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, + "Initio Corporation", +- "", ++ "INIC-3069", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, +- US_FL_NO_ATA_1X), ++ US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE), + + /* Reported-by: Tom Arild Naess */ + UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 11538a8be9f0..1a063cbfe503 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -46,7 +46,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) + { + struct fuse_file *ff; + +- ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); ++ ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL); + if (unlikely(!ff)) + return NULL; + +diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig +index f31fd0dd92c6..b1daeafbea92 100644 +--- a/fs/nfs/Kconfig ++++ b/fs/nfs/Kconfig +@@ -121,6 +121,7 @@ config PNFS_FILE_LAYOUT + config PNFS_BLOCK + tristate + depends on NFS_V4_1 && BLK_DEV_DM ++ depends on 64BIT || LBDAF + default NFS_V4 + + config PNFS_OBJLAYOUT +diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c +index e125e55de86d..2603d7589946 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c ++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c +@@ -30,6 +30,7 @@ void 
nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds) + { + nfs4_print_deviceid(&mirror_ds->id_node.deviceid); + nfs4_pnfs_ds_put(mirror_ds->ds); ++ kfree(mirror_ds->ds_versions); + kfree_rcu(mirror_ds, id_node.rcu); + } + +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h +index 85a868ccb493..8397dc235e84 100644 +--- a/include/linux/cpuset.h ++++ b/include/linux/cpuset.h +@@ -16,6 +16,7 @@ + + #ifdef CONFIG_CPUSETS + ++extern struct static_key cpusets_pre_enable_key; + extern struct static_key cpusets_enabled_key; + static inline bool cpusets_enabled(void) + { +@@ -30,12 +31,14 @@ static inline int nr_cpusets(void) + + static inline void cpuset_inc(void) + { ++ static_key_slow_inc(&cpusets_pre_enable_key); + static_key_slow_inc(&cpusets_enabled_key); + } + + static inline void cpuset_dec(void) + { + static_key_slow_dec(&cpusets_enabled_key); ++ static_key_slow_dec(&cpusets_pre_enable_key); + } + + extern int cpuset_init(void); +@@ -104,7 +107,7 @@ extern void cpuset_print_current_mems_allowed(void); + */ + static inline unsigned int read_mems_allowed_begin(void) + { +- if (!cpusets_enabled()) ++ if (!static_key_false(&cpusets_pre_enable_key)) + return 0; + + return read_seqcount_begin(¤t->mems_allowed_seq); +@@ -118,7 +121,7 @@ static inline unsigned int read_mems_allowed_begin(void) + */ + static inline bool read_mems_allowed_retry(unsigned int seq) + { +- if (!cpusets_enabled()) ++ if (!static_key_false(&cpusets_enabled_key)) + return false; + + return read_seqcount_retry(¤t->mems_allowed_seq, seq); +diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h +index fdda45f26f75..22f442ab85f9 100644 +--- a/include/target/iscsi/iscsi_target_core.h ++++ b/include/target/iscsi/iscsi_target_core.h +@@ -784,6 +784,7 @@ struct iscsi_np { + int np_sock_type; + enum np_thread_state_table np_thread_state; + bool enabled; ++ atomic_t np_reset_count; + enum iscsi_timer_flags_table np_login_timer_flags; + u32 np_exports; + enum np_flags_table np_flags; +diff --git a/kernel/cpuset.c b/kernel/cpuset.c +index 3b5e5430f5d0..8ccd66a97c8b 100644 +--- a/kernel/cpuset.c ++++ b/kernel/cpuset.c +@@ -60,6 +60,7 @@ + #include + #include + ++struct static_key cpusets_pre_enable_key __read_mostly = STATIC_KEY_INIT_FALSE; + struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE; + + /* See "Frequency meter" comments, below. */ +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index f9d648fce8cd..53286b2f5b1c 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -6804,7 +6804,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, + + /* Make sure the range is really isolated. */ + if (test_pages_isolated(outer_start, end, false)) { +- pr_info("%s: [%lx, %lx) PFNs busy\n", ++ pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n", + __func__, outer_start, end); + ret = -EBUSY; + goto done;